-rw-r--r-- Documentation/ABI/testing/sysfs-bus-usb | 11
-rw-r--r-- Documentation/ABI/testing/sysfs-platform-asus-laptop | 12
-rw-r--r-- Documentation/ABI/testing/sysfs-platform-eeepc-laptop | 10
-rw-r--r-- Documentation/DocBook/device-drivers.tmpl | 2
-rw-r--r-- Documentation/DocBook/deviceiobook.tmpl | 2
-rw-r--r-- Documentation/cdrom/ide-cd | 39
-rw-r--r-- Documentation/feature-removal-schedule.txt | 44
-rw-r--r-- Documentation/filesystems/nilfs2.txt | 3
-rw-r--r-- Documentation/filesystems/sharedsubtree.txt | 16
-rw-r--r-- Documentation/i2c/busses/i2c-i801 | 3
-rw-r--r-- Documentation/i2c/busses/i2c-parport | 3
-rw-r--r-- Documentation/i2c/busses/i2c-parport-light | 11
-rw-r--r-- Documentation/i2c/smbus-protocol | 16
-rw-r--r-- Documentation/i2c/writing-clients | 5
-rw-r--r-- Documentation/ioctl/ioctl-number.txt | 1
-rw-r--r-- Documentation/kernel-parameters.txt | 6
-rw-r--r-- Documentation/kprobes.txt | 207
-rw-r--r-- Documentation/kvm/api.txt | 12
-rw-r--r-- Documentation/laptops/thinkpad-acpi.txt | 4
-rw-r--r-- Documentation/networking/00-INDEX | 2
-rw-r--r-- Documentation/networking/cxacru-cf.py | 48
-rw-r--r-- Documentation/networking/cxacru.txt | 16
-rw-r--r-- Documentation/powerpc/dts-bindings/fsl/dma.txt | 8
-rw-r--r-- Documentation/usb/error-codes.txt | 6
-rw-r--r-- Documentation/usb/power-management.txt | 235
-rw-r--r-- MAINTAINERS | 3
-rw-r--r-- arch/Kconfig | 13
-rw-r--r-- arch/alpha/include/asm/local.h | 17
-rw-r--r-- arch/arm/configs/rx51_defconfig | 11
-rw-r--r-- arch/arm/include/asm/hardware/iop3xx-adma.h | 12
-rw-r--r-- arch/arm/mach-mx2/devices.c | 80
-rw-r--r-- arch/arm/mach-mx2/devices.h | 1
-rw-r--r-- arch/arm/mach-u300/include/mach/coh901318.h | 2
-rw-r--r-- arch/arm/plat-mxc/include/mach/mx21-usbhost.h | 38
-rw-r--r-- arch/avr32/mach-at32ap/at32ap700x.c | 7
-rw-r--r-- arch/blackfin/mach-common/entry.S | 4
-rw-r--r-- arch/cris/arch-v10/kernel/entry.S | 2
-rw-r--r-- arch/cris/arch-v32/mm/mmu.S | 2
-rw-r--r-- arch/ia64/include/asm/percpu.h | 4
-rw-r--r-- arch/ia64/include/asm/xen/events.h | 4
-rw-r--r-- arch/ia64/kernel/acpi.c | 4
-rw-r--r-- arch/ia64/kernel/ia64_ksyms.c | 4
-rw-r--r-- arch/ia64/kvm/Kconfig | 1
-rw-r--r-- arch/ia64/kvm/kvm-ia64.c | 50
-rw-r--r-- arch/ia64/kvm/kvm_fw.c | 28
-rw-r--r-- arch/ia64/kvm/mmio.c | 4
-rw-r--r-- arch/ia64/kvm/vcpu.c | 4
-rw-r--r-- arch/ia64/mm/discontig.c | 2
-rw-r--r-- arch/m32r/include/asm/local.h | 25
-rw-r--r-- arch/microblaze/include/asm/entry.h | 2
-rw-r--r-- arch/mips/include/asm/local.h | 25
-rw-r--r-- arch/parisc/lib/fixup.S | 8
-rw-r--r-- arch/powerpc/include/asm/kvm_asm.h | 6
-rw-r--r-- arch/powerpc/include/asm/kvm_book3s.h | 11
-rw-r--r-- arch/powerpc/include/asm/kvm_book3s_64_asm.h | 18
-rw-r--r-- arch/powerpc/include/asm/kvm_e500.h | 3
-rw-r--r-- arch/powerpc/include/asm/kvm_host.h | 23
-rw-r--r-- arch/powerpc/include/asm/kvm_ppc.h | 83
-rw-r--r-- arch/powerpc/include/asm/local.h | 25
-rw-r--r-- arch/powerpc/include/asm/paca.h | 5
-rw-r--r-- arch/powerpc/include/asm/reg.h | 4
-rw-r--r-- arch/powerpc/kernel/asm-offsets.c | 33
-rw-r--r-- arch/powerpc/kernel/ppc_ksyms.c | 1
-rw-r--r-- arch/powerpc/kvm/44x_emulate.c | 25
-rw-r--r-- arch/powerpc/kvm/44x_tlb.c | 20
-rw-r--r-- arch/powerpc/kvm/Kconfig | 1
-rw-r--r-- arch/powerpc/kvm/book3s.c | 309
-rw-r--r-- arch/powerpc/kvm/book3s_64_emulate.c | 77
-rw-r--r-- arch/powerpc/kvm/book3s_64_exports.c | 8
-rw-r--r-- arch/powerpc/kvm/book3s_64_interrupts.S | 336
-rw-r--r-- arch/powerpc/kvm/book3s_64_mmu.c | 10
-rw-r--r-- arch/powerpc/kvm/book3s_64_rmhandlers.S | 119
-rw-r--r-- arch/powerpc/kvm/book3s_64_slb.S | 160
-rw-r--r-- arch/powerpc/kvm/booke.c | 87
-rw-r--r-- arch/powerpc/kvm/booke_emulate.c | 107
-rw-r--r-- arch/powerpc/kvm/e500.c | 6
-rw-r--r-- arch/powerpc/kvm/e500_emulate.c | 93
-rw-r--r-- arch/powerpc/kvm/e500_tlb.c | 10
-rw-r--r-- arch/powerpc/kvm/emulate.c | 118
-rw-r--r-- arch/powerpc/kvm/powerpc.c | 40
-rw-r--r-- arch/s390/hypfs/inode.c | 42
-rw-r--r-- arch/s390/kvm/kvm-s390.c | 26
-rw-r--r-- arch/s390/kvm/kvm-s390.h | 10
-rw-r--r-- arch/sparc/configs/sparc32_defconfig | 56
-rw-r--r-- arch/sparc/configs/sparc64_defconfig | 34
-rw-r--r-- arch/sparc/include/asm/io_32.h | 4
-rw-r--r-- arch/sparc/include/asm/io_64.h | 4
-rw-r--r-- arch/sparc/include/asm/perfctr.h | 4
-rw-r--r-- arch/sparc/include/asm/system_64.h | 15
-rw-r--r-- arch/sparc/include/asm/thread_info_64.h | 25
-rw-r--r-- arch/sparc/kernel/entry.h | 1
-rw-r--r-- arch/sparc/kernel/nmi.c | 7
-rw-r--r-- arch/sparc/kernel/process_64.c | 23
-rw-r--r-- arch/sparc/kernel/rtrap_64.S | 62
-rw-r--r-- arch/sparc/kernel/sys32.S | 1
-rw-r--r-- arch/sparc/kernel/sys_sparc_64.c | 104
-rw-r--r-- arch/sparc/kernel/syscalls.S | 23
-rw-r--r-- arch/sparc/kernel/systbls.h | 2
-rw-r--r-- arch/sparc/kernel/systbls_64.S | 4
-rw-r--r-- arch/sparc/kernel/traps_64.c | 9
-rw-r--r-- arch/sparc/prom/p1275.c | 12
-rw-r--r-- arch/um/drivers/mconsole_kern.c | 2
-rw-r--r-- arch/x86/Kconfig | 16
-rw-r--r-- arch/x86/ia32/ia32_aout.c | 1
-rw-r--r-- arch/x86/include/asm/Kbuild | 1
-rw-r--r-- arch/x86/include/asm/alternative.h | 4
-rw-r--r-- arch/x86/include/asm/e820.h | 5
-rw-r--r-- arch/x86/include/asm/highmem.h | 4
-rw-r--r-- arch/x86/include/asm/hyperv.h | 186
-rw-r--r-- arch/x86/include/asm/i8259.h | 2
-rw-r--r-- arch/x86/include/asm/io_apic.h | 1
-rw-r--r-- arch/x86/include/asm/irq.h | 1
-rw-r--r-- arch/x86/include/asm/irq_vectors.h | 48
-rw-r--r-- arch/x86/include/asm/kprobes.h | 31
-rw-r--r-- arch/x86/include/asm/kvm_emulate.h | 17
-rw-r--r-- arch/x86/include/asm/kvm_host.h | 60
-rw-r--r-- arch/x86/include/asm/kvm_para.h | 1
-rw-r--r-- arch/x86/include/asm/local.h | 37
-rw-r--r-- arch/x86/include/asm/paravirt.h | 9
-rw-r--r-- arch/x86/include/asm/paravirt_types.h | 4
-rw-r--r-- arch/x86/include/asm/pci.h | 2
-rw-r--r-- arch/x86/include/asm/pci_64.h | 2
-rw-r--r-- arch/x86/include/asm/percpu.h | 119
-rw-r--r-- arch/x86/include/asm/pgtable_32.h | 4
-rw-r--r-- arch/x86/include/asm/proto.h | 10
-rw-r--r-- arch/x86/include/asm/svm.h | 2
-rw-r--r-- arch/x86/include/asm/system.h | 8
-rw-r--r-- arch/x86/include/asm/vmx.h | 5
-rw-r--r-- arch/x86/kernel/acpi/boot.c | 9
-rw-r--r-- arch/x86/kernel/alternative.c | 60
-rw-r--r-- arch/x86/kernel/apic/io_apic.c | 258
-rw-r--r-- arch/x86/kernel/apic/nmi.c | 12
-rw-r--r-- arch/x86/kernel/cpu/mtrr/cleanup.c | 208
-rw-r--r-- arch/x86/kernel/cpu/mtrr/main.c | 1
-rw-r--r-- arch/x86/kernel/e820.c | 349
-rw-r--r-- arch/x86/kernel/head32.c | 10
-rw-r--r-- arch/x86/kernel/head_32.S | 6
-rw-r--r-- arch/x86/kernel/i8259.c | 30
-rw-r--r-- arch/x86/kernel/irqinit.c | 35
-rw-r--r-- arch/x86/kernel/kprobes.c | 609
-rw-r--r-- arch/x86/kernel/mmconf-fam10h_64.c | 7
-rw-r--r-- arch/x86/kernel/paravirt.c | 4
-rw-r--r-- arch/x86/kernel/pci-dma.c | 13
-rw-r--r-- arch/x86/kernel/reboot.c | 8
-rw-r--r-- arch/x86/kernel/setup.c | 10
-rw-r--r-- arch/x86/kernel/setup_percpu.c | 6
-rw-r--r-- arch/x86/kernel/smpboot.c | 13
-rw-r--r-- arch/x86/kernel/time.c | 4
-rw-r--r-- arch/x86/kernel/visws_quirks.c | 6
-rw-r--r-- arch/x86/kernel/vmi_32.c | 35
-rw-r--r-- arch/x86/kernel/vmiclock_32.c | 6
-rw-r--r-- arch/x86/kernel/vmlinux.lds.S | 4
-rw-r--r-- arch/x86/kernel/vsyscall_64.c | 3
-rw-r--r-- arch/x86/kvm/Kconfig | 1
-rw-r--r-- arch/x86/kvm/emulate.c | 440
-rw-r--r-- arch/x86/kvm/i8254.c | 23
-rw-r--r-- arch/x86/kvm/i8254.h | 2
-rw-r--r-- arch/x86/kvm/i8259.c | 46
-rw-r--r-- arch/x86/kvm/irq.h | 3
-rw-r--r-- arch/x86/kvm/kvm_cache_regs.h | 31
-rw-r--r-- arch/x86/kvm/lapic.c | 31
-rw-r--r-- arch/x86/kvm/lapic.h | 8
-rw-r--r-- arch/x86/kvm/mmu.c | 137
-rw-r--r-- arch/x86/kvm/mmu.h | 35
-rw-r--r-- arch/x86/kvm/paging_tmpl.h | 13
-rw-r--r-- arch/x86/kvm/svm.c | 237
-rw-r--r-- arch/x86/kvm/trace.h | 59
-rw-r--r-- arch/x86/kvm/vmx.c | 396
-rw-r--r-- arch/x86/kvm/x86.c | 1098
-rw-r--r-- arch/x86/kvm/x86.h | 30
-rw-r--r-- arch/x86/mm/init_32.c | 7
-rw-r--r-- arch/x86/mm/init_64.c | 9
-rw-r--r-- arch/x86/mm/numa_32.c | 3
-rw-r--r-- arch/x86/mm/numa_64.c | 97
-rw-r--r-- arch/x86/pci/Makefile | 3
-rw-r--r-- arch/x86/pci/amd_bus.c | 127
-rw-r--r-- arch/x86/pci/bus_numa.c | 25
-rw-r--r-- arch/x86/pci/bus_numa.h | 9
-rw-r--r-- arch/x86/pci/i386.c | 4
-rw-r--r-- arch/x86/xen/enlighten.c | 7
-rw-r--r-- arch/x86/xen/mmu.c | 21
-rw-r--r-- arch/x86/xen/xen-asm_32.S | 4
-rw-r--r-- crypto/ahash.c | 1
-rw-r--r-- crypto/authenc.c | 27
-rw-r--r-- crypto/cryptd.c | 2
-rw-r--r-- crypto/md5.c | 1
-rw-r--r-- drivers/acpi/numa.c | 4
-rw-r--r-- drivers/acpi/processor_perflib.c | 2
-rw-r--r-- drivers/char/agp/intel-agp.c | 123
-rw-r--r-- drivers/char/cyclades.c | 16
-rw-r--r-- drivers/char/hvc_console.c | 2
-rw-r--r-- drivers/char/ip2/ip2main.c | 26
-rw-r--r-- drivers/char/isicom.c | 54
-rw-r--r-- drivers/char/moxa.c | 20
-rw-r--r-- drivers/char/mxser.c | 3
-rw-r--r-- drivers/char/nozomi.c | 157
-rw-r--r-- drivers/char/serial167.c | 3
-rw-r--r-- drivers/char/specialix.c | 2
-rw-r--r-- drivers/char/synclink.c | 4
-rw-r--r-- drivers/char/synclink_gt.c | 186
-rw-r--r-- drivers/char/tty_buffer.c | 17
-rw-r--r-- drivers/char/tty_ldisc.c | 50
-rw-r--r-- drivers/char/vt_ioctl.c | 39
-rw-r--r-- drivers/dma/Kconfig | 23
-rw-r--r-- drivers/dma/Makefile | 8
-rw-r--r-- drivers/dma/coh901318.c | 182
-rw-r--r-- drivers/dma/coh901318_lli.c | 23
-rw-r--r-- drivers/dma/dmaengine.c | 2
-rw-r--r-- drivers/dma/dmatest.c | 8
-rw-r--r-- drivers/dma/fsldma.c | 1177
-rw-r--r-- drivers/dma/fsldma.h | 35
-rw-r--r-- drivers/dma/ioat/dma.c | 46
-rw-r--r-- drivers/dma/ioat/dma.h | 11
-rw-r--r-- drivers/dma/ioat/dma_v2.c | 70
-rw-r--r-- drivers/dma/ioat/dma_v2.h | 6
-rw-r--r-- drivers/dma/ioat/dma_v3.c | 64
-rw-r--r-- drivers/dma/ioat/registers.h | 2
-rw-r--r-- drivers/dma/ipu/ipu_idmac.c | 15
-rw-r--r-- drivers/dma/mpc512x_dma.c | 800
-rw-r--r-- drivers/dma/ppc4xx/adma.c | 2
-rw-r--r-- drivers/edac/amd64_edac.c | 39
-rw-r--r-- drivers/edac/amd64_edac.h | 3
-rw-r--r-- drivers/firewire/core-cdev.c | 368
-rw-r--r-- drivers/firewire/core-device.c | 198
-rw-r--r-- drivers/firewire/core-transaction.c | 17
-rw-r--r-- drivers/firewire/core.h | 2
-rw-r--r-- drivers/firewire/ohci.c | 364
-rw-r--r-- drivers/firewire/sbp2.c | 5
-rw-r--r-- drivers/gpu/drm/Makefile | 2
-rw-r--r-- drivers/gpu/drm/drm_buffer.c | 184
-rw-r--r-- drivers/gpu/drm/drm_crtc_helper.c | 6
-rw-r--r-- drivers/gpu/drm/drm_drv.c | 44
-rw-r--r-- drivers/gpu/drm/drm_edid.c | 30
-rw-r--r-- drivers/gpu/drm/drm_fb_helper.c | 26
-rw-r--r-- drivers/gpu/drm/drm_gem.c | 70
-rw-r--r-- drivers/gpu/drm/i915/i915_debugfs.c | 253
-rw-r--r-- drivers/gpu/drm/i915/i915_dma.c | 326
-rw-r--r-- drivers/gpu/drm/i915/i915_drv.c | 27
-rw-r--r-- drivers/gpu/drm/i915/i915_drv.h | 69
-rw-r--r-- drivers/gpu/drm/i915/i915_gem.c | 430
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_tiling.c | 169
-rw-r--r-- drivers/gpu/drm/i915/i915_irq.c | 313
-rw-r--r-- drivers/gpu/drm/i915/i915_reg.h | 170
-rw-r--r-- drivers/gpu/drm/i915/i915_suspend.c | 10
-rw-r--r-- drivers/gpu/drm/i915/intel_bios.c | 3
-rw-r--r-- drivers/gpu/drm/i915/intel_crt.c | 14
-rw-r--r-- drivers/gpu/drm/i915/intel_display.c | 216
-rw-r--r-- drivers/gpu/drm/i915/intel_dp.c | 6
-rw-r--r-- drivers/gpu/drm/i915/intel_drv.h | 2
-rw-r--r-- drivers/gpu/drm/i915/intel_fb.c | 2
-rw-r--r-- drivers/gpu/drm/i915/intel_hdmi.c | 4
-rw-r--r-- drivers/gpu/drm/i915/intel_i2c.c | 2
-rw-r--r-- drivers/gpu/drm/i915/intel_lvds.c | 41
-rw-r--r-- drivers/gpu/drm/i915/intel_overlay.c | 29
-rw-r--r-- drivers/gpu/drm/i915/intel_sdvo.c | 23
-rw-r--r-- drivers/gpu/drm/nouveau/Makefile | 2
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_acpi.c | 160
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_bios.c | 339
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_bios.h | 126
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_calc.c | 4
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_channel.c | 39
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_connector.c | 167
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_connector.h | 3
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_debugfs.c | 24
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_display.c | 7
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_dma.c | 108
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_dma.h | 21
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_drv.c | 13
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_drv.h | 53
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_fbcon.c | 6
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_gem.c | 508
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_hw.c | 6
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_i2c.c | 10
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_irq.c | 5
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_notifier.c | 9
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_state.c | 40
-rw-r--r-- drivers/gpu/drm/nouveau/nv04_crtc.c | 4
-rw-r--r-- drivers/gpu/drm/nouveau/nv04_dac.c | 8
-rw-r--r-- drivers/gpu/drm/nouveau/nv04_dfp.c | 4
-rw-r--r-- drivers/gpu/drm/nouveau/nv04_display.c | 49
-rw-r--r-- drivers/gpu/drm/nouveau/nv04_fbcon.c | 2
-rw-r--r-- drivers/gpu/drm/nouveau/nv04_fifo.c | 5
-rw-r--r-- drivers/gpu/drm/nouveau/nv04_tv.c | 2
-rw-r--r-- drivers/gpu/drm/nouveau/nv17_tv.c | 6
-rw-r--r-- drivers/gpu/drm/nouveau/nv40_fifo.c | 5
-rw-r--r-- drivers/gpu/drm/nouveau/nv50_crtc.c | 4
-rw-r--r-- drivers/gpu/drm/nouveau/nv50_dac.c | 4
-rw-r--r-- drivers/gpu/drm/nouveau/nv50_display.c | 54
-rw-r--r-- drivers/gpu/drm/nouveau/nv50_fbcon.c | 2
-rw-r--r-- drivers/gpu/drm/nouveau/nv50_fifo.c | 13
-rw-r--r-- drivers/gpu/drm/nouveau/nv50_graph.c | 74
-rw-r--r-- drivers/gpu/drm/nouveau/nv50_grctx.c | 2367
-rw-r--r-- drivers/gpu/drm/nouveau/nv50_instmem.c | 2
-rw-r--r-- drivers/gpu/drm/radeon/Makefile | 9
-rw-r--r-- drivers/gpu/drm/radeon/atom.c | 4
-rw-r--r-- drivers/gpu/drm/radeon/atombios.h | 7300
-rw-r--r-- drivers/gpu/drm/radeon/atombios_crtc.c | 456
-rw-r--r-- drivers/gpu/drm/radeon/atombios_dp.c | 64
-rw-r--r-- drivers/gpu/drm/radeon/avivod.h | 2
-rw-r--r-- drivers/gpu/drm/radeon/evergreen.c | 767
-rw-r--r-- drivers/gpu/drm/radeon/evergreen_reg.h | 176
-rw-r--r-- drivers/gpu/drm/radeon/r100.c | 176
-rw-r--r-- drivers/gpu/drm/radeon/r200.c | 46
-rw-r--r-- drivers/gpu/drm/radeon/r300.c | 157
-rw-r--r-- drivers/gpu/drm/radeon/r300_cmdbuf.c | 280
-rw-r--r-- drivers/gpu/drm/radeon/r300_reg.h | 2
-rw-r--r-- drivers/gpu/drm/radeon/r420.c | 49
-rw-r--r-- drivers/gpu/drm/radeon/r500_reg.h | 100
-rw-r--r-- drivers/gpu/drm/radeon/r520.c | 21
-rw-r--r-- drivers/gpu/drm/radeon/r600.c | 190
-rw-r--r-- drivers/gpu/drm/radeon/r600_audio.c | 21
-rw-r--r-- drivers/gpu/drm/radeon/r600_blit.c | 2
-rw-r--r-- drivers/gpu/drm/radeon/r600_blit_kms.c | 17
-rw-r--r-- drivers/gpu/drm/radeon/r600_blit_shaders.c | 10
-rw-r--r-- drivers/gpu/drm/radeon/r600_cp.c | 262
-rw-r--r-- drivers/gpu/drm/radeon/r600_cs.c | 831
-rw-r--r-- drivers/gpu/drm/radeon/r600d.h | 467
-rw-r--r-- drivers/gpu/drm/radeon/radeon.h | 167
-rw-r--r-- drivers/gpu/drm/radeon/radeon_agp.c | 4
-rw-r--r-- drivers/gpu/drm/radeon/radeon_asic.h | 172
-rw-r--r-- drivers/gpu/drm/radeon/radeon_atombios.c | 435
-rw-r--r-- drivers/gpu/drm/radeon/radeon_atpx_handler.c | 257
-rw-r--r-- drivers/gpu/drm/radeon/radeon_bios.c | 50
-rw-r--r-- drivers/gpu/drm/radeon/radeon_clocks.c | 18
-rw-r--r-- drivers/gpu/drm/radeon/radeon_combios.c | 290
-rw-r--r-- drivers/gpu/drm/radeon/radeon_connectors.c | 20
-rw-r--r-- drivers/gpu/drm/radeon/radeon_cp.c | 1
-rw-r--r-- drivers/gpu/drm/radeon/radeon_cs.c | 7
-rw-r--r-- drivers/gpu/drm/radeon/radeon_cursor.c | 50
-rw-r--r-- drivers/gpu/drm/radeon/radeon_device.c | 235
-rw-r--r-- drivers/gpu/drm/radeon/radeon_display.c | 332
-rw-r--r-- drivers/gpu/drm/radeon/radeon_drv.c | 14
-rw-r--r-- drivers/gpu/drm/radeon/radeon_drv.h | 46
-rw-r--r-- drivers/gpu/drm/radeon/radeon_encoders.c | 354
-rw-r--r-- drivers/gpu/drm/radeon/radeon_family.h | 5
-rw-r--r-- drivers/gpu/drm/radeon/radeon_fb.c | 12
-rw-r--r-- drivers/gpu/drm/radeon/radeon_gart.c | 32
-rw-r--r-- drivers/gpu/drm/radeon/radeon_gem.c | 36
-rw-r--r-- drivers/gpu/drm/radeon/radeon_i2c.c | 768
-rw-r--r-- drivers/gpu/drm/radeon/radeon_kms.c | 27
-rw-r--r-- drivers/gpu/drm/radeon/radeon_legacy_crtc.c | 29
-rw-r--r-- drivers/gpu/drm/radeon/radeon_legacy_encoders.c | 20
-rw-r--r-- drivers/gpu/drm/radeon/radeon_mode.h | 55
-rw-r--r-- drivers/gpu/drm/radeon/radeon_object.c | 3
-rw-r--r-- drivers/gpu/drm/radeon/radeon_pm.c | 399
-rw-r--r-- drivers/gpu/drm/radeon/radeon_reg.h | 50
-rw-r--r-- drivers/gpu/drm/radeon/radeon_ring.c | 67
-rw-r--r-- drivers/gpu/drm/radeon/radeon_state.c | 203
-rw-r--r-- drivers/gpu/drm/radeon/radeon_test.c | 2
-rw-r--r-- drivers/gpu/drm/radeon/radeon_ttm.c | 12
-rw-r--r-- drivers/gpu/drm/radeon/reg_srcs/r600 | 837
-rw-r--r-- drivers/gpu/drm/radeon/rs400.c | 39
-rw-r--r-- drivers/gpu/drm/radeon/rs600.c | 56
-rw-r--r-- drivers/gpu/drm/radeon/rs690.c | 41
-rw-r--r-- drivers/gpu/drm/radeon/rv515.c | 21
-rw-r--r-- drivers/gpu/drm/radeon/rv770.c | 259
-rw-r--r-- drivers/gpu/drm/radeon/rv770d.h | 2
-rw-r--r-- drivers/gpu/drm/ttm/ttm_tt.c | 18
-rw-r--r-- drivers/gpu/vga/Kconfig | 11
-rw-r--r-- drivers/gpu/vga/Makefile | 1
-rw-r--r-- drivers/gpu/vga/vga_switcheroo.c | 450
-rw-r--r-- drivers/hid/usbhid/hiddev.c | 7
-rw-r--r-- drivers/i2c/Kconfig | 10
-rw-r--r-- drivers/i2c/Makefile | 1
-rw-r--r-- drivers/i2c/algos/i2c-algo-pca.c | 2
-rw-r--r-- drivers/i2c/busses/Kconfig | 7
-rw-r--r-- drivers/i2c/busses/i2c-ali1535.c | 2
-rw-r--r-- drivers/i2c/busses/i2c-ali1563.c | 2
-rw-r--r-- drivers/i2c/busses/i2c-ali15x3.c | 2
-rw-r--r-- drivers/i2c/busses/i2c-amd756.c | 2
-rw-r--r-- drivers/i2c/busses/i2c-amd8111.c | 2
-rw-r--r-- drivers/i2c/busses/i2c-hydra.c | 2
-rw-r--r-- drivers/i2c/busses/i2c-i801.c | 7
-rw-r--r-- drivers/i2c/busses/i2c-isch.c | 2
-rw-r--r-- drivers/i2c/busses/i2c-nforce2.c | 2
-rw-r--r-- drivers/i2c/busses/i2c-parport-light.c | 48
-rw-r--r-- drivers/i2c/busses/i2c-parport.c | 43
-rw-r--r-- drivers/i2c/busses/i2c-parport.h | 4
-rw-r--r-- drivers/i2c/busses/i2c-pasemi.c | 2
-rw-r--r-- drivers/i2c/busses/i2c-piix4.c | 2
-rw-r--r-- drivers/i2c/busses/i2c-sis5595.c | 2
-rw-r--r-- drivers/i2c/busses/i2c-sis630.c | 2
-rw-r--r-- drivers/i2c/busses/i2c-sis96x.c | 2
-rw-r--r-- drivers/i2c/busses/i2c-tiny-usb.c | 10
-rw-r--r-- drivers/i2c/busses/i2c-via.c | 2
-rw-r--r-- drivers/i2c/busses/i2c-viapro.c | 2
-rw-r--r-- drivers/i2c/i2c-core.c | 54
-rw-r--r-- drivers/i2c/i2c-smbus.c | 263
-rw-r--r-- drivers/ide/aec62xx.c | 13
-rw-r--r-- drivers/ide/ali14xx.c | 3
-rw-r--r-- drivers/ide/alim15x3.c | 171
-rw-r--r-- drivers/ide/amd74xx.c | 18
-rw-r--r-- drivers/ide/at91_ide.c | 5
-rw-r--r-- drivers/ide/atiixp.c | 14
-rw-r--r-- drivers/ide/au1xxx-ide.c | 13
-rw-r--r-- drivers/ide/cmd640.c | 5
-rw-r--r-- drivers/ide/cmd64x.c | 114
-rw-r--r-- drivers/ide/cs5520.c | 9
-rw-r--r-- drivers/ide/cs5530.c | 13
-rw-r--r-- drivers/ide/cs5535.c | 14
-rw-r--r-- drivers/ide/cs5536.c | 16
-rw-r--r-- drivers/ide/cy82c693.c | 146
-rw-r--r-- drivers/ide/dtc2278.c | 4
-rw-r--r-- drivers/ide/hpt366.c | 9
-rw-r--r-- drivers/ide/ht6560b.c | 3
-rw-r--r-- drivers/ide/icside.c | 67
-rw-r--r-- drivers/ide/ide-cs.c | 23
-rw-r--r-- drivers/ide/ide-devsets.c | 6
-rw-r--r-- drivers/ide/ide-iops.c | 2
-rw-r--r-- drivers/ide/ide-probe.c | 2
-rw-r--r-- drivers/ide/ide-tape.c | 14
-rw-r--r-- drivers/ide/ide-timings.c | 18
-rw-r--r-- drivers/ide/ide-xfer-mode.c | 18
-rw-r--r-- drivers/ide/it8172.c | 14
-rw-r--r-- drivers/ide/it8213.c | 20
-rw-r--r-- drivers/ide/it821x.c | 14
-rw-r--r-- drivers/ide/jmicron.c | 6
-rw-r--r-- drivers/ide/opti621.c | 77
-rw-r--r-- drivers/ide/palm_bk3710.c | 12
-rw-r--r-- drivers/ide/pdc202xx_new.c | 8
-rw-r--r-- drivers/ide/pdc202xx_old.c | 31
-rw-r--r-- drivers/ide/piix.c | 20
-rw-r--r-- drivers/ide/pmac.c | 13
-rw-r--r-- drivers/ide/qd65xx.c | 10
-rw-r--r-- drivers/ide/sc1200.c | 8
-rw-r--r-- drivers/ide/scc_pata.c | 24
-rw-r--r-- drivers/ide/serverworks.c | 50
-rw-r--r-- drivers/ide/sgiioc4.c | 2
-rw-r--r-- drivers/ide/siimage.c | 14
-rw-r--r-- drivers/ide/sis5513.c | 8
-rw-r--r-- drivers/ide/sl82c105.c | 8
-rw-r--r-- drivers/ide/slc90e66.c | 17
-rw-r--r-- drivers/ide/tc86c001.c | 9
-rw-r--r-- drivers/ide/triflex.c | 10
-rw-r--r-- drivers/ide/tx4938ide.c | 7
-rw-r--r-- drivers/ide/tx4939ide.c | 10
-rw-r--r-- drivers/ide/umc8672.c | 5
-rw-r--r-- drivers/ide/via82cxxx.c | 132
-rw-r--r-- drivers/infiniband/Kconfig | 1
-rw-r--r-- drivers/infiniband/core/ucm.c | 63
-rw-r--r-- drivers/infiniband/core/ud_header.c | 14
-rw-r--r-- drivers/infiniband/core/umem.c | 2
-rw-r--r-- drivers/infiniband/core/user_mad.c | 173
-rw-r--r-- drivers/infiniband/core/uverbs.h | 13
-rw-r--r-- drivers/infiniband/core/uverbs_cmd.c | 25
-rw-r--r-- drivers/infiniband/core/uverbs_main.c | 256
-rw-r--r-- drivers/infiniband/hw/cxgb3/cxio_hal.c | 15
-rw-r--r-- drivers/infiniband/hw/cxgb3/cxio_hal.h | 4
-rw-r--r-- drivers/infiniband/hw/cxgb3/cxio_wr.h | 17
-rw-r--r-- drivers/infiniband/hw/cxgb3/iwch.c | 80
-rw-r--r-- drivers/infiniband/hw/cxgb3/iwch.h | 2
-rw-r--r-- drivers/infiniband/hw/cxgb3/iwch_provider.c | 2
-rw-r--r-- drivers/infiniband/hw/cxgb3/iwch_qp.c | 9
-rw-r--r-- drivers/infiniband/hw/ehca/ehca_irq.c | 5
-rw-r--r-- drivers/infiniband/hw/ehca/ehca_qp.c | 4
-rw-r--r-- drivers/infiniband/hw/ehca/ehca_sqp.c | 2
-rw-r--r-- drivers/infiniband/hw/ipath/ipath_user_pages.c | 3
-rw-r--r-- drivers/infiniband/hw/mlx4/qp.c | 4
-rw-r--r-- drivers/infiniband/hw/mthca/mthca_qp.c | 2
-rw-r--r-- drivers/infiniband/hw/nes/nes.c | 1
-rw-r--r-- drivers/infiniband/hw/nes/nes.h | 9
-rw-r--r-- drivers/infiniband/hw/nes/nes_cm.c | 11
-rw-r--r-- drivers/infiniband/hw/nes/nes_hw.c | 484
-rw-r--r-- drivers/infiniband/hw/nes/nes_hw.h | 2
-rw-r--r-- drivers/infiniband/hw/nes/nes_nic.c | 61
-rw-r--r-- drivers/infiniband/hw/nes/nes_verbs.c | 6
-rw-r--r-- drivers/infiniband/ulp/ipoib/ipoib_ethtool.c | 10
-rw-r--r-- drivers/infiniband/ulp/iser/iscsi_iser.c | 47
-rw-r--r-- drivers/infiniband/ulp/iser/iscsi_iser.h | 97
-rw-r--r-- drivers/infiniband/ulp/iser/iser_initiator.c | 506
-rw-r--r-- drivers/infiniband/ulp/iser/iser_memory.c | 64
-rw-r--r-- drivers/infiniband/ulp/iser/iser_verbs.c | 281
-rw-r--r-- drivers/infiniband/ulp/srp/ib_srp.c | 91
-rw-r--r-- drivers/infiniband/ulp/srp/ib_srp.h | 6
-rw-r--r-- drivers/md/raid5.c | 2
-rw-r--r-- drivers/md/raid5.h | 2
-rw-r--r-- drivers/media/dvb/firewire/firedtv-fw.c | 39
-rw-r--r-- drivers/media/video/dabusb.c | 8
-rw-r--r-- drivers/mmc/card/sdio_uart.c | 93
-rw-r--r-- drivers/net/cxgb3/adapter.h | 5
-rw-r--r-- drivers/net/cxgb3/cxgb3_main.c | 57
-rw-r--r-- drivers/net/cxgb3/cxgb3_offload.h | 5
-rw-r--r-- drivers/net/cxgb3/regs.h | 16
-rw-r--r-- drivers/net/cxgb3/sge.c | 10
-rw-r--r-- drivers/net/cxgb3/t3_hw.c | 5
-rw-r--r-- drivers/parport/parport_pc.c | 6
-rw-r--r-- drivers/pci/hotplug/ibmphp_res.c | 14
-rw-r--r-- drivers/pcmcia/Kconfig | 14
-rw-r--r-- drivers/pcmcia/cardbus.c | 2
-rw-r--r-- drivers/pcmcia/cistpl.c | 1908
-rw-r--r-- drivers/pcmcia/db1xxx_ss.c | 19
-rw-r--r-- drivers/pcmcia/pd6729.c | 18
-rw-r--r-- drivers/pcmcia/rsrc_mgr.c | 3
-rw-r--r-- drivers/pcmcia/xxs1500_ss.c | 16
-rw-r--r-- drivers/pcmcia/yenta_socket.c | 8
-rw-r--r-- drivers/platform/x86/Kconfig | 13
-rw-r--r-- drivers/platform/x86/asus-laptop.c | 1741
-rw-r--r-- drivers/platform/x86/asus_acpi.c | 3
-rw-r--r-- drivers/platform/x86/classmate-laptop.c | 4
-rw-r--r-- drivers/platform/x86/dell-laptop.c | 9
-rw-r--r-- drivers/platform/x86/eeepc-laptop.c | 21
-rw-r--r-- drivers/platform/x86/thinkpad_acpi.c | 116
-rw-r--r-- drivers/platform/x86/toshiba_acpi.c | 30
-rw-r--r-- drivers/power/Kconfig | 4
-rw-r--r-- drivers/power/bq27x00_battery.c | 177
-rw-r--r-- drivers/power/da9030_battery.c | 2
-rw-r--r-- drivers/power/wm97xx_battery.c | 4
-rw-r--r-- drivers/regulator/Kconfig | 29
-rw-r--r-- drivers/regulator/Makefile | 3
-rw-r--r-- drivers/regulator/ab3100.c | 6
-rw-r--r-- drivers/regulator/core.c | 79
-rw-r--r-- drivers/regulator/dummy.c | 66
-rw-r--r-- drivers/regulator/dummy.h | 31
-rw-r--r-- drivers/regulator/fixed.c | 30
-rw-r--r-- drivers/regulator/lp3971.c | 68
-rw-r--r-- drivers/regulator/max1586.c | 9
-rw-r--r-- drivers/regulator/max8649.c | 408
-rw-r--r-- drivers/regulator/max8660.c | 11
-rw-r--r-- drivers/regulator/mc13783-regulator.c | 465
-rw-r--r-- drivers/regulator/pcap-regulator.c | 8
-rw-r--r-- drivers/regulator/tps65023-regulator.c | 35
-rw-r--r-- drivers/regulator/tps6507x-regulator.c | 34
-rw-r--r-- drivers/regulator/twl-regulator.c | 22
-rw-r--r-- drivers/regulator/virtual.c | 64
-rw-r--r-- drivers/regulator/wm831x-dcdc.c | 12
-rw-r--r-- drivers/regulator/wm831x-isink.c | 3
-rw-r--r-- drivers/regulator/wm831x-ldo.c | 5
-rw-r--r-- drivers/regulator/wm8350-regulator.c | 46
-rw-r--r-- drivers/regulator/wm8400-regulator.c | 7
-rw-r--r-- drivers/regulator/wm8994-regulator.c | 307
-rw-r--r-- drivers/serial/68328serial.c | 8
-rw-r--r-- drivers/serial/8250.c | 21
-rw-r--r-- drivers/serial/8250_pci.c | 31
-rw-r--r-- drivers/serial/Kconfig | 53
-rw-r--r-- drivers/serial/atmel_serial.c | 22
-rw-r--r-- drivers/serial/bcm63xx_uart.c | 7
-rw-r--r-- drivers/serial/bfin_5xx.c | 22
-rw-r--r-- drivers/serial/bfin_sport_uart.c | 701
-rw-r--r-- drivers/serial/bfin_sport_uart.h | 38
-rw-r--r-- drivers/serial/icom.c | 5
-rw-r--r-- drivers/serial/imx.c | 6
-rw-r--r-- drivers/serial/ioc3_serial.c | 3
-rw-r--r-- drivers/serial/jsm/jsm_driver.c | 1
-rw-r--r-- drivers/serial/jsm/jsm_tty.c | 9
-rw-r--r-- drivers/serial/msm_serial.c | 6
-rw-r--r-- drivers/serial/timbuart.c | 7
-rw-r--r-- drivers/staging/usbip/vhci_sysfs.c | 2
-rw-r--r-- drivers/usb/Kconfig | 2
-rw-r--r-- drivers/usb/Makefile | 1
-rw-r--r-- drivers/usb/atm/cxacru.c | 192
-rw-r--r-- drivers/usb/atm/usbatm.c | 3
-rw-r--r-- drivers/usb/atm/usbatm.h | 15
-rw-r--r-- drivers/usb/c67x00/c67x00-drv.c | 8
-rw-r--r-- drivers/usb/class/cdc-acm.c | 82
-rw-r--r-- drivers/usb/class/cdc-acm.h | 2
-rw-r--r-- drivers/usb/class/cdc-wdm.c | 2
-rw-r--r-- drivers/usb/class/usblp.c | 22
-rw-r--r-- drivers/usb/class/usbtmc.c | 2
-rw-r--r-- drivers/usb/core/Kconfig | 4
-rw-r--r-- drivers/usb/core/devices.c | 32
-rw-r--r-- drivers/usb/core/devio.c | 127
-rw-r--r-- drivers/usb/core/driver.c | 918
-rw-r--r-- drivers/usb/core/file.c | 2
-rw-r--r-- drivers/usb/core/hcd.c | 27
-rw-r--r-- drivers/usb/core/hcd.h | 13
-rw-r--r-- drivers/usb/core/hub.c | 120
-rw-r--r-- drivers/usb/core/message.c | 5
-rw-r--r-- drivers/usb/core/quirks.c | 18
-rw-r--r-- drivers/usb/core/sysfs.c | 85
-rw-r--r-- drivers/usb/core/urb.c | 13
-rw-r--r-- drivers/usb/core/usb.c | 38
-rw-r--r-- drivers/usb/core/usb.h | 43
-rw-r--r-- drivers/usb/early/ehci-dbgp.c | 68
-rw-r--r-- drivers/usb/gadget/Kconfig | 10
-rw-r--r-- drivers/usb/gadget/Makefile | 2
-rw-r--r-- drivers/usb/gadget/at91_udc.c | 10
-rw-r--r-- drivers/usb/gadget/atmel_usba_udc.c | 9
-rw-r--r-- drivers/usb/gadget/atmel_usba_udc.h | 1
-rw-r--r-- drivers/usb/gadget/epautoconf.c | 24
-rw-r--r-- drivers/usb/gadget/ether.c | 2
-rw-r--r-- drivers/usb/gadget/f_acm.c | 8
-rw-r--r-- drivers/usb/gadget/f_ecm.c | 7
-rw-r--r-- drivers/usb/gadget/f_mass_storage.c | 52
-rw-r--r-- drivers/usb/gadget/f_rndis.c | 4
-rw-r--r-- drivers/usb/gadget/file_storage.c | 10
-rw-r--r-- drivers/usb/gadget/fsl_qe_udc.c | 2
-rw-r--r-- drivers/usb/gadget/gadget_chips.h | 59
-rw-r--r-- drivers/usb/gadget/gmidi.c | 5
-rw-r--r-- drivers/usb/gadget/goku_udc.c | 2
-rw-r--r-- drivers/usb/gadget/inode.c | 39
-rw-r--r-- drivers/usb/gadget/mass_storage.c | 8
-rw-r--r-- drivers/usb/gadget/nokia.c | 259
-rw-r--r-- drivers/usb/gadget/printer.c | 18
-rw-r--r-- drivers/usb/gadget/pxa27x_udc.c | 135
-rw-r--r-- drivers/usb/gadget/pxa27x_udc.h | 6
-rw-r--r-- drivers/usb/gadget/s3c-hsotg.c | 11
-rw-r--r-- drivers/usb/gadget/u_ether.c | 5
-rw-r--r-- drivers/usb/gadget/u_ether.h | 7
-rw-r--r-- drivers/usb/gadget/zero.c | 6
-rw-r--r-- drivers/usb/host/Kconfig | 11
-rw-r--r-- drivers/usb/host/Makefile | 2
-rw-r--r-- drivers/usb/host/ehci-atmel.c | 2
-rw-r--r-- drivers/usb/host/ehci-au1xxx.c | 6
-rw-r--r-- drivers/usb/host/ehci-fsl.c | 97
-rw-r--r-- drivers/usb/host/ehci-mxc.c | 23
-rw-r--r-- drivers/usb/host/ehci-omap.c | 47
-rw-r--r-- drivers/usb/host/ehci-orion.c | 8
-rw-r--r-- drivers/usb/host/ehci-ppc-of.c | 14
-rw-r--r-- drivers/usb/host/ehci-sched.c | 12
-rw-r--r-- drivers/usb/host/ehci-xilinx-of.c | 8
-rw-r--r-- drivers/usb/host/fhci-hcd.c | 4
-rw-r--r-- drivers/usb/host/imx21-dbg.c | 527
-rw-r--r-- drivers/usb/host/imx21-hcd.c | 1789
-rw-r--r-- drivers/usb/host/imx21-hcd.h | 436
-rw-r--r-- drivers/usb/host/isp1362-hcd.c | 15
-rw-r--r-- drivers/usb/host/isp1760-hcd.c | 10
-rw-r--r-- drivers/usb/host/isp1760-if.c | 2
-rw-r--r-- drivers/usb/host/ohci-da8xx.c | 456
-rw-r--r-- drivers/usb/host/ohci-dbg.c | 4
-rw-r--r-- drivers/usb/host/ohci-hcd.c | 5
-rw-r--r-- drivers/usb/host/ohci-lh7a404.c | 11
-rw-r--r-- drivers/usb/host/ohci-pnx4008.c | 6
-rw-r--r-- drivers/usb/host/ohci-ppc-of.c | 10
-rw-r--r-- drivers/usb/host/ohci-ppc-soc.c | 8
-rw-r--r-- drivers/usb/host/ohci-sa1111.c | 8
-rw-r--r-- drivers/usb/host/sl811-hcd.c | 5
-rw-r--r-- drivers/usb/host/uhci-hcd.c | 1
-rw-r--r-- drivers/usb/host/xhci-dbg.c | 19
-rw-r--r-- drivers/usb/host/xhci-ext-caps.h | 7
-rw-r--r-- drivers/usb/host/xhci-hcd.c | 150
-rw-r--r-- drivers/usb/host/xhci-hub.c | 65
-rw-r--r-- drivers/usb/host/xhci-mem.c | 47
-rw-r--r-- drivers/usb/host/xhci-pci.c | 1
-rw-r--r-- drivers/usb/host/xhci-ring.c | 41
-rw-r--r-- drivers/usb/host/xhci.h | 11
-rw-r--r-- drivers/usb/image/mdc800.c | 2
-rw-r--r-- drivers/usb/image/microtek.c | 4
-rw-r--r-- drivers/usb/misc/Kconfig | 25
-rw-r--r-- drivers/usb/misc/Makefile | 2
-rw-r--r-- drivers/usb/misc/adutux.c | 8
-rw-r--r-- drivers/usb/misc/appledisplay.c | 5
-rw-r--r-- drivers/usb/misc/berry_charge.c | 183
-rw-r--r-- drivers/usb/misc/cypress_cy7c63.c | 2
-rw-r--r-- drivers/usb/misc/cytherm.c | 2
-rw-r--r-- drivers/usb/misc/emi26.c | 2
-rw-r--r-- drivers/usb/misc/emi62.c | 2
-rw-r--r-- drivers/usb/misc/ftdi-elan.c | 11
-rw-r--r-- drivers/usb/misc/idmouse.c | 2
-rw-r--r-- drivers/usb/misc/iowarrior.c | 6
-rw-r--r-- drivers/usb/misc/isight_firmware.c | 4
-rw-r--r-- drivers/usb/misc/ldusb.c | 4
-rw-r--r-- drivers/usb/misc/legousbtower.c | 13
-rw-r--r-- drivers/usb/misc/rio500.c | 11
-rw-r--r-- drivers/usb/misc/sisusbvga/sisusb.c | 20
-rw-r--r-- drivers/usb/misc/trancevibrator.c | 2
-rw-r--r-- drivers/usb/misc/usblcd.c | 7
-rw-r--r-- drivers/usb/misc/usbled.c | 2
-rw-r--r-- drivers/usb/misc/usbsevseg.c | 2
-rw-r--r-- drivers/usb/misc/usbtest.c | 6
-rw-r--r-- drivers/usb/misc/uss720.c | 2
-rw-r--r-- drivers/usb/misc/vstusb.c | 783
-rw-r--r-- drivers/usb/mon/mon_bin.c | 7
-rw-r--r-- drivers/usb/mon/mon_text.c | 6
-rw-r--r-- drivers/usb/musb/blackfin.c | 28
-rw-r--r-- drivers/usb/musb/cppi_dma.c | 33
-rw-r--r-- drivers/usb/musb/musb_core.c | 562
-rw-r--r-- drivers/usb/musb/musb_core.h | 72
-rw-r--r-- drivers/usb/musb/musb_gadget.c | 20
-rw-r--r-- drivers/usb/musb/musb_host.c | 34
-rw-r--r-- drivers/usb/musb/musb_regs.h | 101
-rw-r--r-- drivers/usb/musb/musbhsdma.c | 25
-rw-r--r-- drivers/usb/musb/musbhsdma.h | 17
-rw-r--r-- drivers/usb/musb/omap2430.c | 48
-rw-r--r-- drivers/usb/musb/omap2430.h | 32
-rw-r--r-- drivers/usb/musb/tusb6010.c | 2
-rw-r--r-- drivers/usb/musb/tusb6010_omap.c | 2
-rw-r--r-- drivers/usb/otg/twl4030-usb.c | 45
-rw-r--r-- drivers/usb/serial/Kconfig | 19
-rw-r--r-- drivers/usb/serial/Makefile | 2
-rw-r--r-- drivers/usb/serial/aircable.c | 36
-rw-r--r-- drivers/usb/serial/ark3116.c | 3
-rw-r--r-- drivers/usb/serial/belkin_sa.c | 2
-rw-r--r-- drivers/usb/serial/ch341.c | 27
-rw-r--r-- drivers/usb/serial/cp210x.c | 7
-rw-r--r-- drivers/usb/serial/cyberjack.c | 5
-rw-r--r-- drivers/usb/serial/cypress_m8.c | 82
-rw-r--r-- drivers/usb/serial/digi_acceleport.c | 38
-rw-r--r-- drivers/usb/serial/empeg.c | 3
-rw-r--r-- drivers/usb/serial/ftdi_sio.c | 195
-rw-r--r-- drivers/usb/serial/ftdi_sio.h | 6
-rw-r--r-- drivers/usb/serial/ftdi_sio_ids.h | 39
-rw-r--r-- drivers/usb/serial/funsoft.c | 2
-rw-r--r-- drivers/usb/serial/garmin_gps.c | 3
-rw-r--r-- drivers/usb/serial/generic.c | 7
-rw-r--r-- drivers/usb/serial/hp4x.c | 2
-rw-r--r-- drivers/usb/serial/io_edgeport.c | 69
-rw-r--r-- drivers/usb/serial/io_tables.h | 10
-rw-r--r-- drivers/usb/serial/io_ti.c | 75
-rw-r--r-- drivers/usb/serial/ipaq.c | 1
-rw-r--r-- drivers/usb/serial/ipw.c | 3
-rw-r--r-- drivers/usb/serial/ir-usb.c | 13
-rw-r--r-- drivers/usb/serial/iuu_phoenix.c | 2
-rw-r--r-- drivers/usb/serial/keyspan.c | 57
-rw-r--r-- drivers/usb/serial/keyspan.h | 10
-rw-r--r-- drivers/usb/serial/keyspan_pda.c | 60
-rw-r--r-- drivers/usb/serial/kl5kusb105.c | 66
-rw-r--r-- drivers/usb/serial/kobil_sct.c | 25
-rw-r--r-- drivers/usb/serial/mct_u232.c | 57
-rw-r--r-- drivers/usb/serial/mct_u232.h | 2
-rw-r--r-- drivers/usb/serial/mos7720.c | 185
-rw-r--r-- drivers/usb/serial/mos7840.c | 27
-rw-r--r-- drivers/usb/serial/moto_modem.c | 2
-rw-r--r-- drivers/usb/serial/navman.c | 3
-rw-r--r-- drivers/usb/serial/omninet.c | 8
-rw-r--r-- drivers/usb/serial/opticon.c | 17
-rw-r--r-- drivers/usb/serial/option.c | 71
-rw-r--r-- drivers/usb/serial/oti6858.c | 36
-rw-r--r-- drivers/usb/serial/pl2303.c | 38
-rw-r--r-- drivers/usb/serial/qcaux.c | 96
-rw-r--r-- drivers/usb/serial/qcserial.c | 2
-rw-r--r-- drivers/usb/serial/siemens_mpi.c | 2
-rw-r--r-- drivers/usb/serial/sierra.c | 59
-rw-r--r-- drivers/usb/serial/spcp8x5.c | 27
-rw-r--r-- drivers/usb/serial/symbolserial.c | 12
-rw-r--r-- drivers/usb/serial/ti_usb_3410_5052.c | 3
-rw-r--r-- drivers/usb/serial/usb-serial.c | 15
-rw-r--r-- drivers/usb/serial/usb_debug.c | 2
-rw-r--r-- drivers/usb/serial/visor.c | 40
-rw-r--r-- drivers/usb/serial/vivopay-serial.c | 76
-rw-r--r-- drivers/usb/serial/whiteheat.c | 24
-rw-r--r-- drivers/usb/storage/onetouch.c | 2
-rw-r--r-- drivers/usb/storage/scsiglue.c | 10
-rw-r--r-- drivers/usb/storage/shuttle_usbat.c | 15
-rw-r--r-- drivers/usb/storage/transport.c | 6
-rw-r--r-- drivers/usb/storage/unusual_devs.h | 88
-rw-r--r-- drivers/usb/usb-skeleton.c | 2
-rw-r--r-- drivers/usb/wusbcore/cbaf.c | 2
-rw-r--r-- drivers/usb/wusbcore/devconnect.c | 2
-rw-r--r-- drivers/usb/wusbcore/mmc.c | 2
-rw-r--r-- drivers/video/console/fbcon.c | 18
-rw-r--r-- drivers/video/fbmem.c | 1
-rw-r--r-- drivers/xen/events.c | 8
-rw-r--r-- fs/adfs/adfs.h | 2
-rw-r--r-- fs/adfs/inode.c | 5
-rw-r--r-- fs/affs/affs.h | 3
-rw-r--r-- fs/affs/inode.c | 2
-rw-r--r-- fs/afs/internal.h | 1
-rw-r--r-- fs/afs/super.c | 1
-rw-r--r-- fs/afs/write.c | 21
-rw-r--r-- fs/autofs4/autofs_i.h | 7
-rw-r--r-- fs/autofs4/dev-ioctl.c | 11
-rw-r--r-- fs/autofs4/expire.c | 6
-rw-r--r-- fs/autofs4/inode.c | 63
-rw-r--r-- fs/autofs4/root.c | 474
-rw-r--r-- fs/bfs/inode.c | 5
-rw-r--r-- fs/btrfs/ctree.h | 2
-rw-r--r-- fs/btrfs/inode.c | 4
-rw-r--r-- fs/cifs/CHANGES | 3
-rw-r--r-- fs/cifs/cifsglob.h | 2
-rw-r--r-- fs/cifs/cifspdu.h | 6
-rw-r--r-- fs/cifs/cifsproto.h | 7
-rw-r--r-- fs/cifs/cifssmb.c | 360
-rw-r--r-- fs/cifs/connect.c | 8
-rw-r--r-- fs/cifs/file.c | 4
-rw-r--r-- fs/cifs/inode.c | 3
-rw-r--r-- fs/cifs/misc.c | 2
-rw-r--r-- fs/cifs/xattr.c | 8
-rw-r--r-- fs/dcache.c | 70
-rw-r--r-- fs/debugfs/inode.c | 2
-rw-r--r-- fs/exofs/common.h | 39
-rw-r--r-- fs/exofs/exofs.h | 55
-rw-r--r-- fs/exofs/inode.c | 198
-rw-r--r-- fs/exofs/ios.c | 575
-rw-r--r-- fs/exofs/super.c | 121
-rw-r--r-- fs/ext2/ext2.h | 2
-rw-r--r-- fs/ext2/inode.c | 11
-rw-r--r-- fs/ext3/inode.c | 4
-rw-r--r-- fs/ext4/balloc.c | 35
-rw-r--r-- fs/ext4/block_validity.c | 4
-rw-r--r-- fs/ext4/dir.c | 14
-rw-r--r-- fs/ext4/ext4.h | 110
-rw-r--r-- fs/ext4/ext4_jbd2.c | 4
-rw-r--r-- fs/ext4/ext4_jbd2.h | 24
-rw-r--r-- fs/ext4/extents.c | 260
-rw-r--r-- fs/ext4/file.c | 10
-rw-r--r-- fs/ext4/fsync.c | 2
-rw-r--r-- fs/ext4/ialloc.c | 32
-rw-r--r-- fs/ext4/inode.c | 465
-rw-r--r-- fs/ext4/ioctl.c | 12
-rw-r--r-- fs/ext4/mballoc.c | 73
-rw-r--r-- fs/ext4/mballoc.h | 9
-rw-r--r-- fs/ext4/migrate.c | 35
-rw-r--r-- fs/ext4/move_extent.c | 36
-rw-r--r-- fs/ext4/namei.c | 63
-rw-r--r-- fs/ext4/resize.c | 102
-rw-r--r-- fs/ext4/super.c | 342
-rw-r--r-- fs/ext4/xattr.c | 56
-rw-r--r-- fs/fat/inode.c | 9
-rw-r--r-- fs/fs-writeback.c | 22
-rw-r--r-- fs/fuse/dev.c | 30
-rw-r--r-- fs/gfs2/aops.c | 4
-rw-r--r-- fs/gfs2/glock.c | 75
-rw-r--r-- fs/gfs2/glock.h | 7
-rw-r--r-- fs/gfs2/glops.c | 16
-rw-r--r-- fs/gfs2/incore.h | 5
-rw-r--r-- fs/gfs2/inode.c | 6
-rw-r--r-- fs/gfs2/lock_dlm.c | 5
-rw-r--r-- fs/gfs2/lops.c | 4
-rw-r--r-- fs/gfs2/main.c | 28
-rw-r--r-- fs/gfs2/meta_io.c | 46
-rw-r--r-- fs/gfs2/meta_io.h | 12
-rw-r--r-- fs/gfs2/ops_fstype.c | 4
-rw-r--r-- fs/gfs2/ops_inode.c | 113
-rw-r--r-- fs/gfs2/super.c | 32
-rw-r--r-- fs/gfs2/sys.c | 2
-rw-r--r-- fs/gfs2/util.c | 1
-rw-r--r-- fs/gfs2/util.h | 1
-rw-r--r-- fs/hfs/hfs_fs.h | 2
-rw-r--r-- fs/hfs/inode.c | 2
-rw-r--r-- fs/hfsplus/super.c | 3
-rw-r--r-- fs/hpfs/anode.c | 2
-rw-r--r-- fs/hpfs/dentry.c | 14
-rw-r--r-- fs/hpfs/dir.c | 14
-rw-r--r-- fs/hpfs/dnode.c | 21
-rw-r--r-- fs/hpfs/ea.c | 7
-rw-r--r-- fs/hpfs/hpfs_fn.h | 30
-rw-r--r-- fs/hpfs/inode.c | 4
-rw-r--r-- fs/hpfs/map.c | 6
-rw-r--r-- fs/hpfs/name.c | 21
-rw-r--r-- fs/hpfs/namei.c | 75
-rw-r--r-- fs/hppfs/hppfs.c | 2
-rw-r--r-- fs/internal.h | 2
-rw-r--r-- fs/jbd2/checkpoint.c | 1
-rw-r--r-- fs/jbd2/commit.c | 13
-rw-r--r-- fs/jbd2/journal.c | 132
-rw-r--r-- fs/jbd2/transaction.c | 43
-rw-r--r-- fs/jfs/inode.c | 5
-rw-r--r-- fs/jfs/jfs_inode.h | 2
-rw-r--r-- fs/libfs.c | 77
-rw-r--r-- fs/locks.c | 5
-rw-r--r-- fs/minix/inode.c | 8
-rw-r--r-- fs/namei.c | 555
-rw-r--r-- fs/namespace.c | 53
-rw-r--r-- fs/nfs/inode.c | 18
-rw-r--r-- fs/nfs/internal.h | 2
-rw-r--r-- fs/nfs/iostat.h | 4
-rw-r--r-- fs/nfs/nfs4proc.c | 8
-rw-r--r-- fs/nfsctl.c | 5
-rw-r--r-- fs/nfsd/nfs4xdr.c | 12
-rw-r--r-- fs/nfsd/vfs.c | 4
-rw-r--r-- fs/nilfs2/dat.c | 3
-rw-r--r-- fs/nilfs2/dir.c | 14
-rw-r--r-- fs/nilfs2/ioctl.c | 66
-rw-r--r-- fs/nilfs2/namei.c | 13
-rw-r--r-- fs/nilfs2/nilfs.h | 4
-rw-r--r-- fs/nilfs2/recovery.c | 41
-rw-r--r-- fs/nilfs2/segbuf.c | 18
-rw-r--r-- fs/nilfs2/segbuf.h | 5
-rw-r--r-- fs/nilfs2/segment.c | 120
-rw-r--r-- fs/nilfs2/segment.h | 2
-rw-r--r-- fs/nilfs2/super.c | 15
-rw-r--r-- fs/nilfs2/the_nilfs.c | 38
-rw-r--r-- fs/nilfs2/the_nilfs.h | 3
-rw-r--r-- fs/notify/inotify/inotify_user.c | 59
-rw-r--r-- fs/ntfs/dir.c | 2
-rw-r--r-- fs/ntfs/file.c | 2
-rw-r--r-- fs/ntfs/inode.c | 2
-rw-r--r-- fs/ntfs/inode.h | 4
-rw-r--r-- fs/ntfs/super.c | 8
-rw-r--r-- fs/ocfs2/Makefile | 1
-rw-r--r-- fs/ocfs2/alloc.c | 5
-rw-r--r-- fs/ocfs2/aops.c | 5
-rw-r--r-- fs/ocfs2/cluster/masklog.c | 1
-rw-r--r-- fs/ocfs2/cluster/masklog.h | 7
-rw-r--r-- fs/ocfs2/dir.c | 2
-rw-r--r-- fs/ocfs2/dlm/Makefile | 3
-rw-r--r-- fs/ocfs2/dlm/dlmrecovery.c | 2
-rw-r--r-- fs/ocfs2/dlmfs/Makefile | 5
-rw-r--r-- fs/ocfs2/dlmfs/dlmfs.c (renamed from fs/ocfs2/dlm/dlmfs.c) | 127
-rw-r--r-- fs/ocfs2/dlmfs/dlmfsver.c (renamed from fs/ocfs2/dlm/dlmfsver.c) | 0
-rw-r--r-- fs/ocfs2/dlmfs/dlmfsver.h (renamed from fs/ocfs2/dlm/dlmfsver.h) | 0
-rw-r--r-- fs/ocfs2/dlmfs/userdlm.c (renamed from fs/ocfs2/dlm/userdlm.c) | 308
-rw-r--r-- fs/ocfs2/dlmfs/userdlm.h (renamed from fs/ocfs2/dlm/userdlm.h) | 16
-rw-r--r-- fs/ocfs2/dlmglue.c | 284
-rw-r--r-- fs/ocfs2/file.c | 13
-rw-r--r-- fs/ocfs2/ioctl.h | 6
-rw-r--r-- fs/ocfs2/localalloc.c | 2
-rw-r--r-- fs/ocfs2/ocfs2.h | 32
-rw-r--r-- fs/ocfs2/ocfs2_fs.h | 57
-rw-r--r-- fs/ocfs2/ocfs2_ioctl.h | 79
-rw-r--r-- fs/ocfs2/ocfs2_lockingver.h | 2
-rw-r--r-- fs/ocfs2/refcounttree.c | 6
-rw-r--r-- fs/ocfs2/stack_o2cb.c | 37
-rw-r--r-- fs/ocfs2/stack_user.c | 49
-rw-r--r-- fs/ocfs2/stackglue.c | 98
-rw-r--r-- fs/ocfs2/stackglue.h | 95
-rw-r--r-- fs/ocfs2/suballoc.c | 171
-rw-r--r-- fs/ocfs2/suballoc.h | 1
-rw-r--r-- fs/ocfs2/super.c | 10
-rw-r--r-- fs/ocfs2/xattr.c | 2182
-rw-r--r-- fs/omfs/inode.c | 10
-rw-r--r-- fs/open.c | 2
-rw-r--r-- fs/pnode.c | 28
-rw-r--r-- fs/pnode.h | 5
-rw-r--r-- fs/proc/base.c | 10
-rw-r--r-- fs/proc/generic.c | 5
-rw-r--r-- fs/proc/kmsg.c | 14
-rw-r--r-- fs/proc/root.c | 6
-rw-r--r-- fs/reiserfs/inode.c | 4
-rw-r--r-- fs/squashfs/Makefile | 2
-rw-r--r-- fs/squashfs/block.c | 76
-rw-r--r-- fs/squashfs/cache.c | 1
-rw-r--r-- fs/squashfs/decompressor.c | 68
-rw-r--r-- fs/squashfs/decompressor.h | 55
-rw-r--r-- fs/squashfs/dir.c | 1
-rw-r--r-- fs/squashfs/export.c | 1
-rw-r--r-- fs/squashfs/file.c | 1
-rw-r--r-- fs/squashfs/fragment.c | 1
-rw-r--r-- fs/squashfs/id.c | 1
-rw-r--r-- fs/squashfs/inode.c | 1
-rw-r--r-- fs/squashfs/namei.c | 1
-rw-r--r-- fs/squashfs/squashfs.h | 8
-rw-r--r-- fs/squashfs/squashfs_fs.h | 6
-rw-r--r-- fs/squashfs/squashfs_fs_sb.h | 40
-rw-r--r-- fs/squashfs/super.c | 49
-rw-r--r-- fs/squashfs/symlink.c | 1
-rw-r--r-- fs/squashfs/zlib_wrapper.c | 150
-rw-r--r-- fs/super.c | 21
-rw-r--r-- fs/sysv/inode.c | 10
-rw-r--r-- fs/sysv/sysv.h | 2
-rw-r--r-- fs/ubifs/dir.c | 2
-rw-r--r-- fs/ubifs/file.c | 8
-rw-r--r-- fs/ubifs/super.c | 2
-rw-r--r-- fs/udf/balloc.c | 2
-rw-r--r-- fs/udf/dir.c | 4
-rw-r--r-- fs/udf/inode.c | 6
-rw-r--r-- fs/udf/namei.c | 20
-rw-r--r-- fs/udf/symlink.c | 10
-rw-r--r-- fs/udf/udfdecl.h | 2
-rw-r--r-- fs/ufs/dir.c | 10
-rw-r--r-- fs/ufs/inode.c | 5
-rw-r--r-- fs/ufs/ufs.h | 6
-rw-r--r-- fs/xfs/linux-2.6/xfs_super.c | 8
-rw-r--r-- fs/xfs/xfs_mount.h | 2
-rw-r--r-- include/acpi/processor.h | 2
-rw-r--r-- include/asm-generic/local.h | 19
-rw-r--r-- include/asm-generic/percpu.h | 18
-rw-r--r-- include/drm/drmP.h | 28
-rw-r--r-- include/drm/drm_buffer.h | 148
-rw-r--r-- include/drm/drm_crtc.h | 2
-rw-r--r-- include/drm/drm_edid.h | 3
-rw-r--r-- include/drm/drm_pciids.h | 36
-rw-r--r-- include/drm/nouveau_drm.h | 86
-rw-r--r-- include/drm/radeon_drm.h | 1
-rw-r--r-- include/drm/ttm/ttm_bo_driver.h | 2
-rw-r--r-- include/linux/audit.h | 11
-rw-r--r-- include/linux/blktrace_api.h | 4
-rw-r--r-- include/linux/bootmem.h | 7
-rw-r--r-- include/linux/compiler.h | 2
-rw-r--r-- include/linux/dmaengine.h | 4
-rw-r--r-- include/linux/early_res.h | 23
-rw-r--r-- include/linux/ext3_fs.h | 2
-rw-r--r-- include/linux/fb.h | 2
-rw-r--r-- include/linux/firewire-cdev.h | 40
-rw-r--r-- include/linux/firewire.h | 11
-rw-r--r-- include/linux/fs.h | 28
-rw-r--r-- include/linux/fsnotify.h | 11
-rw-r--r-- include/linux/genhd.h | 2
-rw-r--r-- include/linux/gfs2_ondisk.h | 30
-rw-r--r-- include/linux/i2c-smbus.h | 50
-rw-r--r-- include/linux/i2c.h | 8
-rw-r--r-- include/linux/ide.h | 7
-rw-r--r-- include/linux/irq.h | 2
-rw-r--r-- include/linux/jbd2.h | 11
-rw-r--r-- include/linux/kernel.h | 10
-rw-r--r-- include/linux/kexec.h | 2
-rw-r--r-- include/linux/kprobes.h | 44
-rw-r--r-- include/linux/kvm.h | 10
-rw-r--r-- include/linux/kvm_host.h | 71
-rw-r--r-- include/linux/magic.h | 1
-rw-r--r-- include/linux/mfd/mc13783.h | 2
-rw-r--r-- include/linux/mm.h | 16
-rw-r--r-- include/linux/mmzone.h | 14
-rw-r--r-- include/linux/mnt_namespace.h | 1
-rw-r--r-- include/linux/module.h | 37
-rw-r--r-- include/linux/mount.h | 16
-rw-r--r-- include/linux/nfs_fs_sb.h | 2
-rw-r--r-- include/linux/nilfs2_fs.h | 1
-rw-r--r-- include/linux/pci_ids.h | 4
-rw-r--r-- include/linux/percpu-defs.h | 40
-rw-r--r-- include/linux/percpu.h | 44
-rw-r--r-- include/linux/percpu_counter.h | 2
-rw-r--r-- include/linux/range.h | 30
-rw-r--r-- include/linux/regulator/consumer.h | 4
-rw-r--r-- include/linux/regulator/driver.h | 6
-rw-r--r-- include/linux/regulator/fixed.h | 2
-rw-r--r-- include/linux/regulator/max8649.h | 44
-rw-r--r-- include/linux/reiserfs_fs.h | 2
-rw-r--r-- include/linux/security.h | 14
-rw-r--r-- include/linux/srcu.h | 2
-rw-r--r-- include/linux/syslog.h | 52
-rw-r--r-- include/linux/tty.h | 10
-rw-r--r-- include/linux/tty_flip.h | 7
-rw-r--r-- include/linux/usb.h | 53
-rw-r--r-- include/linux/usb/Kbuild | 1
-rw-r--r-- include/linux/usb/atmel_usba_udc.h | 1
-rw-r--r-- include/linux/usb/ch9.h | 2
-rw-r--r-- include/linux/usb/musb.h | 23
-rw-r--r-- include/linux/usb/otg.h | 25
-rw-r--r-- include/linux/usb/quirks.h | 3
-rw-r--r-- include/linux/usb/serial.h | 13
-rw-r--r-- include/linux/usb/vstusb.h | 71
-rw-r--r-- include/linux/vga_switcheroo.h | 57
-rw-r--r-- include/linux/vmstat.h | 8
-rw-r--r-- include/linux/vt.h | 3
-rw-r--r-- include/rdma/ib_pack.h | 1
-rw-r--r-- include/rdma/ib_verbs.h | 4
-rw-r--r-- include/rdma/rdma_cm.h | 1
-rw-r--r-- include/trace/events/ext4.h | 101
-rw-r--r-- include/trace/events/jbd2.h | 28
-rw-r--r-- include/trace/events/kvm.h | 41
-rw-r--r-- init/do_mounts_initrd.c | 4
-rw-r--r-- init/main.c | 27
-rw-r--r-- ipc/mqueue.c | 120
-rw-r--r-- kernel/Makefile | 3
-rw-r--r-- kernel/audit_tree.c | 100
-rw-r--r-- kernel/auditsc.c | 7
-rw-r--r-- kernel/capability.c | 4
-rw-r--r-- kernel/early_res.c | 578
-rw-r--r-- kernel/irq/chip.c | 52
-rw-r--r-- kernel/irq/handle.c | 58
-rw-r--r-- kernel/irq/internals.h | 6
-rw-r--r-- kernel/irq/numa_migrate.c | 4
-rw-r--r-- kernel/kexec.c | 2
-rw-r--r-- kernel/kprobes.c | 647
-rw-r--r-- kernel/module.c | 29
-rw-r--r-- kernel/padata.c | 8
-rw-r--r-- kernel/printk.c | 52
-rw-r--r-- kernel/range.c | 163
-rw-r--r-- kernel/rcutorture.c | 8
-rw-r--r-- kernel/resource.c | 9
-rw-r--r-- kernel/sched.c | 4
-rw-r--r-- kernel/signal.c | 43
-rw-r--r-- kernel/stop_machine.c | 2
-rw-r--r-- kernel/sysctl.c | 12
-rw-r--r-- kernel/sysctl_binary.c | 7
-rw-r--r-- kernel/trace/ring_buffer.c | 1
-rw-r--r-- kernel/trace/ring_buffer_benchmark.c | 1
-rw-r--r-- kernel/trace/trace.c | 6
-rw-r--r-- kernel/trace/trace_functions_graph.c | 4
-rw-r--r-- mm/Kconfig | 4
-rw-r--r-- mm/bootmem.c | 195
-rw-r--r-- mm/filemap.c | 2
-rw-r--r-- mm/page_alloc.c | 263
-rw-r--r-- mm/percpu.c | 36
-rw-r--r-- mm/sparse-vmemmap.c | 76
-rw-r--r-- mm/sparse.c | 196
-rw-r--r-- mm/vmstat.c | 15
-rw-r--r-- net/sunrpc/rpc_pipe.c | 9
-rw-r--r-- security/capability.c | 4
-rw-r--r-- security/commoncap.c | 9
-rw-r--r-- security/security.c | 49
-rw-r--r-- security/selinux/avc.c | 22
-rw-r--r-- security/selinux/hooks.c | 41
-rw-r--r-- security/selinux/include/security.h | 13
-rw-r--r-- security/selinux/selinuxfs.c | 12
-rw-r--r-- security/selinux/ss/context.h | 12
-rw-r--r-- security/selinux/ss/mls.c | 48
-rw-r--r-- security/selinux/ss/mls.h | 2
-rw-r--r-- security/selinux/ss/mls_types.h | 7
-rw-r--r-- security/selinux/ss/policydb.c | 127
-rw-r--r-- security/selinux/ss/policydb.h | 10
-rw-r--r-- security/selinux/ss/services.c | 273
-rw-r--r-- security/smack/smack_lsm.c | 6
-rw-r--r-- security/tomoyo/Makefile | 2
-rw-r--r-- security/tomoyo/common.c | 374
-rw-r--r-- security/tomoyo/common.h | 530
-rw-r--r-- security/tomoyo/domain.c | 391
-rw-r--r-- security/tomoyo/file.c | 731
-rw-r--r-- security/tomoyo/gc.c | 370
-rw-r--r-- security/tomoyo/realpath.c | 292
-rw-r--r-- security/tomoyo/realpath.h | 66
-rw-r--r-- security/tomoyo/tomoyo.c | 142
-rw-r--r-- security/tomoyo/tomoyo.h | 94
-rw-r--r-- tools/perf/Documentation/perf-probe.txt | 58
-rw-r--r-- tools/perf/Makefile | 10
-rw-r--r-- tools/perf/builtin-probe.c | 36
-rw-r--r-- tools/perf/util/probe-event.c | 55
-rw-r--r-- tools/perf/util/probe-finder.c | 1002
-rw-r--r-- tools/perf/util/probe-finder.h | 53
-rw-r--r-- tools/perf/util/string.c | 55
-rw-r--r-- tools/perf/util/string.h | 1
-rw-r--r-- virt/kvm/Kconfig | 3
-rw-r--r-- virt/kvm/assigned-dev.c | 12
-rw-r--r-- virt/kvm/coalesced_mmio.c | 43
-rw-r--r-- virt/kvm/coalesced_mmio.h | 15
-rw-r--r-- virt/kvm/eventfd.c | 21
-rw-r--r-- virt/kvm/ioapic.c | 38
-rw-r--r-- virt/kvm/ioapic.h | 2
-rw-r--r-- virt/kvm/iommu.c | 36
-rw-r--r-- virt/kvm/kvm_main.c | 392
1101 files changed, 51342 insertions(+), 26730 deletions(-)
diff --git a/Documentation/ABI/testing/sysfs-bus-usb b/Documentation/ABI/testing/sysfs-bus-usb
index a07c0f366f91..a986e9bbba3d 100644
--- a/Documentation/ABI/testing/sysfs-bus-usb
+++ b/Documentation/ABI/testing/sysfs-bus-usb
@@ -159,3 +159,14 @@ Description:
 		device. This is useful to ensure auto probing won't
 		match the driver to the device. For example:
 		# echo "046d c315" > /sys/bus/usb/drivers/foo/remove_id
+
+What:		/sys/bus/usb/device/.../avoid_reset
+Date:		December 2009
+Contact:	Oliver Neukum <oliver@neukum.org>
+Description:
+		Writing 1 to this file tells the kernel that this
+		device will morph into another mode when it is reset.
+		Drivers will not use reset for error handling for
+		such devices.
+Users:
+		usb_modeswitch
diff --git a/Documentation/ABI/testing/sysfs-platform-asus-laptop b/Documentation/ABI/testing/sysfs-platform-asus-laptop
index a1cb660c50cf..1d775390e856 100644
--- a/Documentation/ABI/testing/sysfs-platform-asus-laptop
+++ b/Documentation/ABI/testing/sysfs-platform-asus-laptop
@@ -1,4 +1,4 @@
-What:		/sys/devices/platform/asus-laptop/display
+What:		/sys/devices/platform/asus_laptop/display
 Date:		January 2007
 KernelVersion:	2.6.20
 Contact:	"Corentin Chary" <corentincj@iksaif.net>
@@ -13,7 +13,7 @@ Description:
 		Ex: - 0 (0000b) means no display
 		    - 3 (0011b) CRT+LCD.
 
-What:		/sys/devices/platform/asus-laptop/gps
+What:		/sys/devices/platform/asus_laptop/gps
 Date:		January 2007
 KernelVersion:	2.6.20
 Contact:	"Corentin Chary" <corentincj@iksaif.net>
@@ -21,7 +21,7 @@ Description:
 		Control the gps device. 1 means on, 0 means off.
 Users:		Lapsus
 
-What:		/sys/devices/platform/asus-laptop/ledd
+What:		/sys/devices/platform/asus_laptop/ledd
 Date:		January 2007
 KernelVersion:	2.6.20
 Contact:	"Corentin Chary" <corentincj@iksaif.net>
@@ -29,11 +29,11 @@ Description:
 		Some models like the W1N have a LED display that can be
 		used to display several informations.
 		To control the LED display, use the following :
-		echo 0x0T000DDD > /sys/devices/platform/asus-laptop/
+		echo 0x0T000DDD > /sys/devices/platform/asus_laptop/
 		where T control the 3 letters display, and DDD the 3 digits display.
 		The DDD table can be found in Documentation/laptops/asus-laptop.txt
 
-What:		/sys/devices/platform/asus-laptop/bluetooth
+What:		/sys/devices/platform/asus_laptop/bluetooth
 Date:		January 2007
 KernelVersion:	2.6.20
 Contact:	"Corentin Chary" <corentincj@iksaif.net>
@@ -42,7 +42,7 @@ Description:
 		This may control the led, the device or both.
 Users:		Lapsus
 
-What:		/sys/devices/platform/asus-laptop/wlan
+What:		/sys/devices/platform/asus_laptop/wlan
 Date:		January 2007
 KernelVersion:	2.6.20
 Contact:	"Corentin Chary" <corentincj@iksaif.net>
diff --git a/Documentation/ABI/testing/sysfs-platform-eeepc-laptop b/Documentation/ABI/testing/sysfs-platform-eeepc-laptop
index 7445dfb321b5..5b026c69587a 100644
--- a/Documentation/ABI/testing/sysfs-platform-eeepc-laptop
+++ b/Documentation/ABI/testing/sysfs-platform-eeepc-laptop
@@ -1,4 +1,4 @@
-What:		/sys/devices/platform/eeepc-laptop/disp
+What:		/sys/devices/platform/eeepc/disp
 Date:		May 2008
 KernelVersion:	2.6.26
 Contact:	"Corentin Chary" <corentincj@iksaif.net>
@@ -9,21 +9,21 @@ Description:
 		- 3 = LCD+CRT
 		If you run X11, you should use xrandr instead.
 
-What:		/sys/devices/platform/eeepc-laptop/camera
+What:		/sys/devices/platform/eeepc/camera
 Date:		May 2008
 KernelVersion:	2.6.26
 Contact:	"Corentin Chary" <corentincj@iksaif.net>
 Description:
 		Control the camera. 1 means on, 0 means off.
 
-What:		/sys/devices/platform/eeepc-laptop/cardr
+What:		/sys/devices/platform/eeepc/cardr
 Date:		May 2008
 KernelVersion:	2.6.26
 Contact:	"Corentin Chary" <corentincj@iksaif.net>
 Description:
 		Control the card reader. 1 means on, 0 means off.
 
-What:		/sys/devices/platform/eeepc-laptop/cpufv
+What:		/sys/devices/platform/eeepc/cpufv
 Date:		Jun 2009
 KernelVersion:	2.6.31
 Contact:	"Corentin Chary" <corentincj@iksaif.net>
@@ -42,7 +42,7 @@ Description:
 		`------------ Availables modes
 		For example, 0x301 means: mode 1 selected, 3 available modes.
 
-What:		/sys/devices/platform/eeepc-laptop/available_cpufv
+What:		/sys/devices/platform/eeepc/available_cpufv
 Date:		Jun 2009
 KernelVersion:	2.6.31
 Contact:	"Corentin Chary" <corentincj@iksaif.net>
diff --git a/Documentation/DocBook/device-drivers.tmpl b/Documentation/DocBook/device-drivers.tmpl
index f9a6e2c75f12..1b2dd4fc3db2 100644
--- a/Documentation/DocBook/device-drivers.tmpl
+++ b/Documentation/DocBook/device-drivers.tmpl
@@ -45,7 +45,7 @@
 </sect1>
 
 <sect1><title>Atomic and pointer manipulation</title>
-!Iarch/x86/include/asm/atomic_32.h
+!Iarch/x86/include/asm/atomic.h
 !Iarch/x86/include/asm/unaligned.h
 </sect1>
 
diff --git a/Documentation/DocBook/deviceiobook.tmpl b/Documentation/DocBook/deviceiobook.tmpl
index 3ed88126ab8f..c1ed6a49e598 100644
--- a/Documentation/DocBook/deviceiobook.tmpl
+++ b/Documentation/DocBook/deviceiobook.tmpl
@@ -316,7 +316,7 @@ CPU B: spin_unlock_irqrestore(&amp;dev_lock, flags)
 
 <chapter id="pubfunctions">
 <title>Public Functions Provided</title>
-!Iarch/x86/include/asm/io_32.h
+!Iarch/x86/include/asm/io.h
 !Elib/iomap.c
 </chapter>
 
diff --git a/Documentation/cdrom/ide-cd b/Documentation/cdrom/ide-cd
index 2c558cd6c1ef..f4dc9de2694e 100644
--- a/Documentation/cdrom/ide-cd
+++ b/Documentation/cdrom/ide-cd
@@ -159,42 +159,7 @@ two arguments: the CDROM device, and the slot number to which you wish
 to change. If the slot number is -1, the drive is unloaded.
 
 
-4. Compilation options
-----------------------
-
-There are a few additional options which can be set when compiling the
-driver. Most people should not need to mess with any of these; they
-are listed here simply for completeness. A compilation option can be
-enabled by adding a line of the form `#define <option> 1' to the top
-of ide-cd.c. All these options are disabled by default.
-
-VERBOSE_IDE_CD_ERRORS
-  If this is set, ATAPI error codes will be translated into textual
-  descriptions. In addition, a dump is made of the command which
-  provoked the error. This is off by default to save the memory used
-  by the (somewhat long) table of error descriptions.
-
-STANDARD_ATAPI
-  If this is set, the code needed to deal with certain drives which do
-  not properly implement the ATAPI spec will be disabled. If you know
-  your drive implements ATAPI properly, you can turn this on to get a
-  slightly smaller kernel.
-
-NO_DOOR_LOCKING
-  If this is set, the driver will never attempt to lock the door of
-  the drive.
-
-CDROM_NBLOCKS_BUFFER
-  This sets the size of the buffer to be used for a CDROMREADAUDIO
-  ioctl. The default is 8.
-
-TEST
-  This currently enables an additional ioctl which enables a user-mode
-  program to execute an arbitrary packet command. See the source for
-  details. This should be left off unless you know what you're doing.
-
-
-5. Common problems
+4. Common problems
 ------------------
 
 This section discusses some common problems encountered when trying to
@@ -371,7 +336,7 @@ f. Data corruption.
 	expense of low system performance.
 
 
-6. cdchange.c
+5. cdchange.c
 -------------
 
 /*
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index 73ef30dbe612..31575e220f3b 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -117,19 +117,25 @@ Who: Mauro Carvalho Chehab <mchehab@infradead.org>
117--------------------------- 117---------------------------
118 118
119What: PCMCIA control ioctl (needed for pcmcia-cs [cardmgr, cardctl]) 119What: PCMCIA control ioctl (needed for pcmcia-cs [cardmgr, cardctl])
120When: November 2005 120When: 2.6.35/2.6.36
121Files: drivers/pcmcia/: pcmcia_ioctl.c 121Files: drivers/pcmcia/: pcmcia_ioctl.c
122Why: With the 16-bit PCMCIA subsystem now behaving (almost) like a 122Why: With the 16-bit PCMCIA subsystem now behaving (almost) like a
123 normal hotpluggable bus, and with it using the default kernel 123 normal hotpluggable bus, and with it using the default kernel
124 infrastructure (hotplug, driver core, sysfs) keeping the PCMCIA 124 infrastructure (hotplug, driver core, sysfs) keeping the PCMCIA
125 control ioctl needed by cardmgr and cardctl from pcmcia-cs is 125 control ioctl needed by cardmgr and cardctl from pcmcia-cs is
126 unnecessary, and makes further cleanups and integration of the 126 unnecessary and potentially harmful (it does not provide for
127 proper locking), and makes further cleanups and integration of the
127 PCMCIA subsystem into the Linux kernel device driver model more 128 PCMCIA subsystem into the Linux kernel device driver model more
128 difficult. The features provided by cardmgr and cardctl are either 129 difficult. The features provided by cardmgr and cardctl are either
129 handled by the kernel itself now or are available in the new 130 handled by the kernel itself now or are available in the new
130 pcmciautils package available at 131 pcmciautils package available at
131 http://kernel.org/pub/linux/utils/kernel/pcmcia/ 132 http://kernel.org/pub/linux/utils/kernel/pcmcia/
132Who: Dominik Brodowski <linux@brodo.de> 133
134 For all architectures except ARM, the associated config symbol
135 has been removed from kernel 2.6.34; for ARM, it will be likely
136 be removed from kernel 2.6.35. The actual code will then likely
137 be removed from kernel 2.6.36.
138Who: Dominik Brodowski <linux@dominikbrodowski.net>
133 139
134--------------------------- 140---------------------------
135 141
@@ -550,3 +556,35 @@ Why: udev fully replaces this special file system that only contains CAPI
550 NCCI TTY device nodes. User space (pppdcapiplugin) works without 556 NCCI TTY device nodes. User space (pppdcapiplugin) works without
551 noticing the difference. 557 noticing the difference.
552Who: Jan Kiszka <jan.kiszka@web.de> 558Who: Jan Kiszka <jan.kiszka@web.de>
559
560----------------------------
561
562What: KVM memory aliases support
563When: July 2010
564Why: Memory aliasing support is used for speeding up guest vga access
565 through the vga windows.
566
567 Modern userspace no longer uses this feature, so it's just bitrotted
568 code and can be removed with no impact.
569Who: Avi Kivity <avi@redhat.com>
570
571----------------------------
572
573What: KVM kernel-allocated memory slots
574When: July 2010
575Why: Since 2.6.25, kvm supports user-allocated memory slots, which are
576 much more flexible than kernel-allocated slots. All current userspace
577 supports the newer interface and this code can be removed with no
578 impact.
579Who: Avi Kivity <avi@redhat.com>
580
581----------------------------
582
583What: KVM paravirt mmu host support
584When: January 2011
585Why: The paravirt mmu host support is slower than non-paravirt mmu, both
586 on newer and older hardware. It is already not exposed to the guest,
587 and kept only for live migration purposes.
588Who: Avi Kivity <avi@redhat.com>
589
590----------------------------
diff --git a/Documentation/filesystems/nilfs2.txt b/Documentation/filesystems/nilfs2.txt
index 839efd8a8a8c..cf6d0d85ca82 100644
--- a/Documentation/filesystems/nilfs2.txt
+++ b/Documentation/filesystems/nilfs2.txt
@@ -74,6 +74,9 @@ norecovery Disable recovery of the filesystem on mount.
74 This disables every write access on the device for 74 This disables every write access on the device for
75 read-only mounts or snapshots. This option will fail 75 read-only mounts or snapshots. This option will fail
76 for r/w mounts on an unclean volume. 76 for r/w mounts on an unclean volume.
77discard Issue discard/TRIM commands to the underlying block
78 device when blocks are freed. This is useful for SSD
79 devices and sparse/thinly-provisioned LUNs.
77 80
78NILFS2 usage 81NILFS2 usage
79============ 82============
diff --git a/Documentation/filesystems/sharedsubtree.txt b/Documentation/filesystems/sharedsubtree.txt
index 23a181074f94..fc0e39af43c3 100644
--- a/Documentation/filesystems/sharedsubtree.txt
+++ b/Documentation/filesystems/sharedsubtree.txt
@@ -837,6 +837,9 @@ replicas continue to be exactly same.
 	individual lists does not affect propagation or the way propagation
 	tree is modified by operations.
 
+	All vfsmounts in a peer group have the same ->mnt_master.  If it is
+	non-NULL, they form a contiguous (ordered) segment of slave list.
+
 	A example propagation tree looks as shown in the figure below.
 	[ NOTE: Though it looks like a forest, if we consider all the shared
 	  mounts as a conceptual entity called 'pnode', it becomes a tree]
@@ -874,8 +877,19 @@ replicas continue to be exactly same.
 
 	NOTE: The propagation tree is orthogonal to the mount tree.
 
+8B Locking:
+
+	->mnt_share, ->mnt_slave, ->mnt_slave_list, ->mnt_master are protected
+	by namespace_sem (exclusive for modifications, shared for reading).
+
+	Normally we have ->mnt_flags modifications serialized by vfsmount_lock.
+	There are two exceptions: do_add_mount() and clone_mnt().
+	The former modifies a vfsmount that has not been visible in any shared
+	data structures yet.
+	The latter holds namespace_sem and the only references to vfsmount
+	are in lists that can't be traversed without namespace_sem.
 
-8B Algorithm:
+8C Algorithm:
 
 	The crux of the implementation resides in rbind/move operation.
 
diff --git a/Documentation/i2c/busses/i2c-i801 b/Documentation/i2c/busses/i2c-i801
index 81c0c59a60ea..e1bb5b261693 100644
--- a/Documentation/i2c/busses/i2c-i801
+++ b/Documentation/i2c/busses/i2c-i801
@@ -15,7 +15,8 @@ Supported adapters:
   * Intel 82801I (ICH9)
   * Intel EP80579 (Tolapai)
   * Intel 82801JI (ICH10)
-  * Intel PCH
+  * Intel 3400/5 Series (PCH)
+  * Intel Cougar Point (PCH)
    Datasheets: Publicly available at the Intel website
 
 Authors:
diff --git a/Documentation/i2c/busses/i2c-parport b/Documentation/i2c/busses/i2c-parport
index dceaba1ad930..2461c7b53b2c 100644
--- a/Documentation/i2c/busses/i2c-parport
+++ b/Documentation/i2c/busses/i2c-parport
@@ -29,6 +29,9 @@ can be easily added when needed.
 Earlier kernels defaulted to type=0 (Philips). But now, if the type
 parameter is missing, the driver will simply fail to initialize.
 
+SMBus alert support is available on adapters which have this line properly
+connected to the parallel port's interrupt pin.
+
 
 Building your own adapter
 -------------------------
diff --git a/Documentation/i2c/busses/i2c-parport-light b/Documentation/i2c/busses/i2c-parport-light
index 287436478520..bdc9cbb2e0f2 100644
--- a/Documentation/i2c/busses/i2c-parport-light
+++ b/Documentation/i2c/busses/i2c-parport-light
@@ -9,3 +9,14 @@ parport handling is not an option. The drawback is a reduced portability
 and the impossibility to daisy-chain other parallel port devices.
 
 Please see i2c-parport for documentation.
+
+Module parameters:
+
+* type: type of adapter (see i2c-parport or modinfo)
+
+* base: base I/O address
+  Default is 0x378 which is fairly common for parallel ports, at least on PC.
+
+* irq: optional IRQ
+  This must be passed if you want SMBus alert support, assuming your adapter
+  actually supports this.
diff --git a/Documentation/i2c/smbus-protocol b/Documentation/i2c/smbus-protocol
index 9df47441f0e7..7c19d1a2bea0 100644
--- a/Documentation/i2c/smbus-protocol
+++ b/Documentation/i2c/smbus-protocol
@@ -185,6 +185,22 @@ the protocol. All ARP communications use slave address 0x61 and
 require PEC checksums.
 
 
+SMBus Alert
+===========
+
+SMBus Alert was introduced in Revision 1.0 of the specification.
+
+The SMBus alert protocol allows several SMBus slave devices to share a
+single interrupt pin on the SMBus master, while still allowing the master
+to know which slave triggered the interrupt.
+
+This is implemented the following way in the Linux kernel:
+* I2C bus drivers which support SMBus alert should call
+  i2c_setup_smbus_alert() to setup SMBus alert support.
+* I2C drivers for devices which can trigger SMBus alerts should implement
+  the optional alert() callback.
+
+
 I2C Block Transactions
 ======================
 
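
To make the new SMBus Alert section concrete: a client driver hooks in
through the optional alert() callback of struct i2c_driver.  A minimal
sketch, assuming a hypothetical "foo" device (the names and the handler
body are placeholders, not part of the patch above):

	#include <linux/i2c.h>
	#include <linux/module.h>

	/* Called by the I2C core when the adapter sees an SMBus alert and
	 * the Alert Response Address identifies this client; "data" is the
	 * event-flag bit carried in the alert response. */
	static void foo_alert(struct i2c_client *client, unsigned int data)
	{
		dev_info(&client->dev, "SMBus alert, event flag %u\n", data);
		/* A real driver would read and clear the device status here. */
	}

	static int foo_probe(struct i2c_client *client,
			     const struct i2c_device_id *id)
	{
		return 0;	/* placeholder */
	}

	static int foo_remove(struct i2c_client *client)
	{
		return 0;	/* placeholder */
	}

	static const struct i2c_device_id foo_ids[] = {
		{ "foo", 0 },
		{ }
	};

	static struct i2c_driver foo_driver = {
		.driver		= { .name = "foo" },
		.probe		= foo_probe,
		.remove		= foo_remove,
		.id_table	= foo_ids,
		.alert		= foo_alert,	/* optional alert() callback */
	};

The bus-driver side (calling i2c_setup_smbus_alert()) is only needed in
adapter drivers whose alert line is wired to an interrupt, as the
i2c-parport notes above describe.
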
diff --git a/Documentation/i2c/writing-clients b/Documentation/i2c/writing-clients
index 0a74603eb671..3219ee0dbfef 100644
--- a/Documentation/i2c/writing-clients
+++ b/Documentation/i2c/writing-clients
@@ -318,8 +318,9 @@ Plain I2C communication
 These routines read and write some bytes from/to a client. The client
 contains the i2c address, so you do not have to include it. The second
 parameter contains the bytes to read/write, the third the number of bytes
-to read/write (must be less than the length of the buffer.) Returned is
-the actual number of bytes read/written.
+to read/write (must be less than the length of the buffer, also should be
+less than 64k since msg.len is u16.) Returned is the actual number of bytes
+read/written.
 
 	int i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msg,
 			 int num);
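
To illustrate the 64k limit just added above: each struct i2c_msg stores
its length in a 16-bit field, so callers should bound their transfer size
themselves.  A minimal sketch (foo_write() and its caller-supplied buffer
are hypothetical):

	#include <linux/i2c.h>
	#include <linux/errno.h>

	/* Write "len" bytes to the client, refusing lengths that cannot
	 * be represented in i2c_msg.len (a u16). */
	static int foo_write(struct i2c_client *client, const char *buf,
			     int len)
	{
		int ret;

		if (len < 0 || len >= 0x10000)	/* msg.len is u16 */
			return -EINVAL;

		ret = i2c_master_send(client, buf, len);
		if (ret < 0)
			return ret;	/* bus error */
		if (ret != len)
			return -EIO;	/* short write */
		return 0;
	}
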
diff --git a/Documentation/ioctl/ioctl-number.txt b/Documentation/ioctl/ioctl-number.txt
index 35cf64d4436d..35c9b51d20ea 100644
--- a/Documentation/ioctl/ioctl-number.txt
+++ b/Documentation/ioctl/ioctl-number.txt
@@ -139,7 +139,6 @@ Code Seq#(hex) Include File Comments
 'K'	all	linux/kd.h
 'L'	00-1F	linux/loop.h		conflict!
 'L'	10-1F	drivers/scsi/mpt2sas/mpt2sas_ctl.h	conflict!
-'L'	20-2F	linux/usb/vstusb.h
 'L'	E0-FF	linux/ppdd.h		encrypted disk device driver
 					<http://linux01.gwdg.de/~alatham/ppdd.html>
 'M'	all	linux/soundcard.h	conflict!
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index fbcddc5abe25..d80930d58dae 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1794,6 +1794,12 @@ and is between 256 and 4096 characters. It is defined in the file
 			purges which is reported from either PAL_VM_SUMMARY or
 			SAL PALO.
 
+	nr_cpus=	[SMP] Maximum number of processors that an SMP kernel
+			could support.  nr_cpus=n : n >= 1 limits the kernel to
+			supporting 'n' processors.  Later at runtime you cannot
+			use CPU hotplug to bring additional CPUs online, just
+			as if the kernel had been compiled with NR_CPUS=n.
+
 	nr_uarts=	[SERIAL] maximum number of UARTs to be registered.
 
 	numa_zonelist_order= [KNL, BOOT] Select zonelist order for NUMA.
diff --git a/Documentation/kprobes.txt b/Documentation/kprobes.txt
index 053037a1fe6d..2f9115c0ae62 100644
--- a/Documentation/kprobes.txt
+++ b/Documentation/kprobes.txt
@@ -1,6 +1,7 @@
 Title	: Kernel Probes (Kprobes)
 Authors	: Jim Keniston <jkenisto@us.ibm.com>
-	: Prasanna S Panchamukhi <prasanna@in.ibm.com>
+	: Prasanna S Panchamukhi <prasanna.panchamukhi@gmail.com>
+	: Masami Hiramatsu <mhiramat@redhat.com>
 
 CONTENTS
 
@@ -15,6 +16,7 @@ CONTENTS
 9. Jprobes Example
 10. Kretprobes Example
 Appendix A: The kprobes debugfs interface
+Appendix B: The kprobes sysctl interface
 
 1. Concepts: Kprobes, Jprobes, Return Probes
 
@@ -42,13 +44,13 @@ registration/unregistration of a group of *probes. These functions
 can speed up unregistration process when you have to unregister
 a lot of probes at once.
 
-The next three subsections explain how the different types of
-probes work. They explain certain things that you'll need to
-know in order to make the best use of Kprobes -- e.g., the
-difference between a pre_handler and a post_handler, and how
-to use the maxactive and nmissed fields of a kretprobe. But
-if you're in a hurry to start using Kprobes, you can skip ahead
-to section 2.
+The next four subsections explain how the different types of
+probes work and how jump optimization works. They explain certain
+things that you'll need to know in order to make the best use of
+Kprobes -- e.g., the difference between a pre_handler and
+a post_handler, and how to use the maxactive and nmissed fields of
+a kretprobe. But if you're in a hurry to start using Kprobes, you
+can skip ahead to section 2.
 
 1.1 How Does a Kprobe Work?
 
@@ -161,13 +163,125 @@ In case probed function is entered but there is no kretprobe_instance
 object available, then in addition to incrementing the nmissed count,
 the user entry_handler invocation is also skipped.
 
+1.4 How Does Jump Optimization Work?
+
+If you configured your kernel with CONFIG_OPTPROBES=y (currently
+this option is supported on x86/x86-64, non-preemptive kernel) and
+the "debug.kprobes_optimization" kernel parameter is set to 1 (see
+sysctl(8)), Kprobes tries to reduce probe-hit overhead by using a jump
+instruction instead of a breakpoint instruction at each probepoint.
+
+1.4.1 Init a Kprobe
+
+When a probe is registered, before attempting this optimization,
+Kprobes inserts an ordinary, breakpoint-based kprobe at the specified
+address. So, even if it's not possible to optimize this particular
+probepoint, there'll be a probe there.
+
+1.4.2 Safety Check
+
+Before optimizing a probe, Kprobes performs the following safety checks:
+
+- Kprobes verifies that the region that will be replaced by the jump
+instruction (the "optimized region") lies entirely within one function.
+(A jump instruction is multiple bytes, and so may overlay multiple
+instructions.)
+
+- Kprobes analyzes the entire function and verifies that there is no
+jump into the optimized region.  Specifically:
+  - the function contains no indirect jump;
+  - the function contains no instruction that causes an exception (since
+  the fixup code triggered by the exception could jump back into the
+  optimized region -- Kprobes checks the exception tables to verify this);
+  and
+  - there is no near jump to the optimized region (other than to the first
+  byte).
+
+- For each instruction in the optimized region, Kprobes verifies that
+the instruction can be executed out of line.
+
+1.4.3 Preparing Detour Buffer
+
+Next, Kprobes prepares a "detour" buffer, which contains the following
+instruction sequence:
+- code to push the CPU's registers (emulating a breakpoint trap)
+- a call to the trampoline code which calls the user's probe handlers.
+- code to restore registers
+- the instructions from the optimized region
+- a jump back to the original execution path.
+
+1.4.4 Pre-optimization
+
+After preparing the detour buffer, Kprobes verifies that none of the
+following situations exist:
+- The probe has either a break_handler (i.e., it's a jprobe) or a
+post_handler.
+- Other instructions in the optimized region are probed.
+- The probe is disabled.
+In any of the above cases, Kprobes won't start optimizing the probe.
+Since these are temporary situations, Kprobes tries to start
+optimizing it again once the situation changes.
+
+If the kprobe can be optimized, Kprobes enqueues the kprobe to an
+optimizing list, and kicks the kprobe-optimizer workqueue to optimize
+it.  If the to-be-optimized probepoint is hit before being optimized,
+Kprobes returns control to the original instruction path by setting
+the CPU's instruction pointer to the copied code in the detour buffer
+-- thus at least avoiding the single-step.
+
+1.4.5 Optimization
+
+The Kprobe-optimizer doesn't insert the jump instruction immediately;
+rather, it calls synchronize_sched() for safety first, because it's
+possible for a CPU to be interrupted in the middle of executing the
+optimized region(*).  As you know, synchronize_sched() can ensure
+that all interruptions that were active when synchronize_sched()
+was called are done, but only if CONFIG_PREEMPT=n.  So, this version
+of kprobe optimization supports only kernels with CONFIG_PREEMPT=n.(**)
+
+After that, the Kprobe-optimizer calls stop_machine() to replace
+the optimized region with a jump instruction to the detour buffer,
+using text_poke_smp().
+
+1.4.6 Unoptimization
+
+When an optimized kprobe is unregistered, disabled, or blocked by
+another kprobe, it will be unoptimized.  If this happens before
+the optimization is complete, the kprobe is just dequeued from the
+optimized list.  If the optimization has been done, the jump is
+replaced with the original code (except for an int3 breakpoint in
+the first byte) by using text_poke_smp().
+
+(*)Please imagine that the 2nd instruction is interrupted and then
+the optimizer replaces the 2nd instruction with the jump *address*
+while the interrupt handler is running.  When the interrupt
+returns to the original address, there is no valid instruction there,
+and it causes an unexpected result.
+
+(**)This optimization-safety checking may be replaced with the
+stop-machine method that ksplice uses for supporting a CONFIG_PREEMPT=y
+kernel.
+
+NOTE for geeks:
+The jump optimization changes the kprobe's pre_handler behavior.
+Without optimization, the pre_handler can change the kernel's execution
+path by changing regs->ip and returning 1.  However, when the probe
+is optimized, that modification is ignored.  Thus, if you want to
+tweak the kernel's execution path, you need to suppress optimization,
+using one of the following techniques:
+- Specify an empty function for the kprobe's post_handler or break_handler.
+ or
+- Config CONFIG_OPTPROBES=n.
+ or
+- Execute 'sysctl -w debug.kprobes_optimization=n'
+
 2. Architectures Supported
 
 Kprobes, jprobes, and return probes are implemented on the following
 architectures:
 
-- i386
-- x86_64 (AMD-64, EM64T)
+- i386 (Supports jump optimization)
+- x86_64 (AMD-64, EM64T) (Supports jump optimization)
 - ppc64
 - ia64 (Does not support probes on instruction slot1.)
 - sparc64 (Return probes not yet implemented.)
@@ -193,6 +307,10 @@ it useful to "Compile the kernel with debug info" (CONFIG_DEBUG_INFO),
 so you can use "objdump -d -l vmlinux" to see the source-to-object
 code mapping.
 
+If you want to reduce probing overhead, set "Kprobes jump optimization
+support" (CONFIG_OPTPROBES) to "y". You can find this option under the
+"Kprobes" line.
+
 4. API Reference
 
 The Kprobes API includes a "register" function and an "unregister"
@@ -389,7 +507,10 @@ the probe which has been registered.
 
 Kprobes allows multiple probes at the same address. Currently,
 however, there cannot be multiple jprobes on the same function at
-the same time.
+the same time.  Also, a probepoint for which there is a jprobe or
+a post_handler cannot be optimized.  So if you install a jprobe,
+or a kprobe with a post_handler, at an optimized probepoint, the
+probepoint will be unoptimized automatically.
 
 In general, you can install a probe anywhere in the kernel.
 In particular, you can probe interrupt handlers.  Known exceptions
@@ -453,6 +574,38 @@ reason, Kprobes doesn't support return probes (or kprobes or jprobes)
 on the x86_64 version of __switch_to(); the registration functions
 return -EINVAL.
 
+On x86/x86-64, since the Jump Optimization of Kprobes modifies
+instructions widely, there are some limitations to optimization. To
+explain it, we introduce some terminology. Imagine a 3-instruction
+sequence consisting of two 2-byte instructions and one 3-byte
+instruction.
+
+         IA
+          |
+[-2][-1][0][1][2][3][4][5][6][7]
+        [ins1][ins2][  ins3  ]
+        [<-     DCR       ->]
+           [<- JTPR ->]
+
+ins1: 1st Instruction
+ins2: 2nd Instruction
+ins3: 3rd Instruction
+IA:   Insertion Address
+JTPR: Jump Target Prohibition Region
+DCR:  Detoured Code Region
+
+The instructions in DCR are copied to the out-of-line buffer
+of the kprobe, because the bytes in DCR are replaced by
+a 5-byte jump instruction. So there are several limitations.
+
+a) The instructions in DCR must be relocatable.
+b) The instructions in DCR must not include a call instruction.
+c) JTPR must not be targeted by any jump or call instruction.
+d) DCR must not straddle the border between functions.
+
+Anyway, these limitations are checked by the in-kernel instruction
+decoder, so you don't need to worry about them.
+
 6. Probe Overhead
 
 On a typical CPU in use in 2005, a kprobe hit takes 0.5 to 1.0
@@ -476,6 +629,19 @@ k = 0.49 usec; j = 0.76; r = 0.80; kr = 0.82; jr = 1.07
 ppc64: POWER5 (gr), 1656 MHz (SMT disabled, 1 virtual CPU per physical CPU)
 k = 0.77 usec; j = 1.31; r = 1.26; kr = 1.45; jr = 1.99
 
+6.1 Optimized Probe Overhead
+
+Typically, an optimized kprobe hit takes 0.07 to 0.1 microseconds to
+process. Here are sample overhead figures (in usec) for x86 architectures.
+k = unoptimized kprobe, b = boosted (single-step skipped), o = optimized kprobe,
+r = unoptimized kretprobe, rb = boosted kretprobe, ro = optimized kretprobe.
+
+i386: Intel(R) Xeon(R) E5410, 2.33GHz, 4656.90 bogomips
+k = 0.80 usec; b = 0.33; o = 0.05; r = 1.10; rb = 0.61; ro = 0.33
+
+x86-64: Intel(R) Xeon(R) E5410, 2.33GHz, 4656.90 bogomips
+k = 0.99 usec; b = 0.43; o = 0.06; r = 1.24; rb = 0.68; ro = 0.30
+
 7. TODO
 
 a. SystemTap (http://sourceware.org/systemtap): Provides a simplified
@@ -523,7 +689,8 @@ is also specified. Following columns show probe status. If the probe is on
 a virtual address that is no longer valid (module init sections, module
 virtual addresses that correspond to modules that've been unloaded),
 such probes are marked with [GONE]. If the probe is temporarily disabled,
-such probes are marked with [DISABLED].
+such probes are marked with [DISABLED]. If the probe is optimized, it is
+marked with [OPTIMIZED].
 
 /sys/kernel/debug/kprobes/enabled: Turn kprobes ON/OFF forcibly.
 
@@ -533,3 +700,19 @@ registered probes will be disarmed, till such time a "1" is echoed to this
 file. Note that this knob just disarms and arms all kprobes and doesn't
 change each probe's disabling state. This means that disabled kprobes (marked
 [DISABLED]) will be not enabled if you turn ON all kprobes by this knob.
+
+
+Appendix B: The kprobes sysctl interface
+
+/proc/sys/debug/kprobes-optimization: Turn kprobes optimization ON/OFF.
+
+When CONFIG_OPTPROBES=y, this sysctl interface appears and it provides
+a knob to globally and forcibly turn jump optimization (see section
+1.4) ON or OFF. By default, jump optimization is allowed (ON).
+If you echo "0" to this file or set "debug.kprobes_optimization" to
+0 via sysctl, all optimized probes will be unoptimized, and any new
+probes registered after that will not be optimized.  Note that this
+knob *changes* the optimized state. This means that optimized probes
+(marked [OPTIMIZED]) will be unoptimized ([OPTIMIZED] tag will be
+removed). If the knob is turned on, they will be optimized again.
+
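
As a concrete companion to the "NOTE for geeks" in section 1.4 above,
here is one way to keep a probepoint unoptimized so that a pre_handler
may still redirect execution: register the kprobe with an empty
post_handler.  This is only a sketch; the probed symbol and the handler
bodies are placeholders:

	#include <linux/kprobes.h>
	#include <linux/module.h>

	static int my_pre(struct kprobe *p, struct pt_regs *regs)
	{
		/* With optimization suppressed, changing regs->ip here
		 * and returning 1 would redirect execution (x86). */
		return 0;
	}

	/* An empty post_handler is enough to prevent jump optimization
	 * of this probepoint (see section 1.4.4). */
	static void my_post(struct kprobe *p, struct pt_regs *regs,
			    unsigned long flags)
	{
	}

	static struct kprobe my_kp = {
		.symbol_name	= "do_fork",	/* placeholder target */
		.pre_handler	= my_pre,
		.post_handler	= my_post,
	};

	static int __init my_init(void)
	{
		return register_kprobe(&my_kp);
	}

	static void __exit my_exit(void)
	{
		unregister_kprobe(&my_kp);
	}

	module_init(my_init);
	module_exit(my_exit);
	MODULE_LICENSE("GPL");

Alternatively, as the document says, CONFIG_OPTPROBES=n or the
debug.kprobes_optimization sysctl knob disables optimization globally.
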
diff --git a/Documentation/kvm/api.txt b/Documentation/kvm/api.txt
index 2811e452f756..c6416a398163 100644
--- a/Documentation/kvm/api.txt
+++ b/Documentation/kvm/api.txt
@@ -23,12 +23,12 @@ of a virtual machine. The ioctls belong to three classes
 Only run vcpu ioctls from the same thread that was used to create the
 vcpu.
 
-2. File descritpors
+2. File descriptors
 
 The kvm API is centered around file descriptors.  An initial
 open("/dev/kvm") obtains a handle to the kvm subsystem; this handle
 can be used to issue system ioctls.  A KVM_CREATE_VM ioctl on this
-handle will create a VM file descripror which can be used to issue VM
+handle will create a VM file descriptor which can be used to issue VM
 ioctls.  A KVM_CREATE_VCPU ioctl on a VM fd will create a virtual cpu
 and return a file descriptor pointing to it.  Finally, ioctls on a vcpu
 fd can be used to control the vcpu, including the important task of
@@ -643,7 +643,7 @@ Type: vm ioctl
 Parameters: struct kvm_clock_data (in)
 Returns: 0 on success, -1 on error
 
-Sets the current timestamp of kvmclock to the valued specific in its parameter.
+Sets the current timestamp of kvmclock to the value specified in its parameter.
 In conjunction with KVM_GET_CLOCK, it is used to ensure monotonicity on scenarios
 such as migration.
 
@@ -795,11 +795,11 @@ Unused.
 		__u64 data_offset; /* relative to kvm_run start */
 	} io;
 
-If exit_reason is KVM_EXIT_IO_IN or KVM_EXIT_IO_OUT, then the vcpu has
+If exit_reason is KVM_EXIT_IO, then the vcpu has
 executed a port I/O instruction which could not be satisfied by kvm.
 data_offset describes where the data is located (KVM_EXIT_IO_OUT) or
 where kvm expects application code to place the data for the next
-KVM_RUN invocation (KVM_EXIT_IO_IN).  Data format is a patcked array.
+KVM_RUN invocation (KVM_EXIT_IO_IN).  Data format is a packed array.
 
 	struct {
 		struct kvm_debug_exit_arch arch;
@@ -815,7 +815,7 @@ Unused.
 		__u8  is_write;
 	} mmio;
 
-If exit_reason is KVM_EXIT_MMIO or KVM_EXIT_IO_OUT, then the vcpu has
+If exit_reason is KVM_EXIT_MMIO, then the vcpu has
 executed a memory-mapped I/O instruction which could not be satisfied
 by kvm.  The 'data' member contains the written data if 'is_write' is
 true, and should be filled by application code otherwise.
diff --git a/Documentation/laptops/thinkpad-acpi.txt b/Documentation/laptops/thinkpad-acpi.txt
index 75afa1229fd7..39c0a09d0105 100644
--- a/Documentation/laptops/thinkpad-acpi.txt
+++ b/Documentation/laptops/thinkpad-acpi.txt
@@ -650,6 +650,10 @@ LCD, CRT or DVI (if available). The following commands are available:
 	echo expand_toggle > /proc/acpi/ibm/video
 	echo video_switch > /proc/acpi/ibm/video
 
+NOTE: Access to this feature is restricted to processes owning the
+CAP_SYS_ADMIN capability for safety reasons, as it can interact badly
+enough with some versions of X.org to crash it.
+
 Each video output device can be enabled or disabled individually.
 Reading /proc/acpi/ibm/video shows the status of each device.
 
diff --git a/Documentation/networking/00-INDEX b/Documentation/networking/00-INDEX
index 50189bf07d53..fe5c099b8fc8 100644
--- a/Documentation/networking/00-INDEX
+++ b/Documentation/networking/00-INDEX
@@ -32,6 +32,8 @@ cs89x0.txt
 	- the Crystal LAN (CS8900/20-based) Ethernet ISA adapter driver
 cxacru.txt
 	- Conexant AccessRunner USB ADSL Modem
+cxacru-cf.py
+	- Conexant AccessRunner USB ADSL Modem configuration file parser
 de4x5.txt
 	- the Digital EtherWORKS DE4?? and DE5?? PCI Ethernet driver
 decnet.txt
diff --git a/Documentation/networking/cxacru-cf.py b/Documentation/networking/cxacru-cf.py
new file mode 100644
index 000000000000..b41d298398c8
--- /dev/null
+++ b/Documentation/networking/cxacru-cf.py
@@ -0,0 +1,48 @@
+#!/usr/bin/env python
+# Copyright 2009 Simon Arlott
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the Free
+# Software Foundation; either version 2 of the License, or (at your option)
+# any later version.
+#
+# This program is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+# more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# this program; if not, write to the Free Software Foundation, Inc., 59
+# Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+#
+# Usage: cxacru-cf.py < cxacru-cf.bin
+# Output: values string suitable for the sysfs adsl_config attribute
+#
+# Warning: cxacru-cf.bin with MD5 hash cdbac2689969d5ed5d4850f117702110
+# contains mis-aligned values which will stop the modem from being able
+# to make a connection. If the first and last two bytes are removed then
+# the values become valid, but the modulation will be forced to ANSI
+# T1.413 only which may not be appropriate.
+#
+# The original binary format is a packed list of le32 values.
+
+import sys
+import struct
+
+i = 0
+while True:
+	buf = sys.stdin.read(4)
+
+	if len(buf) == 0:
+		break
+	elif len(buf) != 4:
+		sys.stdout.write("\n")
+		sys.stderr.write("Error: read {0} not 4 bytes\n".format(len(buf)))
+		sys.exit(1)
+
+	if i > 0:
+		sys.stdout.write(" ")
+	sys.stdout.write("{0:x}={1}".format(i, struct.unpack("<I", buf)[0]))
+	i += 1
+
+sys.stdout.write("\n")
diff --git a/Documentation/networking/cxacru.txt b/Documentation/networking/cxacru.txt
index b074681a963e..2cce04457b4d 100644
--- a/Documentation/networking/cxacru.txt
+++ b/Documentation/networking/cxacru.txt
@@ -4,6 +4,12 @@ While it is capable of managing/maintaining the ADSL connection without the
 module loaded, the device will sometimes stop responding after unloading the
 driver and it is necessary to unplug/remove power to the device to fix this.
 
+Note: support for cxacru-cf.bin has been removed. It was not loaded correctly
+so it had no effect on the device configuration. Fixing it could have stopped
+existing devices from working when an invalid configuration was supplied.
+
+There is a script cxacru-cf.py to convert an existing file to the sysfs form.
+
 Detected devices will appear as ATM devices named "cxacru". In /sys/class/atm/
 these are directories named cxacruN where N is the device number. A symlink
 named device points to the USB interface device's directory which contains
@@ -15,6 +21,15 @@ several sysfs attribute files for retrieving device statistics:
 * adsl_headend_environment
 	Information about the remote headend.
 
+* adsl_config
+	Configuration writing interface.
+	Write parameters in hexadecimal format <index>=<value>,
+	separated by whitespace, e.g.:
+		"1=0 a=5"
+	Up to 7 parameters at a time will be sent and the modem will restart
+	the ADSL connection when any value is set. These are logged for future
+	reference.
+
 * downstream_attenuation (dB)
 * downstream_bits_per_frame
 * downstream_rate (kbps)
@@ -61,6 +76,7 @@ several sysfs attribute files for retrieving device statistics:
 * mac_address
 
 * modulation
+	"" (when not connected)
 	"ANSI T1.413"
 	"ITU-T G.992.1 (G.DMT)"
 	"ITU-T G.992.2 (G.LITE)"
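
For the adsl_config attribute described above, a small userspace helper
could submit values like the documented "1=0 a=5" example.  This sketch
assumes the first modem's directory, /sys/class/atm/cxacru0:

	#include <stdio.h>

	int main(void)
	{
		const char *path =
			"/sys/class/atm/cxacru0/device/adsl_config";
		FILE *f = fopen(path, "w");

		if (!f) {
			perror(path);
			return 1;
		}
		/* Sets index 0x1 to 0 and index 0xa to 5; the modem will
		 * restart the ADSL connection as documented above. */
		fputs("1=0 a=5", f);
		return fclose(f) == 0 ? 0 : 1;
	}
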
diff --git a/Documentation/powerpc/dts-bindings/fsl/dma.txt b/Documentation/powerpc/dts-bindings/fsl/dma.txt
index 0732cdd05ba1..2a4b4bce6110 100644
--- a/Documentation/powerpc/dts-bindings/fsl/dma.txt
+++ b/Documentation/powerpc/dts-bindings/fsl/dma.txt
@@ -44,21 +44,29 @@ Example:
 		compatible = "fsl,mpc8349-dma-channel", "fsl,elo-dma-channel";
 		cell-index = <0>;
 		reg = <0 0x80>;
+		interrupt-parent = <&ipic>;
+		interrupts = <71 8>;
 	};
 	dma-channel@80 {
 		compatible = "fsl,mpc8349-dma-channel", "fsl,elo-dma-channel";
 		cell-index = <1>;
 		reg = <0x80 0x80>;
+		interrupt-parent = <&ipic>;
+		interrupts = <71 8>;
 	};
 	dma-channel@100 {
 		compatible = "fsl,mpc8349-dma-channel", "fsl,elo-dma-channel";
 		cell-index = <2>;
 		reg = <0x100 0x80>;
+		interrupt-parent = <&ipic>;
+		interrupts = <71 8>;
 	};
 	dma-channel@180 {
 		compatible = "fsl,mpc8349-dma-channel", "fsl,elo-dma-channel";
 		cell-index = <3>;
 		reg = <0x180 0x80>;
+		interrupt-parent = <&ipic>;
+		interrupts = <71 8>;
 	};
 };
 
diff --git a/Documentation/usb/error-codes.txt b/Documentation/usb/error-codes.txt
index 9cf83e8c27b8..d83703ea74b2 100644
--- a/Documentation/usb/error-codes.txt
+++ b/Documentation/usb/error-codes.txt
@@ -41,8 +41,8 @@ USB-specific:
 
 -EFBIG			Host controller driver can't schedule that many ISO frames.
 
--EPIPE			Specified endpoint is stalled.  For non-control endpoints,
-			reset this status with usb_clear_halt().
+-EPIPE			The pipe type specified in the URB doesn't match the
+			endpoint's actual type.
 
 -EMSGSIZE		(a) endpoint maxpacket size is zero; it is not usable
 			in the current interface altsetting.
@@ -60,6 +60,8 @@ USB-specific:
 
 -EHOSTUNREACH		URB was rejected because the device is suspended.
 
+-ENOEXEC		A control URB doesn't contain a Setup packet.
+
 
 **************************************************************************
 * Error codes returned by in urb->status *
diff --git a/Documentation/usb/power-management.txt b/Documentation/usb/power-management.txt
index 3bf6818c8cf5..2790ad48cfc2 100644
--- a/Documentation/usb/power-management.txt
+++ b/Documentation/usb/power-management.txt
@@ -2,7 +2,7 @@
 
 		 Alan Stern <stern@rowland.harvard.edu>
 
-			    November 10, 2009
+			    December 11, 2009
 
 
 
@@ -29,9 +29,9 @@ covered to some extent (see Documentation/power/*.txt for more
 information about system PM).
 
 Note: Dynamic PM support for USB is present only if the kernel was
-built with CONFIG_USB_SUSPEND enabled.  System PM support is present
-only if the kernel was built with CONFIG_SUSPEND or CONFIG_HIBERNATION
-enabled.
+built with CONFIG_USB_SUSPEND enabled (which depends on
+CONFIG_PM_RUNTIME).  System PM support is present only if the kernel
+was built with CONFIG_SUSPEND or CONFIG_HIBERNATION enabled.
 
 
 	What is Remote Wakeup?
@@ -229,6 +229,11 @@ necessary operations by hand or add them to a udev script. You can
 also change the idle-delay time; 2 seconds is not the best choice for
 every device.
 
+If a driver knows that its device has proper suspend/resume support,
+it can enable autosuspend all by itself.  For example, the video
+driver for a laptop's webcam might do this, since these devices are
+rarely used and so should normally be autosuspended.
+
 Sometimes it turns out that even when a device does work okay with
 autosuspend there are still problems.  For example, there are
 experimental patches adding autosuspend support to the usbhid driver,
@@ -321,69 +326,81 @@ driver does so by calling these six functions:
 	void usb_autopm_get_interface_no_resume(struct usb_interface *intf);
 	void usb_autopm_put_interface_no_suspend(struct usb_interface *intf);
 
-The functions work by maintaining a counter in the usb_interface
-structure.  When intf->pm_usage_count is > 0 then the interface is
-deemed to be busy, and the kernel will not autosuspend the interface's
-device.  When intf->pm_usage_count is <= 0 then the interface is
-considered to be idle, and the kernel may autosuspend the device.
+The functions work by maintaining a usage counter in the
+usb_interface's embedded device structure.  When the counter is > 0
+then the interface is deemed to be busy, and the kernel will not
+autosuspend the interface's device.  When the usage counter is = 0
+then the interface is considered to be idle, and the kernel may
+autosuspend the device.
 
-(There is a similar pm_usage_count field in struct usb_device,
+(There is a similar usage counter field in struct usb_device,
 associated with the device itself rather than any of its interfaces.
-This field is used only by the USB core.)
+This counter is used only by the USB core.)
 
-Drivers must not modify intf->pm_usage_count directly; its value
-should be changed only be using the functions listed above.  Drivers
-are responsible for insuring that the overall change to pm_usage_count
-during their lifetime balances out to 0 (it may be necessary for the
-disconnect method to call usb_autopm_put_interface() one or more times
-to fulfill this requirement).  The first two routines use the PM mutex
-in struct usb_device for mutual exclusion; drivers using the async
-routines are responsible for their own synchronization and mutual
-exclusion.
-
-	usb_autopm_get_interface() increments pm_usage_count and
-	attempts an autoresume if the new value is > 0 and the
-	device is suspended.
-
-	usb_autopm_put_interface() decrements pm_usage_count and
-	attempts an autosuspend if the new value is <= 0 and the
-	device isn't suspended.
+Drivers need not be concerned about balancing changes to the usage
+counter; the USB core will undo any remaining "get"s when a driver
+is unbound from its interface.  As a corollary, drivers must not call
+any of the usb_autopm_* functions after their disconnect() routine has
+returned.
+
+Drivers using the async routines are responsible for their own
+synchronization and mutual exclusion.
+
+	usb_autopm_get_interface() increments the usage counter and
+	does an autoresume if the device is suspended.  If the
+	autoresume fails, the counter is decremented back.
+
+	usb_autopm_put_interface() decrements the usage counter and
+	attempts an autosuspend if the new value is = 0.
 
 	usb_autopm_get_interface_async() and
 	usb_autopm_put_interface_async() do almost the same things as
-	their non-async counterparts.  The differences are: they do
-	not acquire the PM mutex, and they use a workqueue to do their
+	their non-async counterparts.  The big difference is that they
+	use a workqueue to do the resume or suspend part of their
 	jobs.  As a result they can be called in an atomic context,
 	such as an URB's completion handler, but when they return the
-	device will not generally not yet be in the desired state.
+	device will generally not yet be in the desired state.
 
 	usb_autopm_get_interface_no_resume() and
 	usb_autopm_put_interface_no_suspend() merely increment or
-	decrement the pm_usage_count value; they do not attempt to
-	carry out an autoresume or an autosuspend.  Hence they can be
-	called in an atomic context.
+	decrement the usage counter; they do not attempt to carry out
+	an autoresume or an autosuspend.  Hence they can be called in
+	an atomic context.
 
-The conventional usage pattern is that a driver calls
+The simplest usage pattern is that a driver calls
 usb_autopm_get_interface() in its open routine and
-usb_autopm_put_interface() in its close or release routine.  But
-other patterns are possible.
+usb_autopm_put_interface() in its close or release routine.  But other
+patterns are possible.
 
 The autosuspend attempts mentioned above will often fail for one
 reason or another.  For example, the power/level attribute might be
 set to "on", or another interface in the same device might not be
 idle.  This is perfectly normal.  If the reason for failure was that
-the device hasn't been idle for long enough, a delayed workqueue
-routine is automatically set up to carry out the operation when the
-autosuspend idle-delay has expired.
+the device hasn't been idle for long enough, a timer is scheduled to
+carry out the operation automatically when the autosuspend idle-delay
+has expired.
 
 Autoresume attempts also can fail, although failure would mean that
 the device is no longer present or operating properly.  Unlike
-autosuspend, there's no delay for an autoresume.
+autosuspend, there's no idle-delay for an autoresume.
 
 
 	Other parts of the driver interface
 	-----------------------------------
 
+Drivers can enable autosuspend for their devices by calling
+
+	usb_enable_autosuspend(struct usb_device *udev);
+
+in their probe() routine, if they know that the device is capable of
+suspending and resuming correctly.  This is exactly equivalent to
+writing "auto" to the device's power/level attribute.  Likewise,
+drivers can disable autosuspend by calling
+
+	usb_disable_autosuspend(struct usb_device *udev);
+
+This is exactly the same as writing "on" to the power/level attribute.
+
 Sometimes a driver needs to make sure that remote wakeup is enabled
 during autosuspend.  For example, there's not much point
 autosuspending a keyboard if the user can't cause the keyboard to do a
@@ -395,26 +412,27 @@ though, setting this flag won't cause the kernel to autoresume it.
 Normally a driver would set this flag in its probe method, at which
 time the device is guaranteed not to be autosuspended.)
 
-The synchronous usb_autopm_* routines have to run in a sleepable
-process context; they must not be called from an interrupt handler or
-while holding a spinlock.  In fact, the entire autosuspend mechanism
-is not well geared toward interrupt-driven operation.  However there
-is one thing a driver can do in an interrupt handler:
+If a driver does its I/O asynchronously in interrupt context, it
+should call usb_autopm_get_interface_async() before starting output and
+usb_autopm_put_interface_async() when the output queue drains.  When
+it receives an input event, it should call
 
 	usb_mark_last_busy(struct usb_device *udev);
 
-This sets udev->last_busy to the current time.  udev->last_busy is the
-field used for idle-delay calculations; updating it will cause any
-pending autosuspend to be moved back.  The usb_autopm_* routines will
-also set the last_busy field to the current time.
-
-Calling urb_mark_last_busy() from within an URB completion handler is
-subject to races: The kernel may have just finished deciding the
-device has been idle for long enough but not yet gotten around to
-calling the driver's suspend method.  The driver would have to be
-responsible for synchronizing its suspend method with its URB
-completion handler and causing the autosuspend to fail with -EBUSY if
-an URB had completed too recently.
+in the event handler.  This sets udev->last_busy to the current time.
+udev->last_busy is the field used for idle-delay calculations;
+updating it will cause any pending autosuspend to be moved back.  Most
+of the usb_autopm_* routines will also set the last_busy field to the
+current time.
+
+Asynchronous operation is always subject to races.  For example, a
+driver may call one of the usb_autopm_*_interface_async() routines at
+a time when the core has just finished deciding the device has been
+idle for long enough but not yet gotten around to calling the driver's
+suspend method.  The suspend method must be responsible for
+synchronizing with the output request routine and the URB completion
+handler; it should cause autosuspends to fail with -EBUSY if the
+driver needs to use the device.
 
 External suspend calls should never be allowed to fail in this way,
 only autosuspend calls.  The driver can tell them apart by checking
@@ -422,75 +440,23 @@ the PM_EVENT_AUTO bit in the message.event argument to the suspend
 method; this bit will be set for internal PM events (autosuspend) and
 clear for external PM events.
 
-Many of the ingredients in the autosuspend framework are oriented
-towards interfaces: The usb_interface structure contains the
-pm_usage_cnt field, and the usb_autopm_* routines take an interface
-pointer as their argument.  But somewhat confusingly, a few of the
-pieces (i.e., usb_mark_last_busy()) use the usb_device structure
-instead.  Drivers need to keep this straight; they can call
-interface_to_usbdev() to find the device structure for a given
-interface.
-
 
-	Locking requirements
-	--------------------
+	Mutual exclusion
+	----------------
 
-All three suspend/resume methods are always called while holding the
-usb_device's PM mutex.  For external events -- but not necessarily for
-autosuspend or autoresume -- the device semaphore (udev->dev.sem) will
-also be held.  This implies that external suspend/resume events are
-mutually exclusive with calls to probe, disconnect, pre_reset, and
-post_reset; the USB core guarantees that this is true of internal
-suspend/resume events as well.
+For external events -- but not necessarily for autosuspend or
+autoresume -- the device semaphore (udev->dev.sem) will be held when a
+suspend or resume method is called.  This implies that external
+suspend/resume events are mutually exclusive with calls to probe,
+disconnect, pre_reset, and post_reset; the USB core guarantees that
+this is true of autosuspend/autoresume events as well.
 
 If a driver wants to block all suspend/resume calls during some
-critical section, it can simply acquire udev->pm_mutex.  Note that
-calls to resume may be triggered indirectly.  Block IO due to memory
-allocations can make the vm subsystem resume a device.  Thus while
-holding this lock you must not allocate memory with GFP_KERNEL or
-GFP_NOFS.
-
-Alternatively, if the critical section might call some of the
-usb_autopm_* routines, the driver can avoid deadlock by doing:
-
-	down(&udev->dev.sem);
-	rc = usb_autopm_get_interface(intf);
-
-and at the end of the critical section:
-
-	if (!rc)
-		usb_autopm_put_interface(intf);
-	up(&udev->dev.sem);
-
-Holding the device semaphore will block all external PM calls, and the
-usb_autopm_get_interface() will prevent any internal PM calls, even if
-it fails.  (Exercise: Why?)
-
-The rules for locking order are:
-
-	Never acquire any device semaphore while holding any PM mutex.
-
-	Never acquire udev->pm_mutex while holding the PM mutex for
-	a device that isn't a descendant of udev.
-
-In other words, PM mutexes should only be acquired going up the device
-tree, and they should be acquired only after locking all the device
-semaphores you need to hold.  These rules don't matter to drivers very
-much; they usually affect just the USB core.
-
-Still, drivers do need to be careful.  For example, many drivers use a
-private mutex to synchronize their normal I/O activities with their
-disconnect method.  Now if the driver supports autosuspend then it
-must call usb_autopm_put_interface() from somewhere -- maybe from its
-close method.  It should make the call while holding the private mutex,
-since a driver shouldn't call any of the usb_autopm_* functions for an
-interface from which it has been unbound.
-
-But the usb_autpm_* routines always acquire the device's PM mutex, and
-consequently the locking order has to be: private mutex first, PM
-mutex second.  Since the suspend method is always called with the PM
-mutex held, it mustn't try to acquire the private mutex.  It has to
-synchronize with the driver's I/O activities in some other way.
+critical section, the best way is to lock the device and call
+usb_autopm_get_interface() (and do the reverse at the end of the
+critical section).  Holding the device semaphore will block all
+external PM calls, and the usb_autopm_get_interface() will prevent any
+internal PM calls, even if it fails.  (Exercise: Why?)
 
 
 	Interaction between dynamic PM and system PM
@@ -499,22 +465,11 @@ synchronize with the driver's I/O activities in some other way.
 Dynamic power management and system power management can interact in
 a couple of ways.
 
-Firstly, a device may already be manually suspended or autosuspended
-when a system suspend occurs.  Since system suspends are supposed to
-be as transparent as possible, the device should remain suspended
-following the system resume.  The 2.6.23 kernel obeys this principle
-for manually suspended devices but not for autosuspended devices; they
-do get resumed when the system wakes up.  (Presumably they will be
-autosuspended again after their idle-delay time expires.)  In later
-kernels this behavior will be fixed.
-
-(There is an exception.  If a device would undergo a reset-resume
-instead of a normal resume, and the device is enabled for remote
-wakeup, then the reset-resume takes place even if the device was
-already suspended when the system suspend began.  The justification is
-that a reset-resume is a kind of remote-wakeup event.  Or to put it
-another way, a device which needs a reset won't be able to generate
-normal remote-wakeup signals, so it ought to be resumed immediately.)
+Firstly, a device may already be autosuspended when a system suspend
+occurs.  Since system suspends are supposed to be as transparent as
+possible, the device should remain suspended following the system
+resume.  But this theory may not work out well in practice; over time
+the kernel's behavior in this regard has changed.
 
 Secondly, a dynamic power-management event may occur as a system
 suspend is underway.  The window for this is short, since system
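
Pulling the power-management changes above together, a driver following
the "simplest usage pattern" looks roughly like this sketch (the foo_*
names and private structure are illustrative, not from any real driver):

	#include <linux/usb.h>

	struct foo_dev {
		struct usb_interface *intf;
	};

	/* open(): take a usage reference so the device cannot be
	 * autosuspended while userspace holds the file open. */
	static int foo_open(struct foo_dev *foo)
	{
		/* On failure the usage counter is already decremented
		 * back, so there is nothing to undo here. */
		return usb_autopm_get_interface(foo->intf);
	}

	/* close(): drop the reference; the core may autosuspend the
	 * device once the idle-delay expires. */
	static void foo_release(struct foo_dev *foo)
	{
		usb_autopm_put_interface(foo->intf);
	}

	/* URB completion (atomic context): refresh the idle timestamp
	 * so a busy device is not suspended out from under us. */
	static void foo_read_complete(struct urb *urb)
	{
		struct foo_dev *foo = urb->context;

		usb_mark_last_busy(interface_to_usbdev(foo->intf));
		/* ... process urb->transfer_buffer ... */
	}
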
diff --git a/MAINTAINERS b/MAINTAINERS
index 34f52a14e051..51d8b5221dd8 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -3173,7 +3173,7 @@ F: arch/x86/include/asm/svm.h
 F:	arch/x86/kvm/svm.c
 
 KERNEL VIRTUAL MACHINE (KVM) FOR POWERPC
-M:	Hollis Blanchard <hollisb@us.ibm.com>
+M:	Alexander Graf <agraf@suse.de>
 L:	kvm-ppc@vger.kernel.org
 W:	http://kvm.qumranet.com
 S:	Supported
@@ -6097,6 +6097,7 @@ F: arch/x86/
 X86 PLATFORM DRIVERS
 M:	Matthew Garrett <mjg@redhat.com>
 L:	platform-driver-x86@vger.kernel.org
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/mjg59/platform-drivers-x86.git
 S:	Maintained
 F:	drivers/platform/x86
 
diff --git a/arch/Kconfig b/arch/Kconfig
index 215e46073c45..e5eb1337a537 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -41,6 +41,17 @@ config KPROBES
 	  for kernel debugging, non-intrusive instrumentation and testing.
 	  If in doubt, say "N".
 
+config OPTPROBES
+	bool "Kprobes jump optimization support (EXPERIMENTAL)"
+	default y
+	depends on KPROBES
+	depends on !PREEMPT
+	depends on HAVE_OPTPROBES
+	select KALLSYMS_ALL
+	help
+	  This option allows kprobes to optimize a breakpoint into
+	  a jump, reducing probe overhead.
+
 config HAVE_EFFICIENT_UNALIGNED_ACCESS
 	bool
 	help
@@ -83,6 +94,8 @@ config HAVE_KPROBES
 config HAVE_KRETPROBES
 	bool
 
+config HAVE_OPTPROBES
+	bool
 #
 # An arch should select this if it provides all these things:
 #
diff --git a/arch/alpha/include/asm/local.h b/arch/alpha/include/asm/local.h
index 6ad3ea696421..b9e3e3318371 100644
--- a/arch/alpha/include/asm/local.h
+++ b/arch/alpha/include/asm/local.h
@@ -98,21 +98,4 @@ static __inline__ long local_sub_return(long i, local_t * l)
98#define __local_add(i,l) ((l)->a.counter+=(i)) 98#define __local_add(i,l) ((l)->a.counter+=(i))
99#define __local_sub(i,l) ((l)->a.counter-=(i)) 99#define __local_sub(i,l) ((l)->a.counter-=(i))
100 100
101/* Use these for per-cpu local_t variables: on some archs they are
102 * much more efficient than these naive implementations. Note they take
103 * a variable, not an address.
104 */
105#define cpu_local_read(l) local_read(&__get_cpu_var(l))
106#define cpu_local_set(l, i) local_set(&__get_cpu_var(l), (i))
107
108#define cpu_local_inc(l) local_inc(&__get_cpu_var(l))
109#define cpu_local_dec(l) local_dec(&__get_cpu_var(l))
110#define cpu_local_add(i, l) local_add((i), &__get_cpu_var(l))
111#define cpu_local_sub(i, l) local_sub((i), &__get_cpu_var(l))
112
113#define __cpu_local_inc(l) __local_inc(&__get_cpu_var(l))
114#define __cpu_local_dec(l) __local_dec(&__get_cpu_var(l))
115#define __cpu_local_add(i, l) __local_add((i), &__get_cpu_var(l))
116#define __cpu_local_sub(i, l) __local_sub((i), &__get_cpu_var(l))
117
118#endif /* _ALPHA_LOCAL_H */ 101#endif /* _ALPHA_LOCAL_H */
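This hunk, like the matching m32r, mips and powerpc ones below, removes the
cpu_local_* wrappers; they are superseded by the generic this_cpu_*
operations. A minimal sketch of the replacement idiom, with an illustrative
counter name:

#include <linux/percpu.h>

static DEFINE_PER_CPU(long, example_counter);

static void example_count_event(void)
{
        /* preempt-safe on every architecture, no wrapper needed */
        this_cpu_inc(example_counter);
}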
diff --git a/arch/arm/configs/rx51_defconfig b/arch/arm/configs/rx51_defconfig
index 426ae948aefe..193bd334fbbf 100644
--- a/arch/arm/configs/rx51_defconfig
+++ b/arch/arm/configs/rx51_defconfig
@@ -445,6 +445,8 @@ CONFIG_IP_NF_FILTER=m
445# CONFIG_LAPB is not set 445# CONFIG_LAPB is not set
446# CONFIG_ECONET is not set 446# CONFIG_ECONET is not set
447# CONFIG_WAN_ROUTER is not set 447# CONFIG_WAN_ROUTER is not set
448CONFIG_PHONET=y
449# CONFIG_IEEE802154 is not set
448# CONFIG_NET_SCHED is not set 450# CONFIG_NET_SCHED is not set
449# CONFIG_DCB is not set 451# CONFIG_DCB is not set
450 452
@@ -1325,27 +1327,34 @@ CONFIG_USB_GADGET_SELECTED=y
1325# CONFIG_USB_GADGET_LH7A40X is not set 1327# CONFIG_USB_GADGET_LH7A40X is not set
1326# CONFIG_USB_GADGET_OMAP is not set 1328# CONFIG_USB_GADGET_OMAP is not set
1327# CONFIG_USB_GADGET_PXA25X is not set 1329# CONFIG_USB_GADGET_PXA25X is not set
1330# CONFIG_USB_GADGET_R8A66597 is not set
1328# CONFIG_USB_GADGET_PXA27X is not set 1331# CONFIG_USB_GADGET_PXA27X is not set
1329# CONFIG_USB_GADGET_S3C2410 is not set 1332# CONFIG_USB_GADGET_S3C_HSOTG is not set
1330# CONFIG_USB_GADGET_IMX is not set 1333# CONFIG_USB_GADGET_IMX is not set
1334# CONFIG_USB_GADGET_S3C2410 is not set
1331# CONFIG_USB_GADGET_M66592 is not set 1335# CONFIG_USB_GADGET_M66592 is not set
1332# CONFIG_USB_GADGET_AMD5536UDC is not set 1336# CONFIG_USB_GADGET_AMD5536UDC is not set
1333# CONFIG_USB_GADGET_FSL_QE is not set 1337# CONFIG_USB_GADGET_FSL_QE is not set
1334# CONFIG_USB_GADGET_CI13XXX is not set 1338# CONFIG_USB_GADGET_CI13XXX is not set
1335# CONFIG_USB_GADGET_NET2280 is not set 1339# CONFIG_USB_GADGET_NET2280 is not set
1336# CONFIG_USB_GADGET_GOKU is not set 1340# CONFIG_USB_GADGET_GOKU is not set
1341# CONFIG_USB_GADGET_LANGWELL is not set
1337# CONFIG_USB_GADGET_DUMMY_HCD is not set 1342# CONFIG_USB_GADGET_DUMMY_HCD is not set
1338CONFIG_USB_GADGET_DUALSPEED=y 1343CONFIG_USB_GADGET_DUALSPEED=y
1339CONFIG_USB_ZERO=m 1344CONFIG_USB_ZERO=m
1340# CONFIG_USB_ZERO_HNPTEST is not set 1345# CONFIG_USB_ZERO_HNPTEST is not set
1346# CONFIG_USB_AUDIO is not set
1341# CONFIG_USB_ETH is not set 1347# CONFIG_USB_ETH is not set
1342# CONFIG_USB_GADGETFS is not set 1348# CONFIG_USB_GADGETFS is not set
1343CONFIG_USB_FILE_STORAGE=m 1349CONFIG_USB_FILE_STORAGE=m
1344# CONFIG_USB_FILE_STORAGE_TEST is not set 1350# CONFIG_USB_FILE_STORAGE_TEST is not set
1351# CONFIG_USB_MASS_STORAGE is not set
1345# CONFIG_USB_G_SERIAL is not set 1352# CONFIG_USB_G_SERIAL is not set
1346# CONFIG_USB_MIDI_GADGET is not set 1353# CONFIG_USB_MIDI_GADGET is not set
1347# CONFIG_USB_G_PRINTER is not set 1354# CONFIG_USB_G_PRINTER is not set
1348# CONFIG_USB_CDC_COMPOSITE is not set 1355# CONFIG_USB_CDC_COMPOSITE is not set
1356CONFIG_USB_G_NOKIA=m
1357# CONFIG_USB_G_MULTI is not set
1349 1358
1350# 1359#
1351# OTG and related infrastructure 1360# OTG and related infrastructure
diff --git a/arch/arm/include/asm/hardware/iop3xx-adma.h b/arch/arm/include/asm/hardware/iop3xx-adma.h
index 1a8c7279a28b..9b28f1243bdc 100644
--- a/arch/arm/include/asm/hardware/iop3xx-adma.h
+++ b/arch/arm/include/asm/hardware/iop3xx-adma.h
@@ -366,8 +366,7 @@ static inline int iop_chan_xor_slot_count(size_t len, int src_cnt,
366 slot_cnt += *slots_per_op; 366 slot_cnt += *slots_per_op;
367 } 367 }
368 368
369 if (len) 369 slot_cnt += *slots_per_op;
370 slot_cnt += *slots_per_op;
371 370
372 return slot_cnt; 371 return slot_cnt;
373} 372}
@@ -389,8 +388,7 @@ static inline int iop_chan_zero_sum_slot_count(size_t len, int src_cnt,
389 slot_cnt += *slots_per_op; 388 slot_cnt += *slots_per_op;
390 } 389 }
391 390
392 if (len) 391 slot_cnt += *slots_per_op;
393 slot_cnt += *slots_per_op;
394 392
395 return slot_cnt; 393 return slot_cnt;
396} 394}
@@ -737,10 +735,8 @@ iop_desc_set_zero_sum_byte_count(struct iop_adma_desc_slot *desc, u32 len)
737 i += slots_per_op; 735 i += slots_per_op;
738 } while (len > IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT); 736 } while (len > IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT);
739 737
740 if (len) { 738 iter = iop_hw_desc_slot_idx(hw_desc, i);
741 iter = iop_hw_desc_slot_idx(hw_desc, i); 739 iter->byte_count = len;
742 iter->byte_count = len;
743 }
744 } 740 }
745} 741}
746 742
diff --git a/arch/arm/mach-mx2/devices.c b/arch/arm/mach-mx2/devices.c
index 3d398ce09b31..3956d82b7c4e 100644
--- a/arch/arm/mach-mx2/devices.c
+++ b/arch/arm/mach-mx2/devices.c
@@ -31,6 +31,7 @@
31#include <linux/init.h> 31#include <linux/init.h>
32#include <linux/platform_device.h> 32#include <linux/platform_device.h>
33#include <linux/gpio.h> 33#include <linux/gpio.h>
34#include <linux/dma-mapping.h>
34 35
35#include <mach/irqs.h> 36#include <mach/irqs.h>
36#include <mach/hardware.h> 37#include <mach/hardware.h>
@@ -292,7 +293,7 @@ struct platform_device mxc_fb_device = {
292 .num_resources = ARRAY_SIZE(mxc_fb), 293 .num_resources = ARRAY_SIZE(mxc_fb),
293 .resource = mxc_fb, 294 .resource = mxc_fb,
294 .dev = { 295 .dev = {
295 .coherent_dma_mask = 0xFFFFFFFF, 296 .coherent_dma_mask = DMA_BIT_MASK(32),
296 }, 297 },
297}; 298};
298 299
@@ -395,17 +396,17 @@ static struct resource mxc_sdhc1_resources[] = {
395 }, 396 },
396}; 397};
397 398
398static u64 mxc_sdhc1_dmamask = 0xffffffffUL; 399static u64 mxc_sdhc1_dmamask = DMA_BIT_MASK(32);
399 400
400struct platform_device mxc_sdhc_device0 = { 401struct platform_device mxc_sdhc_device0 = {
401 .name = "mxc-mmc", 402 .name = "mxc-mmc",
402 .id = 0, 403 .id = 0,
403 .dev = { 404 .dev = {
404 .dma_mask = &mxc_sdhc1_dmamask, 405 .dma_mask = &mxc_sdhc1_dmamask,
405 .coherent_dma_mask = 0xffffffff, 406 .coherent_dma_mask = DMA_BIT_MASK(32),
406 }, 407 },
407 .num_resources = ARRAY_SIZE(mxc_sdhc1_resources), 408 .num_resources = ARRAY_SIZE(mxc_sdhc1_resources),
408 .resource = mxc_sdhc1_resources, 409 .resource = mxc_sdhc1_resources,
409}; 410};
410 411
411static struct resource mxc_sdhc2_resources[] = { 412static struct resource mxc_sdhc2_resources[] = {
@@ -424,17 +425,17 @@ static struct resource mxc_sdhc2_resources[] = {
424 }, 425 },
425}; 426};
426 427
427static u64 mxc_sdhc2_dmamask = 0xffffffffUL; 428static u64 mxc_sdhc2_dmamask = DMA_BIT_MASK(32);
428 429
429struct platform_device mxc_sdhc_device1 = { 430struct platform_device mxc_sdhc_device1 = {
430 .name = "mxc-mmc", 431 .name = "mxc-mmc",
431 .id = 1, 432 .id = 1,
432 .dev = { 433 .dev = {
433 .dma_mask = &mxc_sdhc2_dmamask, 434 .dma_mask = &mxc_sdhc2_dmamask,
434 .coherent_dma_mask = 0xffffffff, 435 .coherent_dma_mask = DMA_BIT_MASK(32),
435 }, 436 },
436 .num_resources = ARRAY_SIZE(mxc_sdhc2_resources), 437 .num_resources = ARRAY_SIZE(mxc_sdhc2_resources),
437 .resource = mxc_sdhc2_resources, 438 .resource = mxc_sdhc2_resources,
438}; 439};
439 440
440#ifdef CONFIG_MACH_MX27 441#ifdef CONFIG_MACH_MX27
@@ -450,7 +451,7 @@ static struct resource otg_resources[] = {
450 }, 451 },
451}; 452};
452 453
453static u64 otg_dmamask = 0xffffffffUL; 454static u64 otg_dmamask = DMA_BIT_MASK(32);
454 455
455/* OTG gadget device */ 456/* OTG gadget device */
456struct platform_device mxc_otg_udc_device = { 457struct platform_device mxc_otg_udc_device = {
@@ -458,7 +459,7 @@ struct platform_device mxc_otg_udc_device = {
458 .id = -1, 459 .id = -1,
459 .dev = { 460 .dev = {
460 .dma_mask = &otg_dmamask, 461 .dma_mask = &otg_dmamask,
461 .coherent_dma_mask = 0xffffffffUL, 462 .coherent_dma_mask = DMA_BIT_MASK(32),
462 }, 463 },
463 .resource = otg_resources, 464 .resource = otg_resources,
464 .num_resources = ARRAY_SIZE(otg_resources), 465 .num_resources = ARRAY_SIZE(otg_resources),
@@ -469,7 +470,7 @@ struct platform_device mxc_otg_host = {
469 .name = "mxc-ehci", 470 .name = "mxc-ehci",
470 .id = 0, 471 .id = 0,
471 .dev = { 472 .dev = {
472 .coherent_dma_mask = 0xffffffff, 473 .coherent_dma_mask = DMA_BIT_MASK(32),
473 .dma_mask = &otg_dmamask, 474 .dma_mask = &otg_dmamask,
474 }, 475 },
475 .resource = otg_resources, 476 .resource = otg_resources,
@@ -478,7 +479,7 @@ struct platform_device mxc_otg_host = {
478 479
479/* USB host 1 */ 480/* USB host 1 */
480 481
481static u64 usbh1_dmamask = 0xffffffffUL; 482static u64 usbh1_dmamask = DMA_BIT_MASK(32);
482 483
483static struct resource mxc_usbh1_resources[] = { 484static struct resource mxc_usbh1_resources[] = {
484 { 485 {
@@ -496,7 +497,7 @@ struct platform_device mxc_usbh1 = {
496 .name = "mxc-ehci", 497 .name = "mxc-ehci",
497 .id = 1, 498 .id = 1,
498 .dev = { 499 .dev = {
499 .coherent_dma_mask = 0xffffffff, 500 .coherent_dma_mask = DMA_BIT_MASK(32),
500 .dma_mask = &usbh1_dmamask, 501 .dma_mask = &usbh1_dmamask,
501 }, 502 },
502 .resource = mxc_usbh1_resources, 503 .resource = mxc_usbh1_resources,
@@ -504,7 +505,7 @@ struct platform_device mxc_usbh1 = {
504}; 505};
505 506
506/* USB host 2 */ 507/* USB host 2 */
507static u64 usbh2_dmamask = 0xffffffffUL; 508static u64 usbh2_dmamask = DMA_BIT_MASK(32);
508 509
509static struct resource mxc_usbh2_resources[] = { 510static struct resource mxc_usbh2_resources[] = {
510 { 511 {
@@ -522,7 +523,7 @@ struct platform_device mxc_usbh2 = {
522 .name = "mxc-ehci", 523 .name = "mxc-ehci",
523 .id = 2, 524 .id = 2,
524 .dev = { 525 .dev = {
525 .coherent_dma_mask = 0xffffffff, 526 .coherent_dma_mask = DMA_BIT_MASK(32),
526 .dma_mask = &usbh2_dmamask, 527 .dma_mask = &usbh2_dmamask,
527 }, 528 },
528 .resource = mxc_usbh2_resources, 529 .resource = mxc_usbh2_resources,
@@ -642,3 +643,30 @@ int __init mxc_register_gpios(void)
642{ 643{
643 return mxc_gpio_init(imx_gpio_ports, ARRAY_SIZE(imx_gpio_ports)); 644 return mxc_gpio_init(imx_gpio_ports, ARRAY_SIZE(imx_gpio_ports));
644} 645}
646
647#ifdef CONFIG_MACH_MX21
648static struct resource mx21_usbhc_resources[] = {
649 {
650 .start = USBOTG_BASE_ADDR,
651 .end = USBOTG_BASE_ADDR + 0x1FFF,
652 .flags = IORESOURCE_MEM,
653 },
654 {
655 .start = MXC_INT_USBHOST,
656 .end = MXC_INT_USBHOST,
657 .flags = IORESOURCE_IRQ,
658 },
659};
660
661struct platform_device mx21_usbhc_device = {
662 .name = "imx21-hcd",
663 .id = 0,
664 .dev = {
665 .dma_mask = &mx21_usbhc_device.dev.coherent_dma_mask,
666 .coherent_dma_mask = DMA_BIT_MASK(32),
667 },
668 .num_resources = ARRAY_SIZE(mx21_usbhc_resources),
669 .resource = mx21_usbhc_resources,
670};
671#endif
672
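The mask conversions above trade open-coded 0xffffffff constants for
DMA_BIT_MASK(32). A one-line illustration; the definition in the comment is
quoted from <linux/dma-mapping.h>:

#include <linux/dma-mapping.h>

/* #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1)) */
static u64 example_dmamask = DMA_BIT_MASK(32);  /* == 0x00000000ffffffffULL */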
diff --git a/arch/arm/mach-mx2/devices.h b/arch/arm/mach-mx2/devices.h
index 97306aa18f1c..f12694b07369 100644
--- a/arch/arm/mach-mx2/devices.h
+++ b/arch/arm/mach-mx2/devices.h
@@ -26,5 +26,6 @@ extern struct platform_device mxc_usbh2;
26extern struct platform_device mxc_spi_device0; 26extern struct platform_device mxc_spi_device0;
27extern struct platform_device mxc_spi_device1; 27extern struct platform_device mxc_spi_device1;
28extern struct platform_device mxc_spi_device2; 28extern struct platform_device mxc_spi_device2;
29extern struct platform_device mx21_usbhc_device;
29extern struct platform_device imx_ssi_device0; 30extern struct platform_device imx_ssi_device0;
30extern struct platform_device imx_ssi_device1; 31extern struct platform_device imx_ssi_device1;
diff --git a/arch/arm/mach-u300/include/mach/coh901318.h b/arch/arm/mach-u300/include/mach/coh901318.h
index f4cfee9c7d28..b8155b4e5ffa 100644
--- a/arch/arm/mach-u300/include/mach/coh901318.h
+++ b/arch/arm/mach-u300/include/mach/coh901318.h
@@ -53,7 +53,7 @@ struct coh901318_params {
53 * struct coh_dma_channel - dma channel base 53 * struct coh_dma_channel - dma channel base
54 * @name: ascii name of dma channel 54 * @name: ascii name of dma channel
55 * @number: channel id number 55 * @number: channel id number
56 * @desc_nbr_max: number of preallocated descriptortors 56 * @desc_nbr_max: number of preallocated descriptors
57 * @priority_high: prio of channel, 0 low otherwise high. 57 * @priority_high: prio of channel, 0 low otherwise high.
58 * @param: configuration parameters 58 * @param: configuration parameters
 59 * @dev_addr: physical address of peripheral connected to channel 59 * @dev_addr: physical address of peripheral connected to channel
diff --git a/arch/arm/plat-mxc/include/mach/mx21-usbhost.h b/arch/arm/plat-mxc/include/mach/mx21-usbhost.h
new file mode 100644
index 000000000000..22d0b596262c
--- /dev/null
+++ b/arch/arm/plat-mxc/include/mach/mx21-usbhost.h
@@ -0,0 +1,38 @@
1/*
2 * Copyright (C) 2009 Martin Fuzzey <mfuzzey@gmail.com>
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 */
14
15#ifndef __ASM_ARCH_MX21_USBH
16#define __ASM_ARCH_MX21_USBH
17
18enum mx21_usbh_xcvr {
19 /* Values below as used by hardware (HWMODE register) */
20 MX21_USBXCVR_TXDIF_RXDIF = 0,
21 MX21_USBXCVR_TXDIF_RXSE = 1,
22 MX21_USBXCVR_TXSE_RXDIF = 2,
23 MX21_USBXCVR_TXSE_RXSE = 3,
24};
25
26struct mx21_usbh_platform_data {
 27 enum mx21_usbh_xcvr host_xcvr; /* transceiver mode host 1,2 ports */
 28 enum mx21_usbh_xcvr otg_xcvr; /* transceiver mode otg (as host) port */
 29 u16 enable_host1:1,
 30 enable_host2:1,
 31 enable_otg_host:1, /* enable "OTG" port (as host) */
 32 host1_xcverless:1, /* transceiverless host1 port */
 33 host1_txenoe:1, /* output enable host1 transmit enable */
 34 otg_ext_xcvr:1, /* external transceiver for OTG port */
35 unused:10;
36};
37
38#endif /* __ASM_ARCH_MX21_USBH */
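A hypothetical board-file use of the new platform data, showing how the
fields fit together; the values and the registration step are illustrative,
not taken from this patch:

#include <mach/mx21-usbhost.h>

static struct mx21_usbh_platform_data example_usbh_pdata = {
        .host_xcvr    = MX21_USBXCVR_TXDIF_RXDIF,
        .otg_xcvr     = MX21_USBXCVR_TXDIF_RXDIF,
        .enable_host1 = 1,
        .enable_host2 = 1,
};

/* In the board init path one would then do something like:
 *      mx21_usbhc_device.dev.platform_data = &example_usbh_pdata;
 *      platform_device_register(&mx21_usbhc_device);
 */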
diff --git a/arch/avr32/mach-at32ap/at32ap700x.c b/arch/avr32/mach-at32ap/at32ap700x.c
index b13d1879e51b..3a4bc1a18433 100644
--- a/arch/avr32/mach-at32ap/at32ap700x.c
+++ b/arch/avr32/mach-at32ap/at32ap700x.c
@@ -1770,10 +1770,13 @@ at32_add_device_usba(unsigned int id, struct usba_platform_data *data)
1770 ARRAY_SIZE(usba0_resource))) 1770 ARRAY_SIZE(usba0_resource)))
1771 goto out_free_pdev; 1771 goto out_free_pdev;
1772 1772
1773 if (data) 1773 if (data) {
1774 usba_data.pdata.vbus_pin = data->vbus_pin; 1774 usba_data.pdata.vbus_pin = data->vbus_pin;
1775 else 1775 usba_data.pdata.vbus_pin_inverted = data->vbus_pin_inverted;
1776 } else {
1776 usba_data.pdata.vbus_pin = -EINVAL; 1777 usba_data.pdata.vbus_pin = -EINVAL;
1778 usba_data.pdata.vbus_pin_inverted = -EINVAL;
1779 }
1777 1780
1778 data = &usba_data.pdata; 1781 data = &usba_data.pdata;
1779 data->num_ep = ARRAY_SIZE(at32_usba_ep); 1782 data->num_ep = ARRAY_SIZE(at32_usba_ep);
diff --git a/arch/blackfin/mach-common/entry.S b/arch/blackfin/mach-common/entry.S
index b0ed0b487ff2..01b2f58dfb95 100644
--- a/arch/blackfin/mach-common/entry.S
+++ b/arch/blackfin/mach-common/entry.S
@@ -816,8 +816,8 @@ ENDPROC(_resume)
816 816
817ENTRY(_ret_from_exception) 817ENTRY(_ret_from_exception)
818#ifdef CONFIG_IPIPE 818#ifdef CONFIG_IPIPE
819 p2.l = _per_cpu__ipipe_percpu_domain; 819 p2.l = _ipipe_percpu_domain;
820 p2.h = _per_cpu__ipipe_percpu_domain; 820 p2.h = _ipipe_percpu_domain;
821 r0.l = _ipipe_root; 821 r0.l = _ipipe_root;
822 r0.h = _ipipe_root; 822 r0.h = _ipipe_root;
823 r2 = [p2]; 823 r2 = [p2];
diff --git a/arch/cris/arch-v10/kernel/entry.S b/arch/cris/arch-v10/kernel/entry.S
index 2c18d08cd913..c52bef39e250 100644
--- a/arch/cris/arch-v10/kernel/entry.S
+++ b/arch/cris/arch-v10/kernel/entry.S
@@ -358,7 +358,7 @@ mmu_bus_fault:
3581: btstq 12, $r1 ; Refill? 3581: btstq 12, $r1 ; Refill?
359 bpl 2f 359 bpl 2f
360 lsrq 24, $r1 ; Get PGD index (bit 24-31) 360 lsrq 24, $r1 ; Get PGD index (bit 24-31)
361 move.d [per_cpu__current_pgd], $r0 ; PGD for the current process 361 move.d [current_pgd], $r0 ; PGD for the current process
362 move.d [$r0+$r1.d], $r0 ; Get PMD 362 move.d [$r0+$r1.d], $r0 ; Get PMD
363 beq 2f 363 beq 2f
364 nop 364 nop
diff --git a/arch/cris/arch-v32/mm/mmu.S b/arch/cris/arch-v32/mm/mmu.S
index 2238d154bde3..f125d912e140 100644
--- a/arch/cris/arch-v32/mm/mmu.S
+++ b/arch/cris/arch-v32/mm/mmu.S
@@ -115,7 +115,7 @@
115#ifdef CONFIG_SMP 115#ifdef CONFIG_SMP
116 move $s7, $acr ; PGD 116 move $s7, $acr ; PGD
117#else 117#else
118 move.d per_cpu__current_pgd, $acr ; PGD 118 move.d current_pgd, $acr ; PGD
119#endif 119#endif
120 ; Look up PMD in PGD 120 ; Look up PMD in PGD
121 lsrq 24, $r0 ; Get PMD index into PGD (bit 24-31) 121 lsrq 24, $r0 ; Get PMD index into PGD (bit 24-31)
diff --git a/arch/ia64/include/asm/percpu.h b/arch/ia64/include/asm/percpu.h
index 30cf46534dd2..f7c00a5e0e2b 100644
--- a/arch/ia64/include/asm/percpu.h
+++ b/arch/ia64/include/asm/percpu.h
@@ -9,7 +9,7 @@
9#define PERCPU_ENOUGH_ROOM PERCPU_PAGE_SIZE 9#define PERCPU_ENOUGH_ROOM PERCPU_PAGE_SIZE
10 10
11#ifdef __ASSEMBLY__ 11#ifdef __ASSEMBLY__
12# define THIS_CPU(var) (per_cpu__##var) /* use this to mark accesses to per-CPU variables... */ 12# define THIS_CPU(var) (var) /* use this to mark accesses to per-CPU variables... */
13#else /* !__ASSEMBLY__ */ 13#else /* !__ASSEMBLY__ */
14 14
15 15
@@ -39,7 +39,7 @@ extern void *per_cpu_init(void);
39 * On the positive side, using __ia64_per_cpu_var() instead of __get_cpu_var() is slightly 39 * On the positive side, using __ia64_per_cpu_var() instead of __get_cpu_var() is slightly
40 * more efficient. 40 * more efficient.
41 */ 41 */
42#define __ia64_per_cpu_var(var) per_cpu__##var 42#define __ia64_per_cpu_var(var) var
43 43
44#include <asm-generic/percpu.h> 44#include <asm-generic/percpu.h>
45 45
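These ia64 hunks, like the blackfin and cris ones above and the microblaze
and parisc ones below, follow from the removal of the per_cpu__ symbol
prefix: a per-CPU variable's linker symbol is now simply its C name, so
assembly can reference it directly. A one-line sketch with an illustrative
variable:

#include <linux/percpu.h>

/* emits the symbol "example_var"; formerly "per_cpu__example_var" */
DEFINE_PER_CPU(unsigned long, example_var);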
diff --git a/arch/ia64/include/asm/xen/events.h b/arch/ia64/include/asm/xen/events.h
index b8370c8b6198..baa74c82aa71 100644
--- a/arch/ia64/include/asm/xen/events.h
+++ b/arch/ia64/include/asm/xen/events.h
@@ -36,10 +36,6 @@ static inline int xen_irqs_disabled(struct pt_regs *regs)
36 return !(ia64_psr(regs)->i); 36 return !(ia64_psr(regs)->i);
37} 37}
38 38
39static inline void handle_irq(int irq, struct pt_regs *regs)
40{
41 __do_IRQ(irq);
42}
43#define irq_ctx_init(cpu) do { } while (0) 39#define irq_ctx_init(cpu) do { } while (0)
44 40
45#endif /* _ASM_IA64_XEN_EVENTS_H */ 41#endif /* _ASM_IA64_XEN_EVENTS_H */
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index c16fb03037d4..a7ca07f3754e 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -852,8 +852,8 @@ __init void prefill_possible_map(void)
852 852
853 possible = available_cpus + additional_cpus; 853 possible = available_cpus + additional_cpus;
854 854
855 if (possible > NR_CPUS) 855 if (possible > nr_cpu_ids)
856 possible = NR_CPUS; 856 possible = nr_cpu_ids;
857 857
858 printk(KERN_INFO "SMP: Allowing %d CPUs, %d hotplug CPUs\n", 858 printk(KERN_INFO "SMP: Allowing %d CPUs, %d hotplug CPUs\n",
859 possible, max((possible - available_cpus), 0)); 859 possible, max((possible - available_cpus), 0));
diff --git a/arch/ia64/kernel/ia64_ksyms.c b/arch/ia64/kernel/ia64_ksyms.c
index 461b99902bf6..7f4a0ed24152 100644
--- a/arch/ia64/kernel/ia64_ksyms.c
+++ b/arch/ia64/kernel/ia64_ksyms.c
@@ -30,9 +30,9 @@ EXPORT_SYMBOL(max_low_pfn); /* defined by bootmem.c, but not exported by generic
30#endif 30#endif
31 31
32#include <asm/processor.h> 32#include <asm/processor.h>
33EXPORT_SYMBOL(per_cpu__ia64_cpu_info); 33EXPORT_SYMBOL(ia64_cpu_info);
34#ifdef CONFIG_SMP 34#ifdef CONFIG_SMP
35EXPORT_SYMBOL(per_cpu__local_per_cpu_offset); 35EXPORT_SYMBOL(local_per_cpu_offset);
36#endif 36#endif
37 37
38#include <asm/uaccess.h> 38#include <asm/uaccess.h>
diff --git a/arch/ia64/kvm/Kconfig b/arch/ia64/kvm/Kconfig
index 01c75797119c..fa4d1e59deb0 100644
--- a/arch/ia64/kvm/Kconfig
+++ b/arch/ia64/kvm/Kconfig
@@ -26,6 +26,7 @@ config KVM
26 select ANON_INODES 26 select ANON_INODES
27 select HAVE_KVM_IRQCHIP 27 select HAVE_KVM_IRQCHIP
28 select KVM_APIC_ARCHITECTURE 28 select KVM_APIC_ARCHITECTURE
29 select KVM_MMIO
29 ---help--- 30 ---help---
30 Support hosting fully virtualized guest machines using hardware 31 Support hosting fully virtualized guest machines using hardware
31 virtualization extensions. You will need a fairly recent 32 virtualization extensions. You will need a fairly recent
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index 5fdeec5fddcf..26e0e089bfe7 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -241,10 +241,10 @@ static int handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
241 return 0; 241 return 0;
242mmio: 242mmio:
243 if (p->dir) 243 if (p->dir)
244 r = kvm_io_bus_read(&vcpu->kvm->mmio_bus, p->addr, 244 r = kvm_io_bus_read(vcpu->kvm, KVM_MMIO_BUS, p->addr,
245 p->size, &p->data); 245 p->size, &p->data);
246 else 246 else
247 r = kvm_io_bus_write(&vcpu->kvm->mmio_bus, p->addr, 247 r = kvm_io_bus_write(vcpu->kvm, KVM_MMIO_BUS, p->addr,
248 p->size, &p->data); 248 p->size, &p->data);
249 if (r) 249 if (r)
250 printk(KERN_ERR"kvm: No iodevice found! addr:%lx\n", p->addr); 250 printk(KERN_ERR"kvm: No iodevice found! addr:%lx\n", p->addr);
@@ -636,12 +636,9 @@ static void kvm_vcpu_post_transition(struct kvm_vcpu *vcpu)
636static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) 636static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
637{ 637{
638 union context *host_ctx, *guest_ctx; 638 union context *host_ctx, *guest_ctx;
639 int r; 639 int r, idx;
640 640
641 /* 641 idx = srcu_read_lock(&vcpu->kvm->srcu);
642 * down_read() may sleep and return with interrupts enabled
643 */
644 down_read(&vcpu->kvm->slots_lock);
645 642
646again: 643again:
647 if (signal_pending(current)) { 644 if (signal_pending(current)) {
@@ -663,7 +660,7 @@ again:
663 if (r < 0) 660 if (r < 0)
664 goto vcpu_run_fail; 661 goto vcpu_run_fail;
665 662
666 up_read(&vcpu->kvm->slots_lock); 663 srcu_read_unlock(&vcpu->kvm->srcu, idx);
667 kvm_guest_enter(); 664 kvm_guest_enter();
668 665
669 /* 666 /*
@@ -687,7 +684,7 @@ again:
687 kvm_guest_exit(); 684 kvm_guest_exit();
688 preempt_enable(); 685 preempt_enable();
689 686
690 down_read(&vcpu->kvm->slots_lock); 687 idx = srcu_read_lock(&vcpu->kvm->srcu);
691 688
692 r = kvm_handle_exit(kvm_run, vcpu); 689 r = kvm_handle_exit(kvm_run, vcpu);
693 690
@@ -697,10 +694,10 @@ again:
697 } 694 }
698 695
699out: 696out:
700 up_read(&vcpu->kvm->slots_lock); 697 srcu_read_unlock(&vcpu->kvm->srcu, idx);
701 if (r > 0) { 698 if (r > 0) {
702 kvm_resched(vcpu); 699 kvm_resched(vcpu);
703 down_read(&vcpu->kvm->slots_lock); 700 idx = srcu_read_lock(&vcpu->kvm->srcu);
704 goto again; 701 goto again;
705 } 702 }
706 703
@@ -971,7 +968,7 @@ long kvm_arch_vm_ioctl(struct file *filp,
971 goto out; 968 goto out;
972 r = kvm_setup_default_irq_routing(kvm); 969 r = kvm_setup_default_irq_routing(kvm);
973 if (r) { 970 if (r) {
974 kfree(kvm->arch.vioapic); 971 kvm_ioapic_destroy(kvm);
975 goto out; 972 goto out;
976 } 973 }
977 break; 974 break;
@@ -1377,12 +1374,14 @@ static void free_kvm(struct kvm *kvm)
1377 1374
1378static void kvm_release_vm_pages(struct kvm *kvm) 1375static void kvm_release_vm_pages(struct kvm *kvm)
1379{ 1376{
1377 struct kvm_memslots *slots;
1380 struct kvm_memory_slot *memslot; 1378 struct kvm_memory_slot *memslot;
1381 int i, j; 1379 int i, j;
1382 unsigned long base_gfn; 1380 unsigned long base_gfn;
1383 1381
1384 for (i = 0; i < kvm->nmemslots; i++) { 1382 slots = rcu_dereference(kvm->memslots);
1385 memslot = &kvm->memslots[i]; 1383 for (i = 0; i < slots->nmemslots; i++) {
1384 memslot = &slots->memslots[i];
1386 base_gfn = memslot->base_gfn; 1385 base_gfn = memslot->base_gfn;
1387 1386
1388 for (j = 0; j < memslot->npages; j++) { 1387 for (j = 0; j < memslot->npages; j++) {
@@ -1405,6 +1404,7 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
1405 kfree(kvm->arch.vioapic); 1404 kfree(kvm->arch.vioapic);
1406 kvm_release_vm_pages(kvm); 1405 kvm_release_vm_pages(kvm);
1407 kvm_free_physmem(kvm); 1406 kvm_free_physmem(kvm);
1407 cleanup_srcu_struct(&kvm->srcu);
1408 free_kvm(kvm); 1408 free_kvm(kvm);
1409} 1409}
1410 1410
@@ -1576,15 +1576,15 @@ out:
1576 return r; 1576 return r;
1577} 1577}
1578 1578
1579int kvm_arch_set_memory_region(struct kvm *kvm, 1579int kvm_arch_prepare_memory_region(struct kvm *kvm,
1580 struct kvm_userspace_memory_region *mem, 1580 struct kvm_memory_slot *memslot,
1581 struct kvm_memory_slot old, 1581 struct kvm_memory_slot old,
1582 struct kvm_userspace_memory_region *mem,
1582 int user_alloc) 1583 int user_alloc)
1583{ 1584{
1584 unsigned long i; 1585 unsigned long i;
1585 unsigned long pfn; 1586 unsigned long pfn;
1586 int npages = mem->memory_size >> PAGE_SHIFT; 1587 int npages = memslot->npages;
1587 struct kvm_memory_slot *memslot = &kvm->memslots[mem->slot];
1588 unsigned long base_gfn = memslot->base_gfn; 1588 unsigned long base_gfn = memslot->base_gfn;
1589 1589
1590 if (base_gfn + npages > (KVM_MAX_MEM_SIZE >> PAGE_SHIFT)) 1590 if (base_gfn + npages > (KVM_MAX_MEM_SIZE >> PAGE_SHIFT))
@@ -1608,6 +1608,14 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
1608 return 0; 1608 return 0;
1609} 1609}
1610 1610
1611void kvm_arch_commit_memory_region(struct kvm *kvm,
1612 struct kvm_userspace_memory_region *mem,
1613 struct kvm_memory_slot old,
1614 int user_alloc)
1615{
1616 return;
1617}
1618
1611void kvm_arch_flush_shadow(struct kvm *kvm) 1619void kvm_arch_flush_shadow(struct kvm *kvm)
1612{ 1620{
1613 kvm_flush_remote_tlbs(kvm); 1621 kvm_flush_remote_tlbs(kvm);
@@ -1802,7 +1810,7 @@ static int kvm_ia64_sync_dirty_log(struct kvm *kvm,
1802 if (log->slot >= KVM_MEMORY_SLOTS) 1810 if (log->slot >= KVM_MEMORY_SLOTS)
1803 goto out; 1811 goto out;
1804 1812
1805 memslot = &kvm->memslots[log->slot]; 1813 memslot = &kvm->memslots->memslots[log->slot];
1806 r = -ENOENT; 1814 r = -ENOENT;
1807 if (!memslot->dirty_bitmap) 1815 if (!memslot->dirty_bitmap)
1808 goto out; 1816 goto out;
@@ -1827,6 +1835,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
1827 struct kvm_memory_slot *memslot; 1835 struct kvm_memory_slot *memslot;
1828 int is_dirty = 0; 1836 int is_dirty = 0;
1829 1837
1838 mutex_lock(&kvm->slots_lock);
1830 spin_lock(&kvm->arch.dirty_log_lock); 1839 spin_lock(&kvm->arch.dirty_log_lock);
1831 1840
1832 r = kvm_ia64_sync_dirty_log(kvm, log); 1841 r = kvm_ia64_sync_dirty_log(kvm, log);
@@ -1840,12 +1849,13 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
1840 /* If nothing is dirty, don't bother messing with page tables. */ 1849 /* If nothing is dirty, don't bother messing with page tables. */
1841 if (is_dirty) { 1850 if (is_dirty) {
1842 kvm_flush_remote_tlbs(kvm); 1851 kvm_flush_remote_tlbs(kvm);
1843 memslot = &kvm->memslots[log->slot]; 1852 memslot = &kvm->memslots->memslots[log->slot];
1844 n = ALIGN(memslot->npages, BITS_PER_LONG) / 8; 1853 n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
1845 memset(memslot->dirty_bitmap, 0, n); 1854 memset(memslot->dirty_bitmap, 0, n);
1846 } 1855 }
1847 r = 0; 1856 r = 0;
1848out: 1857out:
1858 mutex_unlock(&kvm->slots_lock);
1849 spin_unlock(&kvm->arch.dirty_log_lock); 1859 spin_unlock(&kvm->arch.dirty_log_lock);
1850 return r; 1860 return r;
1851} 1861}
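The slots_lock-to-SRCU conversion above repeats one read-side pattern. In
isolation, assuming the kvm->srcu member this series introduces:

#include <linux/kvm_host.h>

static void example_touch_memslots(struct kvm_vcpu *vcpu)
{
        int idx;

        idx = srcu_read_lock(&vcpu->kvm->srcu);
        /* kvm->memslots may be rcu_dereference()d while the section is held */
        srcu_read_unlock(&vcpu->kvm->srcu, idx);
}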
diff --git a/arch/ia64/kvm/kvm_fw.c b/arch/ia64/kvm/kvm_fw.c
index e4b82319881d..cb548ee9fcae 100644
--- a/arch/ia64/kvm/kvm_fw.c
+++ b/arch/ia64/kvm/kvm_fw.c
@@ -75,7 +75,7 @@ static void set_pal_result(struct kvm_vcpu *vcpu,
75 struct exit_ctl_data *p; 75 struct exit_ctl_data *p;
76 76
77 p = kvm_get_exit_data(vcpu); 77 p = kvm_get_exit_data(vcpu);
78 if (p && p->exit_reason == EXIT_REASON_PAL_CALL) { 78 if (p->exit_reason == EXIT_REASON_PAL_CALL) {
79 p->u.pal_data.ret = result; 79 p->u.pal_data.ret = result;
80 return ; 80 return ;
81 } 81 }
@@ -87,7 +87,7 @@ static void set_sal_result(struct kvm_vcpu *vcpu,
87 struct exit_ctl_data *p; 87 struct exit_ctl_data *p;
88 88
89 p = kvm_get_exit_data(vcpu); 89 p = kvm_get_exit_data(vcpu);
90 if (p && p->exit_reason == EXIT_REASON_SAL_CALL) { 90 if (p->exit_reason == EXIT_REASON_SAL_CALL) {
91 p->u.sal_data.ret = result; 91 p->u.sal_data.ret = result;
92 return ; 92 return ;
93 } 93 }
@@ -322,7 +322,7 @@ static u64 kvm_get_pal_call_index(struct kvm_vcpu *vcpu)
322 struct exit_ctl_data *p; 322 struct exit_ctl_data *p;
323 323
324 p = kvm_get_exit_data(vcpu); 324 p = kvm_get_exit_data(vcpu);
325 if (p && (p->exit_reason == EXIT_REASON_PAL_CALL)) 325 if (p->exit_reason == EXIT_REASON_PAL_CALL)
326 index = p->u.pal_data.gr28; 326 index = p->u.pal_data.gr28;
327 327
328 return index; 328 return index;
@@ -646,18 +646,16 @@ static void kvm_get_sal_call_data(struct kvm_vcpu *vcpu, u64 *in0, u64 *in1,
646 646
647 p = kvm_get_exit_data(vcpu); 647 p = kvm_get_exit_data(vcpu);
648 648
649 if (p) { 649 if (p->exit_reason == EXIT_REASON_SAL_CALL) {
650 if (p->exit_reason == EXIT_REASON_SAL_CALL) { 650 *in0 = p->u.sal_data.in0;
651 *in0 = p->u.sal_data.in0; 651 *in1 = p->u.sal_data.in1;
652 *in1 = p->u.sal_data.in1; 652 *in2 = p->u.sal_data.in2;
653 *in2 = p->u.sal_data.in2; 653 *in3 = p->u.sal_data.in3;
654 *in3 = p->u.sal_data.in3; 654 *in4 = p->u.sal_data.in4;
655 *in4 = p->u.sal_data.in4; 655 *in5 = p->u.sal_data.in5;
656 *in5 = p->u.sal_data.in5; 656 *in6 = p->u.sal_data.in6;
657 *in6 = p->u.sal_data.in6; 657 *in7 = p->u.sal_data.in7;
658 *in7 = p->u.sal_data.in7; 658 return ;
659 return ;
660 }
661 } 659 }
662 *in0 = 0; 660 *in0 = 0;
663} 661}
diff --git a/arch/ia64/kvm/mmio.c b/arch/ia64/kvm/mmio.c
index 9bf55afd08d0..fb8f9f59a1ed 100644
--- a/arch/ia64/kvm/mmio.c
+++ b/arch/ia64/kvm/mmio.c
@@ -316,8 +316,8 @@ void emulate_io_inst(struct kvm_vcpu *vcpu, u64 padr, u64 ma)
316 return; 316 return;
317 } else { 317 } else {
318 inst_type = -1; 318 inst_type = -1;
319 panic_vm(vcpu, "Unsupported MMIO access instruction! \ 319 panic_vm(vcpu, "Unsupported MMIO access instruction! "
 320 Bundle[0]=0x%lx, Bundle[1]=0x%lx\n", 320 "Bundle[0]=0x%lx, Bundle[1]=0x%lx\n",
321 bundle.i64[0], bundle.i64[1]); 321 bundle.i64[0], bundle.i64[1]);
322 } 322 }
323 323
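The string fix here, and the identical one in vcpu.c just below, replaces a
backslash-newline inside a string literal, which drags the next line's
indentation into the message, with standard adjacent-literal concatenation.
A self-contained sketch of the difference:

#include <linux/kernel.h>

static void example_log(void)
{
        /* continuation embeds the next line's leading spaces in the output */
        printk(KERN_ERR "broken: message \
               tail\n");
        /* adjacent literals concatenate with no stray whitespace */
        printk(KERN_ERR "fixed: message "
               "tail\n");
}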
diff --git a/arch/ia64/kvm/vcpu.c b/arch/ia64/kvm/vcpu.c
index dce75b70cdd5..958815c9787d 100644
--- a/arch/ia64/kvm/vcpu.c
+++ b/arch/ia64/kvm/vcpu.c
@@ -1639,8 +1639,8 @@ void vcpu_set_psr(struct kvm_vcpu *vcpu, unsigned long val)
1639 * Otherwise panic 1639 * Otherwise panic
1640 */ 1640 */
1641 if (val & (IA64_PSR_PK | IA64_PSR_IS | IA64_PSR_VM)) 1641 if (val & (IA64_PSR_PK | IA64_PSR_IS | IA64_PSR_VM))
1642 panic_vm(vcpu, "Only support guests with vpsr.pk =0 \ 1642 panic_vm(vcpu, "Only support guests with vpsr.pk =0 "
1643 & vpsr.is=0\n"); 1643 "& vpsr.is=0\n");
1644 1644
1645 /* 1645 /*
1646 * For those IA64_PSR bits: id/da/dd/ss/ed/ia 1646 * For those IA64_PSR bits: id/da/dd/ss/ed/ia
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
index 19c4b2195dce..8d586d1e2515 100644
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
@@ -459,7 +459,7 @@ static void __init initialize_pernode_data(void)
459 cpu = 0; 459 cpu = 0;
460 node = node_cpuid[cpu].nid; 460 node = node_cpuid[cpu].nid;
461 cpu0_cpu_info = (struct cpuinfo_ia64 *)(__phys_per_cpu_start + 461 cpu0_cpu_info = (struct cpuinfo_ia64 *)(__phys_per_cpu_start +
462 ((char *)&per_cpu__ia64_cpu_info - __per_cpu_start)); 462 ((char *)&ia64_cpu_info - __per_cpu_start));
463 cpu0_cpu_info->node_data = mem_data[node].node_data; 463 cpu0_cpu_info->node_data = mem_data[node].node_data;
464 } 464 }
465#endif /* CONFIG_SMP */ 465#endif /* CONFIG_SMP */
diff --git a/arch/m32r/include/asm/local.h b/arch/m32r/include/asm/local.h
index 22256d138630..734bca87018a 100644
--- a/arch/m32r/include/asm/local.h
+++ b/arch/m32r/include/asm/local.h
@@ -338,29 +338,4 @@ static inline void local_set_mask(unsigned long mask, local_t *addr)
338 * a variable, not an address. 338 * a variable, not an address.
339 */ 339 */
340 340
341/* Need to disable preemption for the cpu local counters otherwise we could
342 still access a variable of a previous CPU in a non local way. */
343#define cpu_local_wrap_v(l) \
344 ({ local_t res__; \
345 preempt_disable(); \
346 res__ = (l); \
347 preempt_enable(); \
348 res__; })
349#define cpu_local_wrap(l) \
350 ({ preempt_disable(); \
351 l; \
352 preempt_enable(); }) \
353
354#define cpu_local_read(l) cpu_local_wrap_v(local_read(&__get_cpu_var(l)))
355#define cpu_local_set(l, i) cpu_local_wrap(local_set(&__get_cpu_var(l), (i)))
356#define cpu_local_inc(l) cpu_local_wrap(local_inc(&__get_cpu_var(l)))
357#define cpu_local_dec(l) cpu_local_wrap(local_dec(&__get_cpu_var(l)))
358#define cpu_local_add(i, l) cpu_local_wrap(local_add((i), &__get_cpu_var(l)))
359#define cpu_local_sub(i, l) cpu_local_wrap(local_sub((i), &__get_cpu_var(l)))
360
361#define __cpu_local_inc(l) cpu_local_inc(l)
362#define __cpu_local_dec(l) cpu_local_dec(l)
363#define __cpu_local_add(i, l) cpu_local_add((i), (l))
364#define __cpu_local_sub(i, l) cpu_local_sub((i), (l))
365
366#endif /* __M32R_LOCAL_H */ 341#endif /* __M32R_LOCAL_H */
diff --git a/arch/microblaze/include/asm/entry.h b/arch/microblaze/include/asm/entry.h
index 61abbd232640..ec89f2ad0fe1 100644
--- a/arch/microblaze/include/asm/entry.h
+++ b/arch/microblaze/include/asm/entry.h
@@ -21,7 +21,7 @@
21 * places 21 * places
22 */ 22 */
23 23
24#define PER_CPU(var) per_cpu__##var 24#define PER_CPU(var) var
25 25
26# ifndef __ASSEMBLY__ 26# ifndef __ASSEMBLY__
27DECLARE_PER_CPU(unsigned int, KSP); /* Saved kernel stack pointer */ 27DECLARE_PER_CPU(unsigned int, KSP); /* Saved kernel stack pointer */
diff --git a/arch/mips/include/asm/local.h b/arch/mips/include/asm/local.h
index 361f4f16c30c..bdcdef02d147 100644
--- a/arch/mips/include/asm/local.h
+++ b/arch/mips/include/asm/local.h
@@ -193,29 +193,4 @@ static __inline__ long local_sub_return(long i, local_t * l)
193#define __local_add(i, l) ((l)->a.counter+=(i)) 193#define __local_add(i, l) ((l)->a.counter+=(i))
194#define __local_sub(i, l) ((l)->a.counter-=(i)) 194#define __local_sub(i, l) ((l)->a.counter-=(i))
195 195
196/* Need to disable preemption for the cpu local counters otherwise we could
197 still access a variable of a previous CPU in a non atomic way. */
198#define cpu_local_wrap_v(l) \
199 ({ local_t res__; \
200 preempt_disable(); \
201 res__ = (l); \
202 preempt_enable(); \
203 res__; })
204#define cpu_local_wrap(l) \
205 ({ preempt_disable(); \
206 l; \
207 preempt_enable(); }) \
208
209#define cpu_local_read(l) cpu_local_wrap_v(local_read(&__get_cpu_var(l)))
210#define cpu_local_set(l, i) cpu_local_wrap(local_set(&__get_cpu_var(l), (i)))
211#define cpu_local_inc(l) cpu_local_wrap(local_inc(&__get_cpu_var(l)))
212#define cpu_local_dec(l) cpu_local_wrap(local_dec(&__get_cpu_var(l)))
213#define cpu_local_add(i, l) cpu_local_wrap(local_add((i), &__get_cpu_var(l)))
214#define cpu_local_sub(i, l) cpu_local_wrap(local_sub((i), &__get_cpu_var(l)))
215
216#define __cpu_local_inc(l) cpu_local_inc(l)
217#define __cpu_local_dec(l) cpu_local_dec(l)
218#define __cpu_local_add(i, l) cpu_local_add((i), (l))
219#define __cpu_local_sub(i, l) cpu_local_sub((i), (l))
220
221#endif /* _ARCH_MIPS_LOCAL_H */ 196#endif /* _ARCH_MIPS_LOCAL_H */
diff --git a/arch/parisc/lib/fixup.S b/arch/parisc/lib/fixup.S
index d172d4245cdc..f8c45cc2947d 100644
--- a/arch/parisc/lib/fixup.S
+++ b/arch/parisc/lib/fixup.S
@@ -36,8 +36,8 @@
36#endif 36#endif
37 /* t2 = &__per_cpu_offset[smp_processor_id()]; */ 37 /* t2 = &__per_cpu_offset[smp_processor_id()]; */
38 LDREGX \t2(\t1),\t2 38 LDREGX \t2(\t1),\t2
39 addil LT%per_cpu__exception_data,%r27 39 addil LT%exception_data,%r27
40 LDREG RT%per_cpu__exception_data(%r1),\t1 40 LDREG RT%exception_data(%r1),\t1
41 /* t1 = &__get_cpu_var(exception_data) */ 41 /* t1 = &__get_cpu_var(exception_data) */
42 add,l \t1,\t2,\t1 42 add,l \t1,\t2,\t1
43 /* t1 = t1->fault_ip */ 43 /* t1 = t1->fault_ip */
@@ -46,8 +46,8 @@
46#else 46#else
47 .macro get_fault_ip t1 t2 47 .macro get_fault_ip t1 t2
48 /* t1 = &__get_cpu_var(exception_data) */ 48 /* t1 = &__get_cpu_var(exception_data) */
49 addil LT%per_cpu__exception_data,%r27 49 addil LT%exception_data,%r27
50 LDREG RT%per_cpu__exception_data(%r1),\t2 50 LDREG RT%exception_data(%r1),\t2
51 /* t1 = t2->fault_ip */ 51 /* t1 = t2->fault_ip */
52 LDREG EXCDATA_IP(\t2), \t1 52 LDREG EXCDATA_IP(\t2), \t1
53 .endm 53 .endm
diff --git a/arch/powerpc/include/asm/kvm_asm.h b/arch/powerpc/include/asm/kvm_asm.h
index af2abe74f544..aadf2dd6f84e 100644
--- a/arch/powerpc/include/asm/kvm_asm.h
+++ b/arch/powerpc/include/asm/kvm_asm.h
@@ -97,4 +97,10 @@
97#define RESUME_HOST RESUME_FLAG_HOST 97#define RESUME_HOST RESUME_FLAG_HOST
98#define RESUME_HOST_NV (RESUME_FLAG_HOST|RESUME_FLAG_NV) 98#define RESUME_HOST_NV (RESUME_FLAG_HOST|RESUME_FLAG_NV)
99 99
100#define KVM_GUEST_MODE_NONE 0
101#define KVM_GUEST_MODE_GUEST 1
102#define KVM_GUEST_MODE_SKIP 2
103
104#define KVM_INST_FETCH_FAILED -1
105
100#endif /* __POWERPC_KVM_ASM_H__ */ 106#endif /* __POWERPC_KVM_ASM_H__ */
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index 74b7369770d0..db7db0a96967 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -22,7 +22,7 @@
22 22
23#include <linux/types.h> 23#include <linux/types.h>
24#include <linux/kvm_host.h> 24#include <linux/kvm_host.h>
25#include <asm/kvm_ppc.h> 25#include <asm/kvm_book3s_64_asm.h>
26 26
27struct kvmppc_slb { 27struct kvmppc_slb {
28 u64 esid; 28 u64 esid;
@@ -33,7 +33,8 @@ struct kvmppc_slb {
33 bool Ks; 33 bool Ks;
34 bool Kp; 34 bool Kp;
35 bool nx; 35 bool nx;
36 bool large; 36 bool large; /* PTEs are 16MB */
37 bool tb; /* 1TB segment */
37 bool class; 38 bool class;
38}; 39};
39 40
@@ -69,6 +70,7 @@ struct kvmppc_sid_map {
69 70
70struct kvmppc_vcpu_book3s { 71struct kvmppc_vcpu_book3s {
71 struct kvm_vcpu vcpu; 72 struct kvm_vcpu vcpu;
73 struct kvmppc_book3s_shadow_vcpu shadow_vcpu;
72 struct kvmppc_sid_map sid_map[SID_MAP_NUM]; 74 struct kvmppc_sid_map sid_map[SID_MAP_NUM];
73 struct kvmppc_slb slb[64]; 75 struct kvmppc_slb slb[64];
74 struct { 76 struct {
@@ -89,6 +91,7 @@ struct kvmppc_vcpu_book3s {
89 u64 vsid_next; 91 u64 vsid_next;
90 u64 vsid_max; 92 u64 vsid_max;
91 int context_id; 93 int context_id;
94 ulong prog_flags; /* flags to inject when giving a 700 trap */
92}; 95};
93 96
94#define CONTEXT_HOST 0 97#define CONTEXT_HOST 0
@@ -119,6 +122,10 @@ extern void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat,
119 122
120extern u32 kvmppc_trampoline_lowmem; 123extern u32 kvmppc_trampoline_lowmem;
121extern u32 kvmppc_trampoline_enter; 124extern u32 kvmppc_trampoline_enter;
125extern void kvmppc_rmcall(ulong srr0, ulong srr1);
126extern void kvmppc_load_up_fpu(void);
127extern void kvmppc_load_up_altivec(void);
128extern void kvmppc_load_up_vsx(void);
122 129
123static inline struct kvmppc_vcpu_book3s *to_book3s(struct kvm_vcpu *vcpu) 130static inline struct kvmppc_vcpu_book3s *to_book3s(struct kvm_vcpu *vcpu)
124{ 131{
diff --git a/arch/powerpc/include/asm/kvm_book3s_64_asm.h b/arch/powerpc/include/asm/kvm_book3s_64_asm.h
index 2e06ee8184ef..183461b48407 100644
--- a/arch/powerpc/include/asm/kvm_book3s_64_asm.h
+++ b/arch/powerpc/include/asm/kvm_book3s_64_asm.h
@@ -20,6 +20,8 @@
20#ifndef __ASM_KVM_BOOK3S_ASM_H__ 20#ifndef __ASM_KVM_BOOK3S_ASM_H__
21#define __ASM_KVM_BOOK3S_ASM_H__ 21#define __ASM_KVM_BOOK3S_ASM_H__
22 22
23#ifdef __ASSEMBLY__
24
23#ifdef CONFIG_KVM_BOOK3S_64_HANDLER 25#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
24 26
25#include <asm/kvm_asm.h> 27#include <asm/kvm_asm.h>
@@ -55,4 +57,20 @@ kvmppc_resume_\intno:
55 57
56#endif /* CONFIG_KVM_BOOK3S_64_HANDLER */ 58#endif /* CONFIG_KVM_BOOK3S_64_HANDLER */
57 59
60#else /*__ASSEMBLY__ */
61
62struct kvmppc_book3s_shadow_vcpu {
63 ulong gpr[14];
64 u32 cr;
65 u32 xer;
66 ulong host_r1;
67 ulong host_r2;
68 ulong handler;
69 ulong scratch0;
70 ulong scratch1;
71 ulong vmhandler;
72};
73
74#endif /*__ASSEMBLY__ */
75
58#endif /* __ASM_KVM_BOOK3S_ASM_H__ */ 76#endif /* __ASM_KVM_BOOK3S_ASM_H__ */
diff --git a/arch/powerpc/include/asm/kvm_e500.h b/arch/powerpc/include/asm/kvm_e500.h
index 9d497ce49726..7fea26fffb25 100644
--- a/arch/powerpc/include/asm/kvm_e500.h
+++ b/arch/powerpc/include/asm/kvm_e500.h
@@ -52,9 +52,12 @@ struct kvmppc_vcpu_e500 {
52 u32 mas5; 52 u32 mas5;
53 u32 mas6; 53 u32 mas6;
54 u32 mas7; 54 u32 mas7;
55 u32 l1csr0;
55 u32 l1csr1; 56 u32 l1csr1;
56 u32 hid0; 57 u32 hid0;
57 u32 hid1; 58 u32 hid1;
59 u32 tlb0cfg;
60 u32 tlb1cfg;
58 61
59 struct kvm_vcpu vcpu; 62 struct kvm_vcpu vcpu;
60}; 63};
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 1201f62d0d73..5e5bae7e152f 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -167,23 +167,40 @@ struct kvm_vcpu_arch {
167 ulong trampoline_lowmem; 167 ulong trampoline_lowmem;
168 ulong trampoline_enter; 168 ulong trampoline_enter;
169 ulong highmem_handler; 169 ulong highmem_handler;
170 ulong rmcall;
170 ulong host_paca_phys; 171 ulong host_paca_phys;
171 struct kvmppc_mmu mmu; 172 struct kvmppc_mmu mmu;
172#endif 173#endif
173 174
174 u64 fpr[32];
175 ulong gpr[32]; 175 ulong gpr[32];
176 176
177 u64 fpr[32];
178 u32 fpscr;
179
180#ifdef CONFIG_ALTIVEC
181 vector128 vr[32];
182 vector128 vscr;
183#endif
184
185#ifdef CONFIG_VSX
186 u64 vsr[32];
187#endif
188
177 ulong pc; 189 ulong pc;
178 u32 cr;
179 ulong ctr; 190 ulong ctr;
180 ulong lr; 191 ulong lr;
192
193#ifdef CONFIG_BOOKE
181 ulong xer; 194 ulong xer;
195 u32 cr;
196#endif
182 197
183 ulong msr; 198 ulong msr;
184#ifdef CONFIG_PPC64 199#ifdef CONFIG_PPC64
185 ulong shadow_msr; 200 ulong shadow_msr;
201 ulong shadow_srr1;
186 ulong hflags; 202 ulong hflags;
203 ulong guest_owned_ext;
187#endif 204#endif
188 u32 mmucr; 205 u32 mmucr;
189 ulong sprg0; 206 ulong sprg0;
@@ -242,6 +259,8 @@ struct kvm_vcpu_arch {
242#endif 259#endif
243 ulong fault_dear; 260 ulong fault_dear;
244 ulong fault_esr; 261 ulong fault_esr;
262 ulong queued_dear;
263 ulong queued_esr;
245 gpa_t paddr_accessed; 264 gpa_t paddr_accessed;
246 265
247 u8 io_gpr; /* GPR used as IO source/target */ 266 u8 io_gpr; /* GPR used as IO source/target */
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index 269ee46ab028..e2642829e435 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -28,6 +28,9 @@
28#include <linux/types.h> 28#include <linux/types.h>
29#include <linux/kvm_types.h> 29#include <linux/kvm_types.h>
30#include <linux/kvm_host.h> 30#include <linux/kvm_host.h>
31#ifdef CONFIG_PPC_BOOK3S
32#include <asm/kvm_book3s.h>
33#endif
31 34
32enum emulation_result { 35enum emulation_result {
33 EMULATE_DONE, /* no further processing */ 36 EMULATE_DONE, /* no further processing */
@@ -80,8 +83,9 @@ extern void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu);
80 83
81extern void kvmppc_core_deliver_interrupts(struct kvm_vcpu *vcpu); 84extern void kvmppc_core_deliver_interrupts(struct kvm_vcpu *vcpu);
82extern int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu); 85extern int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu);
83extern void kvmppc_core_queue_program(struct kvm_vcpu *vcpu); 86extern void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags);
84extern void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu); 87extern void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu);
88extern void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu);
85extern void kvmppc_core_queue_external(struct kvm_vcpu *vcpu, 89extern void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
86 struct kvm_interrupt *irq); 90 struct kvm_interrupt *irq);
87 91
@@ -95,4 +99,81 @@ extern void kvmppc_booke_exit(void);
95 99
96extern void kvmppc_core_destroy_mmu(struct kvm_vcpu *vcpu); 100extern void kvmppc_core_destroy_mmu(struct kvm_vcpu *vcpu);
97 101
102#ifdef CONFIG_PPC_BOOK3S
103
104/* We assume we're always acting on the current vcpu */
105
106static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
107{
108 if ( num < 14 ) {
109 get_paca()->shadow_vcpu.gpr[num] = val;
110 to_book3s(vcpu)->shadow_vcpu.gpr[num] = val;
111 } else
112 vcpu->arch.gpr[num] = val;
113}
114
115static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
116{
117 if ( num < 14 )
118 return get_paca()->shadow_vcpu.gpr[num];
119 else
120 return vcpu->arch.gpr[num];
121}
122
123static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val)
124{
125 get_paca()->shadow_vcpu.cr = val;
126 to_book3s(vcpu)->shadow_vcpu.cr = val;
127}
128
129static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu)
130{
131 return get_paca()->shadow_vcpu.cr;
132}
133
134static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, u32 val)
135{
136 get_paca()->shadow_vcpu.xer = val;
137 to_book3s(vcpu)->shadow_vcpu.xer = val;
138}
139
140static inline u32 kvmppc_get_xer(struct kvm_vcpu *vcpu)
141{
142 return get_paca()->shadow_vcpu.xer;
143}
144
145#else
146
147static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
148{
149 vcpu->arch.gpr[num] = val;
150}
151
152static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
153{
154 return vcpu->arch.gpr[num];
155}
156
157static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val)
158{
159 vcpu->arch.cr = val;
160}
161
162static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu)
163{
164 return vcpu->arch.cr;
165}
166
167static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, u32 val)
168{
169 vcpu->arch.xer = val;
170}
171
172static inline u32 kvmppc_get_xer(struct kvm_vcpu *vcpu)
173{
174 return vcpu->arch.xer;
175}
176
177#endif
178
98#endif /* __POWERPC_KVM_PPC_H__ */ 179#endif /* __POWERPC_KVM_PPC_H__ */
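A usage sketch for the new accessors (the helper is illustrative; the real
callers are the conversions later in this patch, e.g. in 44x_emulate.c):
emulation code reads and writes guest registers only through
kvmppc_get_gpr()/kvmppc_set_gpr(), so it works whether a GPR lives in
vcpu->arch or in the PACA shadow copy.

#include <asm/kvm_ppc.h>

static void example_move_gpr(struct kvm_vcpu *vcpu, int rt, int rs)
{
        kvmppc_set_gpr(vcpu, rt, kvmppc_get_gpr(vcpu, rs));
}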
diff --git a/arch/powerpc/include/asm/local.h b/arch/powerpc/include/asm/local.h
index ce58c80e1bcf..c2410af6bfd9 100644
--- a/arch/powerpc/include/asm/local.h
+++ b/arch/powerpc/include/asm/local.h
@@ -172,29 +172,4 @@ static __inline__ long local_dec_if_positive(local_t *l)
172#define __local_add(i,l) ((l)->a.counter+=(i)) 172#define __local_add(i,l) ((l)->a.counter+=(i))
173#define __local_sub(i,l) ((l)->a.counter-=(i)) 173#define __local_sub(i,l) ((l)->a.counter-=(i))
174 174
175/* Need to disable preemption for the cpu local counters otherwise we could
176 still access a variable of a previous CPU in a non atomic way. */
177#define cpu_local_wrap_v(l) \
178 ({ local_t res__; \
179 preempt_disable(); \
180 res__ = (l); \
181 preempt_enable(); \
182 res__; })
183#define cpu_local_wrap(l) \
184 ({ preempt_disable(); \
185 l; \
186 preempt_enable(); }) \
187
188#define cpu_local_read(l) cpu_local_wrap_v(local_read(&__get_cpu_var(l)))
189#define cpu_local_set(l, i) cpu_local_wrap(local_set(&__get_cpu_var(l), (i)))
190#define cpu_local_inc(l) cpu_local_wrap(local_inc(&__get_cpu_var(l)))
191#define cpu_local_dec(l) cpu_local_wrap(local_dec(&__get_cpu_var(l)))
192#define cpu_local_add(i, l) cpu_local_wrap(local_add((i), &__get_cpu_var(l)))
193#define cpu_local_sub(i, l) cpu_local_wrap(local_sub((i), &__get_cpu_var(l)))
194
195#define __cpu_local_inc(l) cpu_local_inc(l)
196#define __cpu_local_dec(l) cpu_local_dec(l)
197#define __cpu_local_add(i, l) cpu_local_add((i), (l))
198#define __cpu_local_sub(i, l) cpu_local_sub((i), (l))
199
200#endif /* _ARCH_POWERPC_LOCAL_H */ 175#endif /* _ARCH_POWERPC_LOCAL_H */
diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
index 5e9b4ef71415..d8a693109c82 100644
--- a/arch/powerpc/include/asm/paca.h
+++ b/arch/powerpc/include/asm/paca.h
@@ -19,6 +19,9 @@
19#include <asm/mmu.h> 19#include <asm/mmu.h>
20#include <asm/page.h> 20#include <asm/page.h>
21#include <asm/exception-64e.h> 21#include <asm/exception-64e.h>
22#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
23#include <asm/kvm_book3s_64_asm.h>
24#endif
22 25
23register struct paca_struct *local_paca asm("r13"); 26register struct paca_struct *local_paca asm("r13");
24 27
@@ -135,6 +138,8 @@ struct paca_struct {
135 u64 esid; 138 u64 esid;
136 u64 vsid; 139 u64 vsid;
137 } kvm_slb[64]; /* guest SLB */ 140 } kvm_slb[64]; /* guest SLB */
141 /* We use this to store guest state in */
142 struct kvmppc_book3s_shadow_vcpu shadow_vcpu;
138 u8 kvm_slb_max; /* highest used guest slb entry */ 143 u8 kvm_slb_max; /* highest used guest slb entry */
139 u8 kvm_in_guest; /* are we inside the guest? */ 144 u8 kvm_in_guest; /* are we inside the guest? */
140#endif 145#endif
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index bc8dd53f718a..5572e86223f4 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -426,6 +426,10 @@
426#define SRR1_WAKEMT 0x00280000 /* mtctrl */ 426#define SRR1_WAKEMT 0x00280000 /* mtctrl */
427#define SRR1_WAKEDEC 0x00180000 /* Decrementer interrupt */ 427#define SRR1_WAKEDEC 0x00180000 /* Decrementer interrupt */
428#define SRR1_WAKETHERM 0x00100000 /* Thermal management interrupt */ 428#define SRR1_WAKETHERM 0x00100000 /* Thermal management interrupt */
429#define SRR1_PROGFPE 0x00100000 /* Floating Point Enabled */
430#define SRR1_PROGPRIV 0x00040000 /* Privileged instruction */
431#define SRR1_PROGTRAP 0x00020000 /* Trap */
432#define SRR1_PROGADDR 0x00010000 /* SRR0 contains subsequent addr */
429#define SPRN_HSRR0 0x13A /* Save/Restore Register 0 */ 433#define SPRN_HSRR0 0x13A /* Save/Restore Register 0 */
430#define SPRN_HSRR1 0x13B /* Save/Restore Register 1 */ 434#define SPRN_HSRR1 0x13B /* Save/Restore Register 1 */
431 435
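The new SRR1_PROG* bits pair with the flags argument added to
kvmppc_core_queue_program() earlier in this patch. A minimal sketch of
injecting a trap-flavoured program interrupt; the helper name is
illustrative:

#include <asm/reg.h>
#include <asm/kvm_ppc.h>

static void example_queue_trap(struct kvm_vcpu *vcpu)
{
        /* SRR1_PROGTRAP marks the program check as raised by a trap */
        kvmppc_core_queue_program(vcpu, SRR1_PROGTRAP);
}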
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index a6c2b63227b3..957ceb7059c5 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -194,6 +194,30 @@ int main(void)
194 DEFINE(PACA_KVM_IN_GUEST, offsetof(struct paca_struct, kvm_in_guest)); 194 DEFINE(PACA_KVM_IN_GUEST, offsetof(struct paca_struct, kvm_in_guest));
195 DEFINE(PACA_KVM_SLB, offsetof(struct paca_struct, kvm_slb)); 195 DEFINE(PACA_KVM_SLB, offsetof(struct paca_struct, kvm_slb));
196 DEFINE(PACA_KVM_SLB_MAX, offsetof(struct paca_struct, kvm_slb_max)); 196 DEFINE(PACA_KVM_SLB_MAX, offsetof(struct paca_struct, kvm_slb_max));
197 DEFINE(PACA_KVM_CR, offsetof(struct paca_struct, shadow_vcpu.cr));
198 DEFINE(PACA_KVM_XER, offsetof(struct paca_struct, shadow_vcpu.xer));
199 DEFINE(PACA_KVM_R0, offsetof(struct paca_struct, shadow_vcpu.gpr[0]));
200 DEFINE(PACA_KVM_R1, offsetof(struct paca_struct, shadow_vcpu.gpr[1]));
201 DEFINE(PACA_KVM_R2, offsetof(struct paca_struct, shadow_vcpu.gpr[2]));
202 DEFINE(PACA_KVM_R3, offsetof(struct paca_struct, shadow_vcpu.gpr[3]));
203 DEFINE(PACA_KVM_R4, offsetof(struct paca_struct, shadow_vcpu.gpr[4]));
204 DEFINE(PACA_KVM_R5, offsetof(struct paca_struct, shadow_vcpu.gpr[5]));
205 DEFINE(PACA_KVM_R6, offsetof(struct paca_struct, shadow_vcpu.gpr[6]));
206 DEFINE(PACA_KVM_R7, offsetof(struct paca_struct, shadow_vcpu.gpr[7]));
207 DEFINE(PACA_KVM_R8, offsetof(struct paca_struct, shadow_vcpu.gpr[8]));
208 DEFINE(PACA_KVM_R9, offsetof(struct paca_struct, shadow_vcpu.gpr[9]));
209 DEFINE(PACA_KVM_R10, offsetof(struct paca_struct, shadow_vcpu.gpr[10]));
210 DEFINE(PACA_KVM_R11, offsetof(struct paca_struct, shadow_vcpu.gpr[11]));
211 DEFINE(PACA_KVM_R12, offsetof(struct paca_struct, shadow_vcpu.gpr[12]));
212 DEFINE(PACA_KVM_R13, offsetof(struct paca_struct, shadow_vcpu.gpr[13]));
213 DEFINE(PACA_KVM_HOST_R1, offsetof(struct paca_struct, shadow_vcpu.host_r1));
214 DEFINE(PACA_KVM_HOST_R2, offsetof(struct paca_struct, shadow_vcpu.host_r2));
215 DEFINE(PACA_KVM_VMHANDLER, offsetof(struct paca_struct,
216 shadow_vcpu.vmhandler));
217 DEFINE(PACA_KVM_SCRATCH0, offsetof(struct paca_struct,
218 shadow_vcpu.scratch0));
219 DEFINE(PACA_KVM_SCRATCH1, offsetof(struct paca_struct,
220 shadow_vcpu.scratch1));
197#endif 221#endif
198#endif /* CONFIG_PPC64 */ 222#endif /* CONFIG_PPC64 */
199 223
@@ -389,8 +413,6 @@ int main(void)
389 DEFINE(VCPU_HOST_PID, offsetof(struct kvm_vcpu, arch.host_pid)); 413 DEFINE(VCPU_HOST_PID, offsetof(struct kvm_vcpu, arch.host_pid));
390 DEFINE(VCPU_GPRS, offsetof(struct kvm_vcpu, arch.gpr)); 414 DEFINE(VCPU_GPRS, offsetof(struct kvm_vcpu, arch.gpr));
391 DEFINE(VCPU_LR, offsetof(struct kvm_vcpu, arch.lr)); 415 DEFINE(VCPU_LR, offsetof(struct kvm_vcpu, arch.lr));
392 DEFINE(VCPU_CR, offsetof(struct kvm_vcpu, arch.cr));
393 DEFINE(VCPU_XER, offsetof(struct kvm_vcpu, arch.xer));
394 DEFINE(VCPU_CTR, offsetof(struct kvm_vcpu, arch.ctr)); 416 DEFINE(VCPU_CTR, offsetof(struct kvm_vcpu, arch.ctr));
395 DEFINE(VCPU_PC, offsetof(struct kvm_vcpu, arch.pc)); 417 DEFINE(VCPU_PC, offsetof(struct kvm_vcpu, arch.pc));
396 DEFINE(VCPU_MSR, offsetof(struct kvm_vcpu, arch.msr)); 418 DEFINE(VCPU_MSR, offsetof(struct kvm_vcpu, arch.msr));
@@ -411,11 +433,16 @@ int main(void)
411 DEFINE(VCPU_HOST_R2, offsetof(struct kvm_vcpu, arch.host_r2)); 433 DEFINE(VCPU_HOST_R2, offsetof(struct kvm_vcpu, arch.host_r2));
412 DEFINE(VCPU_HOST_MSR, offsetof(struct kvm_vcpu, arch.host_msr)); 434 DEFINE(VCPU_HOST_MSR, offsetof(struct kvm_vcpu, arch.host_msr));
413 DEFINE(VCPU_SHADOW_MSR, offsetof(struct kvm_vcpu, arch.shadow_msr)); 435 DEFINE(VCPU_SHADOW_MSR, offsetof(struct kvm_vcpu, arch.shadow_msr));
436 DEFINE(VCPU_SHADOW_SRR1, offsetof(struct kvm_vcpu, arch.shadow_srr1));
414 DEFINE(VCPU_TRAMPOLINE_LOWMEM, offsetof(struct kvm_vcpu, arch.trampoline_lowmem)); 437 DEFINE(VCPU_TRAMPOLINE_LOWMEM, offsetof(struct kvm_vcpu, arch.trampoline_lowmem));
415 DEFINE(VCPU_TRAMPOLINE_ENTER, offsetof(struct kvm_vcpu, arch.trampoline_enter)); 438 DEFINE(VCPU_TRAMPOLINE_ENTER, offsetof(struct kvm_vcpu, arch.trampoline_enter));
416 DEFINE(VCPU_HIGHMEM_HANDLER, offsetof(struct kvm_vcpu, arch.highmem_handler)); 439 DEFINE(VCPU_HIGHMEM_HANDLER, offsetof(struct kvm_vcpu, arch.highmem_handler));
440 DEFINE(VCPU_RMCALL, offsetof(struct kvm_vcpu, arch.rmcall));
417 DEFINE(VCPU_HFLAGS, offsetof(struct kvm_vcpu, arch.hflags)); 441 DEFINE(VCPU_HFLAGS, offsetof(struct kvm_vcpu, arch.hflags));
418#endif 442#else
443 DEFINE(VCPU_CR, offsetof(struct kvm_vcpu, arch.cr));
444 DEFINE(VCPU_XER, offsetof(struct kvm_vcpu, arch.xer));
445#endif /* CONFIG_PPC64 */
419#endif 446#endif
420#ifdef CONFIG_44x 447#ifdef CONFIG_44x
421 DEFINE(PGD_T_LOG2, PGD_T_LOG2); 448 DEFINE(PGD_T_LOG2, PGD_T_LOG2);
diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c
index 425451453e96..ab3e392ac63c 100644
--- a/arch/powerpc/kernel/ppc_ksyms.c
+++ b/arch/powerpc/kernel/ppc_ksyms.c
@@ -107,6 +107,7 @@ EXPORT_SYMBOL(giveup_altivec);
107#endif /* CONFIG_ALTIVEC */ 107#endif /* CONFIG_ALTIVEC */
108#ifdef CONFIG_VSX 108#ifdef CONFIG_VSX
109EXPORT_SYMBOL(giveup_vsx); 109EXPORT_SYMBOL(giveup_vsx);
110EXPORT_SYMBOL_GPL(__giveup_vsx);
110#endif /* CONFIG_VSX */ 111#endif /* CONFIG_VSX */
111#ifdef CONFIG_SPE 112#ifdef CONFIG_SPE
112EXPORT_SYMBOL(giveup_spe); 113EXPORT_SYMBOL(giveup_spe);
diff --git a/arch/powerpc/kvm/44x_emulate.c b/arch/powerpc/kvm/44x_emulate.c
index 61af58fcecee..65ea083a5b27 100644
--- a/arch/powerpc/kvm/44x_emulate.c
+++ b/arch/powerpc/kvm/44x_emulate.c
@@ -65,13 +65,14 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
65 */ 65 */
66 switch (dcrn) { 66 switch (dcrn) {
67 case DCRN_CPR0_CONFIG_ADDR: 67 case DCRN_CPR0_CONFIG_ADDR:
68 vcpu->arch.gpr[rt] = vcpu->arch.cpr0_cfgaddr; 68 kvmppc_set_gpr(vcpu, rt, vcpu->arch.cpr0_cfgaddr);
69 break; 69 break;
70 case DCRN_CPR0_CONFIG_DATA: 70 case DCRN_CPR0_CONFIG_DATA:
71 local_irq_disable(); 71 local_irq_disable();
72 mtdcr(DCRN_CPR0_CONFIG_ADDR, 72 mtdcr(DCRN_CPR0_CONFIG_ADDR,
73 vcpu->arch.cpr0_cfgaddr); 73 vcpu->arch.cpr0_cfgaddr);
74 vcpu->arch.gpr[rt] = mfdcr(DCRN_CPR0_CONFIG_DATA); 74 kvmppc_set_gpr(vcpu, rt,
75 mfdcr(DCRN_CPR0_CONFIG_DATA));
75 local_irq_enable(); 76 local_irq_enable();
76 break; 77 break;
77 default: 78 default:
@@ -93,11 +94,11 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
93 /* emulate some access in kernel */ 94 /* emulate some access in kernel */
94 switch (dcrn) { 95 switch (dcrn) {
95 case DCRN_CPR0_CONFIG_ADDR: 96 case DCRN_CPR0_CONFIG_ADDR:
96 vcpu->arch.cpr0_cfgaddr = vcpu->arch.gpr[rs]; 97 vcpu->arch.cpr0_cfgaddr = kvmppc_get_gpr(vcpu, rs);
97 break; 98 break;
98 default: 99 default:
99 run->dcr.dcrn = dcrn; 100 run->dcr.dcrn = dcrn;
100 run->dcr.data = vcpu->arch.gpr[rs]; 101 run->dcr.data = kvmppc_get_gpr(vcpu, rs);
101 run->dcr.is_write = 1; 102 run->dcr.is_write = 1;
102 vcpu->arch.dcr_needed = 1; 103 vcpu->arch.dcr_needed = 1;
103 kvmppc_account_exit(vcpu, DCR_EXITS); 104 kvmppc_account_exit(vcpu, DCR_EXITS);
@@ -146,13 +147,13 @@ int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
146 147
147 switch (sprn) { 148 switch (sprn) {
148 case SPRN_PID: 149 case SPRN_PID:
149 kvmppc_set_pid(vcpu, vcpu->arch.gpr[rs]); break; 150 kvmppc_set_pid(vcpu, kvmppc_get_gpr(vcpu, rs)); break;
150 case SPRN_MMUCR: 151 case SPRN_MMUCR:
151 vcpu->arch.mmucr = vcpu->arch.gpr[rs]; break; 152 vcpu->arch.mmucr = kvmppc_get_gpr(vcpu, rs); break;
152 case SPRN_CCR0: 153 case SPRN_CCR0:
153 vcpu->arch.ccr0 = vcpu->arch.gpr[rs]; break; 154 vcpu->arch.ccr0 = kvmppc_get_gpr(vcpu, rs); break;
154 case SPRN_CCR1: 155 case SPRN_CCR1:
155 vcpu->arch.ccr1 = vcpu->arch.gpr[rs]; break; 156 vcpu->arch.ccr1 = kvmppc_get_gpr(vcpu, rs); break;
156 default: 157 default:
157 emulated = kvmppc_booke_emulate_mtspr(vcpu, sprn, rs); 158 emulated = kvmppc_booke_emulate_mtspr(vcpu, sprn, rs);
158 } 159 }
@@ -167,13 +168,13 @@ int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
167 168
168 switch (sprn) { 169 switch (sprn) {
169 case SPRN_PID: 170 case SPRN_PID:
170 vcpu->arch.gpr[rt] = vcpu->arch.pid; break; 171 kvmppc_set_gpr(vcpu, rt, vcpu->arch.pid); break;
171 case SPRN_MMUCR: 172 case SPRN_MMUCR:
172 vcpu->arch.gpr[rt] = vcpu->arch.mmucr; break; 173 kvmppc_set_gpr(vcpu, rt, vcpu->arch.mmucr); break;
173 case SPRN_CCR0: 174 case SPRN_CCR0:
174 vcpu->arch.gpr[rt] = vcpu->arch.ccr0; break; 175 kvmppc_set_gpr(vcpu, rt, vcpu->arch.ccr0); break;
175 case SPRN_CCR1: 176 case SPRN_CCR1:
176 vcpu->arch.gpr[rt] = vcpu->arch.ccr1; break; 177 kvmppc_set_gpr(vcpu, rt, vcpu->arch.ccr1); break;
177 default: 178 default:
178 emulated = kvmppc_booke_emulate_mfspr(vcpu, sprn, rt); 179 emulated = kvmppc_booke_emulate_mfspr(vcpu, sprn, rt);
179 } 180 }
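Every conversion in this file (and the files below) funnels GPR access through kvmppc_get_gpr()/kvmppc_set_gpr(). The helpers themselves are outside this section; a plausible minimal shape, assuming they wrap arch.gpr[] directly (on Book3S-64 they may instead target the PACA-backed shadow copy introduced in this series):

/* Assumed shape of the accessors used above; the in-tree versions
 * live in asm/kvm_ppc.h and exist so the backing store for GPRs can
 * change (e.g. to a shadow vcpu) without touching every caller. */
static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
{
	vcpu->arch.gpr[num] = val;
}

static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
{
	return vcpu->arch.gpr[num];
}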
diff --git a/arch/powerpc/kvm/44x_tlb.c b/arch/powerpc/kvm/44x_tlb.c
index ff3cb63b8117..2570fcc7665d 100644
--- a/arch/powerpc/kvm/44x_tlb.c
+++ b/arch/powerpc/kvm/44x_tlb.c
@@ -439,7 +439,7 @@ int kvmppc_44x_emul_tlbwe(struct kvm_vcpu *vcpu, u8 ra, u8 rs, u8 ws)
439 struct kvmppc_44x_tlbe *tlbe; 439 struct kvmppc_44x_tlbe *tlbe;
440 unsigned int gtlb_index; 440 unsigned int gtlb_index;
441 441
442 gtlb_index = vcpu->arch.gpr[ra]; 442 gtlb_index = kvmppc_get_gpr(vcpu, ra);
443 if (gtlb_index > KVM44x_GUEST_TLB_SIZE) { 443 if (gtlb_index > KVM44x_GUEST_TLB_SIZE) {
444 printk("%s: index %d\n", __func__, gtlb_index); 444 printk("%s: index %d\n", __func__, gtlb_index);
445 kvmppc_dump_vcpu(vcpu); 445 kvmppc_dump_vcpu(vcpu);
@@ -455,15 +455,15 @@ int kvmppc_44x_emul_tlbwe(struct kvm_vcpu *vcpu, u8 ra, u8 rs, u8 ws)
455 switch (ws) { 455 switch (ws) {
456 case PPC44x_TLB_PAGEID: 456 case PPC44x_TLB_PAGEID:
457 tlbe->tid = get_mmucr_stid(vcpu); 457 tlbe->tid = get_mmucr_stid(vcpu);
458 tlbe->word0 = vcpu->arch.gpr[rs]; 458 tlbe->word0 = kvmppc_get_gpr(vcpu, rs);
459 break; 459 break;
460 460
461 case PPC44x_TLB_XLAT: 461 case PPC44x_TLB_XLAT:
462 tlbe->word1 = vcpu->arch.gpr[rs]; 462 tlbe->word1 = kvmppc_get_gpr(vcpu, rs);
463 break; 463 break;
464 464
465 case PPC44x_TLB_ATTRIB: 465 case PPC44x_TLB_ATTRIB:
466 tlbe->word2 = vcpu->arch.gpr[rs]; 466 tlbe->word2 = kvmppc_get_gpr(vcpu, rs);
467 break; 467 break;
468 468
469 default: 469 default:
@@ -500,18 +500,20 @@ int kvmppc_44x_emul_tlbsx(struct kvm_vcpu *vcpu, u8 rt, u8 ra, u8 rb, u8 rc)
500 unsigned int as = get_mmucr_sts(vcpu); 500 unsigned int as = get_mmucr_sts(vcpu);
501 unsigned int pid = get_mmucr_stid(vcpu); 501 unsigned int pid = get_mmucr_stid(vcpu);
502 502
503 ea = vcpu->arch.gpr[rb]; 503 ea = kvmppc_get_gpr(vcpu, rb);
504 if (ra) 504 if (ra)
505 ea += vcpu->arch.gpr[ra]; 505 ea += kvmppc_get_gpr(vcpu, ra);
506 506
507 gtlb_index = kvmppc_44x_tlb_index(vcpu, ea, pid, as); 507 gtlb_index = kvmppc_44x_tlb_index(vcpu, ea, pid, as);
508 if (rc) { 508 if (rc) {
509 u32 cr = kvmppc_get_cr(vcpu);
510
509 if (gtlb_index < 0) 511 if (gtlb_index < 0)
510 vcpu->arch.cr &= ~0x20000000; 512 kvmppc_set_cr(vcpu, cr & ~0x20000000);
511 else 513 else
512 vcpu->arch.cr |= 0x20000000; 514 kvmppc_set_cr(vcpu, cr | 0x20000000);
513 } 515 }
514 vcpu->arch.gpr[rt] = gtlb_index; 516 kvmppc_set_gpr(vcpu, rt, gtlb_index);
515 517
516 kvmppc_set_exit_type(vcpu, EMULATED_TLBSX_EXITS); 518 kvmppc_set_exit_type(vcpu, EMULATED_TLBSX_EXITS);
517 return EMULATE_DONE; 519 return EMULATE_DONE;
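The tlbsx hunk above now reads CR once via kvmppc_get_cr() and writes it back through kvmppc_set_cr() instead of poking vcpu->arch.cr. The 0x20000000 mask is CR0[EQ] in the 32-bit CR layout, i.e. the bit a guest's "tlbsx." expects to be set on a TLB hit; a condensed restatement:

/* Condensed restatement of the tlbsx CR update: CR bits 0-3 are
 * CR0's LT, GT, EQ, SO, so mask 0x20000000 is CR0[EQ].  A hit sets
 * EQ, a miss clears it. */
u32 cr = kvmppc_get_cr(vcpu);

if (gtlb_index < 0)
	kvmppc_set_cr(vcpu, cr & ~0x20000000);	/* miss: clear CR0[EQ] */
else
	kvmppc_set_cr(vcpu, cr | 0x20000000);	/* hit: set CR0[EQ] */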
diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig
index fe037fdaf1b3..60624cc9f4d4 100644
--- a/arch/powerpc/kvm/Kconfig
+++ b/arch/powerpc/kvm/Kconfig
@@ -20,6 +20,7 @@ config KVM
20 bool 20 bool
21 select PREEMPT_NOTIFIERS 21 select PREEMPT_NOTIFIERS
22 select ANON_INODES 22 select ANON_INODES
23 select KVM_MMIO
23 24
24config KVM_BOOK3S_64_HANDLER 25config KVM_BOOK3S_64_HANDLER
25 bool 26 bool
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index 3e294bd9b8c6..9a271f0929c7 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -33,12 +33,9 @@
33 33
34/* #define EXIT_DEBUG */ 34/* #define EXIT_DEBUG */
35/* #define EXIT_DEBUG_SIMPLE */ 35/* #define EXIT_DEBUG_SIMPLE */
36/* #define DEBUG_EXT */
36 37
37/* Without AGGRESSIVE_DEC we only fire off a DEC interrupt when DEC turns 0. 38static void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr);
38 * When set, we retrigger a DEC interrupt after that if DEC <= 0.
39 * PPC32 Linux runs faster without AGGRESSIVE_DEC, PPC64 Linux requires it. */
40
41/* #define AGGRESSIVE_DEC */
42 39
43struct kvm_stats_debugfs_item debugfs_entries[] = { 40struct kvm_stats_debugfs_item debugfs_entries[] = {
44 { "exits", VCPU_STAT(sum_exits) }, 41 { "exits", VCPU_STAT(sum_exits) },
@@ -72,16 +69,24 @@ void kvmppc_core_load_guest_debugstate(struct kvm_vcpu *vcpu)
72void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu) 69void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
73{ 70{
74 memcpy(get_paca()->kvm_slb, to_book3s(vcpu)->slb_shadow, sizeof(get_paca()->kvm_slb)); 71 memcpy(get_paca()->kvm_slb, to_book3s(vcpu)->slb_shadow, sizeof(get_paca()->kvm_slb));
72 memcpy(&get_paca()->shadow_vcpu, &to_book3s(vcpu)->shadow_vcpu,
73 sizeof(get_paca()->shadow_vcpu));
75 get_paca()->kvm_slb_max = to_book3s(vcpu)->slb_shadow_max; 74 get_paca()->kvm_slb_max = to_book3s(vcpu)->slb_shadow_max;
76} 75}
77 76
78void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu) 77void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
79{ 78{
80 memcpy(to_book3s(vcpu)->slb_shadow, get_paca()->kvm_slb, sizeof(get_paca()->kvm_slb)); 79 memcpy(to_book3s(vcpu)->slb_shadow, get_paca()->kvm_slb, sizeof(get_paca()->kvm_slb));
80 memcpy(&to_book3s(vcpu)->shadow_vcpu, &get_paca()->shadow_vcpu,
81 sizeof(get_paca()->shadow_vcpu));
81 to_book3s(vcpu)->slb_shadow_max = get_paca()->kvm_slb_max; 82 to_book3s(vcpu)->slb_shadow_max = get_paca()->kvm_slb_max;
83
84 kvmppc_giveup_ext(vcpu, MSR_FP);
85 kvmppc_giveup_ext(vcpu, MSR_VEC);
86 kvmppc_giveup_ext(vcpu, MSR_VSX);
82} 87}
83 88
84#if defined(AGGRESSIVE_DEC) || defined(EXIT_DEBUG) 89#if defined(EXIT_DEBUG)
85static u32 kvmppc_get_dec(struct kvm_vcpu *vcpu) 90static u32 kvmppc_get_dec(struct kvm_vcpu *vcpu)
86{ 91{
87 u64 jd = mftb() - vcpu->arch.dec_jiffies; 92 u64 jd = mftb() - vcpu->arch.dec_jiffies;
@@ -89,6 +94,23 @@ static u32 kvmppc_get_dec(struct kvm_vcpu *vcpu)
89} 94}
90#endif 95#endif
91 96
97static void kvmppc_recalc_shadow_msr(struct kvm_vcpu *vcpu)
98{
99 vcpu->arch.shadow_msr = vcpu->arch.msr;
100 /* Guest MSR values */
101 vcpu->arch.shadow_msr &= MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE |
102 MSR_BE | MSR_DE;
103 /* Process MSR values */
104 vcpu->arch.shadow_msr |= MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_PR |
105 MSR_EE;
 106 /* External provider bits the guest has reserved */
107 vcpu->arch.shadow_msr |= (vcpu->arch.msr & vcpu->arch.guest_owned_ext);
108 /* 64-bit Process MSR values */
109#ifdef CONFIG_PPC_BOOK3S_64
110 vcpu->arch.shadow_msr |= MSR_ISF | MSR_HV;
111#endif
112}
113
92void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr) 114void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
93{ 115{
94 ulong old_msr = vcpu->arch.msr; 116 ulong old_msr = vcpu->arch.msr;
@@ -96,12 +118,10 @@ void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
96#ifdef EXIT_DEBUG 118#ifdef EXIT_DEBUG
97 printk(KERN_INFO "KVM: Set MSR to 0x%llx\n", msr); 119 printk(KERN_INFO "KVM: Set MSR to 0x%llx\n", msr);
98#endif 120#endif
121
99 msr &= to_book3s(vcpu)->msr_mask; 122 msr &= to_book3s(vcpu)->msr_mask;
100 vcpu->arch.msr = msr; 123 vcpu->arch.msr = msr;
101 vcpu->arch.shadow_msr = msr | MSR_USER32; 124 kvmppc_recalc_shadow_msr(vcpu);
102 vcpu->arch.shadow_msr &= ( MSR_VEC | MSR_VSX | MSR_FP | MSR_FE0 |
103 MSR_USER64 | MSR_SE | MSR_BE | MSR_DE |
104 MSR_FE1);
105 125
106 if (msr & (MSR_WE|MSR_POW)) { 126 if (msr & (MSR_WE|MSR_POW)) {
107 if (!vcpu->arch.pending_exceptions) { 127 if (!vcpu->arch.pending_exceptions) {
@@ -125,11 +145,10 @@ void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags)
125 vcpu->arch.mmu.reset_msr(vcpu); 145 vcpu->arch.mmu.reset_msr(vcpu);
126} 146}
127 147
128void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec) 148static int kvmppc_book3s_vec2irqprio(unsigned int vec)
129{ 149{
130 unsigned int prio; 150 unsigned int prio;
131 151
132 vcpu->stat.queue_intr++;
133 switch (vec) { 152 switch (vec) {
134 case 0x100: prio = BOOK3S_IRQPRIO_SYSTEM_RESET; break; 153 case 0x100: prio = BOOK3S_IRQPRIO_SYSTEM_RESET; break;
135 case 0x200: prio = BOOK3S_IRQPRIO_MACHINE_CHECK; break; 154 case 0x200: prio = BOOK3S_IRQPRIO_MACHINE_CHECK; break;
@@ -149,15 +168,31 @@ void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec)
149 default: prio = BOOK3S_IRQPRIO_MAX; break; 168 default: prio = BOOK3S_IRQPRIO_MAX; break;
150 } 169 }
151 170
152 set_bit(prio, &vcpu->arch.pending_exceptions); 171 return prio;
172}
173
174static void kvmppc_book3s_dequeue_irqprio(struct kvm_vcpu *vcpu,
175 unsigned int vec)
176{
177 clear_bit(kvmppc_book3s_vec2irqprio(vec),
178 &vcpu->arch.pending_exceptions);
179}
180
181void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec)
182{
183 vcpu->stat.queue_intr++;
184
185 set_bit(kvmppc_book3s_vec2irqprio(vec),
186 &vcpu->arch.pending_exceptions);
153#ifdef EXIT_DEBUG 187#ifdef EXIT_DEBUG
154 printk(KERN_INFO "Queueing interrupt %x\n", vec); 188 printk(KERN_INFO "Queueing interrupt %x\n", vec);
155#endif 189#endif
156} 190}
157 191
158 192
159void kvmppc_core_queue_program(struct kvm_vcpu *vcpu) 193void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags)
160{ 194{
195 to_book3s(vcpu)->prog_flags = flags;
161 kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_PROGRAM); 196 kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_PROGRAM);
162} 197}
163 198
@@ -171,6 +206,11 @@ int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu)
171 return test_bit(BOOK3S_INTERRUPT_DECREMENTER >> 7, &vcpu->arch.pending_exceptions); 206 return test_bit(BOOK3S_INTERRUPT_DECREMENTER >> 7, &vcpu->arch.pending_exceptions);
172} 207}
173 208
209void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu)
210{
211 kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_DECREMENTER);
212}
213
174void kvmppc_core_queue_external(struct kvm_vcpu *vcpu, 214void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
175 struct kvm_interrupt *irq) 215 struct kvm_interrupt *irq)
176{ 216{
@@ -181,6 +221,7 @@ int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu, unsigned int priority)
181{ 221{
182 int deliver = 1; 222 int deliver = 1;
183 int vec = 0; 223 int vec = 0;
224 ulong flags = 0ULL;
184 225
185 switch (priority) { 226 switch (priority) {
186 case BOOK3S_IRQPRIO_DECREMENTER: 227 case BOOK3S_IRQPRIO_DECREMENTER:
@@ -214,6 +255,7 @@ int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu, unsigned int priority)
214 break; 255 break;
215 case BOOK3S_IRQPRIO_PROGRAM: 256 case BOOK3S_IRQPRIO_PROGRAM:
216 vec = BOOK3S_INTERRUPT_PROGRAM; 257 vec = BOOK3S_INTERRUPT_PROGRAM;
258 flags = to_book3s(vcpu)->prog_flags;
217 break; 259 break;
218 case BOOK3S_IRQPRIO_VSX: 260 case BOOK3S_IRQPRIO_VSX:
219 vec = BOOK3S_INTERRUPT_VSX; 261 vec = BOOK3S_INTERRUPT_VSX;
@@ -244,7 +286,7 @@ int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu, unsigned int priority)
244#endif 286#endif
245 287
246 if (deliver) 288 if (deliver)
247 kvmppc_inject_interrupt(vcpu, vec, 0ULL); 289 kvmppc_inject_interrupt(vcpu, vec, flags);
248 290
249 return deliver; 291 return deliver;
250} 292}
@@ -254,21 +296,15 @@ void kvmppc_core_deliver_interrupts(struct kvm_vcpu *vcpu)
254 unsigned long *pending = &vcpu->arch.pending_exceptions; 296 unsigned long *pending = &vcpu->arch.pending_exceptions;
255 unsigned int priority; 297 unsigned int priority;
256 298
257 /* XXX be more clever here - no need to mftb() on every entry */
258 /* Issue DEC again if it's still active */
259#ifdef AGGRESSIVE_DEC
260 if (vcpu->arch.msr & MSR_EE)
261 if (kvmppc_get_dec(vcpu) & 0x80000000)
262 kvmppc_core_queue_dec(vcpu);
263#endif
264
265#ifdef EXIT_DEBUG 299#ifdef EXIT_DEBUG
266 if (vcpu->arch.pending_exceptions) 300 if (vcpu->arch.pending_exceptions)
267 printk(KERN_EMERG "KVM: Check pending: %lx\n", vcpu->arch.pending_exceptions); 301 printk(KERN_EMERG "KVM: Check pending: %lx\n", vcpu->arch.pending_exceptions);
268#endif 302#endif
269 priority = __ffs(*pending); 303 priority = __ffs(*pending);
270 while (priority <= (sizeof(unsigned int) * 8)) { 304 while (priority <= (sizeof(unsigned int) * 8)) {
271 if (kvmppc_book3s_irqprio_deliver(vcpu, priority)) { 305 if (kvmppc_book3s_irqprio_deliver(vcpu, priority) &&
306 (priority != BOOK3S_IRQPRIO_DECREMENTER)) {
307 /* DEC interrupts get cleared by mtdec */
272 clear_bit(priority, &vcpu->arch.pending_exceptions); 308 clear_bit(priority, &vcpu->arch.pending_exceptions);
273 break; 309 break;
274 } 310 }
@@ -503,14 +539,14 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
503 /* Page not found in guest PTE entries */ 539 /* Page not found in guest PTE entries */
504 vcpu->arch.dear = vcpu->arch.fault_dear; 540 vcpu->arch.dear = vcpu->arch.fault_dear;
505 to_book3s(vcpu)->dsisr = vcpu->arch.fault_dsisr; 541 to_book3s(vcpu)->dsisr = vcpu->arch.fault_dsisr;
506 vcpu->arch.msr |= (vcpu->arch.shadow_msr & 0x00000000f8000000ULL); 542 vcpu->arch.msr |= (vcpu->arch.shadow_srr1 & 0x00000000f8000000ULL);
507 kvmppc_book3s_queue_irqprio(vcpu, vec); 543 kvmppc_book3s_queue_irqprio(vcpu, vec);
508 } else if (page_found == -EPERM) { 544 } else if (page_found == -EPERM) {
509 /* Storage protection */ 545 /* Storage protection */
510 vcpu->arch.dear = vcpu->arch.fault_dear; 546 vcpu->arch.dear = vcpu->arch.fault_dear;
511 to_book3s(vcpu)->dsisr = vcpu->arch.fault_dsisr & ~DSISR_NOHPTE; 547 to_book3s(vcpu)->dsisr = vcpu->arch.fault_dsisr & ~DSISR_NOHPTE;
512 to_book3s(vcpu)->dsisr |= DSISR_PROTFAULT; 548 to_book3s(vcpu)->dsisr |= DSISR_PROTFAULT;
513 vcpu->arch.msr |= (vcpu->arch.shadow_msr & 0x00000000f8000000ULL); 549 vcpu->arch.msr |= (vcpu->arch.shadow_srr1 & 0x00000000f8000000ULL);
514 kvmppc_book3s_queue_irqprio(vcpu, vec); 550 kvmppc_book3s_queue_irqprio(vcpu, vec);
515 } else if (page_found == -EINVAL) { 551 } else if (page_found == -EINVAL) {
516 /* Page not found in guest SLB */ 552 /* Page not found in guest SLB */
@@ -532,13 +568,122 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
532 r = kvmppc_emulate_mmio(run, vcpu); 568 r = kvmppc_emulate_mmio(run, vcpu);
533 if ( r == RESUME_HOST_NV ) 569 if ( r == RESUME_HOST_NV )
534 r = RESUME_HOST; 570 r = RESUME_HOST;
535 if ( r == RESUME_GUEST_NV )
536 r = RESUME_GUEST;
537 } 571 }
538 572
539 return r; 573 return r;
540} 574}
541 575
576static inline int get_fpr_index(int i)
577{
578#ifdef CONFIG_VSX
579 i *= 2;
580#endif
581 return i;
582}
583
584/* Give up external provider (FPU, Altivec, VSX) */
585static void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr)
586{
587 struct thread_struct *t = &current->thread;
588 u64 *vcpu_fpr = vcpu->arch.fpr;
589 u64 *vcpu_vsx = vcpu->arch.vsr;
590 u64 *thread_fpr = (u64*)t->fpr;
591 int i;
592
593 if (!(vcpu->arch.guest_owned_ext & msr))
594 return;
595
596#ifdef DEBUG_EXT
597 printk(KERN_INFO "Giving up ext 0x%lx\n", msr);
598#endif
599
600 switch (msr) {
601 case MSR_FP:
602 giveup_fpu(current);
603 for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++)
604 vcpu_fpr[i] = thread_fpr[get_fpr_index(i)];
605
606 vcpu->arch.fpscr = t->fpscr.val;
607 break;
608 case MSR_VEC:
609#ifdef CONFIG_ALTIVEC
610 giveup_altivec(current);
611 memcpy(vcpu->arch.vr, t->vr, sizeof(vcpu->arch.vr));
612 vcpu->arch.vscr = t->vscr;
613#endif
614 break;
615 case MSR_VSX:
616#ifdef CONFIG_VSX
617 __giveup_vsx(current);
618 for (i = 0; i < ARRAY_SIZE(vcpu->arch.vsr); i++)
619 vcpu_vsx[i] = thread_fpr[get_fpr_index(i) + 1];
620#endif
621 break;
622 default:
623 BUG();
624 }
625
626 vcpu->arch.guest_owned_ext &= ~msr;
627 current->thread.regs->msr &= ~msr;
628 kvmppc_recalc_shadow_msr(vcpu);
629}
630
631/* Handle external providers (FPU, Altivec, VSX) */
632static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
633 ulong msr)
634{
635 struct thread_struct *t = &current->thread;
636 u64 *vcpu_fpr = vcpu->arch.fpr;
637 u64 *vcpu_vsx = vcpu->arch.vsr;
638 u64 *thread_fpr = (u64*)t->fpr;
639 int i;
640
641 if (!(vcpu->arch.msr & msr)) {
642 kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
643 return RESUME_GUEST;
644 }
645
646#ifdef DEBUG_EXT
647 printk(KERN_INFO "Loading up ext 0x%lx\n", msr);
648#endif
649
650 current->thread.regs->msr |= msr;
651
652 switch (msr) {
653 case MSR_FP:
654 for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++)
655 thread_fpr[get_fpr_index(i)] = vcpu_fpr[i];
656
657 t->fpscr.val = vcpu->arch.fpscr;
658 t->fpexc_mode = 0;
659 kvmppc_load_up_fpu();
660 break;
661 case MSR_VEC:
662#ifdef CONFIG_ALTIVEC
663 memcpy(t->vr, vcpu->arch.vr, sizeof(vcpu->arch.vr));
664 t->vscr = vcpu->arch.vscr;
665 t->vrsave = -1;
666 kvmppc_load_up_altivec();
667#endif
668 break;
669 case MSR_VSX:
670#ifdef CONFIG_VSX
671 for (i = 0; i < ARRAY_SIZE(vcpu->arch.vsr); i++)
672 thread_fpr[get_fpr_index(i) + 1] = vcpu_vsx[i];
673 kvmppc_load_up_vsx();
674#endif
675 break;
676 default:
677 BUG();
678 }
679
680 vcpu->arch.guest_owned_ext |= msr;
681
682 kvmppc_recalc_shadow_msr(vcpu);
683
684 return RESUME_GUEST;
685}
686
542int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, 687int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
543 unsigned int exit_nr) 688 unsigned int exit_nr)
544{ 689{
@@ -563,7 +708,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
563 case BOOK3S_INTERRUPT_INST_STORAGE: 708 case BOOK3S_INTERRUPT_INST_STORAGE:
564 vcpu->stat.pf_instruc++; 709 vcpu->stat.pf_instruc++;
565 /* only care about PTEG not found errors, but leave NX alone */ 710 /* only care about PTEG not found errors, but leave NX alone */
566 if (vcpu->arch.shadow_msr & 0x40000000) { 711 if (vcpu->arch.shadow_srr1 & 0x40000000) {
567 r = kvmppc_handle_pagefault(run, vcpu, vcpu->arch.pc, exit_nr); 712 r = kvmppc_handle_pagefault(run, vcpu, vcpu->arch.pc, exit_nr);
568 vcpu->stat.sp_instruc++; 713 vcpu->stat.sp_instruc++;
569 } else if (vcpu->arch.mmu.is_dcbz32(vcpu) && 714 } else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
@@ -575,7 +720,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
575 */ 720 */
576 kvmppc_mmu_pte_flush(vcpu, vcpu->arch.pc, ~0xFFFULL); 721 kvmppc_mmu_pte_flush(vcpu, vcpu->arch.pc, ~0xFFFULL);
577 } else { 722 } else {
578 vcpu->arch.msr |= (vcpu->arch.shadow_msr & 0x58000000); 723 vcpu->arch.msr |= vcpu->arch.shadow_srr1 & 0x58000000;
579 kvmppc_book3s_queue_irqprio(vcpu, exit_nr); 724 kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
580 kvmppc_mmu_pte_flush(vcpu, vcpu->arch.pc, ~0xFFFULL); 725 kvmppc_mmu_pte_flush(vcpu, vcpu->arch.pc, ~0xFFFULL);
581 r = RESUME_GUEST; 726 r = RESUME_GUEST;
@@ -621,6 +766,9 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
621 case BOOK3S_INTERRUPT_PROGRAM: 766 case BOOK3S_INTERRUPT_PROGRAM:
622 { 767 {
623 enum emulation_result er; 768 enum emulation_result er;
769 ulong flags;
770
771 flags = vcpu->arch.shadow_srr1 & 0x1f0000ull;
624 772
625 if (vcpu->arch.msr & MSR_PR) { 773 if (vcpu->arch.msr & MSR_PR) {
626#ifdef EXIT_DEBUG 774#ifdef EXIT_DEBUG
@@ -628,7 +776,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
628#endif 776#endif
629 if ((vcpu->arch.last_inst & 0xff0007ff) != 777 if ((vcpu->arch.last_inst & 0xff0007ff) !=
630 (INS_DCBZ & 0xfffffff7)) { 778 (INS_DCBZ & 0xfffffff7)) {
631 kvmppc_book3s_queue_irqprio(vcpu, exit_nr); 779 kvmppc_core_queue_program(vcpu, flags);
632 r = RESUME_GUEST; 780 r = RESUME_GUEST;
633 break; 781 break;
634 } 782 }
@@ -638,12 +786,12 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
638 er = kvmppc_emulate_instruction(run, vcpu); 786 er = kvmppc_emulate_instruction(run, vcpu);
639 switch (er) { 787 switch (er) {
640 case EMULATE_DONE: 788 case EMULATE_DONE:
641 r = RESUME_GUEST; 789 r = RESUME_GUEST_NV;
642 break; 790 break;
643 case EMULATE_FAIL: 791 case EMULATE_FAIL:
644 printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n", 792 printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
645 __func__, vcpu->arch.pc, vcpu->arch.last_inst); 793 __func__, vcpu->arch.pc, vcpu->arch.last_inst);
646 kvmppc_book3s_queue_irqprio(vcpu, exit_nr); 794 kvmppc_core_queue_program(vcpu, flags);
647 r = RESUME_GUEST; 795 r = RESUME_GUEST;
648 break; 796 break;
649 default: 797 default:
@@ -653,23 +801,30 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
653 } 801 }
654 case BOOK3S_INTERRUPT_SYSCALL: 802 case BOOK3S_INTERRUPT_SYSCALL:
655#ifdef EXIT_DEBUG 803#ifdef EXIT_DEBUG
656 printk(KERN_INFO "Syscall Nr %d\n", (int)vcpu->arch.gpr[0]); 804 printk(KERN_INFO "Syscall Nr %d\n", (int)kvmppc_get_gpr(vcpu, 0));
657#endif 805#endif
658 vcpu->stat.syscall_exits++; 806 vcpu->stat.syscall_exits++;
659 kvmppc_book3s_queue_irqprio(vcpu, exit_nr); 807 kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
660 r = RESUME_GUEST; 808 r = RESUME_GUEST;
661 break; 809 break;
662 case BOOK3S_INTERRUPT_MACHINE_CHECK:
663 case BOOK3S_INTERRUPT_FP_UNAVAIL: 810 case BOOK3S_INTERRUPT_FP_UNAVAIL:
664 case BOOK3S_INTERRUPT_TRACE: 811 r = kvmppc_handle_ext(vcpu, exit_nr, MSR_FP);
812 break;
665 case BOOK3S_INTERRUPT_ALTIVEC: 813 case BOOK3S_INTERRUPT_ALTIVEC:
814 r = kvmppc_handle_ext(vcpu, exit_nr, MSR_VEC);
815 break;
666 case BOOK3S_INTERRUPT_VSX: 816 case BOOK3S_INTERRUPT_VSX:
817 r = kvmppc_handle_ext(vcpu, exit_nr, MSR_VSX);
818 break;
819 case BOOK3S_INTERRUPT_MACHINE_CHECK:
820 case BOOK3S_INTERRUPT_TRACE:
667 kvmppc_book3s_queue_irqprio(vcpu, exit_nr); 821 kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
668 r = RESUME_GUEST; 822 r = RESUME_GUEST;
669 break; 823 break;
670 default: 824 default:
671 /* Ugh - bork here! What did we get? */ 825 /* Ugh - bork here! What did we get? */
672 printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | msr=0x%lx\n", exit_nr, vcpu->arch.pc, vcpu->arch.shadow_msr); 826 printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | msr=0x%lx\n",
827 exit_nr, vcpu->arch.pc, vcpu->arch.shadow_srr1);
673 r = RESUME_HOST; 828 r = RESUME_HOST;
674 BUG(); 829 BUG();
675 break; 830 break;
@@ -712,10 +867,10 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
712 int i; 867 int i;
713 868
714 regs->pc = vcpu->arch.pc; 869 regs->pc = vcpu->arch.pc;
715 regs->cr = vcpu->arch.cr; 870 regs->cr = kvmppc_get_cr(vcpu);
716 regs->ctr = vcpu->arch.ctr; 871 regs->ctr = vcpu->arch.ctr;
717 regs->lr = vcpu->arch.lr; 872 regs->lr = vcpu->arch.lr;
718 regs->xer = vcpu->arch.xer; 873 regs->xer = kvmppc_get_xer(vcpu);
719 regs->msr = vcpu->arch.msr; 874 regs->msr = vcpu->arch.msr;
720 regs->srr0 = vcpu->arch.srr0; 875 regs->srr0 = vcpu->arch.srr0;
721 regs->srr1 = vcpu->arch.srr1; 876 regs->srr1 = vcpu->arch.srr1;
@@ -729,7 +884,7 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
729 regs->sprg7 = vcpu->arch.sprg6; 884 regs->sprg7 = vcpu->arch.sprg6;
730 885
731 for (i = 0; i < ARRAY_SIZE(regs->gpr); i++) 886 for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
732 regs->gpr[i] = vcpu->arch.gpr[i]; 887 regs->gpr[i] = kvmppc_get_gpr(vcpu, i);
733 888
734 return 0; 889 return 0;
735} 890}
@@ -739,10 +894,10 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
739 int i; 894 int i;
740 895
741 vcpu->arch.pc = regs->pc; 896 vcpu->arch.pc = regs->pc;
742 vcpu->arch.cr = regs->cr; 897 kvmppc_set_cr(vcpu, regs->cr);
743 vcpu->arch.ctr = regs->ctr; 898 vcpu->arch.ctr = regs->ctr;
744 vcpu->arch.lr = regs->lr; 899 vcpu->arch.lr = regs->lr;
745 vcpu->arch.xer = regs->xer; 900 kvmppc_set_xer(vcpu, regs->xer);
746 kvmppc_set_msr(vcpu, regs->msr); 901 kvmppc_set_msr(vcpu, regs->msr);
747 vcpu->arch.srr0 = regs->srr0; 902 vcpu->arch.srr0 = regs->srr0;
748 vcpu->arch.srr1 = regs->srr1; 903 vcpu->arch.srr1 = regs->srr1;
@@ -754,8 +909,8 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
754 vcpu->arch.sprg6 = regs->sprg5; 909 vcpu->arch.sprg6 = regs->sprg5;
755 vcpu->arch.sprg7 = regs->sprg6; 910 vcpu->arch.sprg7 = regs->sprg6;
756 911
757 for (i = 0; i < ARRAY_SIZE(vcpu->arch.gpr); i++) 912 for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
758 vcpu->arch.gpr[i] = regs->gpr[i]; 913 kvmppc_set_gpr(vcpu, i, regs->gpr[i]);
759 914
760 return 0; 915 return 0;
761} 916}
@@ -850,7 +1005,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
850 int is_dirty = 0; 1005 int is_dirty = 0;
851 int r, n; 1006 int r, n;
852 1007
853 down_write(&kvm->slots_lock); 1008 mutex_lock(&kvm->slots_lock);
854 1009
855 r = kvm_get_dirty_log(kvm, log, &is_dirty); 1010 r = kvm_get_dirty_log(kvm, log, &is_dirty);
856 if (r) 1011 if (r)
@@ -858,7 +1013,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
858 1013
859 /* If nothing is dirty, don't bother messing with page tables. */ 1014 /* If nothing is dirty, don't bother messing with page tables. */
860 if (is_dirty) { 1015 if (is_dirty) {
861 memslot = &kvm->memslots[log->slot]; 1016 memslot = &kvm->memslots->memslots[log->slot];
862 1017
863 ga = memslot->base_gfn << PAGE_SHIFT; 1018 ga = memslot->base_gfn << PAGE_SHIFT;
864 ga_end = ga + (memslot->npages << PAGE_SHIFT); 1019 ga_end = ga + (memslot->npages << PAGE_SHIFT);
@@ -872,7 +1027,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
872 1027
873 r = 0; 1028 r = 0;
874out: 1029out:
875 up_write(&kvm->slots_lock); 1030 mutex_unlock(&kvm->slots_lock);
876 return r; 1031 return r;
877} 1032}
878 1033
@@ -910,6 +1065,7 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
910 vcpu->arch.trampoline_lowmem = kvmppc_trampoline_lowmem; 1065 vcpu->arch.trampoline_lowmem = kvmppc_trampoline_lowmem;
911 vcpu->arch.trampoline_enter = kvmppc_trampoline_enter; 1066 vcpu->arch.trampoline_enter = kvmppc_trampoline_enter;
912 vcpu->arch.highmem_handler = (ulong)kvmppc_handler_highmem; 1067 vcpu->arch.highmem_handler = (ulong)kvmppc_handler_highmem;
1068 vcpu->arch.rmcall = *(ulong*)kvmppc_rmcall;
913 1069
914 vcpu->arch.shadow_msr = MSR_USER64; 1070 vcpu->arch.shadow_msr = MSR_USER64;
915 1071
@@ -943,6 +1099,10 @@ extern int __kvmppc_vcpu_entry(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
943int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) 1099int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
944{ 1100{
945 int ret; 1101 int ret;
1102 struct thread_struct ext_bkp;
1103 bool save_vec = current->thread.used_vr;
1104 bool save_vsx = current->thread.used_vsr;
1105 ulong ext_msr;
946 1106
947 /* No need to go into the guest when all we do is going out */ 1107 /* No need to go into the guest when all we do is going out */
948 if (signal_pending(current)) { 1108 if (signal_pending(current)) {
@@ -950,6 +1110,35 @@ int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
950 return -EINTR; 1110 return -EINTR;
951 } 1111 }
952 1112
1113 /* Save FPU state in stack */
1114 if (current->thread.regs->msr & MSR_FP)
1115 giveup_fpu(current);
1116 memcpy(ext_bkp.fpr, current->thread.fpr, sizeof(current->thread.fpr));
1117 ext_bkp.fpscr = current->thread.fpscr;
1118 ext_bkp.fpexc_mode = current->thread.fpexc_mode;
1119
1120#ifdef CONFIG_ALTIVEC
1121 /* Save Altivec state in stack */
1122 if (save_vec) {
1123 if (current->thread.regs->msr & MSR_VEC)
1124 giveup_altivec(current);
1125 memcpy(ext_bkp.vr, current->thread.vr, sizeof(ext_bkp.vr));
1126 ext_bkp.vscr = current->thread.vscr;
1127 ext_bkp.vrsave = current->thread.vrsave;
1128 }
1129 ext_bkp.used_vr = current->thread.used_vr;
1130#endif
1131
1132#ifdef CONFIG_VSX
1133 /* Save VSX state in stack */
1134 if (save_vsx && (current->thread.regs->msr & MSR_VSX))
1135 __giveup_vsx(current);
1136 ext_bkp.used_vsr = current->thread.used_vsr;
1137#endif
1138
1139 /* Remember the MSR with disabled extensions */
1140 ext_msr = current->thread.regs->msr;
1141
953 /* XXX we get called with irq disabled - change that! */ 1142 /* XXX we get called with irq disabled - change that! */
954 local_irq_enable(); 1143 local_irq_enable();
955 1144
@@ -957,6 +1146,32 @@ int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
957 1146
958 local_irq_disable(); 1147 local_irq_disable();
959 1148
1149 current->thread.regs->msr = ext_msr;
1150
1151 /* Make sure we save the guest FPU/Altivec/VSX state */
1152 kvmppc_giveup_ext(vcpu, MSR_FP);
1153 kvmppc_giveup_ext(vcpu, MSR_VEC);
1154 kvmppc_giveup_ext(vcpu, MSR_VSX);
1155
1156 /* Restore FPU state from stack */
1157 memcpy(current->thread.fpr, ext_bkp.fpr, sizeof(ext_bkp.fpr));
1158 current->thread.fpscr = ext_bkp.fpscr;
1159 current->thread.fpexc_mode = ext_bkp.fpexc_mode;
1160
1161#ifdef CONFIG_ALTIVEC
1162 /* Restore Altivec state from stack */
1163 if (save_vec && current->thread.used_vr) {
1164 memcpy(current->thread.vr, ext_bkp.vr, sizeof(ext_bkp.vr));
1165 current->thread.vscr = ext_bkp.vscr;
1166 current->thread.vrsave = ext_bkp.vrsave;
1167 }
1168 current->thread.used_vr = ext_bkp.used_vr;
1169#endif
1170
1171#ifdef CONFIG_VSX
1172 current->thread.used_vsr = ext_bkp.used_vsr;
1173#endif
1174
960 return ret; 1175 return ret;
961} 1176}
962 1177
diff --git a/arch/powerpc/kvm/book3s_64_emulate.c b/arch/powerpc/kvm/book3s_64_emulate.c
index 1027eac6d474..2b0ee7e040c9 100644
--- a/arch/powerpc/kvm/book3s_64_emulate.c
+++ b/arch/powerpc/kvm/book3s_64_emulate.c
@@ -65,11 +65,11 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
65 case 31: 65 case 31:
66 switch (get_xop(inst)) { 66 switch (get_xop(inst)) {
67 case OP_31_XOP_MFMSR: 67 case OP_31_XOP_MFMSR:
68 vcpu->arch.gpr[get_rt(inst)] = vcpu->arch.msr; 68 kvmppc_set_gpr(vcpu, get_rt(inst), vcpu->arch.msr);
69 break; 69 break;
70 case OP_31_XOP_MTMSRD: 70 case OP_31_XOP_MTMSRD:
71 { 71 {
72 ulong rs = vcpu->arch.gpr[get_rs(inst)]; 72 ulong rs = kvmppc_get_gpr(vcpu, get_rs(inst));
73 if (inst & 0x10000) { 73 if (inst & 0x10000) {
74 vcpu->arch.msr &= ~(MSR_RI | MSR_EE); 74 vcpu->arch.msr &= ~(MSR_RI | MSR_EE);
75 vcpu->arch.msr |= rs & (MSR_RI | MSR_EE); 75 vcpu->arch.msr |= rs & (MSR_RI | MSR_EE);
@@ -78,30 +78,30 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
78 break; 78 break;
79 } 79 }
80 case OP_31_XOP_MTMSR: 80 case OP_31_XOP_MTMSR:
81 kvmppc_set_msr(vcpu, vcpu->arch.gpr[get_rs(inst)]); 81 kvmppc_set_msr(vcpu, kvmppc_get_gpr(vcpu, get_rs(inst)));
82 break; 82 break;
83 case OP_31_XOP_MFSRIN: 83 case OP_31_XOP_MFSRIN:
84 { 84 {
85 int srnum; 85 int srnum;
86 86
87 srnum = (vcpu->arch.gpr[get_rb(inst)] >> 28) & 0xf; 87 srnum = (kvmppc_get_gpr(vcpu, get_rb(inst)) >> 28) & 0xf;
88 if (vcpu->arch.mmu.mfsrin) { 88 if (vcpu->arch.mmu.mfsrin) {
89 u32 sr; 89 u32 sr;
90 sr = vcpu->arch.mmu.mfsrin(vcpu, srnum); 90 sr = vcpu->arch.mmu.mfsrin(vcpu, srnum);
91 vcpu->arch.gpr[get_rt(inst)] = sr; 91 kvmppc_set_gpr(vcpu, get_rt(inst), sr);
92 } 92 }
93 break; 93 break;
94 } 94 }
95 case OP_31_XOP_MTSRIN: 95 case OP_31_XOP_MTSRIN:
96 vcpu->arch.mmu.mtsrin(vcpu, 96 vcpu->arch.mmu.mtsrin(vcpu,
97 (vcpu->arch.gpr[get_rb(inst)] >> 28) & 0xf, 97 (kvmppc_get_gpr(vcpu, get_rb(inst)) >> 28) & 0xf,
98 vcpu->arch.gpr[get_rs(inst)]); 98 kvmppc_get_gpr(vcpu, get_rs(inst)));
99 break; 99 break;
100 case OP_31_XOP_TLBIE: 100 case OP_31_XOP_TLBIE:
101 case OP_31_XOP_TLBIEL: 101 case OP_31_XOP_TLBIEL:
102 { 102 {
103 bool large = (inst & 0x00200000) ? true : false; 103 bool large = (inst & 0x00200000) ? true : false;
104 ulong addr = vcpu->arch.gpr[get_rb(inst)]; 104 ulong addr = kvmppc_get_gpr(vcpu, get_rb(inst));
105 vcpu->arch.mmu.tlbie(vcpu, addr, large); 105 vcpu->arch.mmu.tlbie(vcpu, addr, large);
106 break; 106 break;
107 } 107 }
@@ -111,14 +111,16 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
111 if (!vcpu->arch.mmu.slbmte) 111 if (!vcpu->arch.mmu.slbmte)
112 return EMULATE_FAIL; 112 return EMULATE_FAIL;
113 113
114 vcpu->arch.mmu.slbmte(vcpu, vcpu->arch.gpr[get_rs(inst)], 114 vcpu->arch.mmu.slbmte(vcpu,
115 vcpu->arch.gpr[get_rb(inst)]); 115 kvmppc_get_gpr(vcpu, get_rs(inst)),
116 kvmppc_get_gpr(vcpu, get_rb(inst)));
116 break; 117 break;
117 case OP_31_XOP_SLBIE: 118 case OP_31_XOP_SLBIE:
118 if (!vcpu->arch.mmu.slbie) 119 if (!vcpu->arch.mmu.slbie)
119 return EMULATE_FAIL; 120 return EMULATE_FAIL;
120 121
121 vcpu->arch.mmu.slbie(vcpu, vcpu->arch.gpr[get_rb(inst)]); 122 vcpu->arch.mmu.slbie(vcpu,
123 kvmppc_get_gpr(vcpu, get_rb(inst)));
122 break; 124 break;
123 case OP_31_XOP_SLBIA: 125 case OP_31_XOP_SLBIA:
124 if (!vcpu->arch.mmu.slbia) 126 if (!vcpu->arch.mmu.slbia)
@@ -132,9 +134,9 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
132 } else { 134 } else {
133 ulong t, rb; 135 ulong t, rb;
134 136
135 rb = vcpu->arch.gpr[get_rb(inst)]; 137 rb = kvmppc_get_gpr(vcpu, get_rb(inst));
136 t = vcpu->arch.mmu.slbmfee(vcpu, rb); 138 t = vcpu->arch.mmu.slbmfee(vcpu, rb);
137 vcpu->arch.gpr[get_rt(inst)] = t; 139 kvmppc_set_gpr(vcpu, get_rt(inst), t);
138 } 140 }
139 break; 141 break;
140 case OP_31_XOP_SLBMFEV: 142 case OP_31_XOP_SLBMFEV:
@@ -143,20 +145,20 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
143 } else { 145 } else {
144 ulong t, rb; 146 ulong t, rb;
145 147
146 rb = vcpu->arch.gpr[get_rb(inst)]; 148 rb = kvmppc_get_gpr(vcpu, get_rb(inst));
147 t = vcpu->arch.mmu.slbmfev(vcpu, rb); 149 t = vcpu->arch.mmu.slbmfev(vcpu, rb);
148 vcpu->arch.gpr[get_rt(inst)] = t; 150 kvmppc_set_gpr(vcpu, get_rt(inst), t);
149 } 151 }
150 break; 152 break;
151 case OP_31_XOP_DCBZ: 153 case OP_31_XOP_DCBZ:
152 { 154 {
153 ulong rb = vcpu->arch.gpr[get_rb(inst)]; 155 ulong rb = kvmppc_get_gpr(vcpu, get_rb(inst));
154 ulong ra = 0; 156 ulong ra = 0;
155 ulong addr; 157 ulong addr;
156 u32 zeros[8] = { 0, 0, 0, 0, 0, 0, 0, 0 }; 158 u32 zeros[8] = { 0, 0, 0, 0, 0, 0, 0, 0 };
157 159
158 if (get_ra(inst)) 160 if (get_ra(inst))
159 ra = vcpu->arch.gpr[get_ra(inst)]; 161 ra = kvmppc_get_gpr(vcpu, get_ra(inst));
160 162
161 addr = (ra + rb) & ~31ULL; 163 addr = (ra + rb) & ~31ULL;
162 if (!(vcpu->arch.msr & MSR_SF)) 164 if (!(vcpu->arch.msr & MSR_SF))
@@ -233,43 +235,44 @@ static void kvmppc_write_bat(struct kvm_vcpu *vcpu, int sprn, u32 val)
233int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs) 235int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
234{ 236{
235 int emulated = EMULATE_DONE; 237 int emulated = EMULATE_DONE;
238 ulong spr_val = kvmppc_get_gpr(vcpu, rs);
236 239
237 switch (sprn) { 240 switch (sprn) {
238 case SPRN_SDR1: 241 case SPRN_SDR1:
239 to_book3s(vcpu)->sdr1 = vcpu->arch.gpr[rs]; 242 to_book3s(vcpu)->sdr1 = spr_val;
240 break; 243 break;
241 case SPRN_DSISR: 244 case SPRN_DSISR:
242 to_book3s(vcpu)->dsisr = vcpu->arch.gpr[rs]; 245 to_book3s(vcpu)->dsisr = spr_val;
243 break; 246 break;
244 case SPRN_DAR: 247 case SPRN_DAR:
245 vcpu->arch.dear = vcpu->arch.gpr[rs]; 248 vcpu->arch.dear = spr_val;
246 break; 249 break;
247 case SPRN_HIOR: 250 case SPRN_HIOR:
248 to_book3s(vcpu)->hior = vcpu->arch.gpr[rs]; 251 to_book3s(vcpu)->hior = spr_val;
249 break; 252 break;
250 case SPRN_IBAT0U ... SPRN_IBAT3L: 253 case SPRN_IBAT0U ... SPRN_IBAT3L:
251 case SPRN_IBAT4U ... SPRN_IBAT7L: 254 case SPRN_IBAT4U ... SPRN_IBAT7L:
252 case SPRN_DBAT0U ... SPRN_DBAT3L: 255 case SPRN_DBAT0U ... SPRN_DBAT3L:
253 case SPRN_DBAT4U ... SPRN_DBAT7L: 256 case SPRN_DBAT4U ... SPRN_DBAT7L:
254 kvmppc_write_bat(vcpu, sprn, (u32)vcpu->arch.gpr[rs]); 257 kvmppc_write_bat(vcpu, sprn, (u32)spr_val);
255 /* BAT writes happen so rarely that we're ok to flush 258 /* BAT writes happen so rarely that we're ok to flush
256 * everything here */ 259 * everything here */
257 kvmppc_mmu_pte_flush(vcpu, 0, 0); 260 kvmppc_mmu_pte_flush(vcpu, 0, 0);
258 break; 261 break;
259 case SPRN_HID0: 262 case SPRN_HID0:
260 to_book3s(vcpu)->hid[0] = vcpu->arch.gpr[rs]; 263 to_book3s(vcpu)->hid[0] = spr_val;
261 break; 264 break;
262 case SPRN_HID1: 265 case SPRN_HID1:
263 to_book3s(vcpu)->hid[1] = vcpu->arch.gpr[rs]; 266 to_book3s(vcpu)->hid[1] = spr_val;
264 break; 267 break;
265 case SPRN_HID2: 268 case SPRN_HID2:
266 to_book3s(vcpu)->hid[2] = vcpu->arch.gpr[rs]; 269 to_book3s(vcpu)->hid[2] = spr_val;
267 break; 270 break;
268 case SPRN_HID4: 271 case SPRN_HID4:
269 to_book3s(vcpu)->hid[4] = vcpu->arch.gpr[rs]; 272 to_book3s(vcpu)->hid[4] = spr_val;
270 break; 273 break;
271 case SPRN_HID5: 274 case SPRN_HID5:
272 to_book3s(vcpu)->hid[5] = vcpu->arch.gpr[rs]; 275 to_book3s(vcpu)->hid[5] = spr_val;
273 /* guest HID5 set can change is_dcbz32 */ 276 /* guest HID5 set can change is_dcbz32 */
274 if (vcpu->arch.mmu.is_dcbz32(vcpu) && 277 if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
275 (mfmsr() & MSR_HV)) 278 (mfmsr() & MSR_HV))
@@ -299,38 +302,38 @@ int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
299 302
300 switch (sprn) { 303 switch (sprn) {
301 case SPRN_SDR1: 304 case SPRN_SDR1:
302 vcpu->arch.gpr[rt] = to_book3s(vcpu)->sdr1; 305 kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->sdr1);
303 break; 306 break;
304 case SPRN_DSISR: 307 case SPRN_DSISR:
305 vcpu->arch.gpr[rt] = to_book3s(vcpu)->dsisr; 308 kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->dsisr);
306 break; 309 break;
307 case SPRN_DAR: 310 case SPRN_DAR:
308 vcpu->arch.gpr[rt] = vcpu->arch.dear; 311 kvmppc_set_gpr(vcpu, rt, vcpu->arch.dear);
309 break; 312 break;
310 case SPRN_HIOR: 313 case SPRN_HIOR:
311 vcpu->arch.gpr[rt] = to_book3s(vcpu)->hior; 314 kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hior);
312 break; 315 break;
313 case SPRN_HID0: 316 case SPRN_HID0:
314 vcpu->arch.gpr[rt] = to_book3s(vcpu)->hid[0]; 317 kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[0]);
315 break; 318 break;
316 case SPRN_HID1: 319 case SPRN_HID1:
317 vcpu->arch.gpr[rt] = to_book3s(vcpu)->hid[1]; 320 kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[1]);
318 break; 321 break;
319 case SPRN_HID2: 322 case SPRN_HID2:
320 vcpu->arch.gpr[rt] = to_book3s(vcpu)->hid[2]; 323 kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[2]);
321 break; 324 break;
322 case SPRN_HID4: 325 case SPRN_HID4:
323 vcpu->arch.gpr[rt] = to_book3s(vcpu)->hid[4]; 326 kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[4]);
324 break; 327 break;
325 case SPRN_HID5: 328 case SPRN_HID5:
326 vcpu->arch.gpr[rt] = to_book3s(vcpu)->hid[5]; 329 kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[5]);
327 break; 330 break;
328 case SPRN_THRM1: 331 case SPRN_THRM1:
329 case SPRN_THRM2: 332 case SPRN_THRM2:
330 case SPRN_THRM3: 333 case SPRN_THRM3:
331 case SPRN_CTRLF: 334 case SPRN_CTRLF:
332 case SPRN_CTRLT: 335 case SPRN_CTRLT:
333 vcpu->arch.gpr[rt] = 0; 336 kvmppc_set_gpr(vcpu, rt, 0);
334 break; 337 break;
335 default: 338 default:
336 printk(KERN_INFO "KVM: invalid SPR read: %d\n", sprn); 339 printk(KERN_INFO "KVM: invalid SPR read: %d\n", sprn);
diff --git a/arch/powerpc/kvm/book3s_64_exports.c b/arch/powerpc/kvm/book3s_64_exports.c
index 5b2db38ed86c..1dd5a1ddfd0d 100644
--- a/arch/powerpc/kvm/book3s_64_exports.c
+++ b/arch/powerpc/kvm/book3s_64_exports.c
@@ -22,3 +22,11 @@
22 22
23EXPORT_SYMBOL_GPL(kvmppc_trampoline_enter); 23EXPORT_SYMBOL_GPL(kvmppc_trampoline_enter);
24EXPORT_SYMBOL_GPL(kvmppc_trampoline_lowmem); 24EXPORT_SYMBOL_GPL(kvmppc_trampoline_lowmem);
25EXPORT_SYMBOL_GPL(kvmppc_rmcall);
26EXPORT_SYMBOL_GPL(kvmppc_load_up_fpu);
27#ifdef CONFIG_ALTIVEC
28EXPORT_SYMBOL_GPL(kvmppc_load_up_altivec);
29#endif
30#ifdef CONFIG_VSX
31EXPORT_SYMBOL_GPL(kvmppc_load_up_vsx);
32#endif
diff --git a/arch/powerpc/kvm/book3s_64_interrupts.S b/arch/powerpc/kvm/book3s_64_interrupts.S
index 7b55d8094c8b..c1584d0cbce8 100644
--- a/arch/powerpc/kvm/book3s_64_interrupts.S
+++ b/arch/powerpc/kvm/book3s_64_interrupts.S
@@ -28,11 +28,6 @@
28#define ULONG_SIZE 8 28#define ULONG_SIZE 8
29#define VCPU_GPR(n) (VCPU_GPRS + (n * ULONG_SIZE)) 29#define VCPU_GPR(n) (VCPU_GPRS + (n * ULONG_SIZE))
30 30
31.macro mfpaca tmp_reg, src_reg, offset, vcpu_reg
32 ld \tmp_reg, (PACA_EXMC+\offset)(r13)
33 std \tmp_reg, VCPU_GPR(\src_reg)(\vcpu_reg)
34.endm
35
36.macro DISABLE_INTERRUPTS 31.macro DISABLE_INTERRUPTS
37 mfmsr r0 32 mfmsr r0
38 rldicl r0,r0,48,1 33 rldicl r0,r0,48,1
@@ -40,6 +35,26 @@
40 mtmsrd r0,1 35 mtmsrd r0,1
41.endm 36.endm
42 37
38#define VCPU_LOAD_NVGPRS(vcpu) \
39 ld r14, VCPU_GPR(r14)(vcpu); \
40 ld r15, VCPU_GPR(r15)(vcpu); \
41 ld r16, VCPU_GPR(r16)(vcpu); \
42 ld r17, VCPU_GPR(r17)(vcpu); \
43 ld r18, VCPU_GPR(r18)(vcpu); \
44 ld r19, VCPU_GPR(r19)(vcpu); \
45 ld r20, VCPU_GPR(r20)(vcpu); \
46 ld r21, VCPU_GPR(r21)(vcpu); \
47 ld r22, VCPU_GPR(r22)(vcpu); \
48 ld r23, VCPU_GPR(r23)(vcpu); \
49 ld r24, VCPU_GPR(r24)(vcpu); \
50 ld r25, VCPU_GPR(r25)(vcpu); \
51 ld r26, VCPU_GPR(r26)(vcpu); \
52 ld r27, VCPU_GPR(r27)(vcpu); \
53 ld r28, VCPU_GPR(r28)(vcpu); \
54 ld r29, VCPU_GPR(r29)(vcpu); \
55 ld r30, VCPU_GPR(r30)(vcpu); \
56 ld r31, VCPU_GPR(r31)(vcpu); \
57
43/***************************************************************************** 58/*****************************************************************************
44 * * 59 * *
45 * Guest entry / exit code that is in kernel module memory (highmem) * 60 * Guest entry / exit code that is in kernel module memory (highmem) *
@@ -67,61 +82,32 @@ kvm_start_entry:
67 SAVE_NVGPRS(r1) 82 SAVE_NVGPRS(r1)
68 83
69 /* Save LR */ 84 /* Save LR */
70 mflr r14 85 std r0, _LINK(r1)
71 std r14, _LINK(r1)
72
73/* XXX optimize non-volatile loading away */
74kvm_start_lightweight:
75 86
76 DISABLE_INTERRUPTS 87 /* Load non-volatile guest state from the vcpu */
88 VCPU_LOAD_NVGPRS(r4)
77 89
78 /* Save R1/R2 in the PACA */ 90 /* Save R1/R2 in the PACA */
79 std r1, PACAR1(r13) 91 std r1, PACA_KVM_HOST_R1(r13)
80 std r2, (PACA_EXMC+EX_SRR0)(r13) 92 std r2, PACA_KVM_HOST_R2(r13)
93
94 /* XXX swap in/out on load? */
81 ld r3, VCPU_HIGHMEM_HANDLER(r4) 95 ld r3, VCPU_HIGHMEM_HANDLER(r4)
82 std r3, PACASAVEDMSR(r13) 96 std r3, PACA_KVM_VMHANDLER(r13)
83 97
84 /* Load non-volatile guest state from the vcpu */ 98kvm_start_lightweight:
85 ld r14, VCPU_GPR(r14)(r4)
86 ld r15, VCPU_GPR(r15)(r4)
87 ld r16, VCPU_GPR(r16)(r4)
88 ld r17, VCPU_GPR(r17)(r4)
89 ld r18, VCPU_GPR(r18)(r4)
90 ld r19, VCPU_GPR(r19)(r4)
91 ld r20, VCPU_GPR(r20)(r4)
92 ld r21, VCPU_GPR(r21)(r4)
93 ld r22, VCPU_GPR(r22)(r4)
94 ld r23, VCPU_GPR(r23)(r4)
95 ld r24, VCPU_GPR(r24)(r4)
96 ld r25, VCPU_GPR(r25)(r4)
97 ld r26, VCPU_GPR(r26)(r4)
98 ld r27, VCPU_GPR(r27)(r4)
99 ld r28, VCPU_GPR(r28)(r4)
100 ld r29, VCPU_GPR(r29)(r4)
101 ld r30, VCPU_GPR(r30)(r4)
102 ld r31, VCPU_GPR(r31)(r4)
103 99
104 ld r9, VCPU_PC(r4) /* r9 = vcpu->arch.pc */ 100 ld r9, VCPU_PC(r4) /* r9 = vcpu->arch.pc */
105 ld r10, VCPU_SHADOW_MSR(r4) /* r10 = vcpu->arch.shadow_msr */ 101 ld r10, VCPU_SHADOW_MSR(r4) /* r10 = vcpu->arch.shadow_msr */
106 102
107 ld r3, VCPU_TRAMPOLINE_ENTER(r4) 103 /* Load some guest state in the respective registers */
108 mtsrr0 r3 104 ld r5, VCPU_CTR(r4) /* r5 = vcpu->arch.ctr */
109 105 /* will be swapped in by rmcall */
110 LOAD_REG_IMMEDIATE(r3, MSR_KERNEL & ~(MSR_IR | MSR_DR))
111 mtsrr1 r3
112
113 /* Load guest state in the respective registers */
114 lwz r3, VCPU_CR(r4) /* r3 = vcpu->arch.cr */
115 stw r3, (PACA_EXMC + EX_CCR)(r13)
116
117 ld r3, VCPU_CTR(r4) /* r3 = vcpu->arch.ctr */
118 mtctr r3 /* CTR = r3 */
119 106
120 ld r3, VCPU_LR(r4) /* r3 = vcpu->arch.lr */ 107 ld r3, VCPU_LR(r4) /* r3 = vcpu->arch.lr */
121 mtlr r3 /* LR = r3 */ 108 mtlr r3 /* LR = r3 */
122 109
123 ld r3, VCPU_XER(r4) /* r3 = vcpu->arch.xer */ 110 DISABLE_INTERRUPTS
124 std r3, (PACA_EXMC + EX_R3)(r13)
125 111
126 /* Some guests may need to have dcbz set to 32 byte length. 112 /* Some guests may need to have dcbz set to 32 byte length.
127 * 113 *
@@ -141,36 +127,15 @@ kvm_start_lightweight:
141 mtspr SPRN_HID5,r3 127 mtspr SPRN_HID5,r3
142 128
143no_dcbz32_on: 129no_dcbz32_on:
144 /* Load guest GPRs */ 130
145 131 ld r6, VCPU_RMCALL(r4)
146 ld r3, VCPU_GPR(r9)(r4) 132 mtctr r6
147 std r3, (PACA_EXMC + EX_R9)(r13) 133
148 ld r3, VCPU_GPR(r10)(r4) 134 ld r3, VCPU_TRAMPOLINE_ENTER(r4)
149 std r3, (PACA_EXMC + EX_R10)(r13) 135 LOAD_REG_IMMEDIATE(r4, MSR_KERNEL & ~(MSR_IR | MSR_DR))
150 ld r3, VCPU_GPR(r11)(r4)
151 std r3, (PACA_EXMC + EX_R11)(r13)
152 ld r3, VCPU_GPR(r12)(r4)
153 std r3, (PACA_EXMC + EX_R12)(r13)
154 ld r3, VCPU_GPR(r13)(r4)
155 std r3, (PACA_EXMC + EX_R13)(r13)
156
157 ld r0, VCPU_GPR(r0)(r4)
158 ld r1, VCPU_GPR(r1)(r4)
159 ld r2, VCPU_GPR(r2)(r4)
160 ld r3, VCPU_GPR(r3)(r4)
161 ld r5, VCPU_GPR(r5)(r4)
162 ld r6, VCPU_GPR(r6)(r4)
163 ld r7, VCPU_GPR(r7)(r4)
164 ld r8, VCPU_GPR(r8)(r4)
165 ld r4, VCPU_GPR(r4)(r4)
166
167 /* This sets the Magic value for the trampoline */
168
169 li r11, 1
170 stb r11, PACA_KVM_IN_GUEST(r13)
171 136
 172	/* Jump to SLB patching handler and into our guest */ 137
173 RFI 138 bctr
174 139
175/* 140/*
176 * This is the handler in module memory. It gets jumped at from the 141 * This is the handler in module memory. It gets jumped at from the
@@ -184,125 +149,70 @@ kvmppc_handler_highmem:
184 /* 149 /*
185 * Register usage at this point: 150 * Register usage at this point:
186 * 151 *
187 * R00 = guest R13 152 * R0 = guest last inst
188 * R01 = host R1 153 * R1 = host R1
189 * R02 = host R2 154 * R2 = host R2
190 * R10 = guest PC 155 * R3 = guest PC
191 * R11 = guest MSR 156 * R4 = guest MSR
192 * R12 = exit handler id 157 * R5 = guest DAR
193 * R13 = PACA 158 * R6 = guest DSISR
194 * PACA.exmc.R9 = guest R1 159 * R13 = PACA
195 * PACA.exmc.R10 = guest R10 160 * PACA.KVM.* = guest *
196 * PACA.exmc.R11 = guest R11
197 * PACA.exmc.R12 = guest R12
198 * PACA.exmc.R13 = guest R2
199 * PACA.exmc.DAR = guest DAR
200 * PACA.exmc.DSISR = guest DSISR
201 * PACA.exmc.LR = guest instruction
202 * PACA.exmc.CCR = guest CR
203 * PACA.exmc.SRR0 = guest R0
204 * 161 *
205 */ 162 */
206 163
207 std r3, (PACA_EXMC+EX_R3)(r13) 164 /* R7 = vcpu */
165 ld r7, GPR4(r1)
208 166
209 /* save the exit id in R3 */ 167 /* Now save the guest state */
210 mr r3, r12
211 168
212 /* R12 = vcpu */ 169 stw r0, VCPU_LAST_INST(r7)
213 ld r12, GPR4(r1)
214 170
215 /* Now save the guest state */ 171 std r3, VCPU_PC(r7)
172 std r4, VCPU_SHADOW_SRR1(r7)
173 std r5, VCPU_FAULT_DEAR(r7)
174 std r6, VCPU_FAULT_DSISR(r7)
216 175
217 std r0, VCPU_GPR(r13)(r12) 176 ld r5, VCPU_HFLAGS(r7)
218 std r4, VCPU_GPR(r4)(r12)
219 std r5, VCPU_GPR(r5)(r12)
220 std r6, VCPU_GPR(r6)(r12)
221 std r7, VCPU_GPR(r7)(r12)
222 std r8, VCPU_GPR(r8)(r12)
223 std r9, VCPU_GPR(r9)(r12)
224
225 /* get registers from PACA */
226 mfpaca r5, r0, EX_SRR0, r12
227 mfpaca r5, r3, EX_R3, r12
228 mfpaca r5, r1, EX_R9, r12
229 mfpaca r5, r10, EX_R10, r12
230 mfpaca r5, r11, EX_R11, r12
231 mfpaca r5, r12, EX_R12, r12
232 mfpaca r5, r2, EX_R13, r12
233
234 lwz r5, (PACA_EXMC+EX_LR)(r13)
235 stw r5, VCPU_LAST_INST(r12)
236
237 lwz r5, (PACA_EXMC+EX_CCR)(r13)
238 stw r5, VCPU_CR(r12)
239
240 ld r5, VCPU_HFLAGS(r12)
241 rldicl. r5, r5, 0, 63 /* CR = ((r5 & 1) == 0) */ 177 rldicl. r5, r5, 0, 63 /* CR = ((r5 & 1) == 0) */
242 beq no_dcbz32_off 178 beq no_dcbz32_off
243 179
180 li r4, 0
244 mfspr r5,SPRN_HID5 181 mfspr r5,SPRN_HID5
245 rldimi r5,r5,6,56 182 rldimi r5,r4,6,56
246 mtspr SPRN_HID5,r5 183 mtspr SPRN_HID5,r5
247 184
248no_dcbz32_off: 185no_dcbz32_off:
249 186
250 /* XXX maybe skip on lightweight? */ 187 std r14, VCPU_GPR(r14)(r7)
251 std r14, VCPU_GPR(r14)(r12) 188 std r15, VCPU_GPR(r15)(r7)
252 std r15, VCPU_GPR(r15)(r12) 189 std r16, VCPU_GPR(r16)(r7)
253 std r16, VCPU_GPR(r16)(r12) 190 std r17, VCPU_GPR(r17)(r7)
254 std r17, VCPU_GPR(r17)(r12) 191 std r18, VCPU_GPR(r18)(r7)
255 std r18, VCPU_GPR(r18)(r12) 192 std r19, VCPU_GPR(r19)(r7)
256 std r19, VCPU_GPR(r19)(r12) 193 std r20, VCPU_GPR(r20)(r7)
257 std r20, VCPU_GPR(r20)(r12) 194 std r21, VCPU_GPR(r21)(r7)
258 std r21, VCPU_GPR(r21)(r12) 195 std r22, VCPU_GPR(r22)(r7)
259 std r22, VCPU_GPR(r22)(r12) 196 std r23, VCPU_GPR(r23)(r7)
260 std r23, VCPU_GPR(r23)(r12) 197 std r24, VCPU_GPR(r24)(r7)
261 std r24, VCPU_GPR(r24)(r12) 198 std r25, VCPU_GPR(r25)(r7)
262 std r25, VCPU_GPR(r25)(r12) 199 std r26, VCPU_GPR(r26)(r7)
263 std r26, VCPU_GPR(r26)(r12) 200 std r27, VCPU_GPR(r27)(r7)
264 std r27, VCPU_GPR(r27)(r12) 201 std r28, VCPU_GPR(r28)(r7)
265 std r28, VCPU_GPR(r28)(r12) 202 std r29, VCPU_GPR(r29)(r7)
266 std r29, VCPU_GPR(r29)(r12) 203 std r30, VCPU_GPR(r30)(r7)
267 std r30, VCPU_GPR(r30)(r12) 204 std r31, VCPU_GPR(r31)(r7)
268 std r31, VCPU_GPR(r31)(r12) 205
269 206 /* Save guest CTR */
270 /* Restore non-volatile host registers (r14 - r31) */
271 REST_NVGPRS(r1)
272
273 /* Save guest PC (R10) */
274 std r10, VCPU_PC(r12)
275
276 /* Save guest msr (R11) */
277 std r11, VCPU_SHADOW_MSR(r12)
278
279 /* Save guest CTR (in R12) */
280 mfctr r5 207 mfctr r5
281 std r5, VCPU_CTR(r12) 208 std r5, VCPU_CTR(r7)
282 209
283 /* Save guest LR */ 210 /* Save guest LR */
284 mflr r5 211 mflr r5
285 std r5, VCPU_LR(r12) 212 std r5, VCPU_LR(r7)
286
287 /* Save guest XER */
288 mfxer r5
289 std r5, VCPU_XER(r12)
290
291 /* Save guest DAR */
292 ld r5, (PACA_EXMC+EX_DAR)(r13)
293 std r5, VCPU_FAULT_DEAR(r12)
294
295 /* Save guest DSISR */
296 lwz r5, (PACA_EXMC+EX_DSISR)(r13)
297 std r5, VCPU_FAULT_DSISR(r12)
298 213
299 /* Restore host msr -> SRR1 */ 214 /* Restore host msr -> SRR1 */
300 ld r7, VCPU_HOST_MSR(r12) 215 ld r6, VCPU_HOST_MSR(r7)
301 mtsrr1 r7
302
303 /* Restore host IP -> SRR0 */
304 ld r6, VCPU_HOST_RETIP(r12)
305 mtsrr0 r6
306 216
307 /* 217 /*
308 * For some interrupts, we need to call the real Linux 218 * For some interrupts, we need to call the real Linux
@@ -314,13 +224,14 @@ no_dcbz32_off:
314 * r3 = address of interrupt handler (exit reason) 224 * r3 = address of interrupt handler (exit reason)
315 */ 225 */
316 226
317 cmpwi r3, BOOK3S_INTERRUPT_EXTERNAL 227 cmpwi r12, BOOK3S_INTERRUPT_EXTERNAL
318 beq call_linux_handler 228 beq call_linux_handler
319 cmpwi r3, BOOK3S_INTERRUPT_DECREMENTER 229 cmpwi r12, BOOK3S_INTERRUPT_DECREMENTER
320 beq call_linux_handler 230 beq call_linux_handler
321 231
322 /* Back to Interruptable Mode! (goto kvm_return_point) */ 232 /* Back to EE=1 */
323 RFI 233 mtmsr r6
234 b kvm_return_point
324 235
325call_linux_handler: 236call_linux_handler:
326 237
@@ -333,16 +244,22 @@ call_linux_handler:
333 * interrupt handler! 244 * interrupt handler!
334 * 245 *
335 * R3 still contains the exit code, 246 * R3 still contains the exit code,
336 * R6 VCPU_HOST_RETIP and 247 * R5 VCPU_HOST_RETIP and
337 * R7 VCPU_HOST_MSR 248 * R6 VCPU_HOST_MSR
338 */ 249 */
339 250
340 mtlr r3 251 /* Restore host IP -> SRR0 */
252 ld r5, VCPU_HOST_RETIP(r7)
253
254 /* XXX Better move to a safe function?
255 * What if we get an HTAB flush in between mtsrr0 and mtsrr1? */
341 256
342 ld r5, VCPU_TRAMPOLINE_LOWMEM(r12) 257 mtlr r12
343 mtsrr0 r5 258
344 LOAD_REG_IMMEDIATE(r5, MSR_KERNEL & ~(MSR_IR | MSR_DR)) 259 ld r4, VCPU_TRAMPOLINE_LOWMEM(r7)
345 mtsrr1 r5 260 mtsrr0 r4
261 LOAD_REG_IMMEDIATE(r3, MSR_KERNEL & ~(MSR_IR | MSR_DR))
262 mtsrr1 r3
346 263
347 RFI 264 RFI
348 265
@@ -351,42 +268,51 @@ kvm_return_point:
351 268
352 /* Jump back to lightweight entry if we're supposed to */ 269 /* Jump back to lightweight entry if we're supposed to */
353 /* go back into the guest */ 270 /* go back into the guest */
354 mr r5, r3 271
272 /* Pass the exit number as 3rd argument to kvmppc_handle_exit */
273 mr r5, r12
274
355 /* Restore r3 (kvm_run) and r4 (vcpu) */ 275 /* Restore r3 (kvm_run) and r4 (vcpu) */
356 REST_2GPRS(3, r1) 276 REST_2GPRS(3, r1)
357 bl KVMPPC_HANDLE_EXIT 277 bl KVMPPC_HANDLE_EXIT
358 278
359#if 0 /* XXX get lightweight exits back */ 279 /* If RESUME_GUEST, get back in the loop */
360 cmpwi r3, RESUME_GUEST 280 cmpwi r3, RESUME_GUEST
361 bne kvm_exit_heavyweight 281 beq kvm_loop_lightweight
362 282
363 /* put VCPU and KVM_RUN back into place and roll again! */ 283 cmpwi r3, RESUME_GUEST_NV
364 REST_2GPRS(3, r1) 284 beq kvm_loop_heavyweight
365 b kvm_start_lightweight
366 285
367kvm_exit_heavyweight: 286kvm_exit_loop:
368 /* Restore non-volatile host registers */
369 ld r14, _LINK(r1)
370 mtlr r14
371 REST_NVGPRS(r1)
372 287
373 addi r1, r1, SWITCH_FRAME_SIZE
374#else
375 ld r4, _LINK(r1) 288 ld r4, _LINK(r1)
376 mtlr r4 289 mtlr r4
377 290
378 cmpwi r3, RESUME_GUEST 291 /* Restore non-volatile host registers (r14 - r31) */
379 bne kvm_exit_heavyweight 292 REST_NVGPRS(r1)
293
294 addi r1, r1, SWITCH_FRAME_SIZE
295 blr
296
297kvm_loop_heavyweight:
298
299 ld r4, _LINK(r1)
300 std r4, (16 + SWITCH_FRAME_SIZE)(r1)
380 301
302 /* Load vcpu and cpu_run */
381 REST_2GPRS(3, r1) 303 REST_2GPRS(3, r1)
382 304
383 addi r1, r1, SWITCH_FRAME_SIZE 305 /* Load non-volatile guest state from the vcpu */
306 VCPU_LOAD_NVGPRS(r4)
384 307
385 b kvm_start_entry 308 /* Jump back into the beginning of this function */
309 b kvm_start_lightweight
386 310
387kvm_exit_heavyweight: 311kvm_loop_lightweight:
388 312
389 addi r1, r1, SWITCH_FRAME_SIZE 313 /* We'll need the vcpu pointer */
390#endif 314 REST_GPR(4, r1)
315
316 /* Jump back into the beginning of this function */
317 b kvm_start_lightweight
391 318
392 blr
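The restructured exit path above replaces the single RFI return with an explicit loop: RESUME_GUEST re-enters the guest without touching non-volatile registers, while RESUME_GUEST_NV reloads them from the vcpu first. As a reading aid, the same control flow in C, with made-up helper names standing in for the assembly labels:

/* Reading aid only: C equivalent of the assembly loop above.
 * run_guest_until_exit() and load_nvgprs_from_vcpu() are notional
 * names for kvm_start_lightweight and VCPU_LOAD_NVGPRS. */
for (;;) {
	int r;

	run_guest_until_exit();			/* kvm_start_lightweight */
	r = kvmppc_handle_exit(run, vcpu, exit_nr);

	if (r == RESUME_GUEST)			/* kvm_loop_lightweight */
		continue;
	if (r == RESUME_GUEST_NV) {		/* kvm_loop_heavyweight */
		load_nvgprs_from_vcpu(vcpu);
		continue;
	}
	break;					/* kvm_exit_loop: back to C */
}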
diff --git a/arch/powerpc/kvm/book3s_64_mmu.c b/arch/powerpc/kvm/book3s_64_mmu.c
index e4beeb371a73..512dcff77554 100644
--- a/arch/powerpc/kvm/book3s_64_mmu.c
+++ b/arch/powerpc/kvm/book3s_64_mmu.c
@@ -54,7 +54,7 @@ static struct kvmppc_slb *kvmppc_mmu_book3s_64_find_slbe(
54 if (!vcpu_book3s->slb[i].valid) 54 if (!vcpu_book3s->slb[i].valid)
55 continue; 55 continue;
56 56
57 if (vcpu_book3s->slb[i].large) 57 if (vcpu_book3s->slb[i].tb)
58 cmp_esid = esid_1t; 58 cmp_esid = esid_1t;
59 59
60 if (vcpu_book3s->slb[i].esid == cmp_esid) 60 if (vcpu_book3s->slb[i].esid == cmp_esid)
@@ -65,9 +65,10 @@ static struct kvmppc_slb *kvmppc_mmu_book3s_64_find_slbe(
65 eaddr, esid, esid_1t); 65 eaddr, esid, esid_1t);
66 for (i = 0; i < vcpu_book3s->slb_nr; i++) { 66 for (i = 0; i < vcpu_book3s->slb_nr; i++) {
67 if (vcpu_book3s->slb[i].vsid) 67 if (vcpu_book3s->slb[i].vsid)
68 dprintk(" %d: %c%c %llx %llx\n", i, 68 dprintk(" %d: %c%c%c %llx %llx\n", i,
69 vcpu_book3s->slb[i].valid ? 'v' : ' ', 69 vcpu_book3s->slb[i].valid ? 'v' : ' ',
70 vcpu_book3s->slb[i].large ? 'l' : ' ', 70 vcpu_book3s->slb[i].large ? 'l' : ' ',
71 vcpu_book3s->slb[i].tb ? 't' : ' ',
71 vcpu_book3s->slb[i].esid, 72 vcpu_book3s->slb[i].esid,
72 vcpu_book3s->slb[i].vsid); 73 vcpu_book3s->slb[i].vsid);
73 } 74 }
@@ -84,7 +85,7 @@ static u64 kvmppc_mmu_book3s_64_ea_to_vp(struct kvm_vcpu *vcpu, gva_t eaddr,
84 if (!slb) 85 if (!slb)
85 return 0; 86 return 0;
86 87
87 if (slb->large) 88 if (slb->tb)
88 return (((u64)eaddr >> 12) & 0xfffffff) | 89 return (((u64)eaddr >> 12) & 0xfffffff) |
89 (((u64)slb->vsid) << 28); 90 (((u64)slb->vsid) << 28);
90 91
@@ -309,7 +310,8 @@ static void kvmppc_mmu_book3s_64_slbmte(struct kvm_vcpu *vcpu, u64 rs, u64 rb)
309 slbe = &vcpu_book3s->slb[slb_nr]; 310 slbe = &vcpu_book3s->slb[slb_nr];
310 311
311 slbe->large = (rs & SLB_VSID_L) ? 1 : 0; 312 slbe->large = (rs & SLB_VSID_L) ? 1 : 0;
312 slbe->esid = slbe->large ? esid_1t : esid; 313 slbe->tb = (rs & SLB_VSID_B_1T) ? 1 : 0;
314 slbe->esid = slbe->tb ? esid_1t : esid;
313 slbe->vsid = rs >> 12; 315 slbe->vsid = rs >> 12;
314 slbe->valid = (rb & SLB_ESID_V) ? 1 : 0; 316 slbe->valid = (rb & SLB_ESID_V) ? 1 : 0;
315 slbe->Ks = (rs & SLB_VSID_KS) ? 1 : 0; 317 slbe->Ks = (rs & SLB_VSID_KS) ? 1 : 0;
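The mmu change above stops conflating the SLB L bit (large pages) with the B field (segment size): the new tb flag tracks 1T segments, which match on a shorter ESID (EA >> 40, versus EA >> 28 for 256MB segments). A commented sketch of the extended slbmte decode, assuming SLB_VSID_B_1T is the 1T encoding of the B field:

/* Sketch of the extended decode: 'large' is the page size (L bit),
 * 'tb' the segment size (B field == 1T).  A 256MB segment's ESID is
 * EA >> 28; a 1T segment's ESID is EA >> 40, hence the esid_1t pick. */
slbe->large = (rs & SLB_VSID_L) ? 1 : 0;	/* 16M pages */
slbe->tb    = (rs & SLB_VSID_B_1T) ? 1 : 0;	/* 1T segment */
slbe->esid  = slbe->tb ? esid_1t : esid;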
diff --git a/arch/powerpc/kvm/book3s_64_rmhandlers.S b/arch/powerpc/kvm/book3s_64_rmhandlers.S
index fb7dd2e9ac88..c83c60ad96c5 100644
--- a/arch/powerpc/kvm/book3s_64_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_64_rmhandlers.S
@@ -45,36 +45,25 @@ kvmppc_trampoline_\intno:
45 * To distinguish, we check a magic byte in the PACA 45 * To distinguish, we check a magic byte in the PACA
46 */ 46 */
47 mfspr r13, SPRN_SPRG_PACA /* r13 = PACA */ 47 mfspr r13, SPRN_SPRG_PACA /* r13 = PACA */
48 std r12, (PACA_EXMC + EX_R12)(r13) 48 std r12, PACA_KVM_SCRATCH0(r13)
49 mfcr r12 49 mfcr r12
50 stw r12, (PACA_EXMC + EX_CCR)(r13) 50 stw r12, PACA_KVM_SCRATCH1(r13)
51 lbz r12, PACA_KVM_IN_GUEST(r13) 51 lbz r12, PACA_KVM_IN_GUEST(r13)
52 cmpwi r12, 0 52 cmpwi r12, KVM_GUEST_MODE_NONE
53 bne ..kvmppc_handler_hasmagic_\intno 53 bne ..kvmppc_handler_hasmagic_\intno
54 /* No KVM guest? Then jump back to the Linux handler! */ 54 /* No KVM guest? Then jump back to the Linux handler! */
55 lwz r12, (PACA_EXMC + EX_CCR)(r13) 55 lwz r12, PACA_KVM_SCRATCH1(r13)
56 mtcr r12 56 mtcr r12
57 ld r12, (PACA_EXMC + EX_R12)(r13) 57 ld r12, PACA_KVM_SCRATCH0(r13)
58 mfspr r13, SPRN_SPRG_SCRATCH0 /* r13 = original r13 */ 58 mfspr r13, SPRN_SPRG_SCRATCH0 /* r13 = original r13 */
59 b kvmppc_resume_\intno /* Get back original handler */ 59 b kvmppc_resume_\intno /* Get back original handler */
60 60
61 /* Now we know we're handling a KVM guest */ 61 /* Now we know we're handling a KVM guest */
62..kvmppc_handler_hasmagic_\intno: 62..kvmppc_handler_hasmagic_\intno:
63 /* Unset guest state */
64 li r12, 0
65 stb r12, PACA_KVM_IN_GUEST(r13)
66 63
67 std r1, (PACA_EXMC+EX_R9)(r13) 64 /* Should we just skip the faulting instruction? */
68 std r10, (PACA_EXMC+EX_R10)(r13) 65 cmpwi r12, KVM_GUEST_MODE_SKIP
69 std r11, (PACA_EXMC+EX_R11)(r13) 66 beq kvmppc_handler_skip_ins
70 std r2, (PACA_EXMC+EX_R13)(r13)
71
72 mfsrr0 r10
73 mfsrr1 r11
74
75 /* Restore R1/R2 so we can handle faults */
76 ld r1, PACAR1(r13)
77 ld r2, (PACA_EXMC+EX_SRR0)(r13)
78 67
79 /* Let's store which interrupt we're handling */ 68 /* Let's store which interrupt we're handling */
80 li r12, \intno 69 li r12, \intno
@@ -102,23 +91,107 @@ INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_ALTIVEC
102INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_VSX 91INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_VSX
103 92
104/* 93/*
94 * Bring us back to the faulting code, but skip the
95 * faulting instruction.
96 *
97 * This is a generic exit path from the interrupt
98 * trampolines above.
99 *
100 * Input Registers:
101 *
102 * R12 = free
103 * R13 = PACA
104 * PACA.KVM.SCRATCH0 = guest R12
105 * PACA.KVM.SCRATCH1 = guest CR
106 * SPRG_SCRATCH0 = guest R13
107 *
108 */
109kvmppc_handler_skip_ins:
110
111 /* Patch the IP to the next instruction */
112 mfsrr0 r12
113 addi r12, r12, 4
114 mtsrr0 r12
115
116 /* Clean up all state */
117 lwz r12, PACA_KVM_SCRATCH1(r13)
118 mtcr r12
119 ld r12, PACA_KVM_SCRATCH0(r13)
120 mfspr r13, SPRN_SPRG_SCRATCH0
121
122 /* And get back into the code */
123 RFI
124
125/*
105 * This trampoline brings us back to a real mode handler 126 * This trampoline brings us back to a real mode handler
106 * 127 *
107 * Input Registers: 128 * Input Registers:
108 * 129 *
109 * R6 = SRR0 130 * R5 = SRR0
110 * R7 = SRR1 131 * R6 = SRR1
111 * LR = real-mode IP 132 * LR = real-mode IP
112 * 133 *
113 */ 134 */
114.global kvmppc_handler_lowmem_trampoline 135.global kvmppc_handler_lowmem_trampoline
115kvmppc_handler_lowmem_trampoline: 136kvmppc_handler_lowmem_trampoline:
116 137
117 mtsrr0 r6 138 mtsrr0 r5
118 mtsrr1 r7 139 mtsrr1 r6
119 blr 140 blr
120kvmppc_handler_lowmem_trampoline_end: 141kvmppc_handler_lowmem_trampoline_end:
121 142
143/*
144 * Call a function in real mode
145 *
146 * Input Registers:
147 *
148 * R3 = function
149 * R4 = MSR
150 * R5 = CTR
151 *
152 */
153_GLOBAL(kvmppc_rmcall)
154 mtmsr r4 /* Disable relocation, so mtsrr
155 doesn't get interrupted */
156 mtctr r5
157 mtsrr0 r3
158 mtsrr1 r4
159 RFI
160
161/*
162 * Activate current's external feature (FPU/Altivec/VSX)
163 */
164#define define_load_up(what) \
165 \
166_GLOBAL(kvmppc_load_up_ ## what); \
167 subi r1, r1, INT_FRAME_SIZE; \
168 mflr r3; \
169 std r3, _LINK(r1); \
170 mfmsr r4; \
171 std r31, GPR3(r1); \
172 mr r31, r4; \
173 li r5, MSR_DR; \
174 oris r5, r5, MSR_EE@h; \
175 andc r4, r4, r5; \
176 mtmsr r4; \
177 \
178 bl .load_up_ ## what; \
179 \
180 mtmsr r31; \
181 ld r3, _LINK(r1); \
182 ld r31, GPR3(r1); \
183 addi r1, r1, INT_FRAME_SIZE; \
184 mtlr r3; \
185 blr
186
187define_load_up(fpu)
188#ifdef CONFIG_ALTIVEC
189define_load_up(altivec)
190#endif
191#ifdef CONFIG_VSX
192define_load_up(vsx)
193#endif
194
122.global kvmppc_trampoline_lowmem 195.global kvmppc_trampoline_lowmem
123kvmppc_trampoline_lowmem: 196kvmppc_trampoline_lowmem:
124 .long kvmppc_handler_lowmem_trampoline - _stext 197 .long kvmppc_handler_lowmem_trampoline - _stext
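
Two notes on the additions above. kvmppc_rmcall leans on the PPC64 C ABI passing the first integer arguments in r3-r5, so the documented register interface (R3 = function, R4 = MSR, R5 = CTR) falls straight out of an ordinary prototype; the declaration below is inferred from that comment rather than copied from a header:

    /* inferred C-side view: r3 = target, r4 = MSR to run with,
     * r5 = value the stub moves into CTR before the RFI */
    void kvmppc_rmcall(ulong func, ulong msr, ulong ctr);

The mtmsr before the mtsrr0/mtsrr1 pair is what the inline comment says it is: with relocation (and, by the caller's choice of MSR, interrupts) already off, nothing can fire and clobber SRR0/SRR1 before the RFI consumes them. The define_load_up() wrappers follow the same defensive pattern, masking MSR_EE and MSR_DR around the call to the native load_up_* routines while preserving LR and r31.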
diff --git a/arch/powerpc/kvm/book3s_64_slb.S b/arch/powerpc/kvm/book3s_64_slb.S
index ecd237a03fd0..35b762722187 100644
--- a/arch/powerpc/kvm/book3s_64_slb.S
+++ b/arch/powerpc/kvm/book3s_64_slb.S
@@ -31,7 +31,7 @@
31#define REBOLT_SLB_ENTRY(num) \ 31#define REBOLT_SLB_ENTRY(num) \
32 ld r10, SHADOW_SLB_ESID(num)(r11); \ 32 ld r10, SHADOW_SLB_ESID(num)(r11); \
33 cmpdi r10, 0; \ 33 cmpdi r10, 0; \
34 beq slb_exit_skip_1; \ 34 beq slb_exit_skip_ ## num; \
35 oris r10, r10, SLB_ESID_V@h; \ 35 oris r10, r10, SLB_ESID_V@h; \
36 ld r9, SHADOW_SLB_VSID(num)(r11); \ 36 ld r9, SHADOW_SLB_VSID(num)(r11); \
37 slbmte r9, r10; \ 37 slbmte r9, r10; \
@@ -51,23 +51,21 @@ kvmppc_handler_trampoline_enter:
51 * 51 *
52 * MSR = ~IR|DR 52 * MSR = ~IR|DR
53 * R13 = PACA 53 * R13 = PACA
54 * R1 = host R1
55 * R2 = host R2
54 * R9 = guest IP 56 * R9 = guest IP
55 * R10 = guest MSR 57 * R10 = guest MSR
56 * R11 = free 58 * all other GPRS = free
57 * R12 = free 59 * PACA[KVM_CR] = guest CR
58 * PACA[PACA_EXMC + EX_R9] = guest R9 60 * PACA[KVM_XER] = guest XER
59 * PACA[PACA_EXMC + EX_R10] = guest R10
60 * PACA[PACA_EXMC + EX_R11] = guest R11
61 * PACA[PACA_EXMC + EX_R12] = guest R12
62 * PACA[PACA_EXMC + EX_R13] = guest R13
63 * PACA[PACA_EXMC + EX_CCR] = guest CR
64 * PACA[PACA_EXMC + EX_R3] = guest XER
65 */ 61 */
66 62
67 mtsrr0 r9 63 mtsrr0 r9
68 mtsrr1 r10 64 mtsrr1 r10
69 65
70 mtspr SPRN_SPRG_SCRATCH0, r0 66 /* Activate guest mode, so faults get handled by KVM */
67 li r11, KVM_GUEST_MODE_GUEST
68 stb r11, PACA_KVM_IN_GUEST(r13)
71 69
72 /* Remove LPAR shadow entries */ 70 /* Remove LPAR shadow entries */
73 71
@@ -131,20 +129,27 @@ slb_do_enter:
131 129
132 /* Enter guest */ 130 /* Enter guest */
133 131
134 mfspr r0, SPRN_SPRG_SCRATCH0 132 ld r0, (PACA_KVM_R0)(r13)
135 133 ld r1, (PACA_KVM_R1)(r13)
136 ld r9, (PACA_EXMC+EX_R9)(r13) 134 ld r2, (PACA_KVM_R2)(r13)
137 ld r10, (PACA_EXMC+EX_R10)(r13) 135 ld r3, (PACA_KVM_R3)(r13)
138 ld r12, (PACA_EXMC+EX_R12)(r13) 136 ld r4, (PACA_KVM_R4)(r13)
139 137 ld r5, (PACA_KVM_R5)(r13)
140 lwz r11, (PACA_EXMC+EX_CCR)(r13) 138 ld r6, (PACA_KVM_R6)(r13)
139 ld r7, (PACA_KVM_R7)(r13)
140 ld r8, (PACA_KVM_R8)(r13)
141 ld r9, (PACA_KVM_R9)(r13)
142 ld r10, (PACA_KVM_R10)(r13)
143 ld r12, (PACA_KVM_R12)(r13)
144
145 lwz r11, (PACA_KVM_CR)(r13)
141 mtcr r11 146 mtcr r11
142 147
143 ld r11, (PACA_EXMC+EX_R3)(r13) 148 ld r11, (PACA_KVM_XER)(r13)
144 mtxer r11 149 mtxer r11
145 150
146 ld r11, (PACA_EXMC+EX_R11)(r13) 151 ld r11, (PACA_KVM_R11)(r13)
147 ld r13, (PACA_EXMC+EX_R13)(r13) 152 ld r13, (PACA_KVM_R13)(r13)
148 153
149 RFI 154 RFI
150kvmppc_handler_trampoline_enter_end: 155kvmppc_handler_trampoline_enter_end:
@@ -162,28 +167,54 @@ kvmppc_handler_trampoline_exit:
162 167
163 /* Register usage at this point: 168 /* Register usage at this point:
164 * 169 *
165 * SPRG_SCRATCH0 = guest R13 170 * SPRG_SCRATCH0 = guest R13
166 * R01 = host R1 171 * R12 = exit handler id
167 * R02 = host R2 172 * R13 = PACA
168 * R10 = guest PC 173 * PACA.KVM.SCRATCH0 = guest R12
169 * R11 = guest MSR 174 * PACA.KVM.SCRATCH1 = guest CR
170 * R12 = exit handler id
171 * R13 = PACA
172 * PACA.exmc.CCR = guest CR
173 * PACA.exmc.R9 = guest R1
174 * PACA.exmc.R10 = guest R10
175 * PACA.exmc.R11 = guest R11
176 * PACA.exmc.R12 = guest R12
177 * PACA.exmc.R13 = guest R2
178 * 175 *
179 */ 176 */
180 177
181 /* Save registers */ 178 /* Save registers */
182 179
183 std r0, (PACA_EXMC+EX_SRR0)(r13) 180 std r0, PACA_KVM_R0(r13)
184 std r9, (PACA_EXMC+EX_R3)(r13) 181 std r1, PACA_KVM_R1(r13)
185 std r10, (PACA_EXMC+EX_LR)(r13) 182 std r2, PACA_KVM_R2(r13)
186 std r11, (PACA_EXMC+EX_DAR)(r13) 183 std r3, PACA_KVM_R3(r13)
184 std r4, PACA_KVM_R4(r13)
185 std r5, PACA_KVM_R5(r13)
186 std r6, PACA_KVM_R6(r13)
187 std r7, PACA_KVM_R7(r13)
188 std r8, PACA_KVM_R8(r13)
189 std r9, PACA_KVM_R9(r13)
190 std r10, PACA_KVM_R10(r13)
191 std r11, PACA_KVM_R11(r13)
192
193 /* Restore R1/R2 so we can handle faults */
194 ld r1, PACA_KVM_HOST_R1(r13)
195 ld r2, PACA_KVM_HOST_R2(r13)
196
197 /* Save guest PC and MSR in GPRs */
198 mfsrr0 r3
199 mfsrr1 r4
200
201 /* Get scratch'ed off registers */
202 mfspr r9, SPRN_SPRG_SCRATCH0
203 std r9, PACA_KVM_R13(r13)
204
205 ld r8, PACA_KVM_SCRATCH0(r13)
206 std r8, PACA_KVM_R12(r13)
207
208 lwz r7, PACA_KVM_SCRATCH1(r13)
209 stw r7, PACA_KVM_CR(r13)
210
211 /* Save more register state */
212
213 mfxer r6
214 stw r6, PACA_KVM_XER(r13)
215
216 mfdar r5
217 mfdsisr r6
187 218
188 /* 219 /*
189 * In order for us to easily get the last instruction, 220 * In order for us to easily get the last instruction,
@@ -202,17 +233,28 @@ kvmppc_handler_trampoline_exit:
202 233
203ld_last_inst: 234ld_last_inst:
204 /* Save off the guest instruction we're at */ 235 /* Save off the guest instruction we're at */
236
237 /* Set guest mode to 'jump over instruction' so if lwz faults
238 * we'll just continue at the next IP. */
239 li r9, KVM_GUEST_MODE_SKIP
240 stb r9, PACA_KVM_IN_GUEST(r13)
241
205 /* 1) enable paging for data */ 242 /* 1) enable paging for data */
206 mfmsr r9 243 mfmsr r9
207 ori r11, r9, MSR_DR /* Enable paging for data */ 244 ori r11, r9, MSR_DR /* Enable paging for data */
208 mtmsr r11 245 mtmsr r11
209 /* 2) fetch the instruction */ 246 /* 2) fetch the instruction */
210 lwz r0, 0(r10) 247 li r0, KVM_INST_FETCH_FAILED /* In case lwz faults */
248 lwz r0, 0(r3)
211 /* 3) disable paging again */ 249 /* 3) disable paging again */
212 mtmsr r9 250 mtmsr r9
213 251
214no_ld_last_inst: 252no_ld_last_inst:
215 253
254 /* Unset guest mode */
255 li r9, KVM_GUEST_MODE_NONE
256 stb r9, PACA_KVM_IN_GUEST(r13)
257
216 /* Restore bolted entries from the shadow and fix it along the way */ 258 /* Restore bolted entries from the shadow and fix it along the way */
217 259
218 /* We don't store anything in entry 0, so we don't need to take care of it */ 260 /* We don't store anything in entry 0, so we don't need to take care of it */
@@ -233,29 +275,27 @@ no_ld_last_inst:
233 275
234slb_do_exit: 276slb_do_exit:
235 277
236 /* Restore registers */ 278 /* Register usage at this point:
237 279 *
238 ld r11, (PACA_EXMC+EX_DAR)(r13) 280 * R0 = guest last inst
239 ld r10, (PACA_EXMC+EX_LR)(r13) 281 * R1 = host R1
240 ld r9, (PACA_EXMC+EX_R3)(r13) 282 * R2 = host R2
241 283 * R3 = guest PC
242 /* Save last inst */ 284 * R4 = guest MSR
243 stw r0, (PACA_EXMC+EX_LR)(r13) 285 * R5 = guest DAR
244 286 * R6 = guest DSISR
245 /* Save DAR and DSISR before going to paged mode */ 287 * R12 = exit handler id
246 mfdar r0 288 * R13 = PACA
247 std r0, (PACA_EXMC+EX_DAR)(r13) 289 * PACA.KVM.* = guest *
248 mfdsisr r0 290 *
249 stw r0, (PACA_EXMC+EX_DSISR)(r13) 291 */
250 292
251 /* RFI into the highmem handler */ 293 /* RFI into the highmem handler */
252 mfmsr r0 294 mfmsr r7
253 ori r0, r0, MSR_IR|MSR_DR|MSR_RI /* Enable paging */ 295 ori r7, r7, MSR_IR|MSR_DR|MSR_RI /* Enable paging */
254 mtsrr1 r0 296 mtsrr1 r7
255 ld r0, PACASAVEDMSR(r13) /* Highmem handler address */ 297 ld r8, PACA_KVM_VMHANDLER(r13) /* Highmem handler address */
256 mtsrr0 r0 298 mtsrr0 r8
257
258 mfspr r0, SPRN_SPRG_SCRATCH0
259 299
260 RFI 300 RFI
261kvmppc_handler_trampoline_exit_end: 301kvmppc_handler_trampoline_exit_end:
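
Taken together, the enter/exit trampolines now keep guest state in a dedicated KVM area of the PACA instead of borrowing the machine-check exception scratch area (PACA_EXMC + EX_*), freeing that area for actual machine checks. Reconstructed from the offsets used above, the save area looks roughly like this; field order and padding are not implied, and the real definition (plus its asm-offsets) lives elsewhere in this series:

    /* sketch reconstructed from the PACA_KVM_* offsets used above */
    struct {
        ulong host_r1, host_r2;  /* PACA_KVM_HOST_R1 / _R2 */
        ulong vmhandler;         /* PACA_KVM_VMHANDLER: highmem handler */
        ulong scratch0;          /* PACA_KVM_SCRATCH0: guest r12 on exit */
        u32   scratch1;          /* PACA_KVM_SCRATCH1: guest CR on exit */
        u8    in_guest;          /* KVM_GUEST_MODE_NONE / _GUEST / _SKIP */
        ulong gpr[14];           /* PACA_KVM_R0 .. PACA_KVM_R13 */
        u32   cr, xer;           /* PACA_KVM_CR, PACA_KVM_XER */
    } kvm_state;                 /* embedded in the paca */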
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 06f5a9ecc42c..4d686cc6b260 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -69,10 +69,10 @@ void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu)
69 69
70 for (i = 0; i < 32; i += 4) { 70 for (i = 0; i < 32; i += 4) {
71 printk("gpr%02d: %08lx %08lx %08lx %08lx\n", i, 71 printk("gpr%02d: %08lx %08lx %08lx %08lx\n", i,
72 vcpu->arch.gpr[i], 72 kvmppc_get_gpr(vcpu, i),
73 vcpu->arch.gpr[i+1], 73 kvmppc_get_gpr(vcpu, i+1),
74 vcpu->arch.gpr[i+2], 74 kvmppc_get_gpr(vcpu, i+2),
75 vcpu->arch.gpr[i+3]); 75 kvmppc_get_gpr(vcpu, i+3));
76 } 76 }
77} 77}
78 78
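
From here on, most hunks in booke.c, booke_emulate.c, e500_emulate.c, e500_tlb.c, emulate.c and powerpc.c are a mechanical conversion from direct vcpu->arch.gpr[] accesses to accessor functions. The indirection exists so the internal representation can change later, e.g. keeping hot registers in a shadow area the way the Book3S trampolines above stash them in the PACA. In their simplest form, consistent with every call site below:

    static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
    {
        vcpu->arch.gpr[num] = val;
    }

    static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
    {
        return vcpu->arch.gpr[num];
    }

kvmppc_get_cr/kvmppc_set_cr and kvmppc_get_xer/kvmppc_set_xer, used in the get_regs/set_regs ioctls below, follow the same pattern.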
@@ -82,8 +82,32 @@ static void kvmppc_booke_queue_irqprio(struct kvm_vcpu *vcpu,
82 set_bit(priority, &vcpu->arch.pending_exceptions); 82 set_bit(priority, &vcpu->arch.pending_exceptions);
83} 83}
84 84
85void kvmppc_core_queue_program(struct kvm_vcpu *vcpu) 85static void kvmppc_core_queue_dtlb_miss(struct kvm_vcpu *vcpu,
86 ulong dear_flags, ulong esr_flags)
86{ 87{
88 vcpu->arch.queued_dear = dear_flags;
89 vcpu->arch.queued_esr = esr_flags;
90 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DTLB_MISS);
91}
92
93static void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu,
94 ulong dear_flags, ulong esr_flags)
95{
96 vcpu->arch.queued_dear = dear_flags;
97 vcpu->arch.queued_esr = esr_flags;
98 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DATA_STORAGE);
99}
100
101static void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu,
102 ulong esr_flags)
103{
104 vcpu->arch.queued_esr = esr_flags;
105 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_INST_STORAGE);
106}
107
108void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong esr_flags)
109{
110 vcpu->arch.queued_esr = esr_flags;
87 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_PROGRAM); 111 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_PROGRAM);
88} 112}
89 113
@@ -97,6 +121,11 @@ int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu)
97 return test_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions); 121 return test_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
98} 122}
99 123
124void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu)
125{
126 clear_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
127}
128
100void kvmppc_core_queue_external(struct kvm_vcpu *vcpu, 129void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
101 struct kvm_interrupt *irq) 130 struct kvm_interrupt *irq)
102{ 131{
@@ -109,14 +138,19 @@ static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
109{ 138{
110 int allowed = 0; 139 int allowed = 0;
111 ulong msr_mask; 140 ulong msr_mask;
141 bool update_esr = false, update_dear = false;
112 142
113 switch (priority) { 143 switch (priority) {
114 case BOOKE_IRQPRIO_PROGRAM:
115 case BOOKE_IRQPRIO_DTLB_MISS: 144 case BOOKE_IRQPRIO_DTLB_MISS:
116 case BOOKE_IRQPRIO_ITLB_MISS:
117 case BOOKE_IRQPRIO_SYSCALL:
118 case BOOKE_IRQPRIO_DATA_STORAGE: 145 case BOOKE_IRQPRIO_DATA_STORAGE:
146 update_dear = true;
147 /* fall through */
119 case BOOKE_IRQPRIO_INST_STORAGE: 148 case BOOKE_IRQPRIO_INST_STORAGE:
149 case BOOKE_IRQPRIO_PROGRAM:
150 update_esr = true;
151 /* fall through */
152 case BOOKE_IRQPRIO_ITLB_MISS:
153 case BOOKE_IRQPRIO_SYSCALL:
120 case BOOKE_IRQPRIO_FP_UNAVAIL: 154 case BOOKE_IRQPRIO_FP_UNAVAIL:
121 case BOOKE_IRQPRIO_SPE_UNAVAIL: 155 case BOOKE_IRQPRIO_SPE_UNAVAIL:
122 case BOOKE_IRQPRIO_SPE_FP_DATA: 156 case BOOKE_IRQPRIO_SPE_FP_DATA:
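
The fall-through ladder above is compact but easy to misread. De-sugared, the two flags come out as follows (same identifiers as the hunk):

    /* equivalent straight-line form of the fall-through cases */
    update_dear = (priority == BOOKE_IRQPRIO_DTLB_MISS ||
                   priority == BOOKE_IRQPRIO_DATA_STORAGE);
    update_esr  = update_dear ||
                  priority == BOOKE_IRQPRIO_INST_STORAGE ||
                  priority == BOOKE_IRQPRIO_PROGRAM;

so a DTLB miss or data-storage interrupt updates both DEAR and ESR at delivery time, an instruction-storage or program interrupt updates only ESR, and the remaining priorities update neither.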
@@ -151,6 +185,10 @@ static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
151 vcpu->arch.srr0 = vcpu->arch.pc; 185 vcpu->arch.srr0 = vcpu->arch.pc;
152 vcpu->arch.srr1 = vcpu->arch.msr; 186 vcpu->arch.srr1 = vcpu->arch.msr;
153 vcpu->arch.pc = vcpu->arch.ivpr | vcpu->arch.ivor[priority]; 187 vcpu->arch.pc = vcpu->arch.ivpr | vcpu->arch.ivor[priority];
188 if (update_esr == true)
189 vcpu->arch.esr = vcpu->arch.queued_esr;
190 if (update_dear == true)
191 vcpu->arch.dear = vcpu->arch.queued_dear;
154 kvmppc_set_msr(vcpu, vcpu->arch.msr & msr_mask); 192 kvmppc_set_msr(vcpu, vcpu->arch.msr & msr_mask);
155 193
156 clear_bit(priority, &vcpu->arch.pending_exceptions); 194 clear_bit(priority, &vcpu->arch.pending_exceptions);
@@ -223,8 +261,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
223 if (vcpu->arch.msr & MSR_PR) { 261 if (vcpu->arch.msr & MSR_PR) {
224 /* Program traps generated by user-level software must be handled 262 /* Program traps generated by user-level software must be handled
225 * by the guest kernel. */ 263 * by the guest kernel. */
226 vcpu->arch.esr = vcpu->arch.fault_esr; 264 kvmppc_core_queue_program(vcpu, vcpu->arch.fault_esr);
227 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_PROGRAM);
228 r = RESUME_GUEST; 265 r = RESUME_GUEST;
229 kvmppc_account_exit(vcpu, USR_PR_INST); 266 kvmppc_account_exit(vcpu, USR_PR_INST);
230 break; 267 break;
@@ -280,16 +317,14 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
280 break; 317 break;
281 318
282 case BOOKE_INTERRUPT_DATA_STORAGE: 319 case BOOKE_INTERRUPT_DATA_STORAGE:
283 vcpu->arch.dear = vcpu->arch.fault_dear; 320 kvmppc_core_queue_data_storage(vcpu, vcpu->arch.fault_dear,
284 vcpu->arch.esr = vcpu->arch.fault_esr; 321 vcpu->arch.fault_esr);
285 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DATA_STORAGE);
286 kvmppc_account_exit(vcpu, DSI_EXITS); 322 kvmppc_account_exit(vcpu, DSI_EXITS);
287 r = RESUME_GUEST; 323 r = RESUME_GUEST;
288 break; 324 break;
289 325
290 case BOOKE_INTERRUPT_INST_STORAGE: 326 case BOOKE_INTERRUPT_INST_STORAGE:
291 vcpu->arch.esr = vcpu->arch.fault_esr; 327 kvmppc_core_queue_inst_storage(vcpu, vcpu->arch.fault_esr);
292 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_INST_STORAGE);
293 kvmppc_account_exit(vcpu, ISI_EXITS); 328 kvmppc_account_exit(vcpu, ISI_EXITS);
294 r = RESUME_GUEST; 329 r = RESUME_GUEST;
295 break; 330 break;
@@ -310,9 +345,9 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
310 gtlb_index = kvmppc_mmu_dtlb_index(vcpu, eaddr); 345 gtlb_index = kvmppc_mmu_dtlb_index(vcpu, eaddr);
311 if (gtlb_index < 0) { 346 if (gtlb_index < 0) {
312 /* The guest didn't have a mapping for it. */ 347 /* The guest didn't have a mapping for it. */
313 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DTLB_MISS); 348 kvmppc_core_queue_dtlb_miss(vcpu,
314 vcpu->arch.dear = vcpu->arch.fault_dear; 349 vcpu->arch.fault_dear,
315 vcpu->arch.esr = vcpu->arch.fault_esr; 350 vcpu->arch.fault_esr);
316 kvmppc_mmu_dtlb_miss(vcpu); 351 kvmppc_mmu_dtlb_miss(vcpu);
317 kvmppc_account_exit(vcpu, DTLB_REAL_MISS_EXITS); 352 kvmppc_account_exit(vcpu, DTLB_REAL_MISS_EXITS);
318 r = RESUME_GUEST; 353 r = RESUME_GUEST;
@@ -426,7 +461,7 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
426{ 461{
427 vcpu->arch.pc = 0; 462 vcpu->arch.pc = 0;
428 vcpu->arch.msr = 0; 463 vcpu->arch.msr = 0;
429 vcpu->arch.gpr[1] = (16<<20) - 8; /* -8 for the callee-save LR slot */ 464 kvmppc_set_gpr(vcpu, 1, (16<<20) - 8); /* -8 for the callee-save LR slot */
430 465
431 vcpu->arch.shadow_pid = 1; 466 vcpu->arch.shadow_pid = 1;
432 467
@@ -444,10 +479,10 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
444 int i; 479 int i;
445 480
446 regs->pc = vcpu->arch.pc; 481 regs->pc = vcpu->arch.pc;
447 regs->cr = vcpu->arch.cr; 482 regs->cr = kvmppc_get_cr(vcpu);
448 regs->ctr = vcpu->arch.ctr; 483 regs->ctr = vcpu->arch.ctr;
449 regs->lr = vcpu->arch.lr; 484 regs->lr = vcpu->arch.lr;
450 regs->xer = vcpu->arch.xer; 485 regs->xer = kvmppc_get_xer(vcpu);
451 regs->msr = vcpu->arch.msr; 486 regs->msr = vcpu->arch.msr;
452 regs->srr0 = vcpu->arch.srr0; 487 regs->srr0 = vcpu->arch.srr0;
453 regs->srr1 = vcpu->arch.srr1; 488 regs->srr1 = vcpu->arch.srr1;
@@ -461,7 +496,7 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
461 regs->sprg7 = vcpu->arch.sprg6; 496 regs->sprg7 = vcpu->arch.sprg6;
462 497
463 for (i = 0; i < ARRAY_SIZE(regs->gpr); i++) 498 for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
464 regs->gpr[i] = vcpu->arch.gpr[i]; 499 regs->gpr[i] = kvmppc_get_gpr(vcpu, i);
465 500
466 return 0; 501 return 0;
467} 502}
@@ -471,10 +506,10 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
471 int i; 506 int i;
472 507
473 vcpu->arch.pc = regs->pc; 508 vcpu->arch.pc = regs->pc;
474 vcpu->arch.cr = regs->cr; 509 kvmppc_set_cr(vcpu, regs->cr);
475 vcpu->arch.ctr = regs->ctr; 510 vcpu->arch.ctr = regs->ctr;
476 vcpu->arch.lr = regs->lr; 511 vcpu->arch.lr = regs->lr;
477 vcpu->arch.xer = regs->xer; 512 kvmppc_set_xer(vcpu, regs->xer);
478 kvmppc_set_msr(vcpu, regs->msr); 513 kvmppc_set_msr(vcpu, regs->msr);
479 vcpu->arch.srr0 = regs->srr0; 514 vcpu->arch.srr0 = regs->srr0;
480 vcpu->arch.srr1 = regs->srr1; 515 vcpu->arch.srr1 = regs->srr1;
@@ -486,8 +521,8 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
486 vcpu->arch.sprg6 = regs->sprg5; 521 vcpu->arch.sprg6 = regs->sprg5;
487 vcpu->arch.sprg7 = regs->sprg6; 522 vcpu->arch.sprg7 = regs->sprg6;
488 523
489 for (i = 0; i < ARRAY_SIZE(vcpu->arch.gpr); i++) 524 for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
490 vcpu->arch.gpr[i] = regs->gpr[i]; 525 kvmppc_set_gpr(vcpu, i, regs->gpr[i]);
491 526
492 return 0; 527 return 0;
493} 528}
diff --git a/arch/powerpc/kvm/booke_emulate.c b/arch/powerpc/kvm/booke_emulate.c
index aebc65e93f4b..cbc790ee1928 100644
--- a/arch/powerpc/kvm/booke_emulate.c
+++ b/arch/powerpc/kvm/booke_emulate.c
@@ -62,20 +62,20 @@ int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
62 62
63 case OP_31_XOP_MFMSR: 63 case OP_31_XOP_MFMSR:
64 rt = get_rt(inst); 64 rt = get_rt(inst);
65 vcpu->arch.gpr[rt] = vcpu->arch.msr; 65 kvmppc_set_gpr(vcpu, rt, vcpu->arch.msr);
66 kvmppc_set_exit_type(vcpu, EMULATED_MFMSR_EXITS); 66 kvmppc_set_exit_type(vcpu, EMULATED_MFMSR_EXITS);
67 break; 67 break;
68 68
69 case OP_31_XOP_MTMSR: 69 case OP_31_XOP_MTMSR:
70 rs = get_rs(inst); 70 rs = get_rs(inst);
71 kvmppc_set_exit_type(vcpu, EMULATED_MTMSR_EXITS); 71 kvmppc_set_exit_type(vcpu, EMULATED_MTMSR_EXITS);
72 kvmppc_set_msr(vcpu, vcpu->arch.gpr[rs]); 72 kvmppc_set_msr(vcpu, kvmppc_get_gpr(vcpu, rs));
73 break; 73 break;
74 74
75 case OP_31_XOP_WRTEE: 75 case OP_31_XOP_WRTEE:
76 rs = get_rs(inst); 76 rs = get_rs(inst);
77 vcpu->arch.msr = (vcpu->arch.msr & ~MSR_EE) 77 vcpu->arch.msr = (vcpu->arch.msr & ~MSR_EE)
78 | (vcpu->arch.gpr[rs] & MSR_EE); 78 | (kvmppc_get_gpr(vcpu, rs) & MSR_EE);
79 kvmppc_set_exit_type(vcpu, EMULATED_WRTEE_EXITS); 79 kvmppc_set_exit_type(vcpu, EMULATED_WRTEE_EXITS);
80 break; 80 break;
81 81
@@ -101,22 +101,23 @@ int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
101int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs) 101int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
102{ 102{
103 int emulated = EMULATE_DONE; 103 int emulated = EMULATE_DONE;
104 ulong spr_val = kvmppc_get_gpr(vcpu, rs);
104 105
105 switch (sprn) { 106 switch (sprn) {
106 case SPRN_DEAR: 107 case SPRN_DEAR:
107 vcpu->arch.dear = vcpu->arch.gpr[rs]; break; 108 vcpu->arch.dear = spr_val; break;
108 case SPRN_ESR: 109 case SPRN_ESR:
109 vcpu->arch.esr = vcpu->arch.gpr[rs]; break; 110 vcpu->arch.esr = spr_val; break;
110 case SPRN_DBCR0: 111 case SPRN_DBCR0:
111 vcpu->arch.dbcr0 = vcpu->arch.gpr[rs]; break; 112 vcpu->arch.dbcr0 = spr_val; break;
112 case SPRN_DBCR1: 113 case SPRN_DBCR1:
113 vcpu->arch.dbcr1 = vcpu->arch.gpr[rs]; break; 114 vcpu->arch.dbcr1 = spr_val; break;
114 case SPRN_DBSR: 115 case SPRN_DBSR:
115 vcpu->arch.dbsr &= ~vcpu->arch.gpr[rs]; break; 116 vcpu->arch.dbsr &= ~spr_val; break;
116 case SPRN_TSR: 117 case SPRN_TSR:
117 vcpu->arch.tsr &= ~vcpu->arch.gpr[rs]; break; 118 vcpu->arch.tsr &= ~spr_val; break;
118 case SPRN_TCR: 119 case SPRN_TCR:
119 vcpu->arch.tcr = vcpu->arch.gpr[rs]; 120 vcpu->arch.tcr = spr_val;
120 kvmppc_emulate_dec(vcpu); 121 kvmppc_emulate_dec(vcpu);
121 break; 122 break;
122 123
@@ -124,64 +125,64 @@ int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
124 * loaded into the real SPRGs when resuming the 125 * loaded into the real SPRGs when resuming the
125 * guest. */ 126 * guest. */
126 case SPRN_SPRG4: 127 case SPRN_SPRG4:
127 vcpu->arch.sprg4 = vcpu->arch.gpr[rs]; break; 128 vcpu->arch.sprg4 = spr_val; break;
128 case SPRN_SPRG5: 129 case SPRN_SPRG5:
129 vcpu->arch.sprg5 = vcpu->arch.gpr[rs]; break; 130 vcpu->arch.sprg5 = spr_val; break;
130 case SPRN_SPRG6: 131 case SPRN_SPRG6:
131 vcpu->arch.sprg6 = vcpu->arch.gpr[rs]; break; 132 vcpu->arch.sprg6 = spr_val; break;
132 case SPRN_SPRG7: 133 case SPRN_SPRG7:
133 vcpu->arch.sprg7 = vcpu->arch.gpr[rs]; break; 134 vcpu->arch.sprg7 = spr_val; break;
134 135
135 case SPRN_IVPR: 136 case SPRN_IVPR:
136 vcpu->arch.ivpr = vcpu->arch.gpr[rs]; 137 vcpu->arch.ivpr = spr_val;
137 break; 138 break;
138 case SPRN_IVOR0: 139 case SPRN_IVOR0:
139 vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL] = vcpu->arch.gpr[rs]; 140 vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL] = spr_val;
140 break; 141 break;
141 case SPRN_IVOR1: 142 case SPRN_IVOR1:
142 vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK] = vcpu->arch.gpr[rs]; 143 vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK] = spr_val;
143 break; 144 break;
144 case SPRN_IVOR2: 145 case SPRN_IVOR2:
145 vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE] = vcpu->arch.gpr[rs]; 146 vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE] = spr_val;
146 break; 147 break;
147 case SPRN_IVOR3: 148 case SPRN_IVOR3:
148 vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE] = vcpu->arch.gpr[rs]; 149 vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE] = spr_val;
149 break; 150 break;
150 case SPRN_IVOR4: 151 case SPRN_IVOR4:
151 vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL] = vcpu->arch.gpr[rs]; 152 vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL] = spr_val;
152 break; 153 break;
153 case SPRN_IVOR5: 154 case SPRN_IVOR5:
154 vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT] = vcpu->arch.gpr[rs]; 155 vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT] = spr_val;
155 break; 156 break;
156 case SPRN_IVOR6: 157 case SPRN_IVOR6:
157 vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM] = vcpu->arch.gpr[rs]; 158 vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM] = spr_val;
158 break; 159 break;
159 case SPRN_IVOR7: 160 case SPRN_IVOR7:
160 vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL] = vcpu->arch.gpr[rs]; 161 vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL] = spr_val;
161 break; 162 break;
162 case SPRN_IVOR8: 163 case SPRN_IVOR8:
163 vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL] = vcpu->arch.gpr[rs]; 164 vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL] = spr_val;
164 break; 165 break;
165 case SPRN_IVOR9: 166 case SPRN_IVOR9:
166 vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL] = vcpu->arch.gpr[rs]; 167 vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL] = spr_val;
167 break; 168 break;
168 case SPRN_IVOR10: 169 case SPRN_IVOR10:
169 vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER] = vcpu->arch.gpr[rs]; 170 vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER] = spr_val;
170 break; 171 break;
171 case SPRN_IVOR11: 172 case SPRN_IVOR11:
172 vcpu->arch.ivor[BOOKE_IRQPRIO_FIT] = vcpu->arch.gpr[rs]; 173 vcpu->arch.ivor[BOOKE_IRQPRIO_FIT] = spr_val;
173 break; 174 break;
174 case SPRN_IVOR12: 175 case SPRN_IVOR12:
175 vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG] = vcpu->arch.gpr[rs]; 176 vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG] = spr_val;
176 break; 177 break;
177 case SPRN_IVOR13: 178 case SPRN_IVOR13:
178 vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS] = vcpu->arch.gpr[rs]; 179 vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS] = spr_val;
179 break; 180 break;
180 case SPRN_IVOR14: 181 case SPRN_IVOR14:
181 vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS] = vcpu->arch.gpr[rs]; 182 vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS] = spr_val;
182 break; 183 break;
183 case SPRN_IVOR15: 184 case SPRN_IVOR15:
184 vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG] = vcpu->arch.gpr[rs]; 185 vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG] = spr_val;
185 break; 186 break;
186 187
187 default: 188 default:
@@ -197,65 +198,65 @@ int kvmppc_booke_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
197 198
198 switch (sprn) { 199 switch (sprn) {
199 case SPRN_IVPR: 200 case SPRN_IVPR:
200 vcpu->arch.gpr[rt] = vcpu->arch.ivpr; break; 201 kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivpr); break;
201 case SPRN_DEAR: 202 case SPRN_DEAR:
202 vcpu->arch.gpr[rt] = vcpu->arch.dear; break; 203 kvmppc_set_gpr(vcpu, rt, vcpu->arch.dear); break;
203 case SPRN_ESR: 204 case SPRN_ESR:
204 vcpu->arch.gpr[rt] = vcpu->arch.esr; break; 205 kvmppc_set_gpr(vcpu, rt, vcpu->arch.esr); break;
205 case SPRN_DBCR0: 206 case SPRN_DBCR0:
206 vcpu->arch.gpr[rt] = vcpu->arch.dbcr0; break; 207 kvmppc_set_gpr(vcpu, rt, vcpu->arch.dbcr0); break;
207 case SPRN_DBCR1: 208 case SPRN_DBCR1:
208 vcpu->arch.gpr[rt] = vcpu->arch.dbcr1; break; 209 kvmppc_set_gpr(vcpu, rt, vcpu->arch.dbcr1); break;
209 case SPRN_DBSR: 210 case SPRN_DBSR:
210 vcpu->arch.gpr[rt] = vcpu->arch.dbsr; break; 211 kvmppc_set_gpr(vcpu, rt, vcpu->arch.dbsr); break;
211 212
212 case SPRN_IVOR0: 213 case SPRN_IVOR0:
213 vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL]; 214 kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL]);
214 break; 215 break;
215 case SPRN_IVOR1: 216 case SPRN_IVOR1:
216 vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK]; 217 kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK]);
217 break; 218 break;
218 case SPRN_IVOR2: 219 case SPRN_IVOR2:
219 vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE]; 220 kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE]);
220 break; 221 break;
221 case SPRN_IVOR3: 222 case SPRN_IVOR3:
222 vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE]; 223 kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE]);
223 break; 224 break;
224 case SPRN_IVOR4: 225 case SPRN_IVOR4:
225 vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL]; 226 kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL]);
226 break; 227 break;
227 case SPRN_IVOR5: 228 case SPRN_IVOR5:
228 vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT]; 229 kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT]);
229 break; 230 break;
230 case SPRN_IVOR6: 231 case SPRN_IVOR6:
231 vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM]; 232 kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM]);
232 break; 233 break;
233 case SPRN_IVOR7: 234 case SPRN_IVOR7:
234 vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL]; 235 kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL]);
235 break; 236 break;
236 case SPRN_IVOR8: 237 case SPRN_IVOR8:
237 vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL]; 238 kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL]);
238 break; 239 break;
239 case SPRN_IVOR9: 240 case SPRN_IVOR9:
240 vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL]; 241 kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL]);
241 break; 242 break;
242 case SPRN_IVOR10: 243 case SPRN_IVOR10:
243 vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER]; 244 kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER]);
244 break; 245 break;
245 case SPRN_IVOR11: 246 case SPRN_IVOR11:
246 vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_FIT]; 247 kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_FIT]);
247 break; 248 break;
248 case SPRN_IVOR12: 249 case SPRN_IVOR12:
249 vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG]; 250 kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG]);
250 break; 251 break;
251 case SPRN_IVOR13: 252 case SPRN_IVOR13:
252 vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS]; 253 kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS]);
253 break; 254 break;
254 case SPRN_IVOR14: 255 case SPRN_IVOR14:
255 vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS]; 256 kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS]);
256 break; 257 break;
257 case SPRN_IVOR15: 258 case SPRN_IVOR15:
258 vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG]; 259 kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG]);
259 break; 260 break;
260 261
261 default: 262 default:
diff --git a/arch/powerpc/kvm/e500.c b/arch/powerpc/kvm/e500.c
index 64949eef43f1..efa1198940ab 100644
--- a/arch/powerpc/kvm/e500.c
+++ b/arch/powerpc/kvm/e500.c
@@ -60,6 +60,12 @@ int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu)
60 60
61 kvmppc_e500_tlb_setup(vcpu_e500); 61 kvmppc_e500_tlb_setup(vcpu_e500);
62 62
63 /* Registers init */
64 vcpu->arch.pvr = mfspr(SPRN_PVR);
65
 66 /* Since booke kvm only supports one core, update all vcpus' PIR to 0 */
67 vcpu->vcpu_id = 0;
68
63 return 0; 69 return 0;
64} 70}
65 71
diff --git a/arch/powerpc/kvm/e500_emulate.c b/arch/powerpc/kvm/e500_emulate.c
index be95b8d8e3b7..8e3edfbc9634 100644
--- a/arch/powerpc/kvm/e500_emulate.c
+++ b/arch/powerpc/kvm/e500_emulate.c
@@ -74,54 +74,59 @@ int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
74{ 74{
75 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); 75 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
76 int emulated = EMULATE_DONE; 76 int emulated = EMULATE_DONE;
77 ulong spr_val = kvmppc_get_gpr(vcpu, rs);
77 78
78 switch (sprn) { 79 switch (sprn) {
79 case SPRN_PID: 80 case SPRN_PID:
80 vcpu_e500->pid[0] = vcpu->arch.shadow_pid = 81 vcpu_e500->pid[0] = vcpu->arch.shadow_pid =
81 vcpu->arch.pid = vcpu->arch.gpr[rs]; 82 vcpu->arch.pid = spr_val;
82 break; 83 break;
83 case SPRN_PID1: 84 case SPRN_PID1:
84 vcpu_e500->pid[1] = vcpu->arch.gpr[rs]; break; 85 vcpu_e500->pid[1] = spr_val; break;
85 case SPRN_PID2: 86 case SPRN_PID2:
86 vcpu_e500->pid[2] = vcpu->arch.gpr[rs]; break; 87 vcpu_e500->pid[2] = spr_val; break;
87 case SPRN_MAS0: 88 case SPRN_MAS0:
88 vcpu_e500->mas0 = vcpu->arch.gpr[rs]; break; 89 vcpu_e500->mas0 = spr_val; break;
89 case SPRN_MAS1: 90 case SPRN_MAS1:
90 vcpu_e500->mas1 = vcpu->arch.gpr[rs]; break; 91 vcpu_e500->mas1 = spr_val; break;
91 case SPRN_MAS2: 92 case SPRN_MAS2:
92 vcpu_e500->mas2 = vcpu->arch.gpr[rs]; break; 93 vcpu_e500->mas2 = spr_val; break;
93 case SPRN_MAS3: 94 case SPRN_MAS3:
94 vcpu_e500->mas3 = vcpu->arch.gpr[rs]; break; 95 vcpu_e500->mas3 = spr_val; break;
95 case SPRN_MAS4: 96 case SPRN_MAS4:
96 vcpu_e500->mas4 = vcpu->arch.gpr[rs]; break; 97 vcpu_e500->mas4 = spr_val; break;
97 case SPRN_MAS6: 98 case SPRN_MAS6:
98 vcpu_e500->mas6 = vcpu->arch.gpr[rs]; break; 99 vcpu_e500->mas6 = spr_val; break;
99 case SPRN_MAS7: 100 case SPRN_MAS7:
100 vcpu_e500->mas7 = vcpu->arch.gpr[rs]; break; 101 vcpu_e500->mas7 = spr_val; break;
102 case SPRN_L1CSR0:
103 vcpu_e500->l1csr0 = spr_val;
104 vcpu_e500->l1csr0 &= ~(L1CSR0_DCFI | L1CSR0_CLFC);
105 break;
101 case SPRN_L1CSR1: 106 case SPRN_L1CSR1:
102 vcpu_e500->l1csr1 = vcpu->arch.gpr[rs]; break; 107 vcpu_e500->l1csr1 = spr_val; break;
103 case SPRN_HID0: 108 case SPRN_HID0:
104 vcpu_e500->hid0 = vcpu->arch.gpr[rs]; break; 109 vcpu_e500->hid0 = spr_val; break;
105 case SPRN_HID1: 110 case SPRN_HID1:
106 vcpu_e500->hid1 = vcpu->arch.gpr[rs]; break; 111 vcpu_e500->hid1 = spr_val; break;
107 112
108 case SPRN_MMUCSR0: 113 case SPRN_MMUCSR0:
109 emulated = kvmppc_e500_emul_mt_mmucsr0(vcpu_e500, 114 emulated = kvmppc_e500_emul_mt_mmucsr0(vcpu_e500,
110 vcpu->arch.gpr[rs]); 115 spr_val);
111 break; 116 break;
112 117
113 /* extra exceptions */ 118 /* extra exceptions */
114 case SPRN_IVOR32: 119 case SPRN_IVOR32:
115 vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL] = vcpu->arch.gpr[rs]; 120 vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL] = spr_val;
116 break; 121 break;
117 case SPRN_IVOR33: 122 case SPRN_IVOR33:
118 vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA] = vcpu->arch.gpr[rs]; 123 vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA] = spr_val;
119 break; 124 break;
120 case SPRN_IVOR34: 125 case SPRN_IVOR34:
121 vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND] = vcpu->arch.gpr[rs]; 126 vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND] = spr_val;
122 break; 127 break;
123 case SPRN_IVOR35: 128 case SPRN_IVOR35:
124 vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR] = vcpu->arch.gpr[rs]; 129 vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR] = spr_val;
125 break; 130 break;
126 131
127 default: 132 default:
@@ -138,63 +143,57 @@ int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
138 143
139 switch (sprn) { 144 switch (sprn) {
140 case SPRN_PID: 145 case SPRN_PID:
141 vcpu->arch.gpr[rt] = vcpu_e500->pid[0]; break; 146 kvmppc_set_gpr(vcpu, rt, vcpu_e500->pid[0]); break;
142 case SPRN_PID1: 147 case SPRN_PID1:
143 vcpu->arch.gpr[rt] = vcpu_e500->pid[1]; break; 148 kvmppc_set_gpr(vcpu, rt, vcpu_e500->pid[1]); break;
144 case SPRN_PID2: 149 case SPRN_PID2:
145 vcpu->arch.gpr[rt] = vcpu_e500->pid[2]; break; 150 kvmppc_set_gpr(vcpu, rt, vcpu_e500->pid[2]); break;
146 case SPRN_MAS0: 151 case SPRN_MAS0:
147 vcpu->arch.gpr[rt] = vcpu_e500->mas0; break; 152 kvmppc_set_gpr(vcpu, rt, vcpu_e500->mas0); break;
148 case SPRN_MAS1: 153 case SPRN_MAS1:
149 vcpu->arch.gpr[rt] = vcpu_e500->mas1; break; 154 kvmppc_set_gpr(vcpu, rt, vcpu_e500->mas1); break;
150 case SPRN_MAS2: 155 case SPRN_MAS2:
151 vcpu->arch.gpr[rt] = vcpu_e500->mas2; break; 156 kvmppc_set_gpr(vcpu, rt, vcpu_e500->mas2); break;
152 case SPRN_MAS3: 157 case SPRN_MAS3:
153 vcpu->arch.gpr[rt] = vcpu_e500->mas3; break; 158 kvmppc_set_gpr(vcpu, rt, vcpu_e500->mas3); break;
154 case SPRN_MAS4: 159 case SPRN_MAS4:
155 vcpu->arch.gpr[rt] = vcpu_e500->mas4; break; 160 kvmppc_set_gpr(vcpu, rt, vcpu_e500->mas4); break;
156 case SPRN_MAS6: 161 case SPRN_MAS6:
157 vcpu->arch.gpr[rt] = vcpu_e500->mas6; break; 162 kvmppc_set_gpr(vcpu, rt, vcpu_e500->mas6); break;
158 case SPRN_MAS7: 163 case SPRN_MAS7:
159 vcpu->arch.gpr[rt] = vcpu_e500->mas7; break; 164 kvmppc_set_gpr(vcpu, rt, vcpu_e500->mas7); break;
160 165
161 case SPRN_TLB0CFG: 166 case SPRN_TLB0CFG:
162 vcpu->arch.gpr[rt] = mfspr(SPRN_TLB0CFG); 167 kvmppc_set_gpr(vcpu, rt, vcpu_e500->tlb0cfg); break;
163 vcpu->arch.gpr[rt] &= ~0xfffUL;
164 vcpu->arch.gpr[rt] |= vcpu_e500->guest_tlb_size[0];
165 break;
166
167 case SPRN_TLB1CFG: 168 case SPRN_TLB1CFG:
168 vcpu->arch.gpr[rt] = mfspr(SPRN_TLB1CFG); 169 kvmppc_set_gpr(vcpu, rt, vcpu_e500->tlb1cfg); break;
169 vcpu->arch.gpr[rt] &= ~0xfffUL; 170 case SPRN_L1CSR0:
170 vcpu->arch.gpr[rt] |= vcpu_e500->guest_tlb_size[1]; 171 kvmppc_set_gpr(vcpu, rt, vcpu_e500->l1csr0); break;
171 break;
172
173 case SPRN_L1CSR1: 172 case SPRN_L1CSR1:
174 vcpu->arch.gpr[rt] = vcpu_e500->l1csr1; break; 173 kvmppc_set_gpr(vcpu, rt, vcpu_e500->l1csr1); break;
175 case SPRN_HID0: 174 case SPRN_HID0:
176 vcpu->arch.gpr[rt] = vcpu_e500->hid0; break; 175 kvmppc_set_gpr(vcpu, rt, vcpu_e500->hid0); break;
177 case SPRN_HID1: 176 case SPRN_HID1:
178 vcpu->arch.gpr[rt] = vcpu_e500->hid1; break; 177 kvmppc_set_gpr(vcpu, rt, vcpu_e500->hid1); break;
179 178
180 case SPRN_MMUCSR0: 179 case SPRN_MMUCSR0:
181 vcpu->arch.gpr[rt] = 0; break; 180 kvmppc_set_gpr(vcpu, rt, 0); break;
182 181
183 case SPRN_MMUCFG: 182 case SPRN_MMUCFG:
184 vcpu->arch.gpr[rt] = mfspr(SPRN_MMUCFG); break; 183 kvmppc_set_gpr(vcpu, rt, mfspr(SPRN_MMUCFG)); break;
185 184
186 /* extra exceptions */ 185 /* extra exceptions */
187 case SPRN_IVOR32: 186 case SPRN_IVOR32:
188 vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL]; 187 kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL]);
189 break; 188 break;
190 case SPRN_IVOR33: 189 case SPRN_IVOR33:
191 vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA]; 190 kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA]);
192 break; 191 break;
193 case SPRN_IVOR34: 192 case SPRN_IVOR34:
194 vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND]; 193 kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND]);
195 break; 194 break;
196 case SPRN_IVOR35: 195 case SPRN_IVOR35:
197 vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR]; 196 kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR]);
198 break; 197 break;
199 default: 198 default:
200 emulated = kvmppc_booke_emulate_mfspr(vcpu, sprn, rt); 199 emulated = kvmppc_booke_emulate_mfspr(vcpu, sprn, rt);
diff --git a/arch/powerpc/kvm/e500_tlb.c b/arch/powerpc/kvm/e500_tlb.c
index fb1e1dc11ba5..0d772e6b6318 100644
--- a/arch/powerpc/kvm/e500_tlb.c
+++ b/arch/powerpc/kvm/e500_tlb.c
@@ -417,7 +417,7 @@ int kvmppc_e500_emul_tlbivax(struct kvm_vcpu *vcpu, int ra, int rb)
417 int esel, tlbsel; 417 int esel, tlbsel;
418 gva_t ea; 418 gva_t ea;
419 419
420 ea = ((ra) ? vcpu->arch.gpr[ra] : 0) + vcpu->arch.gpr[rb]; 420 ea = ((ra) ? kvmppc_get_gpr(vcpu, ra) : 0) + kvmppc_get_gpr(vcpu, rb);
421 421
422 ia = (ea >> 2) & 0x1; 422 ia = (ea >> 2) & 0x1;
423 423
@@ -470,7 +470,7 @@ int kvmppc_e500_emul_tlbsx(struct kvm_vcpu *vcpu, int rb)
470 struct tlbe *gtlbe = NULL; 470 struct tlbe *gtlbe = NULL;
471 gva_t ea; 471 gva_t ea;
472 472
473 ea = vcpu->arch.gpr[rb]; 473 ea = kvmppc_get_gpr(vcpu, rb);
474 474
475 for (tlbsel = 0; tlbsel < 2; tlbsel++) { 475 for (tlbsel = 0; tlbsel < 2; tlbsel++) {
476 esel = kvmppc_e500_tlb_index(vcpu_e500, ea, tlbsel, pid, as); 476 esel = kvmppc_e500_tlb_index(vcpu_e500, ea, tlbsel, pid, as);
@@ -728,6 +728,12 @@ int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *vcpu_e500)
728 if (vcpu_e500->shadow_pages[1] == NULL) 728 if (vcpu_e500->shadow_pages[1] == NULL)
729 goto err_out_page0; 729 goto err_out_page0;
730 730
731 /* Init TLB configuration register */
732 vcpu_e500->tlb0cfg = mfspr(SPRN_TLB0CFG) & ~0xfffUL;
733 vcpu_e500->tlb0cfg |= vcpu_e500->guest_tlb_size[0];
734 vcpu_e500->tlb1cfg = mfspr(SPRN_TLB1CFG) & ~0xfffUL;
735 vcpu_e500->tlb1cfg |= vcpu_e500->guest_tlb_size[1];
736
731 return 0; 737 return 0;
732 738
733err_out_page0: 739err_out_page0:
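
Caching TLB0CFG/TLB1CFG at init time is what lets the mfspr emulation earlier in this series become a plain read of vcpu_e500->tlbNcfg. The cached value is the hardware register with its entry-count field (the low 12 bits, per the ~0xfffUL mask above) replaced by the guest-visible TLB size; as a sketch:

    /* the value built once at init, per the code above */
    static u32 guest_tlbcfg(u32 hw_tlbcfg, u32 guest_entries)
    {
        return (hw_tlbcfg & ~0xfffU) | guest_entries;
    }

Besides saving an mfspr per trap, computing it once keeps the guest-visible geometry fixed for the vcpu's lifetime.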
diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c
index 4a9ac6640fad..cb72a65f4ecc 100644
--- a/arch/powerpc/kvm/emulate.c
+++ b/arch/powerpc/kvm/emulate.c
@@ -83,6 +83,9 @@ void kvmppc_emulate_dec(struct kvm_vcpu *vcpu)
83 83
84 pr_debug("mtDEC: %x\n", vcpu->arch.dec); 84 pr_debug("mtDEC: %x\n", vcpu->arch.dec);
85#ifdef CONFIG_PPC64 85#ifdef CONFIG_PPC64
86 /* mtdec lowers the interrupt line when positive. */
87 kvmppc_core_dequeue_dec(vcpu);
88
86 /* POWER4+ triggers a dec interrupt if the value is < 0 */ 89 /* POWER4+ triggers a dec interrupt if the value is < 0 */
87 if (vcpu->arch.dec & 0x80000000) { 90 if (vcpu->arch.dec & 0x80000000) {
88 hrtimer_try_to_cancel(&vcpu->arch.dec_timer); 91 hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
@@ -140,14 +143,18 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
140 143
141 pr_debug(KERN_INFO "Emulating opcode %d / %d\n", get_op(inst), get_xop(inst)); 144 pr_debug(KERN_INFO "Emulating opcode %d / %d\n", get_op(inst), get_xop(inst));
142 145
146 /* Try again next time */
147 if (inst == KVM_INST_FETCH_FAILED)
148 return EMULATE_DONE;
149
143 switch (get_op(inst)) { 150 switch (get_op(inst)) {
144 case OP_TRAP: 151 case OP_TRAP:
145#ifdef CONFIG_PPC64 152#ifdef CONFIG_PPC64
146 case OP_TRAP_64: 153 case OP_TRAP_64:
154 kvmppc_core_queue_program(vcpu, SRR1_PROGTRAP);
147#else 155#else
148 vcpu->arch.esr |= ESR_PTR; 156 kvmppc_core_queue_program(vcpu, vcpu->arch.esr | ESR_PTR);
149#endif 157#endif
150 kvmppc_core_queue_program(vcpu);
151 advance = 0; 158 advance = 0;
152 break; 159 break;
153 160
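
The KVM_INST_FETCH_FAILED early-out closes the loop with the trampoline changes earlier in this patch set: the exit path preloads the instruction register with the sentinel and switches the PACA guest-mode byte to KVM_GUEST_MODE_SKIP before attempting the lwz of the faulting instruction, so if that load itself faults, the handler steps over it and the sentinel survives. The emulator then simply resumes the guest; the original access faults again and the fetch gets another try:

    /* retry contract between trampoline and emulator (see the
     * book3s_64_rmhandlers.S and book3s_64_slb.S hunks above) */
    if (inst == KVM_INST_FETCH_FAILED)
        return EMULATE_DONE;   /* guest re-executes the instruction */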
@@ -167,14 +174,14 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
167 case OP_31_XOP_STWX: 174 case OP_31_XOP_STWX:
168 rs = get_rs(inst); 175 rs = get_rs(inst);
169 emulated = kvmppc_handle_store(run, vcpu, 176 emulated = kvmppc_handle_store(run, vcpu,
170 vcpu->arch.gpr[rs], 177 kvmppc_get_gpr(vcpu, rs),
171 4, 1); 178 4, 1);
172 break; 179 break;
173 180
174 case OP_31_XOP_STBX: 181 case OP_31_XOP_STBX:
175 rs = get_rs(inst); 182 rs = get_rs(inst);
176 emulated = kvmppc_handle_store(run, vcpu, 183 emulated = kvmppc_handle_store(run, vcpu,
177 vcpu->arch.gpr[rs], 184 kvmppc_get_gpr(vcpu, rs),
178 1, 1); 185 1, 1);
179 break; 186 break;
180 187
@@ -183,14 +190,14 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
183 ra = get_ra(inst); 190 ra = get_ra(inst);
184 rb = get_rb(inst); 191 rb = get_rb(inst);
185 192
186 ea = vcpu->arch.gpr[rb]; 193 ea = kvmppc_get_gpr(vcpu, rb);
187 if (ra) 194 if (ra)
188 ea += vcpu->arch.gpr[ra]; 195 ea += kvmppc_get_gpr(vcpu, ra);
189 196
190 emulated = kvmppc_handle_store(run, vcpu, 197 emulated = kvmppc_handle_store(run, vcpu,
191 vcpu->arch.gpr[rs], 198 kvmppc_get_gpr(vcpu, rs),
192 1, 1); 199 1, 1);
193 vcpu->arch.gpr[rs] = ea; 200 kvmppc_set_gpr(vcpu, rs, ea);
194 break; 201 break;
195 202
196 case OP_31_XOP_LHZX: 203 case OP_31_XOP_LHZX:
@@ -203,12 +210,12 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
203 ra = get_ra(inst); 210 ra = get_ra(inst);
204 rb = get_rb(inst); 211 rb = get_rb(inst);
205 212
206 ea = vcpu->arch.gpr[rb]; 213 ea = kvmppc_get_gpr(vcpu, rb);
207 if (ra) 214 if (ra)
208 ea += vcpu->arch.gpr[ra]; 215 ea += kvmppc_get_gpr(vcpu, ra);
209 216
210 emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); 217 emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
211 vcpu->arch.gpr[ra] = ea; 218 kvmppc_set_gpr(vcpu, ra, ea);
212 break; 219 break;
213 220
214 case OP_31_XOP_MFSPR: 221 case OP_31_XOP_MFSPR:
@@ -217,47 +224,49 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
217 224
218 switch (sprn) { 225 switch (sprn) {
219 case SPRN_SRR0: 226 case SPRN_SRR0:
220 vcpu->arch.gpr[rt] = vcpu->arch.srr0; break; 227 kvmppc_set_gpr(vcpu, rt, vcpu->arch.srr0); break;
221 case SPRN_SRR1: 228 case SPRN_SRR1:
222 vcpu->arch.gpr[rt] = vcpu->arch.srr1; break; 229 kvmppc_set_gpr(vcpu, rt, vcpu->arch.srr1); break;
223 case SPRN_PVR: 230 case SPRN_PVR:
224 vcpu->arch.gpr[rt] = vcpu->arch.pvr; break; 231 kvmppc_set_gpr(vcpu, rt, vcpu->arch.pvr); break;
225 case SPRN_PIR: 232 case SPRN_PIR:
226 vcpu->arch.gpr[rt] = vcpu->vcpu_id; break; 233 kvmppc_set_gpr(vcpu, rt, vcpu->vcpu_id); break;
227 case SPRN_MSSSR0: 234 case SPRN_MSSSR0:
228 vcpu->arch.gpr[rt] = 0; break; 235 kvmppc_set_gpr(vcpu, rt, 0); break;
229 236
230 /* Note: mftb and TBRL/TBWL are user-accessible, so 237 /* Note: mftb and TBRL/TBWL are user-accessible, so
231 * the guest can always access the real TB anyways. 238 * the guest can always access the real TB anyways.
232 * In fact, we probably will never see these traps. */ 239 * In fact, we probably will never see these traps. */
233 case SPRN_TBWL: 240 case SPRN_TBWL:
234 vcpu->arch.gpr[rt] = get_tb() >> 32; break; 241 kvmppc_set_gpr(vcpu, rt, get_tb() >> 32); break;
235 case SPRN_TBWU: 242 case SPRN_TBWU:
236 vcpu->arch.gpr[rt] = get_tb(); break; 243 kvmppc_set_gpr(vcpu, rt, get_tb()); break;
237 244
238 case SPRN_SPRG0: 245 case SPRN_SPRG0:
239 vcpu->arch.gpr[rt] = vcpu->arch.sprg0; break; 246 kvmppc_set_gpr(vcpu, rt, vcpu->arch.sprg0); break;
240 case SPRN_SPRG1: 247 case SPRN_SPRG1:
241 vcpu->arch.gpr[rt] = vcpu->arch.sprg1; break; 248 kvmppc_set_gpr(vcpu, rt, vcpu->arch.sprg1); break;
242 case SPRN_SPRG2: 249 case SPRN_SPRG2:
243 vcpu->arch.gpr[rt] = vcpu->arch.sprg2; break; 250 kvmppc_set_gpr(vcpu, rt, vcpu->arch.sprg2); break;
244 case SPRN_SPRG3: 251 case SPRN_SPRG3:
245 vcpu->arch.gpr[rt] = vcpu->arch.sprg3; break; 252 kvmppc_set_gpr(vcpu, rt, vcpu->arch.sprg3); break;
246 /* Note: SPRG4-7 are user-readable, so we don't get 253 /* Note: SPRG4-7 are user-readable, so we don't get
247 * a trap. */ 254 * a trap. */
248 255
249 case SPRN_DEC: 256 case SPRN_DEC:
250 { 257 {
251 u64 jd = get_tb() - vcpu->arch.dec_jiffies; 258 u64 jd = get_tb() - vcpu->arch.dec_jiffies;
252 vcpu->arch.gpr[rt] = vcpu->arch.dec - jd; 259 kvmppc_set_gpr(vcpu, rt, vcpu->arch.dec - jd);
253 pr_debug(KERN_INFO "mfDEC: %x - %llx = %lx\n", vcpu->arch.dec, jd, vcpu->arch.gpr[rt]); 260 pr_debug(KERN_INFO "mfDEC: %x - %llx = %lx\n",
261 vcpu->arch.dec, jd,
262 kvmppc_get_gpr(vcpu, rt));
254 break; 263 break;
255 } 264 }
256 default: 265 default:
257 emulated = kvmppc_core_emulate_mfspr(vcpu, sprn, rt); 266 emulated = kvmppc_core_emulate_mfspr(vcpu, sprn, rt);
258 if (emulated == EMULATE_FAIL) { 267 if (emulated == EMULATE_FAIL) {
259 printk("mfspr: unknown spr %x\n", sprn); 268 printk("mfspr: unknown spr %x\n", sprn);
260 vcpu->arch.gpr[rt] = 0; 269 kvmppc_set_gpr(vcpu, rt, 0);
261 } 270 }
262 break; 271 break;
263 } 272 }
@@ -269,7 +278,7 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
269 rb = get_rb(inst); 278 rb = get_rb(inst);
270 279
271 emulated = kvmppc_handle_store(run, vcpu, 280 emulated = kvmppc_handle_store(run, vcpu,
272 vcpu->arch.gpr[rs], 281 kvmppc_get_gpr(vcpu, rs),
273 2, 1); 282 2, 1);
274 break; 283 break;
275 284
@@ -278,14 +287,14 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
278 ra = get_ra(inst); 287 ra = get_ra(inst);
279 rb = get_rb(inst); 288 rb = get_rb(inst);
280 289
281 ea = vcpu->arch.gpr[rb]; 290 ea = kvmppc_get_gpr(vcpu, rb);
282 if (ra) 291 if (ra)
283 ea += vcpu->arch.gpr[ra]; 292 ea += kvmppc_get_gpr(vcpu, ra);
284 293
285 emulated = kvmppc_handle_store(run, vcpu, 294 emulated = kvmppc_handle_store(run, vcpu,
286 vcpu->arch.gpr[rs], 295 kvmppc_get_gpr(vcpu, rs),
287 2, 1); 296 2, 1);
288 vcpu->arch.gpr[ra] = ea; 297 kvmppc_set_gpr(vcpu, ra, ea);
289 break; 298 break;
290 299
291 case OP_31_XOP_MTSPR: 300 case OP_31_XOP_MTSPR:
@@ -293,9 +302,9 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
293 rs = get_rs(inst); 302 rs = get_rs(inst);
294 switch (sprn) { 303 switch (sprn) {
295 case SPRN_SRR0: 304 case SPRN_SRR0:
296 vcpu->arch.srr0 = vcpu->arch.gpr[rs]; break; 305 vcpu->arch.srr0 = kvmppc_get_gpr(vcpu, rs); break;
297 case SPRN_SRR1: 306 case SPRN_SRR1:
298 vcpu->arch.srr1 = vcpu->arch.gpr[rs]; break; 307 vcpu->arch.srr1 = kvmppc_get_gpr(vcpu, rs); break;
299 308
300 /* XXX We need to context-switch the timebase for 309 /* XXX We need to context-switch the timebase for
301 * watchdog and FIT. */ 310 * watchdog and FIT. */
@@ -305,18 +314,18 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
305 case SPRN_MSSSR0: break; 314 case SPRN_MSSSR0: break;
306 315
307 case SPRN_DEC: 316 case SPRN_DEC:
308 vcpu->arch.dec = vcpu->arch.gpr[rs]; 317 vcpu->arch.dec = kvmppc_get_gpr(vcpu, rs);
309 kvmppc_emulate_dec(vcpu); 318 kvmppc_emulate_dec(vcpu);
310 break; 319 break;
311 320
312 case SPRN_SPRG0: 321 case SPRN_SPRG0:
313 vcpu->arch.sprg0 = vcpu->arch.gpr[rs]; break; 322 vcpu->arch.sprg0 = kvmppc_get_gpr(vcpu, rs); break;
314 case SPRN_SPRG1: 323 case SPRN_SPRG1:
315 vcpu->arch.sprg1 = vcpu->arch.gpr[rs]; break; 324 vcpu->arch.sprg1 = kvmppc_get_gpr(vcpu, rs); break;
316 case SPRN_SPRG2: 325 case SPRN_SPRG2:
317 vcpu->arch.sprg2 = vcpu->arch.gpr[rs]; break; 326 vcpu->arch.sprg2 = kvmppc_get_gpr(vcpu, rs); break;
318 case SPRN_SPRG3: 327 case SPRN_SPRG3:
319 vcpu->arch.sprg3 = vcpu->arch.gpr[rs]; break; 328 vcpu->arch.sprg3 = kvmppc_get_gpr(vcpu, rs); break;
320 329
321 default: 330 default:
322 emulated = kvmppc_core_emulate_mtspr(vcpu, sprn, rs); 331 emulated = kvmppc_core_emulate_mtspr(vcpu, sprn, rs);
@@ -348,7 +357,7 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
348 rb = get_rb(inst); 357 rb = get_rb(inst);
349 358
350 emulated = kvmppc_handle_store(run, vcpu, 359 emulated = kvmppc_handle_store(run, vcpu,
351 vcpu->arch.gpr[rs], 360 kvmppc_get_gpr(vcpu, rs),
352 4, 0); 361 4, 0);
353 break; 362 break;
354 363
@@ -363,7 +372,7 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
363 rb = get_rb(inst); 372 rb = get_rb(inst);
364 373
365 emulated = kvmppc_handle_store(run, vcpu, 374 emulated = kvmppc_handle_store(run, vcpu,
366 vcpu->arch.gpr[rs], 375 kvmppc_get_gpr(vcpu, rs),
367 2, 0); 376 2, 0);
368 break; 377 break;
369 378
@@ -382,7 +391,7 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
382 ra = get_ra(inst); 391 ra = get_ra(inst);
383 rt = get_rt(inst); 392 rt = get_rt(inst);
384 emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1); 393 emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
385 vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed; 394 kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
386 break; 395 break;
387 396
388 case OP_LBZ: 397 case OP_LBZ:
@@ -394,35 +403,39 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
394 ra = get_ra(inst); 403 ra = get_ra(inst);
395 rt = get_rt(inst); 404 rt = get_rt(inst);
396 emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); 405 emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
397 vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed; 406 kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
398 break; 407 break;
399 408
400 case OP_STW: 409 case OP_STW:
401 rs = get_rs(inst); 410 rs = get_rs(inst);
402 emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs], 411 emulated = kvmppc_handle_store(run, vcpu,
412 kvmppc_get_gpr(vcpu, rs),
403 4, 1); 413 4, 1);
404 break; 414 break;
405 415
406 case OP_STWU: 416 case OP_STWU:
407 ra = get_ra(inst); 417 ra = get_ra(inst);
408 rs = get_rs(inst); 418 rs = get_rs(inst);
409 emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs], 419 emulated = kvmppc_handle_store(run, vcpu,
420 kvmppc_get_gpr(vcpu, rs),
410 4, 1); 421 4, 1);
411 vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed; 422 kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
412 break; 423 break;
413 424
414 case OP_STB: 425 case OP_STB:
415 rs = get_rs(inst); 426 rs = get_rs(inst);
416 emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs], 427 emulated = kvmppc_handle_store(run, vcpu,
428 kvmppc_get_gpr(vcpu, rs),
417 1, 1); 429 1, 1);
418 break; 430 break;
419 431
420 case OP_STBU: 432 case OP_STBU:
421 ra = get_ra(inst); 433 ra = get_ra(inst);
422 rs = get_rs(inst); 434 rs = get_rs(inst);
423 emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs], 435 emulated = kvmppc_handle_store(run, vcpu,
436 kvmppc_get_gpr(vcpu, rs),
424 1, 1); 437 1, 1);
425 vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed; 438 kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
426 break; 439 break;
427 440
428 case OP_LHZ: 441 case OP_LHZ:
@@ -434,21 +447,23 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
434 ra = get_ra(inst); 447 ra = get_ra(inst);
435 rt = get_rt(inst); 448 rt = get_rt(inst);
436 emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); 449 emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
437 vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed; 450 kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
438 break; 451 break;
439 452
440 case OP_STH: 453 case OP_STH:
441 rs = get_rs(inst); 454 rs = get_rs(inst);
442 emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs], 455 emulated = kvmppc_handle_store(run, vcpu,
456 kvmppc_get_gpr(vcpu, rs),
443 2, 1); 457 2, 1);
444 break; 458 break;
445 459
446 case OP_STHU: 460 case OP_STHU:
447 ra = get_ra(inst); 461 ra = get_ra(inst);
448 rs = get_rs(inst); 462 rs = get_rs(inst);
449 emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs], 463 emulated = kvmppc_handle_store(run, vcpu,
464 kvmppc_get_gpr(vcpu, rs),
450 2, 1); 465 2, 1);
451 vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed; 466 kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
452 break; 467 break;
453 468
454 default: 469 default:
@@ -461,6 +476,7 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
461 advance = 0; 476 advance = 0;
462 printk(KERN_ERR "Couldn't emulate instruction 0x%08x " 477 printk(KERN_ERR "Couldn't emulate instruction 0x%08x "
463 "(op %d xop %d)\n", inst, get_op(inst), get_xop(inst)); 478 "(op %d xop %d)\n", inst, get_op(inst), get_xop(inst));
479 kvmppc_core_queue_program(vcpu, 0);
464 } 480 }
465 } 481 }
466 482
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index f06cf93b178e..51aedd7f16bc 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -137,6 +137,7 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
137{ 137{
138 kvmppc_free_vcpus(kvm); 138 kvmppc_free_vcpus(kvm);
139 kvm_free_physmem(kvm); 139 kvm_free_physmem(kvm);
140 cleanup_srcu_struct(&kvm->srcu);
140 kfree(kvm); 141 kfree(kvm);
141} 142}
142 143
@@ -165,14 +166,24 @@ long kvm_arch_dev_ioctl(struct file *filp,
165 return -EINVAL; 166 return -EINVAL;
166} 167}
167 168
168int kvm_arch_set_memory_region(struct kvm *kvm, 169int kvm_arch_prepare_memory_region(struct kvm *kvm,
169 struct kvm_userspace_memory_region *mem, 170 struct kvm_memory_slot *memslot,
170 struct kvm_memory_slot old, 171 struct kvm_memory_slot old,
171 int user_alloc) 172 struct kvm_userspace_memory_region *mem,
173 int user_alloc)
172{ 174{
173 return 0; 175 return 0;
174} 176}
175 177
178void kvm_arch_commit_memory_region(struct kvm *kvm,
179 struct kvm_userspace_memory_region *mem,
180 struct kvm_memory_slot old,
181 int user_alloc)
182{
183 return;
184}
185
186
176void kvm_arch_flush_shadow(struct kvm *kvm) 187void kvm_arch_flush_shadow(struct kvm *kvm)
177{ 188{
178} 189}
@@ -260,34 +271,35 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
260static void kvmppc_complete_dcr_load(struct kvm_vcpu *vcpu, 271static void kvmppc_complete_dcr_load(struct kvm_vcpu *vcpu,
261 struct kvm_run *run) 272 struct kvm_run *run)
262{ 273{
263 ulong *gpr = &vcpu->arch.gpr[vcpu->arch.io_gpr]; 274 kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, run->dcr.data);
264 *gpr = run->dcr.data;
265} 275}
266 276
267static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu, 277static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
268 struct kvm_run *run) 278 struct kvm_run *run)
269{ 279{
270 ulong *gpr = &vcpu->arch.gpr[vcpu->arch.io_gpr]; 280 ulong gpr;
271 281
272 if (run->mmio.len > sizeof(*gpr)) { 282 if (run->mmio.len > sizeof(gpr)) {
273 printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len); 283 printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len);
274 return; 284 return;
275 } 285 }
276 286
277 if (vcpu->arch.mmio_is_bigendian) { 287 if (vcpu->arch.mmio_is_bigendian) {
278 switch (run->mmio.len) { 288 switch (run->mmio.len) {
279 case 4: *gpr = *(u32 *)run->mmio.data; break; 289 case 4: gpr = *(u32 *)run->mmio.data; break;
280 case 2: *gpr = *(u16 *)run->mmio.data; break; 290 case 2: gpr = *(u16 *)run->mmio.data; break;
281 case 1: *gpr = *(u8 *)run->mmio.data; break; 291 case 1: gpr = *(u8 *)run->mmio.data; break;
282 } 292 }
283 } else { 293 } else {
284 /* Convert BE data from userland back to LE. */ 294 /* Convert BE data from userland back to LE. */
285 switch (run->mmio.len) { 295 switch (run->mmio.len) {
286 case 4: *gpr = ld_le32((u32 *)run->mmio.data); break; 296 case 4: gpr = ld_le32((u32 *)run->mmio.data); break;
287 case 2: *gpr = ld_le16((u16 *)run->mmio.data); break; 297 case 2: gpr = ld_le16((u16 *)run->mmio.data); break;
288 case 1: *gpr = *(u8 *)run->mmio.data; break; 298 case 1: gpr = *(u8 *)run->mmio.data; break;
289 } 299 }
290 } 300 }
301
302 kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
291} 303}
292 304
293int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu, 305int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
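kvmppc_complete_mmio_load() now assembles the value in a local variable and commits it once via kvmppc_set_gpr(), instead of writing through a pointer into the register array. On big-endian powerpc a plain dereference already yields host order, which is why only the little-endian case needs the ld_le*() swaps above. A hedged, host-independent sketch of the same conversion using the generic byteorder helpers (an illustration, not the kernel's actual helper):

        static ulong mmio_to_host(const void *data, int len, int is_bigendian)
        {
                switch (len) {
                case 4: return is_bigendian ? be32_to_cpu(*(__be32 *)data)
                                            : le32_to_cpu(*(__le32 *)data);
                case 2: return is_bigendian ? be16_to_cpu(*(__be16 *)data)
                                            : le16_to_cpu(*(__le16 *)data);
                case 1: return *(const u8 *)data;       /* no byte order for 1 byte */
                }
                return 0;       /* callers reject other lengths first */
        }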
diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c
index 341aff2687a5..cd128b07beda 100644
--- a/arch/s390/hypfs/inode.c
+++ b/arch/s390/hypfs/inode.c
@@ -288,46 +288,30 @@ static int hypfs_fill_super(struct super_block *sb, void *data, int silent)
288 sb->s_blocksize_bits = PAGE_CACHE_SHIFT; 288 sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
289 sb->s_magic = HYPFS_MAGIC; 289 sb->s_magic = HYPFS_MAGIC;
290 sb->s_op = &hypfs_s_ops; 290 sb->s_op = &hypfs_s_ops;
291 if (hypfs_parse_options(data, sb)) { 291 if (hypfs_parse_options(data, sb))
292 rc = -EINVAL; 292 return -EINVAL;
293 goto err_alloc;
294 }
295 root_inode = hypfs_make_inode(sb, S_IFDIR | 0755); 293 root_inode = hypfs_make_inode(sb, S_IFDIR | 0755);
296 if (!root_inode) { 294 if (!root_inode)
297 rc = -ENOMEM; 295 return -ENOMEM;
298 goto err_alloc;
299 }
300 root_inode->i_op = &simple_dir_inode_operations; 296 root_inode->i_op = &simple_dir_inode_operations;
301 root_inode->i_fop = &simple_dir_operations; 297 root_inode->i_fop = &simple_dir_operations;
302 root_dentry = d_alloc_root(root_inode); 298 sb->s_root = root_dentry = d_alloc_root(root_inode);
303 if (!root_dentry) { 299 if (!root_dentry) {
304 iput(root_inode); 300 iput(root_inode);
305 rc = -ENOMEM; 301 return -ENOMEM;
306 goto err_alloc;
307 } 302 }
308 if (MACHINE_IS_VM) 303 if (MACHINE_IS_VM)
309 rc = hypfs_vm_create_files(sb, root_dentry); 304 rc = hypfs_vm_create_files(sb, root_dentry);
310 else 305 else
311 rc = hypfs_diag_create_files(sb, root_dentry); 306 rc = hypfs_diag_create_files(sb, root_dentry);
312 if (rc) 307 if (rc)
313 goto err_tree; 308 return rc;
314 sbi->update_file = hypfs_create_update_file(sb, root_dentry); 309 sbi->update_file = hypfs_create_update_file(sb, root_dentry);
315 if (IS_ERR(sbi->update_file)) { 310 if (IS_ERR(sbi->update_file))
316 rc = PTR_ERR(sbi->update_file); 311 return PTR_ERR(sbi->update_file);
317 goto err_tree;
318 }
319 hypfs_update_update(sb); 312 hypfs_update_update(sb);
320 sb->s_root = root_dentry;
321 pr_info("Hypervisor filesystem mounted\n"); 313 pr_info("Hypervisor filesystem mounted\n");
322 return 0; 314 return 0;
323
324err_tree:
325 hypfs_delete_tree(root_dentry);
326 d_genocide(root_dentry);
327 dput(root_dentry);
328err_alloc:
329 kfree(sbi);
330 return rc;
331} 315}
332 316
333static int hypfs_get_super(struct file_system_type *fst, int flags, 317static int hypfs_get_super(struct file_system_type *fst, int flags,
@@ -340,12 +324,12 @@ static void hypfs_kill_super(struct super_block *sb)
340{ 324{
341 struct hypfs_sb_info *sb_info = sb->s_fs_info; 325 struct hypfs_sb_info *sb_info = sb->s_fs_info;
342 326
343 if (sb->s_root) { 327 if (sb->s_root)
344 hypfs_delete_tree(sb->s_root); 328 hypfs_delete_tree(sb->s_root);
329 if (sb_info->update_file)
345 hypfs_remove(sb_info->update_file); 330 hypfs_remove(sb_info->update_file);
346 kfree(sb->s_fs_info); 331 kfree(sb->s_fs_info);
347 sb->s_fs_info = NULL; 332 sb->s_fs_info = NULL;
348 }
349 kill_litter_super(sb); 333 kill_litter_super(sb);
350} 334}
351 335
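The hypfs_fill_super() rewrite above drops the err_tree/err_alloc unwinding: each failure now returns directly, and cleanup is left to hypfs_kill_super(), which the VFS invokes even when fill_super fails. That only works because sb->s_root is assigned as soon as the root dentry exists and kill_super gained NULL checks for the partially built state. A hedged sketch of the idiom, with hypothetical example_* helpers:

        static int example_fill_super(struct super_block *sb, void *data, int silent)
        {
                struct inode *root = example_make_inode(sb, S_IFDIR | 0755);

                if (!root)
                        return -ENOMEM;                 /* ->kill_sb() cleans up */
                sb->s_root = d_alloc_root(root);        /* publish before any failure */
                if (!sb->s_root) {
                        iput(root);
                        return -ENOMEM;
                }
                return example_create_files(sb);        /* partial tree freed by ->kill_sb() */
        }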
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 3fa0a10e4668..49292869a5cd 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -242,6 +242,7 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
242 kvm_free_physmem(kvm); 242 kvm_free_physmem(kvm);
243 free_page((unsigned long)(kvm->arch.sca)); 243 free_page((unsigned long)(kvm->arch.sca));
244 debug_unregister(kvm->arch.dbf); 244 debug_unregister(kvm->arch.dbf);
245 cleanup_srcu_struct(&kvm->srcu);
245 kfree(kvm); 246 kfree(kvm);
246} 247}
247 248
@@ -690,14 +691,12 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
690} 691}
691 692
692/* Section: memory related */ 693/* Section: memory related */
693int kvm_arch_set_memory_region(struct kvm *kvm, 694int kvm_arch_prepare_memory_region(struct kvm *kvm,
694 struct kvm_userspace_memory_region *mem, 695 struct kvm_memory_slot *memslot,
695 struct kvm_memory_slot old, 696 struct kvm_memory_slot old,
696 int user_alloc) 697 struct kvm_userspace_memory_region *mem,
698 int user_alloc)
697{ 699{
698 int i;
699 struct kvm_vcpu *vcpu;
700
701 /* A few sanity checks. We can have exactly one memory slot which has 700 /* A few sanity checks. We can have exactly one memory slot which has
702 to start at guest virtual zero and which has to be located at a 701 to start at guest virtual zero and which has to be located at a
703 page boundary in userland and which has to end at a page boundary. 702 page boundary in userland and which has to end at a page boundary.
@@ -720,14 +719,23 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
720 if (!user_alloc) 719 if (!user_alloc)
721 return -EINVAL; 720 return -EINVAL;
722 721
722 return 0;
723}
724
725void kvm_arch_commit_memory_region(struct kvm *kvm,
726 struct kvm_userspace_memory_region *mem,
727 struct kvm_memory_slot old,
728 int user_alloc)
729{
730 int i;
731 struct kvm_vcpu *vcpu;
732
723 /* request update of sie control block for all available vcpus */ 733 /* request update of sie control block for all available vcpus */
724 kvm_for_each_vcpu(i, vcpu, kvm) { 734 kvm_for_each_vcpu(i, vcpu, kvm) {
725 if (test_and_set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests)) 735 if (test_and_set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
726 continue; 736 continue;
727 kvm_s390_inject_sigp_stop(vcpu, ACTION_RELOADVCPU_ON_STOP); 737 kvm_s390_inject_sigp_stop(vcpu, ACTION_RELOADVCPU_ON_STOP);
728 } 738 }
729
730 return 0;
731} 739}
732 740
733void kvm_arch_flush_shadow(struct kvm *kvm) 741void kvm_arch_flush_shadow(struct kvm *kvm)
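Both the powerpc and s390 hunks above split kvm_arch_set_memory_region() into a prepare phase (validation, may fail, runs before the new slot array becomes visible) and a commit phase (side effects such as kicking vcpus, runs afterwards, must not fail). A hedged sketch of how the generic slot-update path presumably drives the pair:

        r = kvm_arch_prepare_memory_region(kvm, memslot, old, mem, user_alloc);
        if (r)
                return r;       /* nothing has been published yet */

        /* ...swap in the new memslots array and synchronize_srcu()... */

        kvm_arch_commit_memory_region(kvm, mem, old, user_alloc);
                                /* too late to fail: readers may already
                                 * be seeing the new slots */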
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
index 06cce8285ba0..60f09ab3672c 100644
--- a/arch/s390/kvm/kvm-s390.h
+++ b/arch/s390/kvm/kvm-s390.h
@@ -67,10 +67,14 @@ static inline long kvm_s390_vcpu_get_memsize(struct kvm_vcpu *vcpu)
67 67
68static inline void kvm_s390_vcpu_set_mem(struct kvm_vcpu *vcpu) 68static inline void kvm_s390_vcpu_set_mem(struct kvm_vcpu *vcpu)
69{ 69{
70 int idx;
70 struct kvm_memory_slot *mem; 71 struct kvm_memory_slot *mem;
72 struct kvm_memslots *memslots;
71 73
72 down_read(&vcpu->kvm->slots_lock); 74 idx = srcu_read_lock(&vcpu->kvm->srcu);
73 mem = &vcpu->kvm->memslots[0]; 75 memslots = rcu_dereference(vcpu->kvm->memslots);
76
77 mem = &memslots->memslots[0];
74 78
75 vcpu->arch.sie_block->gmsor = mem->userspace_addr; 79 vcpu->arch.sie_block->gmsor = mem->userspace_addr;
76 vcpu->arch.sie_block->gmslm = 80 vcpu->arch.sie_block->gmslm =
@@ -78,7 +82,7 @@ static inline void kvm_s390_vcpu_set_mem(struct kvm_vcpu *vcpu)
78 (mem->npages << PAGE_SHIFT) + 82 (mem->npages << PAGE_SHIFT) +
79 VIRTIODESCSPACE - 1ul; 83 VIRTIODESCSPACE - 1ul;
80 84
81 up_read(&vcpu->kvm->slots_lock); 85 srcu_read_unlock(&vcpu->kvm->srcu, idx);
82} 86}
83 87
84/* implemented in priv.c */ 88/* implemented in priv.c */
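kvm_s390_vcpu_set_mem() illustrates the reader side of the slots_lock-to-SRCU conversion: instead of holding a rwsem across the access, readers enter an SRCU read section, fetch the memslots pointer with rcu_dereference(), and exit; writers publish a new array and wait with synchronize_srcu() before freeing the old one. The generic read-side pattern:

        int idx;
        struct kvm_memslots *slots;

        idx = srcu_read_lock(&kvm->srcu);
        slots = rcu_dereference(kvm->memslots);
        /* slots->memslots[] stays live until the matching unlock */
        srcu_read_unlock(&kvm->srcu, idx);

This is also why both kvm_arch_destroy_vm() hunks add cleanup_srcu_struct(&kvm->srcu): the per-VM SRCU state now has to be torn down explicitly.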
diff --git a/arch/sparc/configs/sparc32_defconfig b/arch/sparc/configs/sparc32_defconfig
index 99a1f191497b..6a8d078070ca 100644
--- a/arch/sparc/configs/sparc32_defconfig
+++ b/arch/sparc/configs/sparc32_defconfig
@@ -1,7 +1,7 @@
1# 1#
2# Automatically generated make config: don't edit 2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.33-rc2 3# Linux kernel version: 2.6.33
4# Mon Jan 11 23:20:31 2010 4# Wed Mar 3 02:52:23 2010
5# 5#
6# CONFIG_64BIT is not set 6# CONFIG_64BIT is not set
7CONFIG_SPARC=y 7CONFIG_SPARC=y
@@ -9,6 +9,8 @@ CONFIG_SPARC32=y
9# CONFIG_SPARC64 is not set 9# CONFIG_SPARC64 is not set
10CONFIG_ARCH_DEFCONFIG="arch/sparc/configs/sparc32_defconfig" 10CONFIG_ARCH_DEFCONFIG="arch/sparc/configs/sparc32_defconfig"
11CONFIG_BITS=32 11CONFIG_BITS=32
12CONFIG_GENERIC_TIME=y
13CONFIG_ARCH_USES_GETTIMEOFFSET=y
12CONFIG_AUDIT_ARCH=y 14CONFIG_AUDIT_ARCH=y
13CONFIG_MMU=y 15CONFIG_MMU=y
14CONFIG_HIGHMEM=y 16CONFIG_HIGHMEM=y
@@ -48,11 +50,6 @@ CONFIG_RCU_FANOUT=32
48# CONFIG_TREE_RCU_TRACE is not set 50# CONFIG_TREE_RCU_TRACE is not set
49# CONFIG_IKCONFIG is not set 51# CONFIG_IKCONFIG is not set
50CONFIG_LOG_BUF_SHIFT=14 52CONFIG_LOG_BUF_SHIFT=14
51CONFIG_GROUP_SCHED=y
52CONFIG_FAIR_GROUP_SCHED=y
53CONFIG_RT_GROUP_SCHED=y
54CONFIG_USER_SCHED=y
55# CONFIG_CGROUP_SCHED is not set
56# CONFIG_CGROUPS is not set 53# CONFIG_CGROUPS is not set
57CONFIG_SYSFS_DEPRECATED=y 54CONFIG_SYSFS_DEPRECATED=y
58CONFIG_SYSFS_DEPRECATED_V2=y 55CONFIG_SYSFS_DEPRECATED_V2=y
@@ -68,6 +65,7 @@ CONFIG_INITRAMFS_SOURCE=""
68CONFIG_RD_GZIP=y 65CONFIG_RD_GZIP=y
69CONFIG_RD_BZIP2=y 66CONFIG_RD_BZIP2=y
70CONFIG_RD_LZMA=y 67CONFIG_RD_LZMA=y
68CONFIG_RD_LZO=y
71# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set 69# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
72CONFIG_SYSCTL=y 70CONFIG_SYSCTL=y
73CONFIG_ANON_INODES=y 71CONFIG_ANON_INODES=y
@@ -211,7 +209,6 @@ CONFIG_SBUSCHAR=y
211CONFIG_PCI=y 209CONFIG_PCI=y
212CONFIG_PCI_SYSCALL=y 210CONFIG_PCI_SYSCALL=y
213# CONFIG_ARCH_SUPPORTS_MSI is not set 211# CONFIG_ARCH_SUPPORTS_MSI is not set
214CONFIG_PCI_LEGACY=y
215# CONFIG_PCI_DEBUG is not set 212# CONFIG_PCI_DEBUG is not set
216# CONFIG_PCI_STUB is not set 213# CONFIG_PCI_STUB is not set
217# CONFIG_PCI_IOV is not set 214# CONFIG_PCI_IOV is not set
@@ -232,7 +229,6 @@ CONFIG_NET=y
232# Networking options 229# Networking options
233# 230#
234CONFIG_PACKET=y 231CONFIG_PACKET=y
235# CONFIG_PACKET_MMAP is not set
236CONFIG_UNIX=y 232CONFIG_UNIX=y
237CONFIG_XFRM=y 233CONFIG_XFRM=y
238CONFIG_XFRM_USER=m 234CONFIG_XFRM_USER=m
@@ -379,11 +375,13 @@ CONFIG_MISC_DEVICES=y
379# CONFIG_TIFM_CORE is not set 375# CONFIG_TIFM_CORE is not set
380# CONFIG_ENCLOSURE_SERVICES is not set 376# CONFIG_ENCLOSURE_SERVICES is not set
381# CONFIG_HP_ILO is not set 377# CONFIG_HP_ILO is not set
378# CONFIG_TI_DAC7512 is not set
382# CONFIG_C2PORT is not set 379# CONFIG_C2PORT is not set
383 380
384# 381#
385# EEPROM support 382# EEPROM support
386# 383#
384# CONFIG_EEPROM_AT25 is not set
387# CONFIG_EEPROM_93CX6 is not set 385# CONFIG_EEPROM_93CX6 is not set
388# CONFIG_CB710_CORE is not set 386# CONFIG_CB710_CORE is not set
389CONFIG_HAVE_IDE=y 387CONFIG_HAVE_IDE=y
@@ -507,7 +505,9 @@ CONFIG_SUNQE=m
507# CONFIG_SUNGEM is not set 505# CONFIG_SUNGEM is not set
508# CONFIG_CASSINI is not set 506# CONFIG_CASSINI is not set
509# CONFIG_NET_VENDOR_3COM is not set 507# CONFIG_NET_VENDOR_3COM is not set
508# CONFIG_ENC28J60 is not set
510# CONFIG_ETHOC is not set 509# CONFIG_ETHOC is not set
510# CONFIG_GRETH is not set
511# CONFIG_DNET is not set 511# CONFIG_DNET is not set
512# CONFIG_NET_TULIP is not set 512# CONFIG_NET_TULIP is not set
513# CONFIG_HP100 is not set 513# CONFIG_HP100 is not set
@@ -521,6 +521,7 @@ CONFIG_SUNQE=m
521# CONFIG_NET_PCI is not set 521# CONFIG_NET_PCI is not set
522# CONFIG_B44 is not set 522# CONFIG_B44 is not set
523# CONFIG_KS8842 is not set 523# CONFIG_KS8842 is not set
524# CONFIG_KS8851 is not set
524# CONFIG_KS8851_MLL is not set 525# CONFIG_KS8851_MLL is not set
525# CONFIG_ATL2 is not set 526# CONFIG_ATL2 is not set
526CONFIG_NETDEV_1000=y 527CONFIG_NETDEV_1000=y
@@ -563,6 +564,7 @@ CONFIG_CHELSIO_T3_DEPENDS=y
563# CONFIG_MLX4_CORE is not set 564# CONFIG_MLX4_CORE is not set
564# CONFIG_TEHUTI is not set 565# CONFIG_TEHUTI is not set
565# CONFIG_BNX2X is not set 566# CONFIG_BNX2X is not set
567# CONFIG_QLCNIC is not set
566# CONFIG_QLGE is not set 568# CONFIG_QLGE is not set
567# CONFIG_SFC is not set 569# CONFIG_SFC is not set
568# CONFIG_BE2NET is not set 570# CONFIG_BE2NET is not set
@@ -665,6 +667,7 @@ CONFIG_DEVKMEM=y
665# 667#
666# Non-8250 serial port support 668# Non-8250 serial port support
667# 669#
670# CONFIG_SERIAL_MAX3100 is not set
668CONFIG_SERIAL_SUNCORE=y 671CONFIG_SERIAL_SUNCORE=y
669CONFIG_SERIAL_SUNZILOG=y 672CONFIG_SERIAL_SUNZILOG=y
670CONFIG_SERIAL_SUNZILOG_CONSOLE=y 673CONFIG_SERIAL_SUNZILOG_CONSOLE=y
@@ -689,7 +692,23 @@ CONFIG_HW_RANDOM=m
689# CONFIG_TCG_TPM is not set 692# CONFIG_TCG_TPM is not set
690CONFIG_DEVPORT=y 693CONFIG_DEVPORT=y
691# CONFIG_I2C is not set 694# CONFIG_I2C is not set
692# CONFIG_SPI is not set 695CONFIG_SPI=y
696# CONFIG_SPI_DEBUG is not set
697CONFIG_SPI_MASTER=y
698
699#
700# SPI Master Controller Drivers
701#
702CONFIG_SPI_BITBANG=m
703CONFIG_SPI_XILINX=m
704CONFIG_SPI_XILINX_PLTFM=m
705# CONFIG_SPI_DESIGNWARE is not set
706
707#
708# SPI Protocol Masters
709#
710# CONFIG_SPI_SPIDEV is not set
711# CONFIG_SPI_TLE62X0 is not set
693 712
694# 713#
695# PPS support 714# PPS support
@@ -706,10 +725,13 @@ CONFIG_HWMON=y
706# 725#
707# Native drivers 726# Native drivers
708# 727#
728# CONFIG_SENSORS_ADCXX is not set
709# CONFIG_SENSORS_I5K_AMB is not set 729# CONFIG_SENSORS_I5K_AMB is not set
710# CONFIG_SENSORS_F71805F is not set 730# CONFIG_SENSORS_F71805F is not set
711# CONFIG_SENSORS_F71882FG is not set 731# CONFIG_SENSORS_F71882FG is not set
712# CONFIG_SENSORS_IT87 is not set 732# CONFIG_SENSORS_IT87 is not set
733# CONFIG_SENSORS_LM70 is not set
734# CONFIG_SENSORS_MAX1111 is not set
713# CONFIG_SENSORS_PC87360 is not set 735# CONFIG_SENSORS_PC87360 is not set
714# CONFIG_SENSORS_PC87427 is not set 736# CONFIG_SENSORS_PC87427 is not set
715# CONFIG_SENSORS_SIS5595 is not set 737# CONFIG_SENSORS_SIS5595 is not set
@@ -720,6 +742,7 @@ CONFIG_HWMON=y
720# CONFIG_SENSORS_VT8231 is not set 742# CONFIG_SENSORS_VT8231 is not set
721# CONFIG_SENSORS_W83627HF is not set 743# CONFIG_SENSORS_W83627HF is not set
722# CONFIG_SENSORS_W83627EHF is not set 744# CONFIG_SENSORS_W83627EHF is not set
745# CONFIG_SENSORS_LIS3_SPI is not set
723# CONFIG_THERMAL is not set 746# CONFIG_THERMAL is not set
724# CONFIG_WATCHDOG is not set 747# CONFIG_WATCHDOG is not set
725CONFIG_SSB_POSSIBLE=y 748CONFIG_SSB_POSSIBLE=y
@@ -736,6 +759,8 @@ CONFIG_SSB_POSSIBLE=y
736# CONFIG_MFD_SM501 is not set 759# CONFIG_MFD_SM501 is not set
737# CONFIG_HTC_PASIC3 is not set 760# CONFIG_HTC_PASIC3 is not set
738# CONFIG_MFD_TMIO is not set 761# CONFIG_MFD_TMIO is not set
762# CONFIG_MFD_MC13783 is not set
763# CONFIG_AB4500_CORE is not set
739# CONFIG_REGULATOR is not set 764# CONFIG_REGULATOR is not set
740# CONFIG_MEDIA_SUPPORT is not set 765# CONFIG_MEDIA_SUPPORT is not set
741 766
@@ -743,6 +768,7 @@ CONFIG_SSB_POSSIBLE=y
743# Graphics support 768# Graphics support
744# 769#
745CONFIG_VGA_ARB=y 770CONFIG_VGA_ARB=y
771CONFIG_VGA_ARB_MAX_GPUS=16
746# CONFIG_VGASTATE is not set 772# CONFIG_VGASTATE is not set
747# CONFIG_VIDEO_OUTPUT_CONTROL is not set 773# CONFIG_VIDEO_OUTPUT_CONTROL is not set
748# CONFIG_FB is not set 774# CONFIG_FB is not set
@@ -808,6 +834,14 @@ CONFIG_RTC_INTF_DEV=y
808# 834#
809# SPI RTC drivers 835# SPI RTC drivers
810# 836#
837# CONFIG_RTC_DRV_M41T94 is not set
838# CONFIG_RTC_DRV_DS1305 is not set
839# CONFIG_RTC_DRV_DS1390 is not set
840# CONFIG_RTC_DRV_MAX6902 is not set
841# CONFIG_RTC_DRV_R9701 is not set
842# CONFIG_RTC_DRV_RS5C348 is not set
843# CONFIG_RTC_DRV_DS3234 is not set
844# CONFIG_RTC_DRV_PCF2123 is not set
811 845
812# 846#
813# Platform RTC drivers 847# Platform RTC drivers
@@ -1180,9 +1214,11 @@ CONFIG_CRC32=y
1180CONFIG_LIBCRC32C=m 1214CONFIG_LIBCRC32C=m
1181CONFIG_ZLIB_INFLATE=y 1215CONFIG_ZLIB_INFLATE=y
1182CONFIG_ZLIB_DEFLATE=y 1216CONFIG_ZLIB_DEFLATE=y
1217CONFIG_LZO_DECOMPRESS=y
1183CONFIG_DECOMPRESS_GZIP=y 1218CONFIG_DECOMPRESS_GZIP=y
1184CONFIG_DECOMPRESS_BZIP2=y 1219CONFIG_DECOMPRESS_BZIP2=y
1185CONFIG_DECOMPRESS_LZMA=y 1220CONFIG_DECOMPRESS_LZMA=y
1221CONFIG_DECOMPRESS_LZO=y
1186CONFIG_HAS_IOMEM=y 1222CONFIG_HAS_IOMEM=y
1187CONFIG_HAS_IOPORT=y 1223CONFIG_HAS_IOPORT=y
1188CONFIG_HAS_DMA=y 1224CONFIG_HAS_DMA=y
diff --git a/arch/sparc/configs/sparc64_defconfig b/arch/sparc/configs/sparc64_defconfig
index 41c5a56aa6f2..56e3163673e3 100644
--- a/arch/sparc/configs/sparc64_defconfig
+++ b/arch/sparc/configs/sparc64_defconfig
@@ -1,7 +1,7 @@
1# 1#
2# Automatically generated make config: don't edit 2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.33-rc2 3# Linux kernel version: 2.6.33
4# Wed Jan 20 16:31:47 2010 4# Wed Mar 3 02:54:29 2010
5# 5#
6CONFIG_64BIT=y 6CONFIG_64BIT=y
7CONFIG_SPARC=y 7CONFIG_SPARC=y
@@ -55,14 +55,10 @@ CONFIG_TREE_RCU=y
55# CONFIG_RCU_TRACE is not set 55# CONFIG_RCU_TRACE is not set
56CONFIG_RCU_FANOUT=64 56CONFIG_RCU_FANOUT=64
57# CONFIG_RCU_FANOUT_EXACT is not set 57# CONFIG_RCU_FANOUT_EXACT is not set
58# CONFIG_RCU_FAST_NO_HZ is not set
58# CONFIG_TREE_RCU_TRACE is not set 59# CONFIG_TREE_RCU_TRACE is not set
59# CONFIG_IKCONFIG is not set 60# CONFIG_IKCONFIG is not set
60CONFIG_LOG_BUF_SHIFT=18 61CONFIG_LOG_BUF_SHIFT=18
61CONFIG_GROUP_SCHED=y
62CONFIG_FAIR_GROUP_SCHED=y
63CONFIG_RT_GROUP_SCHED=y
64CONFIG_USER_SCHED=y
65# CONFIG_CGROUP_SCHED is not set
66# CONFIG_CGROUPS is not set 62# CONFIG_CGROUPS is not set
67# CONFIG_SYSFS_DEPRECATED_V2 is not set 63# CONFIG_SYSFS_DEPRECATED_V2 is not set
68CONFIG_RELAY=y 64CONFIG_RELAY=y
@@ -77,6 +73,7 @@ CONFIG_INITRAMFS_SOURCE=""
77CONFIG_RD_GZIP=y 73CONFIG_RD_GZIP=y
78CONFIG_RD_BZIP2=y 74CONFIG_RD_BZIP2=y
79CONFIG_RD_LZMA=y 75CONFIG_RD_LZMA=y
76CONFIG_RD_LZO=y
80CONFIG_CC_OPTIMIZE_FOR_SIZE=y 77CONFIG_CC_OPTIMIZE_FOR_SIZE=y
81CONFIG_SYSCTL=y 78CONFIG_SYSCTL=y
82CONFIG_ANON_INODES=y 79CONFIG_ANON_INODES=y
@@ -105,7 +102,6 @@ CONFIG_PERF_USE_VMALLOC=y
105# Kernel Performance Events And Counters 102# Kernel Performance Events And Counters
106# 103#
107CONFIG_PERF_EVENTS=y 104CONFIG_PERF_EVENTS=y
108CONFIG_EVENT_PROFILE=y
109CONFIG_PERF_COUNTERS=y 105CONFIG_PERF_COUNTERS=y
110# CONFIG_DEBUG_PERF_USE_VMALLOC is not set 106# CONFIG_DEBUG_PERF_USE_VMALLOC is not set
111CONFIG_VM_EVENT_COUNTERS=y 107CONFIG_VM_EVENT_COUNTERS=y
@@ -266,7 +262,6 @@ CONFIG_PCI_DOMAINS=y
266CONFIG_PCI_SYSCALL=y 262CONFIG_PCI_SYSCALL=y
267CONFIG_ARCH_SUPPORTS_MSI=y 263CONFIG_ARCH_SUPPORTS_MSI=y
268CONFIG_PCI_MSI=y 264CONFIG_PCI_MSI=y
269# CONFIG_PCI_LEGACY is not set
270# CONFIG_PCI_DEBUG is not set 265# CONFIG_PCI_DEBUG is not set
271# CONFIG_PCI_STUB is not set 266# CONFIG_PCI_STUB is not set
272# CONFIG_PCI_IOV is not set 267# CONFIG_PCI_IOV is not set
@@ -290,7 +285,6 @@ CONFIG_NET=y
290# Networking options 285# Networking options
291# 286#
292CONFIG_PACKET=y 287CONFIG_PACKET=y
293CONFIG_PACKET_MMAP=y
294CONFIG_UNIX=y 288CONFIG_UNIX=y
295CONFIG_XFRM=y 289CONFIG_XFRM=y
296CONFIG_XFRM_USER=m 290CONFIG_XFRM_USER=m
@@ -425,10 +419,6 @@ CONFIG_BLK_DEV=y
425# CONFIG_BLK_DEV_COW_COMMON is not set 419# CONFIG_BLK_DEV_COW_COMMON is not set
426CONFIG_BLK_DEV_LOOP=m 420CONFIG_BLK_DEV_LOOP=m
427CONFIG_BLK_DEV_CRYPTOLOOP=m 421CONFIG_BLK_DEV_CRYPTOLOOP=m
428
429#
430# DRBD disabled because PROC_FS, INET or CONNECTOR not selected
431#
432# CONFIG_BLK_DEV_DRBD is not set 422# CONFIG_BLK_DEV_DRBD is not set
433CONFIG_BLK_DEV_NBD=m 423CONFIG_BLK_DEV_NBD=m
434# CONFIG_BLK_DEV_SX8 is not set 424# CONFIG_BLK_DEV_SX8 is not set
@@ -677,6 +667,7 @@ CONFIG_SUNGEM=m
677CONFIG_SUNVNET=m 667CONFIG_SUNVNET=m
678# CONFIG_NET_VENDOR_3COM is not set 668# CONFIG_NET_VENDOR_3COM is not set
679# CONFIG_ETHOC is not set 669# CONFIG_ETHOC is not set
670# CONFIG_GRETH is not set
680# CONFIG_DNET is not set 671# CONFIG_DNET is not set
681# CONFIG_NET_TULIP is not set 672# CONFIG_NET_TULIP is not set
682# CONFIG_HP100 is not set 673# CONFIG_HP100 is not set
@@ -691,6 +682,7 @@ CONFIG_NET_PCI=y
691# CONFIG_PCNET32 is not set 682# CONFIG_PCNET32 is not set
692# CONFIG_AMD8111_ETH is not set 683# CONFIG_AMD8111_ETH is not set
693# CONFIG_ADAPTEC_STARFIRE is not set 684# CONFIG_ADAPTEC_STARFIRE is not set
685# CONFIG_KSZ884X_PCI is not set
694# CONFIG_B44 is not set 686# CONFIG_B44 is not set
695# CONFIG_FORCEDETH is not set 687# CONFIG_FORCEDETH is not set
696# CONFIG_E100 is not set 688# CONFIG_E100 is not set
@@ -741,6 +733,7 @@ CONFIG_CHELSIO_T3_DEPENDS=y
741# CONFIG_CHELSIO_T3 is not set 733# CONFIG_CHELSIO_T3 is not set
742# CONFIG_ENIC is not set 734# CONFIG_ENIC is not set
743# CONFIG_IXGBE is not set 735# CONFIG_IXGBE is not set
736# CONFIG_IXGBEVF is not set
744# CONFIG_IXGB is not set 737# CONFIG_IXGB is not set
745# CONFIG_S2IO is not set 738# CONFIG_S2IO is not set
746# CONFIG_VXGE is not set 739# CONFIG_VXGE is not set
@@ -751,6 +744,7 @@ CONFIG_NIU=m
751# CONFIG_MLX4_CORE is not set 744# CONFIG_MLX4_CORE is not set
752# CONFIG_TEHUTI is not set 745# CONFIG_TEHUTI is not set
753# CONFIG_BNX2X is not set 746# CONFIG_BNX2X is not set
747# CONFIG_QLCNIC is not set
754# CONFIG_QLGE is not set 748# CONFIG_QLGE is not set
755# CONFIG_SFC is not set 749# CONFIG_SFC is not set
756# CONFIG_BE2NET is not set 750# CONFIG_BE2NET is not set
@@ -1028,6 +1022,7 @@ CONFIG_HWMON=y
1028# CONFIG_SENSORS_SMSC47M192 is not set 1022# CONFIG_SENSORS_SMSC47M192 is not set
1029# CONFIG_SENSORS_SMSC47B397 is not set 1023# CONFIG_SENSORS_SMSC47B397 is not set
1030# CONFIG_SENSORS_ADS7828 is not set 1024# CONFIG_SENSORS_ADS7828 is not set
1025# CONFIG_SENSORS_AMC6821 is not set
1031# CONFIG_SENSORS_THMC50 is not set 1026# CONFIG_SENSORS_THMC50 is not set
1032# CONFIG_SENSORS_TMP401 is not set 1027# CONFIG_SENSORS_TMP401 is not set
1033# CONFIG_SENSORS_TMP421 is not set 1028# CONFIG_SENSORS_TMP421 is not set
@@ -1076,6 +1071,7 @@ CONFIG_SSB_POSSIBLE=y
1076# Graphics support 1071# Graphics support
1077# 1072#
1078CONFIG_VGA_ARB=y 1073CONFIG_VGA_ARB=y
1074CONFIG_VGA_ARB_MAX_GPUS=16
1079# CONFIG_DRM is not set 1075# CONFIG_DRM is not set
1080# CONFIG_VGASTATE is not set 1076# CONFIG_VGASTATE is not set
1081# CONFIG_VIDEO_OUTPUT_CONTROL is not set 1077# CONFIG_VIDEO_OUTPUT_CONTROL is not set
@@ -1279,6 +1275,7 @@ CONFIG_SND_ALI5451=m
1279# CONFIG_SND_YMFPCI is not set 1275# CONFIG_SND_YMFPCI is not set
1280CONFIG_SND_USB=y 1276CONFIG_SND_USB=y
1281# CONFIG_SND_USB_AUDIO is not set 1277# CONFIG_SND_USB_AUDIO is not set
1278# CONFIG_SND_USB_UA101 is not set
1282# CONFIG_SND_USB_CAIAQ is not set 1279# CONFIG_SND_USB_CAIAQ is not set
1283CONFIG_SND_SPARC=y 1280CONFIG_SND_SPARC=y
1284# CONFIG_SND_SUN_AMD7930 is not set 1281# CONFIG_SND_SUN_AMD7930 is not set
@@ -1301,6 +1298,7 @@ CONFIG_USB_HIDDEV=y
1301# 1298#
1302# Special HID drivers 1299# Special HID drivers
1303# 1300#
1301# CONFIG_HID_3M_PCT is not set
1304CONFIG_HID_A4TECH=y 1302CONFIG_HID_A4TECH=y
1305CONFIG_HID_APPLE=y 1303CONFIG_HID_APPLE=y
1306CONFIG_HID_BELKIN=y 1304CONFIG_HID_BELKIN=y
@@ -1317,14 +1315,19 @@ CONFIG_HID_KENSINGTON=y
1317CONFIG_HID_LOGITECH=y 1315CONFIG_HID_LOGITECH=y
1318# CONFIG_LOGITECH_FF is not set 1316# CONFIG_LOGITECH_FF is not set
1319# CONFIG_LOGIRUMBLEPAD2_FF is not set 1317# CONFIG_LOGIRUMBLEPAD2_FF is not set
1318# CONFIG_LOGIG940_FF is not set
1320CONFIG_HID_MICROSOFT=y 1319CONFIG_HID_MICROSOFT=y
1320# CONFIG_HID_MOSART is not set
1321CONFIG_HID_MONTEREY=y 1321CONFIG_HID_MONTEREY=y
1322CONFIG_HID_NTRIG=y 1322CONFIG_HID_NTRIG=y
1323CONFIG_HID_ORTEK=y
1323CONFIG_HID_PANTHERLORD=y 1324CONFIG_HID_PANTHERLORD=y
1324# CONFIG_PANTHERLORD_FF is not set 1325# CONFIG_PANTHERLORD_FF is not set
1325CONFIG_HID_PETALYNX=y 1326CONFIG_HID_PETALYNX=y
1327# CONFIG_HID_QUANTA is not set
1326CONFIG_HID_SAMSUNG=y 1328CONFIG_HID_SAMSUNG=y
1327CONFIG_HID_SONY=y 1329CONFIG_HID_SONY=y
1330# CONFIG_HID_STANTUM is not set
1328CONFIG_HID_SUNPLUS=y 1331CONFIG_HID_SUNPLUS=y
1329CONFIG_HID_GREENASIA=y 1332CONFIG_HID_GREENASIA=y
1330# CONFIG_GREENASIA_FF is not set 1333# CONFIG_GREENASIA_FF is not set
@@ -1807,6 +1810,7 @@ CONFIG_CRYPTO_MANAGER=y
1807CONFIG_CRYPTO_MANAGER2=y 1810CONFIG_CRYPTO_MANAGER2=y
1808CONFIG_CRYPTO_GF128MUL=m 1811CONFIG_CRYPTO_GF128MUL=m
1809CONFIG_CRYPTO_NULL=m 1812CONFIG_CRYPTO_NULL=m
1813# CONFIG_CRYPTO_PCRYPT is not set
1810CONFIG_CRYPTO_WORKQUEUE=y 1814CONFIG_CRYPTO_WORKQUEUE=y
1811# CONFIG_CRYPTO_CRYPTD is not set 1815# CONFIG_CRYPTO_CRYPTD is not set
1812CONFIG_CRYPTO_AUTHENC=y 1816CONFIG_CRYPTO_AUTHENC=y
@@ -1904,9 +1908,11 @@ CONFIG_CRC32=y
1904CONFIG_LIBCRC32C=m 1908CONFIG_LIBCRC32C=m
1905CONFIG_ZLIB_INFLATE=y 1909CONFIG_ZLIB_INFLATE=y
1906CONFIG_ZLIB_DEFLATE=y 1910CONFIG_ZLIB_DEFLATE=y
1911CONFIG_LZO_DECOMPRESS=y
1907CONFIG_DECOMPRESS_GZIP=y 1912CONFIG_DECOMPRESS_GZIP=y
1908CONFIG_DECOMPRESS_BZIP2=y 1913CONFIG_DECOMPRESS_BZIP2=y
1909CONFIG_DECOMPRESS_LZMA=y 1914CONFIG_DECOMPRESS_LZMA=y
1915CONFIG_DECOMPRESS_LZO=y
1910CONFIG_HAS_IOMEM=y 1916CONFIG_HAS_IOMEM=y
1911CONFIG_HAS_IOPORT=y 1917CONFIG_HAS_IOPORT=y
1912CONFIG_HAS_DMA=y 1918CONFIG_HAS_DMA=y
diff --git a/arch/sparc/include/asm/io_32.h b/arch/sparc/include/asm/io_32.h
index 679c7504625a..2889574608db 100644
--- a/arch/sparc/include/asm/io_32.h
+++ b/arch/sparc/include/asm/io_32.h
@@ -249,10 +249,14 @@ extern void iounmap(volatile void __iomem *addr);
249 249
250#define ioread8(X) readb(X) 250#define ioread8(X) readb(X)
251#define ioread16(X) readw(X) 251#define ioread16(X) readw(X)
252#define ioread16be(X) __raw_readw(X)
252#define ioread32(X) readl(X) 253#define ioread32(X) readl(X)
254#define ioread32be(X) __raw_readl(X)
253#define iowrite8(val,X) writeb(val,X) 255#define iowrite8(val,X) writeb(val,X)
254#define iowrite16(val,X) writew(val,X) 256#define iowrite16(val,X) writew(val,X)
257#define iowrite16be(val,X) __raw_writew(val,X)
255#define iowrite32(val,X) writel(val,X) 258#define iowrite32(val,X) writel(val,X)
259#define iowrite32be(val,X) __raw_writel(val,X)
256 260
257static inline void ioread8_rep(void __iomem *port, void *buf, unsigned long count) 261static inline void ioread8_rep(void __iomem *port, void *buf, unsigned long count)
258{ 262{
diff --git a/arch/sparc/include/asm/io_64.h b/arch/sparc/include/asm/io_64.h
index 4aee21dc9c6f..9517d063c79c 100644
--- a/arch/sparc/include/asm/io_64.h
+++ b/arch/sparc/include/asm/io_64.h
@@ -468,10 +468,14 @@ static inline void iounmap(volatile void __iomem *addr)
468 468
469#define ioread8(X) readb(X) 469#define ioread8(X) readb(X)
470#define ioread16(X) readw(X) 470#define ioread16(X) readw(X)
471#define ioread16be(X) __raw_readw(X)
471#define ioread32(X) readl(X) 472#define ioread32(X) readl(X)
473#define ioread32be(X) __raw_readl(X)
472#define iowrite8(val,X) writeb(val,X) 474#define iowrite8(val,X) writeb(val,X)
473#define iowrite16(val,X) writew(val,X) 475#define iowrite16(val,X) writew(val,X)
476#define iowrite16be(val,X) __raw_writew(val,X)
474#define iowrite32(val,X) writel(val,X) 477#define iowrite32(val,X) writel(val,X)
478#define iowrite32be(val,X) __raw_writel(val,X)
475 479
476/* Create a virtual mapping cookie for an IO port range */ 480/* Create a virtual mapping cookie for an IO port range */
477extern void __iomem *ioport_map(unsigned long port, unsigned int nr); 481extern void __iomem *ioport_map(unsigned long port, unsigned int nr);
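Both sparc headers gain ioread16be/ioread32be and iowrite16be/iowrite32be, mapped to the __raw_ accessors: since sparc is big-endian, a big-endian device register needs no byte swap. A hedged usage sketch (the base address and register offset are hypothetical):

        void __iomem *regs = ioremap(dev_phys_base, 0x100);
        u32 status;

        status = ioread32be(regs + 0x04);       /* BE register: no swap on sparc */
        iowrite32be(status | 0x1, regs + 0x04);
        iounmap(regs);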
diff --git a/arch/sparc/include/asm/perfctr.h b/arch/sparc/include/asm/perfctr.h
index 836873002b75..8d8720a8770d 100644
--- a/arch/sparc/include/asm/perfctr.h
+++ b/arch/sparc/include/asm/perfctr.h
@@ -10,8 +10,8 @@
10 * from enumeration below. The meaning of further arguments 10 * from enumeration below. The meaning of further arguments
11 * are determined by the operation code. 11 * are determined by the operation code.
12 * 12 *
13 * int sys_perfctr(int opcode, unsigned long arg0, 13 * NOTE: This system call is no longer provided, use the perf_events
14 * unsigned long arg1, unsigned long arg2) 14 * infrastructure.
15 * 15 *
16 * Pointers which are passed by the user are pointers to 64-bit 16 * Pointers which are passed by the user are pointers to 64-bit
17 * integers. 17 * integers.
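With sys_perfctr() gone (the syscall table slots further down now point at sys_nis_syscall), counting moves to the perf_events interface the comment refers to. A minimal userspace sketch, assuming a kernel of this vintage where perf_event_open() still has no glibc wrapper:

        #include <linux/perf_event.h>
        #include <sys/ioctl.h>
        #include <sys/syscall.h>
        #include <unistd.h>
        #include <string.h>
        #include <stdint.h>
        #include <stdio.h>

        int main(void)
        {
                struct perf_event_attr attr;
                uint64_t count;
                int fd;

                memset(&attr, 0, sizeof(attr));
                attr.type = PERF_TYPE_HARDWARE;
                attr.size = sizeof(attr);
                attr.config = PERF_COUNT_HW_CPU_CYCLES;
                attr.disabled = 1;

                fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
                if (fd < 0)
                        return 1;
                ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
                /* ... workload to be measured ... */
                ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
                read(fd, &count, sizeof(count));
                printf("cycles: %llu\n", (unsigned long long)count);
                close(fd);
                return 0;
        }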
diff --git a/arch/sparc/include/asm/system_64.h b/arch/sparc/include/asm/system_64.h
index d47a98e66972..d24cfe16afc1 100644
--- a/arch/sparc/include/asm/system_64.h
+++ b/arch/sparc/include/asm/system_64.h
@@ -143,15 +143,7 @@ do { \
143 * and 2 stores in this critical code path. -DaveM 143 * and 2 stores in this critical code path. -DaveM
144 */ 144 */
145#define switch_to(prev, next, last) \ 145#define switch_to(prev, next, last) \
146do { if (test_thread_flag(TIF_PERFCTR)) { \ 146do { flush_tlb_pending(); \
147 unsigned long __tmp; \
148 read_pcr(__tmp); \
149 current_thread_info()->pcr_reg = __tmp; \
150 read_pic(__tmp); \
151 current_thread_info()->kernel_cntd0 += (unsigned int)(__tmp);\
152 current_thread_info()->kernel_cntd1 += ((__tmp) >> 32); \
153 } \
154 flush_tlb_pending(); \
155 save_and_clear_fpu(); \ 147 save_and_clear_fpu(); \
156 /* If you are tempted to conditionalize the following */ \ 148 /* If you are tempted to conditionalize the following */ \
157 /* so that ASI is only written if it changes, think again. */ \ 149 /* so that ASI is only written if it changes, think again. */ \
@@ -197,11 +189,6 @@ do { if (test_thread_flag(TIF_PERFCTR)) { \
197 "l1", "l2", "l3", "l4", "l5", "l6", "l7", \ 189 "l1", "l2", "l3", "l4", "l5", "l6", "l7", \
198 "i0", "i1", "i2", "i3", "i4", "i5", \ 190 "i0", "i1", "i2", "i3", "i4", "i5", \
199 "o0", "o1", "o2", "o3", "o4", "o5", "o7"); \ 191 "o0", "o1", "o2", "o3", "o4", "o5", "o7"); \
200 /* If you fuck with this, update ret_from_syscall code too. */ \
201 if (test_thread_flag(TIF_PERFCTR)) { \
202 write_pcr(current_thread_info()->pcr_reg); \
203 reset_pic(); \
204 } \
205} while(0) 192} while(0)
206 193
207static inline unsigned long xchg32(__volatile__ unsigned int *m, unsigned int val) 194static inline unsigned long xchg32(__volatile__ unsigned int *m, unsigned int val)
diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
index 39be9f256e5a..9e2d9447f2ad 100644
--- a/arch/sparc/include/asm/thread_info_64.h
+++ b/arch/sparc/include/asm/thread_info_64.h
@@ -58,11 +58,6 @@ struct thread_info {
58 unsigned long gsr[7]; 58 unsigned long gsr[7];
59 unsigned long xfsr[7]; 59 unsigned long xfsr[7];
60 60
61 __u64 __user *user_cntd0;
62 __u64 __user *user_cntd1;
63 __u64 kernel_cntd0, kernel_cntd1;
64 __u64 pcr_reg;
65
66 struct restart_block restart_block; 61 struct restart_block restart_block;
67 62
68 struct pt_regs *kern_una_regs; 63 struct pt_regs *kern_una_regs;
@@ -96,15 +91,10 @@ struct thread_info {
96#define TI_RWIN_SPTRS 0x000003c8 91#define TI_RWIN_SPTRS 0x000003c8
97#define TI_GSR 0x00000400 92#define TI_GSR 0x00000400
98#define TI_XFSR 0x00000438 93#define TI_XFSR 0x00000438
99#define TI_USER_CNTD0 0x00000470 94#define TI_RESTART_BLOCK 0x00000470
100#define TI_USER_CNTD1 0x00000478 95#define TI_KUNA_REGS 0x000004a0
101#define TI_KERN_CNTD0 0x00000480 96#define TI_KUNA_INSN 0x000004a8
102#define TI_KERN_CNTD1 0x00000488 97#define TI_FPREGS 0x000004c0
103#define TI_PCR 0x00000490
104#define TI_RESTART_BLOCK 0x00000498
105#define TI_KUNA_REGS 0x000004c8
106#define TI_KUNA_INSN 0x000004d0
107#define TI_FPREGS 0x00000500
108 98
109/* We embed this in the uppermost byte of thread_info->flags */ 99/* We embed this in the uppermost byte of thread_info->flags */
110#define FAULT_CODE_WRITE 0x01 /* Write access, implies D-TLB */ 100#define FAULT_CODE_WRITE 0x01 /* Write access, implies D-TLB */
@@ -199,7 +189,7 @@ register struct thread_info *current_thread_info_reg asm("g6");
199 * 189 *
200 * On trap return we need to test several values: 190 * On trap return we need to test several values:
201 * 191 *
202 * user: need_resched, notify_resume, sigpending, wsaved, perfctr 192 * user: need_resched, notify_resume, sigpending, wsaved
203 * kernel: fpdepth 193 * kernel: fpdepth
204 * 194 *
205 * So to check for work in the kernel case we simply load the fpdepth 195 * So to check for work in the kernel case we simply load the fpdepth
@@ -220,7 +210,7 @@ register struct thread_info *current_thread_info_reg asm("g6");
220#define TIF_NOTIFY_RESUME 1 /* callback before returning to user */ 210#define TIF_NOTIFY_RESUME 1 /* callback before returning to user */
221#define TIF_SIGPENDING 2 /* signal pending */ 211#define TIF_SIGPENDING 2 /* signal pending */
222#define TIF_NEED_RESCHED 3 /* rescheduling necessary */ 212#define TIF_NEED_RESCHED 3 /* rescheduling necessary */
223#define TIF_PERFCTR 4 /* performance counters active */ 213/* flag bit 4 is available */
224#define TIF_UNALIGNED 5 /* allowed to do unaligned accesses */ 214#define TIF_UNALIGNED 5 /* allowed to do unaligned accesses */
225/* flag bit 6 is available */ 215/* flag bit 6 is available */
226#define TIF_32BIT 7 /* 32-bit binary */ 216#define TIF_32BIT 7 /* 32-bit binary */
@@ -241,7 +231,6 @@ register struct thread_info *current_thread_info_reg asm("g6");
241#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME) 231#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
242#define _TIF_SIGPENDING (1<<TIF_SIGPENDING) 232#define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
243#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED) 233#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)
244#define _TIF_PERFCTR (1<<TIF_PERFCTR)
245#define _TIF_UNALIGNED (1<<TIF_UNALIGNED) 234#define _TIF_UNALIGNED (1<<TIF_UNALIGNED)
246#define _TIF_32BIT (1<<TIF_32BIT) 235#define _TIF_32BIT (1<<TIF_32BIT)
247#define _TIF_SECCOMP (1<<TIF_SECCOMP) 236#define _TIF_SECCOMP (1<<TIF_SECCOMP)
@@ -252,7 +241,7 @@ register struct thread_info *current_thread_info_reg asm("g6");
252 241
253#define _TIF_USER_WORK_MASK ((0xff << TI_FLAG_WSAVED_SHIFT) | \ 242#define _TIF_USER_WORK_MASK ((0xff << TI_FLAG_WSAVED_SHIFT) | \
254 _TIF_DO_NOTIFY_RESUME_MASK | \ 243 _TIF_DO_NOTIFY_RESUME_MASK | \
255 _TIF_NEED_RESCHED | _TIF_PERFCTR) 244 _TIF_NEED_RESCHED)
256#define _TIF_DO_NOTIFY_RESUME_MASK (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING) 245#define _TIF_DO_NOTIFY_RESUME_MASK (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING)
257 246
258/* 247/*
diff --git a/arch/sparc/kernel/entry.h b/arch/sparc/kernel/entry.h
index 4f53a2395ac6..c011b932bb17 100644
--- a/arch/sparc/kernel/entry.h
+++ b/arch/sparc/kernel/entry.h
@@ -48,7 +48,6 @@ extern void __init boot_cpu_id_too_large(int cpu);
48extern unsigned int dcache_parity_tl1_occurred; 48extern unsigned int dcache_parity_tl1_occurred;
49extern unsigned int icache_parity_tl1_occurred; 49extern unsigned int icache_parity_tl1_occurred;
50 50
51extern asmlinkage void update_perfctrs(void);
52extern asmlinkage void sparc_breakpoint(struct pt_regs *regs); 51extern asmlinkage void sparc_breakpoint(struct pt_regs *regs);
53extern void timer_interrupt(int irq, struct pt_regs *regs); 52extern void timer_interrupt(int irq, struct pt_regs *regs);
54 53
diff --git a/arch/sparc/kernel/nmi.c b/arch/sparc/kernel/nmi.c
index d242a7340541..b287b62c7ea3 100644
--- a/arch/sparc/kernel/nmi.c
+++ b/arch/sparc/kernel/nmi.c
@@ -21,7 +21,6 @@
21 21
22#include <asm/perf_event.h> 22#include <asm/perf_event.h>
23#include <asm/ptrace.h> 23#include <asm/ptrace.h>
24#include <asm/local.h>
25#include <asm/pcr.h> 24#include <asm/pcr.h>
26 25
27/* We don't have a real NMI on sparc64, but we can fake one 26/* We don't have a real NMI on sparc64, but we can fake one
@@ -113,13 +112,13 @@ notrace __kprobes void perfctr_irq(int irq, struct pt_regs *regs)
113 touched = 1; 112 touched = 1;
114 } 113 }
115 if (!touched && __get_cpu_var(last_irq_sum) == sum) { 114 if (!touched && __get_cpu_var(last_irq_sum) == sum) {
116 __this_cpu_inc(per_cpu_var(alert_counter)); 115 __this_cpu_inc(alert_counter);
117 if (__this_cpu_read(per_cpu_var(alert_counter)) == 30 * nmi_hz) 116 if (__this_cpu_read(alert_counter) == 30 * nmi_hz)
118 die_nmi("BUG: NMI Watchdog detected LOCKUP", 117 die_nmi("BUG: NMI Watchdog detected LOCKUP",
119 regs, panic_on_timeout); 118 regs, panic_on_timeout);
120 } else { 119 } else {
121 __get_cpu_var(last_irq_sum) = sum; 120 __get_cpu_var(last_irq_sum) = sum;
122 __this_cpu_write(per_cpu_var(alert_counter), 0); 121 __this_cpu_write(alert_counter, 0);
123 } 122 }
124 if (__get_cpu_var(wd_enabled)) { 123 if (__get_cpu_var(wd_enabled)) {
125 write_pic(picl_value(nmi_hz)); 124 write_pic(picl_value(nmi_hz));
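The nmi.c hunk reflects a percpu API cleanup: __this_cpu_inc() and friends now take the per-cpu variable name directly instead of wrapping it in per_cpu_var(), whose per_cpu__ name-mangling prefix also explains the per_cpu____cpu_data to __cpu_data symbol rename in rtrap_64.S below. Pattern sketch:

        static DEFINE_PER_CPU(unsigned int, alert_counter);

        __this_cpu_inc(alert_counter);  /* was: __this_cpu_inc(per_cpu_var(alert_counter)) */
        if (__this_cpu_read(alert_counter) == threshold)
                /* ... declare a lockup ... */;
        __this_cpu_write(alert_counter, 0);     /* reset once progress is seen */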
diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
index cb70476bd8f5..a5cf3864b31f 100644
--- a/arch/sparc/kernel/process_64.c
+++ b/arch/sparc/kernel/process_64.c
@@ -352,12 +352,6 @@ void exit_thread(void)
352 else 352 else
353 t->utraps[0]--; 353 t->utraps[0]--;
354 } 354 }
355
356 if (test_and_clear_thread_flag(TIF_PERFCTR)) {
357 t->user_cntd0 = t->user_cntd1 = NULL;
358 t->pcr_reg = 0;
359 write_pcr(0);
360 }
361} 355}
362 356
363void flush_thread(void) 357void flush_thread(void)
@@ -371,13 +365,6 @@ void flush_thread(void)
371 365
372 set_thread_wsaved(0); 366 set_thread_wsaved(0);
373 367
374 /* Turn off performance counters if on. */
375 if (test_and_clear_thread_flag(TIF_PERFCTR)) {
376 t->user_cntd0 = t->user_cntd1 = NULL;
377 t->pcr_reg = 0;
378 write_pcr(0);
379 }
380
381 /* Clear FPU register state. */ 368 /* Clear FPU register state. */
382 t->fpsaved[0] = 0; 369 t->fpsaved[0] = 0;
383 370
@@ -591,16 +578,6 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
591 t->kregs->u_regs[UREG_FP] = 578 t->kregs->u_regs[UREG_FP] =
592 ((unsigned long) child_sf) - STACK_BIAS; 579 ((unsigned long) child_sf) - STACK_BIAS;
593 580
594 /* Special case, if we are spawning a kernel thread from
595 * a userspace task (usermode helper, NFS or similar), we
596 * must disable performance counters in the child because
597 * the address space and protection realm are changing.
598 */
599 if (t->flags & _TIF_PERFCTR) {
600 t->user_cntd0 = t->user_cntd1 = NULL;
601 t->pcr_reg = 0;
602 t->flags &= ~_TIF_PERFCTR;
603 }
604 t->flags |= ((long)ASI_P << TI_FLAG_CURRENT_DS_SHIFT); 581 t->flags |= ((long)ASI_P << TI_FLAG_CURRENT_DS_SHIFT);
605 t->kregs->u_regs[UREG_G6] = (unsigned long) t; 582 t->kregs->u_regs[UREG_G6] = (unsigned long) t;
606 t->kregs->u_regs[UREG_G4] = (unsigned long) t->task; 583 t->kregs->u_regs[UREG_G4] = (unsigned long) t->task;
diff --git a/arch/sparc/kernel/rtrap_64.S b/arch/sparc/kernel/rtrap_64.S
index fd3cee4d117c..83f1873c6c13 100644
--- a/arch/sparc/kernel/rtrap_64.S
+++ b/arch/sparc/kernel/rtrap_64.S
@@ -65,48 +65,6 @@ __handle_user_windows:
65 ba,pt %xcc, __handle_user_windows_continue 65 ba,pt %xcc, __handle_user_windows_continue
66 66
67 andn %l1, %l4, %l1 67 andn %l1, %l4, %l1
68__handle_perfctrs:
69 call update_perfctrs
70 wrpr %g0, RTRAP_PSTATE, %pstate
71 wrpr %g0, RTRAP_PSTATE_IRQOFF, %pstate
72 ldub [%g6 + TI_WSAVED], %o2
73 brz,pt %o2, 1f
74 nop
75 /* Redo userwin+sched+sig checks */
76 call fault_in_user_windows
77
78 wrpr %g0, RTRAP_PSTATE, %pstate
79 wrpr %g0, RTRAP_PSTATE_IRQOFF, %pstate
80 ldx [%g6 + TI_FLAGS], %l0
81 andcc %l0, _TIF_NEED_RESCHED, %g0
82 be,pt %xcc, 1f
83
84 nop
85 call schedule
86 wrpr %g0, RTRAP_PSTATE, %pstate
87 wrpr %g0, RTRAP_PSTATE_IRQOFF, %pstate
88 ldx [%g6 + TI_FLAGS], %l0
891: andcc %l0, _TIF_DO_NOTIFY_RESUME_MASK, %g0
90
91 be,pt %xcc, __handle_perfctrs_continue
92 sethi %hi(TSTATE_PEF), %o0
93 mov %l5, %o1
94 add %sp, PTREGS_OFF, %o0
95 mov %l0, %o2
96 call do_notify_resume
97
98 wrpr %g0, RTRAP_PSTATE, %pstate
99 wrpr %g0, RTRAP_PSTATE_IRQOFF, %pstate
100 /* Signal delivery can modify pt_regs tstate, so we must
101 * reload it.
102 */
103 ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
104 sethi %hi(0xf << 20), %l4
105 and %l1, %l4, %l4
106 andn %l1, %l4, %l1
107 ba,pt %xcc, __handle_perfctrs_continue
108
109 sethi %hi(TSTATE_PEF), %o0
110__handle_userfpu: 68__handle_userfpu:
111 rd %fprs, %l5 69 rd %fprs, %l5
112 andcc %l5, FPRS_FEF, %g0 70 andcc %l5, FPRS_FEF, %g0
@@ -149,11 +107,11 @@ rtrap_nmi: ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
149rtrap_irq: 107rtrap_irq:
150rtrap: 108rtrap:
151#ifndef CONFIG_SMP 109#ifndef CONFIG_SMP
152 sethi %hi(per_cpu____cpu_data), %l0 110 sethi %hi(__cpu_data), %l0
153 lduw [%l0 + %lo(per_cpu____cpu_data)], %l1 111 lduw [%l0 + %lo(__cpu_data)], %l1
154#else 112#else
155 sethi %hi(per_cpu____cpu_data), %l0 113 sethi %hi(__cpu_data), %l0
156 or %l0, %lo(per_cpu____cpu_data), %l0 114 or %l0, %lo(__cpu_data), %l0
157 lduw [%l0 + %g5], %l1 115 lduw [%l0 + %g5], %l1
158#endif 116#endif
159 cmp %l1, 0 117 cmp %l1, 0
@@ -191,9 +149,9 @@ rtrap_no_irq_enable:
191 * take until the next local IRQ before the signal/resched 149 * take until the next local IRQ before the signal/resched
192 * event would be handled. 150 * event would be handled.
193 * 151 *
194 * This also means that if we have to deal with performance 152 * This also means that if we have to deal with user
195 * counters or user windows, we have to redo all of these 153 * windows, we have to redo all of these sched+signal checks
196 * sched+signal checks with IRQs disabled. 154 * with IRQs disabled.
197 */ 155 */
198to_user: wrpr %g0, RTRAP_PSTATE_IRQOFF, %pstate 156to_user: wrpr %g0, RTRAP_PSTATE_IRQOFF, %pstate
199 wrpr 0, %pil 157 wrpr 0, %pil
@@ -214,12 +172,8 @@ __handle_signal_continue:
214 brnz,pn %o2, __handle_user_windows 172 brnz,pn %o2, __handle_user_windows
215 nop 173 nop
216__handle_user_windows_continue: 174__handle_user_windows_continue:
217 ldx [%g6 + TI_FLAGS], %l5
218 andcc %l5, _TIF_PERFCTR, %g0
219 sethi %hi(TSTATE_PEF), %o0 175 sethi %hi(TSTATE_PEF), %o0
220 bne,pn %xcc, __handle_perfctrs 176 andcc %l1, %o0, %g0
221__handle_perfctrs_continue:
222 andcc %l1, %o0, %g0
223 177
224 /* This fpdepth clear is necessary for non-syscall rtraps only */ 178 /* This fpdepth clear is necessary for non-syscall rtraps only */
225user_nowork: 179user_nowork:
diff --git a/arch/sparc/kernel/sys32.S b/arch/sparc/kernel/sys32.S
index e7061138c98a..46a76ba3fb4b 100644
--- a/arch/sparc/kernel/sys32.S
+++ b/arch/sparc/kernel/sys32.S
@@ -51,7 +51,6 @@ SIGN1(sys32_exit_group, sys_exit_group, %o0)
51SIGN1(sys32_wait4, compat_sys_wait4, %o2) 51SIGN1(sys32_wait4, compat_sys_wait4, %o2)
52SIGN1(sys32_creat, sys_creat, %o1) 52SIGN1(sys32_creat, sys_creat, %o1)
53SIGN1(sys32_mknod, sys_mknod, %o1) 53SIGN1(sys32_mknod, sys_mknod, %o1)
54SIGN1(sys32_perfctr, sys_perfctr, %o0)
55SIGN1(sys32_umount, sys_umount, %o1) 54SIGN1(sys32_umount, sys_umount, %o1)
56SIGN1(sys32_signal, sys_signal, %o0) 55SIGN1(sys32_signal, sys_signal, %o0)
57SIGN1(sys32_access, sys_access, %o1) 56SIGN1(sys32_access, sys_access, %o1)
diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
index d77f54316948..cb1bef6f14b7 100644
--- a/arch/sparc/kernel/sys_sparc_64.c
+++ b/arch/sparc/kernel/sys_sparc_64.c
@@ -27,7 +27,6 @@
27 27
28#include <asm/uaccess.h> 28#include <asm/uaccess.h>
29#include <asm/utrap.h> 29#include <asm/utrap.h>
30#include <asm/perfctr.h>
31#include <asm/unistd.h> 30#include <asm/unistd.h>
32 31
33#include "entry.h" 32#include "entry.h"
@@ -766,109 +765,6 @@ SYSCALL_DEFINE5(rt_sigaction, int, sig, const struct sigaction __user *, act,
766 return ret; 765 return ret;
767} 766}
768 767
769/* Invoked by rtrap code to update performance counters in
770 * user space.
771 */
772asmlinkage void update_perfctrs(void)
773{
774 unsigned long pic, tmp;
775
776 read_pic(pic);
777 tmp = (current_thread_info()->kernel_cntd0 += (unsigned int)pic);
778 __put_user(tmp, current_thread_info()->user_cntd0);
779 tmp = (current_thread_info()->kernel_cntd1 += (pic >> 32));
780 __put_user(tmp, current_thread_info()->user_cntd1);
781 reset_pic();
782}
783
784SYSCALL_DEFINE4(perfctr, int, opcode, unsigned long, arg0,
785 unsigned long, arg1, unsigned long, arg2)
786{
787 int err = 0;
788
789 switch(opcode) {
790 case PERFCTR_ON:
791 current_thread_info()->pcr_reg = arg2;
792 current_thread_info()->user_cntd0 = (u64 __user *) arg0;
793 current_thread_info()->user_cntd1 = (u64 __user *) arg1;
794 current_thread_info()->kernel_cntd0 =
795 current_thread_info()->kernel_cntd1 = 0;
796 write_pcr(arg2);
797 reset_pic();
798 set_thread_flag(TIF_PERFCTR);
799 break;
800
801 case PERFCTR_OFF:
802 err = -EINVAL;
803 if (test_thread_flag(TIF_PERFCTR)) {
804 current_thread_info()->user_cntd0 =
805 current_thread_info()->user_cntd1 = NULL;
806 current_thread_info()->pcr_reg = 0;
807 write_pcr(0);
808 clear_thread_flag(TIF_PERFCTR);
809 err = 0;
810 }
811 break;
812
813 case PERFCTR_READ: {
814 unsigned long pic, tmp;
815
816 if (!test_thread_flag(TIF_PERFCTR)) {
817 err = -EINVAL;
818 break;
819 }
820 read_pic(pic);
821 tmp = (current_thread_info()->kernel_cntd0 += (unsigned int)pic);
822 err |= __put_user(tmp, current_thread_info()->user_cntd0);
823 tmp = (current_thread_info()->kernel_cntd1 += (pic >> 32));
824 err |= __put_user(tmp, current_thread_info()->user_cntd1);
825 reset_pic();
826 break;
827 }
828
829 case PERFCTR_CLRPIC:
830 if (!test_thread_flag(TIF_PERFCTR)) {
831 err = -EINVAL;
832 break;
833 }
834 current_thread_info()->kernel_cntd0 =
835 current_thread_info()->kernel_cntd1 = 0;
836 reset_pic();
837 break;
838
839 case PERFCTR_SETPCR: {
840 u64 __user *user_pcr = (u64 __user *)arg0;
841
842 if (!test_thread_flag(TIF_PERFCTR)) {
843 err = -EINVAL;
844 break;
845 }
846 err |= __get_user(current_thread_info()->pcr_reg, user_pcr);
847 write_pcr(current_thread_info()->pcr_reg);
848 current_thread_info()->kernel_cntd0 =
849 current_thread_info()->kernel_cntd1 = 0;
850 reset_pic();
851 break;
852 }
853
854 case PERFCTR_GETPCR: {
855 u64 __user *user_pcr = (u64 __user *)arg0;
856
857 if (!test_thread_flag(TIF_PERFCTR)) {
858 err = -EINVAL;
859 break;
860 }
861 err |= __put_user(current_thread_info()->pcr_reg, user_pcr);
862 break;
863 }
864
865 default:
866 err = -EINVAL;
867 break;
868 };
869 return err;
870}
871
872/* 768/*
873 * Do a system call from kernel instead of calling sys_execve so we 769 * Do a system call from kernel instead of calling sys_execve so we
874 * end up with proper pt_regs. 770 * end up with proper pt_regs.
diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S
index dc4a458f74dc..1d7e274f3f2b 100644
--- a/arch/sparc/kernel/syscalls.S
+++ b/arch/sparc/kernel/syscalls.S
@@ -110,31 +110,12 @@ sys_clone:
110 110
111 .globl ret_from_syscall 111 .globl ret_from_syscall
112ret_from_syscall: 112ret_from_syscall:
113 /* Clear current_thread_info()->new_child, and 113 /* Clear current_thread_info()->new_child. */
114 * check performance counter stuff too.
115 */
116 stb %g0, [%g6 + TI_NEW_CHILD] 114 stb %g0, [%g6 + TI_NEW_CHILD]
117 ldx [%g6 + TI_FLAGS], %l0 115 ldx [%g6 + TI_FLAGS], %l0
118 call schedule_tail 116 call schedule_tail
119 mov %g7, %o0 117 mov %g7, %o0
120 andcc %l0, _TIF_PERFCTR, %g0 118 ba,pt %xcc, ret_sys_call
121 be,pt %icc, 1f
122 nop
123 ldx [%g6 + TI_PCR], %o7
124 wr %g0, %o7, %pcr
125
126 /* Blackbird errata workaround. See commentary in
127 * smp.c:smp_percpu_timer_interrupt() for more
128 * information.
129 */
130 ba,pt %xcc, 99f
131 nop
132
133 .align 64
13499: wr %g0, %g0, %pic
135 rd %pic, %g0
136
1371: ba,pt %xcc, ret_sys_call
138 ldx [%sp + PTREGS_OFF + PT_V9_I0], %o0 119 ldx [%sp + PTREGS_OFF + PT_V9_I0], %o0
139 120
140 .globl sparc_exit 121 .globl sparc_exit
diff --git a/arch/sparc/kernel/systbls.h b/arch/sparc/kernel/systbls.h
index d2f999ae2b85..68312fe8da74 100644
--- a/arch/sparc/kernel/systbls.h
+++ b/arch/sparc/kernel/systbls.h
@@ -36,8 +36,6 @@ extern asmlinkage long sys_rt_sigaction(int sig,
36 struct sigaction __user *oact, 36 struct sigaction __user *oact,
37 void __user *restorer, 37 void __user *restorer,
38 size_t sigsetsize); 38 size_t sigsetsize);
39extern asmlinkage long sys_perfctr(int opcode, unsigned long arg0,
40 unsigned long arg1, unsigned long arg2);
41 39
42extern asmlinkage void sparc64_set_context(struct pt_regs *regs); 40extern asmlinkage void sparc64_set_context(struct pt_regs *regs);
43extern asmlinkage void sparc64_get_context(struct pt_regs *regs); 41extern asmlinkage void sparc64_get_context(struct pt_regs *regs);
diff --git a/arch/sparc/kernel/systbls_64.S b/arch/sparc/kernel/systbls_64.S
index e575b46bd7a9..17614251fb6d 100644
--- a/arch/sparc/kernel/systbls_64.S
+++ b/arch/sparc/kernel/systbls_64.S
@@ -21,7 +21,7 @@ sys_call_table32:
21/*0*/ .word sys_restart_syscall, sys32_exit, sys_fork, sys_read, sys_write 21/*0*/ .word sys_restart_syscall, sys32_exit, sys_fork, sys_read, sys_write
22/*5*/ .word sys32_open, sys_close, sys32_wait4, sys32_creat, sys_link 22/*5*/ .word sys32_open, sys_close, sys32_wait4, sys32_creat, sys_link
23/*10*/ .word sys_unlink, sunos_execv, sys_chdir, sys_chown16, sys32_mknod 23/*10*/ .word sys_unlink, sunos_execv, sys_chdir, sys_chown16, sys32_mknod
24/*15*/ .word sys_chmod, sys_lchown16, sys_brk, sys32_perfctr, sys32_lseek 24/*15*/ .word sys_chmod, sys_lchown16, sys_brk, sys_nis_syscall, sys32_lseek
25/*20*/ .word sys_getpid, sys_capget, sys_capset, sys_setuid16, sys_getuid16 25/*20*/ .word sys_getpid, sys_capget, sys_capset, sys_setuid16, sys_getuid16
26/*25*/ .word sys32_vmsplice, compat_sys_ptrace, sys_alarm, sys32_sigaltstack, sys_pause 26/*25*/ .word sys32_vmsplice, compat_sys_ptrace, sys_alarm, sys32_sigaltstack, sys_pause
27/*30*/ .word compat_sys_utime, sys_lchown, sys_fchown, sys32_access, sys32_nice 27/*30*/ .word compat_sys_utime, sys_lchown, sys_fchown, sys32_access, sys32_nice
@@ -96,7 +96,7 @@ sys_call_table:
96/*0*/ .word sys_restart_syscall, sparc_exit, sys_fork, sys_read, sys_write 96/*0*/ .word sys_restart_syscall, sparc_exit, sys_fork, sys_read, sys_write
97/*5*/ .word sys_open, sys_close, sys_wait4, sys_creat, sys_link 97/*5*/ .word sys_open, sys_close, sys_wait4, sys_creat, sys_link
98/*10*/ .word sys_unlink, sys_nis_syscall, sys_chdir, sys_chown, sys_mknod 98/*10*/ .word sys_unlink, sys_nis_syscall, sys_chdir, sys_chown, sys_mknod
99/*15*/ .word sys_chmod, sys_lchown, sys_brk, sys_perfctr, sys_lseek 99/*15*/ .word sys_chmod, sys_lchown, sys_brk, sys_nis_syscall, sys_lseek
100/*20*/ .word sys_getpid, sys_capget, sys_capset, sys_setuid, sys_getuid 100/*20*/ .word sys_getpid, sys_capget, sys_capset, sys_setuid, sys_getuid
101/*25*/ .word sys_vmsplice, sys_ptrace, sys_alarm, sys_sigaltstack, sys_nis_syscall 101/*25*/ .word sys_vmsplice, sys_ptrace, sys_alarm, sys_sigaltstack, sys_nis_syscall
102/*30*/ .word sys_utime, sys_nis_syscall, sys_nis_syscall, sys_access, sys_nice 102/*30*/ .word sys_utime, sys_nis_syscall, sys_nis_syscall, sys_access, sys_nice
diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
index 10f7bb9fc140..bdc05a21908b 100644
--- a/arch/sparc/kernel/traps_64.c
+++ b/arch/sparc/kernel/traps_64.c
@@ -2548,15 +2548,6 @@ void __init trap_init(void)
2548 rwbuf_stkptrs) || 2548 rwbuf_stkptrs) ||
2549 TI_GSR != offsetof(struct thread_info, gsr) || 2549 TI_GSR != offsetof(struct thread_info, gsr) ||
2550 TI_XFSR != offsetof(struct thread_info, xfsr) || 2550 TI_XFSR != offsetof(struct thread_info, xfsr) ||
2551 TI_USER_CNTD0 != offsetof(struct thread_info,
2552 user_cntd0) ||
2553 TI_USER_CNTD1 != offsetof(struct thread_info,
2554 user_cntd1) ||
2555 TI_KERN_CNTD0 != offsetof(struct thread_info,
2556 kernel_cntd0) ||
2557 TI_KERN_CNTD1 != offsetof(struct thread_info,
2558 kernel_cntd1) ||
2559 TI_PCR != offsetof(struct thread_info, pcr_reg) ||
2560 TI_PRE_COUNT != offsetof(struct thread_info, 2551 TI_PRE_COUNT != offsetof(struct thread_info,
2561 preempt_count) || 2552 preempt_count) ||
2562 TI_NEW_CHILD != offsetof(struct thread_info, new_child) || 2553 TI_NEW_CHILD != offsetof(struct thread_info, new_child) ||
diff --git a/arch/sparc/prom/p1275.c b/arch/sparc/prom/p1275.c
index 4b7c937bba61..2d8b70d397f1 100644
--- a/arch/sparc/prom/p1275.c
+++ b/arch/sparc/prom/p1275.c
@@ -32,10 +32,9 @@ extern void prom_cif_interface(void);
32extern void prom_cif_callback(void); 32extern void prom_cif_callback(void);
33 33
34/* 34/*
35 * This provides SMP safety on the p1275buf. prom_callback() drops this lock 35 * This provides SMP safety on the p1275buf.
36 * to allow recursive acquisition.
37 */ 36 */
38DEFINE_SPINLOCK(prom_entry_lock); 37DEFINE_RAW_SPINLOCK(prom_entry_lock);
39 38
40long p1275_cmd(const char *service, long fmt, ...) 39long p1275_cmd(const char *service, long fmt, ...)
41{ 40{
@@ -47,7 +46,9 @@ long p1275_cmd(const char *service, long fmt, ...)
47 46
48 p = p1275buf.prom_buffer; 47 p = p1275buf.prom_buffer;
49 48
50 spin_lock_irqsave(&prom_entry_lock, flags); 49 raw_local_save_flags(flags);
50 raw_local_irq_restore(PIL_NMI);
51 raw_spin_lock(&prom_entry_lock);
51 52
52 p1275buf.prom_args[0] = (unsigned long)p; /* service */ 53 p1275buf.prom_args[0] = (unsigned long)p; /* service */
53 strcpy (p, service); 54 strcpy (p, service);
@@ -139,7 +140,8 @@ long p1275_cmd(const char *service, long fmt, ...)
139 va_end(list); 140 va_end(list);
140 x = p1275buf.prom_args [nargs + 3]; 141 x = p1275buf.prom_args [nargs + 3];
141 142
142 spin_unlock_irqrestore(&prom_entry_lock, flags); 143 raw_spin_unlock(&prom_entry_lock);
144 raw_local_irq_restore(flags);
143 145
144 return x; 146 return x;
145} 147}
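The p1275 change swaps the IRQ-disabling spinlock for a raw spinlock taken with the processor interrupt level raised to PIL_NMI, so that even the NMI-level profiling/watchdog interrupt is held off while the firmware is entered (the old comment about recursive acquisition by prom_callback() no longer applies). The locking pattern now is:

        unsigned long flags;

        raw_local_save_flags(flags);
        raw_local_irq_restore(PIL_NMI); /* raise PIL: block everything up to NMI */
        raw_spin_lock(&prom_entry_lock);

        /* ... marshal arguments and call into the PROM ... */

        raw_spin_unlock(&prom_entry_lock);
        raw_local_irq_restore(flags);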
diff --git a/arch/um/drivers/mconsole_kern.c b/arch/um/drivers/mconsole_kern.c
index 3b3c36601a7b..de317d0c3294 100644
--- a/arch/um/drivers/mconsole_kern.c
+++ b/arch/um/drivers/mconsole_kern.c
@@ -140,7 +140,7 @@ void mconsole_proc(struct mc_request *req)
140 goto out; 140 goto out;
141 } 141 }
142 142
143 err = may_open(&nd.path, MAY_READ, FMODE_READ); 143 err = may_open(&nd.path, MAY_READ, O_RDONLY);
144 if (result) { 144 if (result) {
145 mconsole_reply(req, "Failed to open file", 1, 0); 145 mconsole_reply(req, "Failed to open file", 1, 0);
146 path_put(&nd.path); 146 path_put(&nd.path);
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 0896008f7509..f15f37bfbd62 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -31,6 +31,7 @@ config X86
31 select ARCH_WANT_FRAME_POINTERS 31 select ARCH_WANT_FRAME_POINTERS
32 select HAVE_DMA_ATTRS 32 select HAVE_DMA_ATTRS
33 select HAVE_KRETPROBES 33 select HAVE_KRETPROBES
34 select HAVE_OPTPROBES
34 select HAVE_FTRACE_MCOUNT_RECORD 35 select HAVE_FTRACE_MCOUNT_RECORD
35 select HAVE_DYNAMIC_FTRACE 36 select HAVE_DYNAMIC_FTRACE
36 select HAVE_FUNCTION_TRACER 37 select HAVE_FUNCTION_TRACER
@@ -184,6 +185,9 @@ config ARCH_SUPPORTS_OPTIMIZED_INLINING
184config ARCH_SUPPORTS_DEBUG_PAGEALLOC 185config ARCH_SUPPORTS_DEBUG_PAGEALLOC
185 def_bool y 186 def_bool y
186 187
188config HAVE_EARLY_RES
189 def_bool y
190
187config HAVE_INTEL_TXT 191config HAVE_INTEL_TXT
188 def_bool y 192 def_bool y
189 depends on EXPERIMENTAL && DMAR && ACPI 193 depends on EXPERIMENTAL && DMAR && ACPI
@@ -569,6 +573,18 @@ config PARAVIRT_DEBUG
569 Enable to debug paravirt_ops internals. Specifically, BUG if 573 Enable to debug paravirt_ops internals. Specifically, BUG if
570 a paravirt_op is missing when it is called. 574 a paravirt_op is missing when it is called.
571 575
576config NO_BOOTMEM
577 default y
578 bool "Disable Bootmem code"
579 ---help---
580 Use early_res directly instead of bootmem before slab is ready.
581 - allocator (buddy) [generic]
582 - early allocator (bootmem) [generic]
583 - very early allocator (reserve_early*()) [x86]
584 - very very early allocator (early brk model) [x86]
585 So reduce one layer between early allocator to final allocator
586
587
572config MEMTEST 588config MEMTEST
573 bool "Memtest" 589 bool "Memtest"
574 ---help--- 590 ---help---
diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
index 9046e4af66ce..280c019cfad8 100644
--- a/arch/x86/ia32/ia32_aout.c
+++ b/arch/x86/ia32/ia32_aout.c
@@ -327,7 +327,6 @@ static int load_aout_binary(struct linux_binprm *bprm, struct pt_regs *regs)
327 current->mm->free_area_cache = TASK_UNMAPPED_BASE; 327 current->mm->free_area_cache = TASK_UNMAPPED_BASE;
328 current->mm->cached_hole_size = 0; 328 current->mm->cached_hole_size = 0;
329 329
330 current->mm->mmap = NULL;
331 install_exec_creds(bprm); 330 install_exec_creds(bprm);
332 current->flags &= ~PF_FORKNOEXEC; 331 current->flags &= ~PF_FORKNOEXEC;
333 332
diff --git a/arch/x86/include/asm/Kbuild b/arch/x86/include/asm/Kbuild
index 9f828f87ca35..493092efaa3b 100644
--- a/arch/x86/include/asm/Kbuild
+++ b/arch/x86/include/asm/Kbuild
@@ -11,6 +11,7 @@ header-y += sigcontext32.h
11header-y += ucontext.h 11header-y += ucontext.h
12header-y += processor-flags.h 12header-y += processor-flags.h
13header-y += hw_breakpoint.h 13header-y += hw_breakpoint.h
14header-y += hyperv.h
14 15
15unifdef-y += e820.h 16unifdef-y += e820.h
16unifdef-y += ist.h 17unifdef-y += ist.h
diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
index f1e253ceba4b..b09ec55650b3 100644
--- a/arch/x86/include/asm/alternative.h
+++ b/arch/x86/include/asm/alternative.h
@@ -165,10 +165,12 @@ static inline void apply_paravirt(struct paravirt_patch_site *start,
165 * invalid instruction possible) or if the instructions are changed from a 165 * invalid instruction possible) or if the instructions are changed from a
166 * consistent state to another consistent state atomically. 166 * consistent state to another consistent state atomically.
167 * More care must be taken when modifying code in the SMP case because of 167 * More care must be taken when modifying code in the SMP case because of
168 * Intel's errata. 168 * Intel's errata. text_poke_smp() takes care of that errata, but it
169 * still does not support modifying code that runs in NMI/MCE handlers.
169 * On the local CPU you need to be protected against NMI or MCE handlers seeing an 170 * On the local CPU you need to be protected against NMI or MCE handlers seeing an
170 * inconsistent instruction while you patch. 171 * inconsistent instruction while you patch.
171 */ 172 */
172extern void *text_poke(void *addr, const void *opcode, size_t len); 173extern void *text_poke(void *addr, const void *opcode, size_t len);
174extern void *text_poke_smp(void *addr, const void *opcode, size_t len);
173 175
174#endif /* _ASM_X86_ALTERNATIVE_H */ 176#endif /* _ASM_X86_ALTERNATIVE_H */
diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h
index 761249e396fe..0e22296790d3 100644
--- a/arch/x86/include/asm/e820.h
+++ b/arch/x86/include/asm/e820.h
@@ -111,11 +111,8 @@ extern unsigned long end_user_pfn;
111 111
112extern u64 find_e820_area(u64 start, u64 end, u64 size, u64 align); 112extern u64 find_e820_area(u64 start, u64 end, u64 size, u64 align);
113extern u64 find_e820_area_size(u64 start, u64 *sizep, u64 align); 113extern u64 find_e820_area_size(u64 start, u64 *sizep, u64 align);
114extern void reserve_early(u64 start, u64 end, char *name);
115extern void reserve_early_overlap_ok(u64 start, u64 end, char *name);
116extern void free_early(u64 start, u64 end);
117extern void early_res_to_bootmem(u64 start, u64 end);
118extern u64 early_reserve_e820(u64 startt, u64 sizet, u64 align); 114extern u64 early_reserve_e820(u64 startt, u64 sizet, u64 align);
115#include <linux/early_res.h>
119 116
120extern unsigned long e820_end_of_ram_pfn(void); 117extern unsigned long e820_end_of_ram_pfn(void);
121extern unsigned long e820_end_of_low_ram_pfn(void); 118extern unsigned long e820_end_of_low_ram_pfn(void);
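The reserve_early()/free_early() family dropped here is not going away; it moves behind the new <linux/early_res.h> that this header now includes. A hypothetical early-boot user of the API (the -1ULL failure convention for find_e820_area() is an assumption drawn from its callers of this era; my_table, size, and align are placeholders):

	u64 addr = find_e820_area(0, max_pfn_mapped << PAGE_SHIFT, size, align);
	if (addr == -1ULL)
		panic("my_table: cannot reserve early memory");
	reserve_early(addr, addr + size, "my_table");	/* name shows up in debug output */
	/* ... with NO_BOOTMEM=y this range is later handed straight to the
	 * buddy allocator, with no bootmem bitmap in between ... */
	free_early(addr, addr + size);			/* only if the range is transient */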
diff --git a/arch/x86/include/asm/highmem.h b/arch/x86/include/asm/highmem.h
index 014c2b85ae45..a726650fc80f 100644
--- a/arch/x86/include/asm/highmem.h
+++ b/arch/x86/include/asm/highmem.h
@@ -66,10 +66,6 @@ void *kmap_atomic_pfn(unsigned long pfn, enum km_type type);
66void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot); 66void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot);
67struct page *kmap_atomic_to_page(void *ptr); 67struct page *kmap_atomic_to_page(void *ptr);
68 68
69#ifndef CONFIG_PARAVIRT
70#define kmap_atomic_pte(page, type) kmap_atomic(page, type)
71#endif
72
73#define flush_cache_kmaps() do { } while (0) 69#define flush_cache_kmaps() do { } while (0)
74 70
75extern void add_highpages_with_active_regions(int nid, unsigned long start_pfn, 71extern void add_highpages_with_active_regions(int nid, unsigned long start_pfn,
diff --git a/arch/x86/include/asm/hyperv.h b/arch/x86/include/asm/hyperv.h
new file mode 100644
index 000000000000..e153a2b3889a
--- /dev/null
+++ b/arch/x86/include/asm/hyperv.h
@@ -0,0 +1,186 @@
1#ifndef _ASM_X86_KVM_HYPERV_H
2#define _ASM_X86_KVM_HYPERV_H
3
4#include <linux/types.h>
5
6/*
7 * The below CPUID leaves are present if VersionAndFeatures.HypervisorPresent
8 * is set by CPUID(HvCpuIdFunctionVersionAndFeatures).
9 */
10#define HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS 0x40000000
11#define HYPERV_CPUID_INTERFACE 0x40000001
12#define HYPERV_CPUID_VERSION 0x40000002
13#define HYPERV_CPUID_FEATURES 0x40000003
14#define HYPERV_CPUID_ENLIGHTMENT_INFO 0x40000004
15#define HYPERV_CPUID_IMPLEMENT_LIMITS 0x40000005
16
17/*
18 * Feature identification. EAX indicates which features are available
19 * to the partition based upon the current partition privileges.
20 */
21
22/* VP Runtime (HV_X64_MSR_VP_RUNTIME) available */
23#define HV_X64_MSR_VP_RUNTIME_AVAILABLE (1 << 0)
24/* Partition Reference Counter (HV_X64_MSR_TIME_REF_COUNT) available*/
25#define HV_X64_MSR_TIME_REF_COUNT_AVAILABLE (1 << 1)
26/*
27 * Basic SynIC MSRs (HV_X64_MSR_SCONTROL through HV_X64_MSR_EOM
28 * and HV_X64_MSR_SINT0 through HV_X64_MSR_SINT15) available
29 */
30#define HV_X64_MSR_SYNIC_AVAILABLE (1 << 2)
31/*
32 * Synthetic Timer MSRs (HV_X64_MSR_STIMER0_CONFIG through
33 * HV_X64_MSR_STIMER3_COUNT) available
34 */
35#define HV_X64_MSR_SYNTIMER_AVAILABLE (1 << 3)
36/*
37 * APIC access MSRs (HV_X64_MSR_EOI, HV_X64_MSR_ICR and HV_X64_MSR_TPR)
38 * are available
39 */
40#define HV_X64_MSR_APIC_ACCESS_AVAILABLE (1 << 4)
41/* Hypercall MSRs (HV_X64_MSR_GUEST_OS_ID and HV_X64_MSR_HYPERCALL) available*/
42#define HV_X64_MSR_HYPERCALL_AVAILABLE (1 << 5)
43/* Access virtual processor index MSR (HV_X64_MSR_VP_INDEX) available*/
44#define HV_X64_MSR_VP_INDEX_AVAILABLE (1 << 6)
45/* Virtual system reset MSR (HV_X64_MSR_RESET) is available*/
46#define HV_X64_MSR_RESET_AVAILABLE (1 << 7)
47 /*
48 * Access statistics pages MSRs (HV_X64_MSR_STATS_PARTITION_RETAIL_PAGE,
49 * HV_X64_MSR_STATS_PARTITION_INTERNAL_PAGE, HV_X64_MSR_STATS_VP_RETAIL_PAGE,
50 * HV_X64_MSR_STATS_VP_INTERNAL_PAGE) available
51 */
52#define HV_X64_MSR_STAT_PAGES_AVAILABLE (1 << 8)
53
54/*
55 * Feature identification: EBX indicates which flags were specified at
56 * partition creation. The format is the same as the partition creation
57 * flag structure defined in section Partition Creation Flags.
58 */
59#define HV_X64_CREATE_PARTITIONS (1 << 0)
60#define HV_X64_ACCESS_PARTITION_ID (1 << 1)
61#define HV_X64_ACCESS_MEMORY_POOL (1 << 2)
62#define HV_X64_ADJUST_MESSAGE_BUFFERS (1 << 3)
63#define HV_X64_POST_MESSAGES (1 << 4)
64#define HV_X64_SIGNAL_EVENTS (1 << 5)
65#define HV_X64_CREATE_PORT (1 << 6)
66#define HV_X64_CONNECT_PORT (1 << 7)
67#define HV_X64_ACCESS_STATS (1 << 8)
68#define HV_X64_DEBUGGING (1 << 11)
69#define HV_X64_CPU_POWER_MANAGEMENT (1 << 12)
70#define HV_X64_CONFIGURE_PROFILER (1 << 13)
71
72/*
73 * Feature identification. EDX indicates which miscellaneous features
74 * are available to the partition.
75 */
76/* The MWAIT instruction is available (per section MONITOR / MWAIT) */
77#define HV_X64_MWAIT_AVAILABLE (1 << 0)
78/* Guest debugging support is available */
79#define HV_X64_GUEST_DEBUGGING_AVAILABLE (1 << 1)
80/* Performance Monitor support is available*/
81#define HV_X64_PERF_MONITOR_AVAILABLE (1 << 2)
82/* Support for physical CPU dynamic partitioning events is available*/
83#define HV_X64_CPU_DYNAMIC_PARTITIONING_AVAILABLE (1 << 3)
84/*
85 * Support for passing hypercall input parameter block via XMM
86 * registers is available
87 */
88#define HV_X64_HYPERCALL_PARAMS_XMM_AVAILABLE (1 << 4)
89/* Support for a virtual guest idle state is available */
90#define HV_X64_GUEST_IDLE_STATE_AVAILABLE (1 << 5)
91
92/*
93 * Implementation recommendations. Indicates which behaviors the hypervisor
94 * recommends the OS implement for optimal performance.
95 */
96 /*
97 * Recommend using hypercall for address space switches rather
98 * than MOV to CR3 instruction
99 */
100#define HV_X64_MWAIT_RECOMMENDED (1 << 0)
101/* Recommend using hypercall for local TLB flushes rather
102 * than INVLPG or MOV to CR3 instructions */
103#define HV_X64_LOCAL_TLB_FLUSH_RECOMMENDED (1 << 1)
104/*
105 * Recommend using hypercall for remote TLB flushes rather
106 * than inter-processor interrupts
107 */
108#define HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED (1 << 2)
109/*
110 * Recommend using MSRs for accessing APIC registers
111 * EOI, ICR and TPR rather than their memory-mapped counterparts
112 */
113#define HV_X64_APIC_ACCESS_RECOMMENDED (1 << 3)
114/* Recommend using the hypervisor-provided MSR to initiate a system RESET */
115#define HV_X64_SYSTEM_RESET_RECOMMENDED (1 << 4)
116/*
117 * Recommend using relaxed timing for this partition. If used,
118 * the VM should disable any watchdog timeouts that rely on the
119 * timely delivery of external interrupts
120 */
121#define HV_X64_RELAXED_TIMING_RECOMMENDED (1 << 5)
122
123/* MSR used to identify the guest OS. */
124#define HV_X64_MSR_GUEST_OS_ID 0x40000000
125
126/* MSR used to setup pages used to communicate with the hypervisor. */
127#define HV_X64_MSR_HYPERCALL 0x40000001
128
129/* MSR used to provide vcpu index */
130#define HV_X64_MSR_VP_INDEX 0x40000002
131
132/* Define the virtual APIC registers */
133#define HV_X64_MSR_EOI 0x40000070
134#define HV_X64_MSR_ICR 0x40000071
135#define HV_X64_MSR_TPR 0x40000072
136#define HV_X64_MSR_APIC_ASSIST_PAGE 0x40000073
137
138/* Define synthetic interrupt controller model specific registers. */
139#define HV_X64_MSR_SCONTROL 0x40000080
140#define HV_X64_MSR_SVERSION 0x40000081
141#define HV_X64_MSR_SIEFP 0x40000082
142#define HV_X64_MSR_SIMP 0x40000083
143#define HV_X64_MSR_EOM 0x40000084
144#define HV_X64_MSR_SINT0 0x40000090
145#define HV_X64_MSR_SINT1 0x40000091
146#define HV_X64_MSR_SINT2 0x40000092
147#define HV_X64_MSR_SINT3 0x40000093
148#define HV_X64_MSR_SINT4 0x40000094
149#define HV_X64_MSR_SINT5 0x40000095
150#define HV_X64_MSR_SINT6 0x40000096
151#define HV_X64_MSR_SINT7 0x40000097
152#define HV_X64_MSR_SINT8 0x40000098
153#define HV_X64_MSR_SINT9 0x40000099
154#define HV_X64_MSR_SINT10 0x4000009A
155#define HV_X64_MSR_SINT11 0x4000009B
156#define HV_X64_MSR_SINT12 0x4000009C
157#define HV_X64_MSR_SINT13 0x4000009D
158#define HV_X64_MSR_SINT14 0x4000009E
159#define HV_X64_MSR_SINT15 0x4000009F
160
161
162#define HV_X64_MSR_HYPERCALL_ENABLE 0x00000001
163#define HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT 12
164#define HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_MASK \
165 (~((1ull << HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT) - 1))
166
167/* Declare the various hypercall operations. */
168#define HV_X64_HV_NOTIFY_LONG_SPIN_WAIT 0x0008
169
170#define HV_X64_MSR_APIC_ASSIST_PAGE_ENABLE 0x00000001
171#define HV_X64_MSR_APIC_ASSIST_PAGE_ADDRESS_SHIFT 12
172#define HV_X64_MSR_APIC_ASSIST_PAGE_ADDRESS_MASK \
173 (~((1ull << HV_X64_MSR_APIC_ASSIST_PAGE_ADDRESS_SHIFT) - 1))
174
175#define HV_PROCESSOR_POWER_STATE_C0 0
176#define HV_PROCESSOR_POWER_STATE_C1 1
177#define HV_PROCESSOR_POWER_STATE_C2 2
178#define HV_PROCESSOR_POWER_STATE_C3 3
179
180/* hypercall status code */
181#define HV_STATUS_SUCCESS 0
182#define HV_STATUS_INVALID_HYPERCALL_CODE 2
183#define HV_STATUS_INVALID_HYPERCALL_INPUT 3
184#define HV_STATUS_INVALID_ALIGNMENT 4
185
186#endif
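The availability bits defined above are reported in EAX of the HYPERV_CPUID_FEATURES leaf. A minimal probe sketch, assuming the caller has already matched the hypervisor signature through HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS and HYPERV_CPUID_INTERFACE:

	#include <asm/processor.h>	/* cpuid() */
	#include <asm/hyperv.h>

	static bool hv_hypercall_msr_available(void)
	{
		unsigned int eax, ebx, ecx, edx;

		cpuid(HYPERV_CPUID_FEATURES, &eax, &ebx, &ecx, &edx);
		return eax & HV_X64_MSR_HYPERCALL_AVAILABLE;
	}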
diff --git a/arch/x86/include/asm/i8259.h b/arch/x86/include/asm/i8259.h
index 58d7091eeb1f..7ec65b18085d 100644
--- a/arch/x86/include/asm/i8259.h
+++ b/arch/x86/include/asm/i8259.h
@@ -24,7 +24,7 @@ extern unsigned int cached_irq_mask;
24#define SLAVE_ICW4_DEFAULT 0x01 24#define SLAVE_ICW4_DEFAULT 0x01
25#define PIC_ICW4_AEOI 2 25#define PIC_ICW4_AEOI 2
26 26
27extern spinlock_t i8259A_lock; 27extern raw_spinlock_t i8259A_lock;
28 28
29extern void init_8259A(int auto_eoi); 29extern void init_8259A(int auto_eoi);
30extern void enable_8259A_irq(unsigned int irq); 30extern void enable_8259A_irq(unsigned int irq);
diff --git a/arch/x86/include/asm/io_apic.h b/arch/x86/include/asm/io_apic.h
index 7c7c16cde1f8..5f61f6e0ffdd 100644
--- a/arch/x86/include/asm/io_apic.h
+++ b/arch/x86/include/asm/io_apic.h
@@ -160,6 +160,7 @@ extern int io_apic_get_redir_entries(int ioapic);
160struct io_apic_irq_attr; 160struct io_apic_irq_attr;
161extern int io_apic_set_pci_routing(struct device *dev, int irq, 161extern int io_apic_set_pci_routing(struct device *dev, int irq,
162 struct io_apic_irq_attr *irq_attr); 162 struct io_apic_irq_attr *irq_attr);
163void setup_IO_APIC_irq_extra(u32 gsi);
163extern int (*ioapic_renumber_irq)(int ioapic, int irq); 164extern int (*ioapic_renumber_irq)(int ioapic, int irq);
164extern void ioapic_init_mappings(void); 165extern void ioapic_init_mappings(void);
165extern void ioapic_insert_resources(void); 166extern void ioapic_insert_resources(void);
diff --git a/arch/x86/include/asm/irq.h b/arch/x86/include/asm/irq.h
index 5458380b6ef8..262292729fc4 100644
--- a/arch/x86/include/asm/irq.h
+++ b/arch/x86/include/asm/irq.h
@@ -48,5 +48,6 @@ extern DECLARE_BITMAP(used_vectors, NR_VECTORS);
48extern int vector_used_by_percpu_irq(unsigned int vector); 48extern int vector_used_by_percpu_irq(unsigned int vector);
49 49
50extern void init_ISA_irqs(void); 50extern void init_ISA_irqs(void);
51extern int nr_legacy_irqs;
51 52
52#endif /* _ASM_X86_IRQ_H */ 53#endif /* _ASM_X86_IRQ_H */
diff --git a/arch/x86/include/asm/irq_vectors.h b/arch/x86/include/asm/irq_vectors.h
index 4611f085cd43..8767d99c4f64 100644
--- a/arch/x86/include/asm/irq_vectors.h
+++ b/arch/x86/include/asm/irq_vectors.h
@@ -28,28 +28,33 @@
28#define MCE_VECTOR 0x12 28#define MCE_VECTOR 0x12
29 29
30/* 30/*
31 * IDT vectors usable for external interrupt sources start 31 * IDT vectors usable for external interrupt sources start at 0x20.
32 * at 0x20: 32 * (0x80 is the syscall vector, 0x30-0x3f are for ISA)
33 */ 33 */
34#define FIRST_EXTERNAL_VECTOR 0x20 34#define FIRST_EXTERNAL_VECTOR 0x20
35 35/*
36#ifdef CONFIG_X86_32 36 * We start allocating at 0x21 to spread out vectors evenly between
37# define SYSCALL_VECTOR 0x80 37 * priority levels. (0x80 is the syscall vector)
38# define IA32_SYSCALL_VECTOR 0x80 38 */
39#else 39#define VECTOR_OFFSET_START 1
40# define IA32_SYSCALL_VECTOR 0x80
41#endif
42 40
43/* 41/*
44 * Reserve the lowest usable priority level 0x20 - 0x2f for triggering 42 * Reserve the lowest usable vector (and hence lowest priority) 0x20 for
45 * cleanup after irq migration. 43 * triggering cleanup after irq migration. 0x21-0x2f will still be used
44 * for device interrupts.
46 */ 45 */
47#define IRQ_MOVE_CLEANUP_VECTOR FIRST_EXTERNAL_VECTOR 46#define IRQ_MOVE_CLEANUP_VECTOR FIRST_EXTERNAL_VECTOR
48 47
48#define IA32_SYSCALL_VECTOR 0x80
49#ifdef CONFIG_X86_32
50# define SYSCALL_VECTOR 0x80
51#endif
52
49/* 53/*
50 * Vectors 0x30-0x3f are used for ISA interrupts. 54 * Vectors 0x30-0x3f are used for ISA interrupts.
55 * round up to the next 16-vector boundary
51 */ 56 */
52#define IRQ0_VECTOR (FIRST_EXTERNAL_VECTOR + 0x10) 57#define IRQ0_VECTOR ((FIRST_EXTERNAL_VECTOR + 16) & ~15)
53 58
54#define IRQ1_VECTOR (IRQ0_VECTOR + 1) 59#define IRQ1_VECTOR (IRQ0_VECTOR + 1)
55#define IRQ2_VECTOR (IRQ0_VECTOR + 2) 60#define IRQ2_VECTOR (IRQ0_VECTOR + 2)
@@ -120,13 +125,6 @@
120 */ 125 */
121#define MCE_SELF_VECTOR 0xeb 126#define MCE_SELF_VECTOR 0xeb
122 127
123/*
124 * First APIC vector available to drivers: (vectors 0x30-0xee) we
125 * start at 0x31(0x41) to spread out vectors evenly between priority
126 * levels. (0x80 is the syscall vector)
127 */
128#define FIRST_DEVICE_VECTOR (IRQ15_VECTOR + 2)
129
130#define NR_VECTORS 256 128#define NR_VECTORS 256
131 129
132#define FPU_IRQ 13 130#define FPU_IRQ 13
@@ -154,21 +152,21 @@ static inline int invalid_vm86_irq(int irq)
154 152
155#define NR_IRQS_LEGACY 16 153#define NR_IRQS_LEGACY 16
156 154
157#define CPU_VECTOR_LIMIT ( 8 * NR_CPUS )
158#define IO_APIC_VECTOR_LIMIT ( 32 * MAX_IO_APICS ) 155#define IO_APIC_VECTOR_LIMIT ( 32 * MAX_IO_APICS )
159 156
160#ifdef CONFIG_X86_IO_APIC 157#ifdef CONFIG_X86_IO_APIC
161# ifdef CONFIG_SPARSE_IRQ 158# ifdef CONFIG_SPARSE_IRQ
159# define CPU_VECTOR_LIMIT (64 * NR_CPUS)
162# define NR_IRQS \ 160# define NR_IRQS \
163 (CPU_VECTOR_LIMIT > IO_APIC_VECTOR_LIMIT ? \ 161 (CPU_VECTOR_LIMIT > IO_APIC_VECTOR_LIMIT ? \
164 (NR_VECTORS + CPU_VECTOR_LIMIT) : \ 162 (NR_VECTORS + CPU_VECTOR_LIMIT) : \
165 (NR_VECTORS + IO_APIC_VECTOR_LIMIT)) 163 (NR_VECTORS + IO_APIC_VECTOR_LIMIT))
166# else 164# else
167# if NR_CPUS < MAX_IO_APICS 165# define CPU_VECTOR_LIMIT (32 * NR_CPUS)
168# define NR_IRQS (NR_VECTORS + 4*CPU_VECTOR_LIMIT) 166# define NR_IRQS \
169# else 167 (CPU_VECTOR_LIMIT < IO_APIC_VECTOR_LIMIT ? \
170# define NR_IRQS (NR_VECTORS + IO_APIC_VECTOR_LIMIT) 168 (NR_VECTORS + CPU_VECTOR_LIMIT) : \
171# endif 169 (NR_VECTORS + IO_APIC_VECTOR_LIMIT))
172# endif 170# endif
173#else /* !CONFIG_X86_IO_APIC: */ 171#else /* !CONFIG_X86_IO_APIC: */
174# define NR_IRQS NR_IRQS_LEGACY 172# define NR_IRQS NR_IRQS_LEGACY
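With FIRST_EXTERNAL_VECTOR at 0x20, the new IRQ0_VECTOR expression works out to (0x20 + 16) & ~15 = 0x30 & ~0x0f = 0x30, i.e. ISA IRQ 0 still lands at the start of the 0x30-0x3f block. Unlike the old hard-coded "+ 0x10", the expression stays 16-aligned even if FIRST_EXTERNAL_VECTOR is ever moved off a 16-vector boundary.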
diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
index 4fe681de1e76..4ffa345a8ccb 100644
--- a/arch/x86/include/asm/kprobes.h
+++ b/arch/x86/include/asm/kprobes.h
@@ -32,7 +32,10 @@ struct kprobe;
32 32
33typedef u8 kprobe_opcode_t; 33typedef u8 kprobe_opcode_t;
34#define BREAKPOINT_INSTRUCTION 0xcc 34#define BREAKPOINT_INSTRUCTION 0xcc
35#define RELATIVEJUMP_INSTRUCTION 0xe9 35#define RELATIVEJUMP_OPCODE 0xe9
36#define RELATIVEJUMP_SIZE 5
37#define RELATIVECALL_OPCODE 0xe8
38#define RELATIVE_ADDR_SIZE 4
36#define MAX_INSN_SIZE 16 39#define MAX_INSN_SIZE 16
37#define MAX_STACK_SIZE 64 40#define MAX_STACK_SIZE 64
38#define MIN_STACK_SIZE(ADDR) \ 41#define MIN_STACK_SIZE(ADDR) \
@@ -44,6 +47,17 @@ typedef u8 kprobe_opcode_t;
44 47
45#define flush_insn_slot(p) do { } while (0) 48#define flush_insn_slot(p) do { } while (0)
46 49
50/* optinsn template addresses */
51extern kprobe_opcode_t optprobe_template_entry;
52extern kprobe_opcode_t optprobe_template_val;
53extern kprobe_opcode_t optprobe_template_call;
54extern kprobe_opcode_t optprobe_template_end;
55#define MAX_OPTIMIZED_LENGTH (MAX_INSN_SIZE + RELATIVE_ADDR_SIZE)
56#define MAX_OPTINSN_SIZE \
57 (((unsigned long)&optprobe_template_end - \
58 (unsigned long)&optprobe_template_entry) + \
59 MAX_OPTIMIZED_LENGTH + RELATIVEJUMP_SIZE)
60
47extern const int kretprobe_blacklist_size; 61extern const int kretprobe_blacklist_size;
48 62
49void arch_remove_kprobe(struct kprobe *p); 63void arch_remove_kprobe(struct kprobe *p);
@@ -64,6 +78,21 @@ struct arch_specific_insn {
64 int boostable; 78 int boostable;
65}; 79};
66 80
81struct arch_optimized_insn {
82 /* copy of the original instructions */
83 kprobe_opcode_t copied_insn[RELATIVE_ADDR_SIZE];
84 /* detour code buffer */
85 kprobe_opcode_t *insn;
86 /* the size of instructions copied to detour code buffer */
87 size_t size;
88};
89
90/* Return true (!0) if optinsn is prepared for optimization. */
91static inline int arch_prepared_optinsn(struct arch_optimized_insn *optinsn)
92{
93 return optinsn->size;
94}
95
67struct prev_kprobe { 96struct prev_kprobe {
68 struct kprobe *kp; 97 struct kprobe *kp;
69 unsigned long status; 98 unsigned long status;
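The MAX_OPTINSN_SIZE bound added above falls straight out of the constants: the fixed template length, plus MAX_OPTIMIZED_LENGTH = 16 + 4 = 20 bytes of displaced original instructions, plus a RELATIVEJUMP_SIZE = 5 byte jump back. A sketch of the assumed detour-buffer layout (not spelled out in this hunk):

	/*
	 * +-----------------------------------------------+
	 * | template: save regs, call the kprobes core    |  template_end - template_entry
	 * | copied original instruction(s)                |  <= MAX_INSN_SIZE + RELATIVE_ADDR_SIZE
	 * | jmp back to <probed address + copied length>  |  RELATIVEJUMP_SIZE
	 * +-----------------------------------------------+
	 */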
diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h
index 7c18e1230f54..7a6f54fa13ba 100644
--- a/arch/x86/include/asm/kvm_emulate.h
+++ b/arch/x86/include/asm/kvm_emulate.h
@@ -54,13 +54,23 @@ struct x86_emulate_ctxt;
54struct x86_emulate_ops { 54struct x86_emulate_ops {
55 /* 55 /*
56 * read_std: Read bytes of standard (non-emulated/special) memory. 56 * read_std: Read bytes of standard (non-emulated/special) memory.
57 * Used for instruction fetch, stack operations, and others. 57 * Used for descriptor reading.
58 * @addr: [IN ] Linear address from which to read. 58 * @addr: [IN ] Linear address from which to read.
59 * @val: [OUT] Value read from memory, zero-extended to 'u_long'. 59 * @val: [OUT] Value read from memory, zero-extended to 'u_long'.
60 * @bytes: [IN ] Number of bytes to read from memory. 60 * @bytes: [IN ] Number of bytes to read from memory.
61 */ 61 */
62 int (*read_std)(unsigned long addr, void *val, 62 int (*read_std)(unsigned long addr, void *val,
63 unsigned int bytes, struct kvm_vcpu *vcpu); 63 unsigned int bytes, struct kvm_vcpu *vcpu, u32 *error);
64
65 /*
66 * fetch: Read bytes of standard (non-emulated/special) memory.
67 * Used for instruction fetch.
68 * @addr: [IN ] Linear address from which to read.
69 * @val: [OUT] Value read from memory, zero-extended to 'u_long'.
70 * @bytes: [IN ] Number of bytes to read from memory.
71 */
72 int (*fetch)(unsigned long addr, void *val,
73 unsigned int bytes, struct kvm_vcpu *vcpu, u32 *error);
64 74
65 /* 75 /*
66 * read_emulated: Read bytes from emulated/special memory area. 76 * read_emulated: Read bytes from emulated/special memory area.
@@ -74,7 +84,7 @@ struct x86_emulate_ops {
74 struct kvm_vcpu *vcpu); 84 struct kvm_vcpu *vcpu);
75 85
76 /* 86 /*
77 * write_emulated: Read bytes from emulated/special memory area. 87 * write_emulated: Write bytes to emulated/special memory area.
78 * @addr: [IN ] Linear address to which to write. 88 * @addr: [IN ] Linear address to which to write.
79 * @val: [IN ] Value to write to memory (low-order bytes used as 89 * @val: [IN ] Value to write to memory (low-order bytes used as
80 * required). 90 * required).
@@ -168,6 +178,7 @@ struct x86_emulate_ctxt {
168 178
169/* Execution mode, passed to the emulator. */ 179/* Execution mode, passed to the emulator. */
170#define X86EMUL_MODE_REAL 0 /* Real mode. */ 180#define X86EMUL_MODE_REAL 0 /* Real mode. */
181#define X86EMUL_MODE_VM86 1 /* Virtual 8086 mode. */
171#define X86EMUL_MODE_PROT16 2 /* 16-bit protected mode. */ 182#define X86EMUL_MODE_PROT16 2 /* 16-bit protected mode. */
172#define X86EMUL_MODE_PROT32 4 /* 32-bit protected mode. */ 183#define X86EMUL_MODE_PROT32 4 /* 32-bit protected mode. */
173#define X86EMUL_MODE_PROT64 8 /* 64-bit (long) mode. */ 184#define X86EMUL_MODE_PROT64 8 /* 64-bit (long) mode. */
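Splitting fetch() out of read_std() lets the MMU apply execute-permission checks to instruction bytes while data reads keep their own semantics. A hypothetical decoder-side call, where linear_rip and insn_buf are illustrative and X86EMUL_CONTINUE is the emulator's usual success code:

	u32 err;
	u8 insn_buf[15];
	int rc;

	rc = ops->fetch(linear_rip, insn_buf, sizeof(insn_buf), vcpu, &err);
	if (rc != X86EMUL_CONTINUE)
		return rc;	/* 'err' carries the fault code to report back */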
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 4f865e8b8540..06d9e79ca37d 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -25,7 +25,7 @@
25#include <asm/mtrr.h> 25#include <asm/mtrr.h>
26#include <asm/msr-index.h> 26#include <asm/msr-index.h>
27 27
28#define KVM_MAX_VCPUS 16 28#define KVM_MAX_VCPUS 64
29#define KVM_MEMORY_SLOTS 32 29#define KVM_MEMORY_SLOTS 32
30/* memory slots that are not exposed to userspace */ 30/* memory slots that are not exposed to userspace */
31#define KVM_PRIVATE_MEM_SLOTS 4 31#define KVM_PRIVATE_MEM_SLOTS 4
@@ -38,19 +38,6 @@
38#define CR3_L_MODE_RESERVED_BITS (CR3_NONPAE_RESERVED_BITS | \ 38#define CR3_L_MODE_RESERVED_BITS (CR3_NONPAE_RESERVED_BITS | \
39 0xFFFFFF0000000000ULL) 39 0xFFFFFF0000000000ULL)
40 40
41#define KVM_GUEST_CR0_MASK_UNRESTRICTED_GUEST \
42 (X86_CR0_WP | X86_CR0_NE | X86_CR0_NW | X86_CR0_CD)
43#define KVM_GUEST_CR0_MASK \
44 (KVM_GUEST_CR0_MASK_UNRESTRICTED_GUEST | X86_CR0_PG | X86_CR0_PE)
45#define KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST \
46 (X86_CR0_WP | X86_CR0_NE | X86_CR0_TS | X86_CR0_MP)
47#define KVM_VM_CR0_ALWAYS_ON \
48 (KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST | X86_CR0_PG | X86_CR0_PE)
49#define KVM_GUEST_CR4_MASK \
50 (X86_CR4_VME | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE | X86_CR4_VMXE)
51#define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE)
52#define KVM_RMODE_VM_CR4_ALWAYS_ON (X86_CR4_VME | X86_CR4_PAE | X86_CR4_VMXE)
53
54#define INVALID_PAGE (~(hpa_t)0) 41#define INVALID_PAGE (~(hpa_t)0)
55#define UNMAPPED_GVA (~(gpa_t)0) 42#define UNMAPPED_GVA (~(gpa_t)0)
56 43
@@ -256,7 +243,8 @@ struct kvm_mmu {
256 void (*new_cr3)(struct kvm_vcpu *vcpu); 243 void (*new_cr3)(struct kvm_vcpu *vcpu);
257 int (*page_fault)(struct kvm_vcpu *vcpu, gva_t gva, u32 err); 244 int (*page_fault)(struct kvm_vcpu *vcpu, gva_t gva, u32 err);
258 void (*free)(struct kvm_vcpu *vcpu); 245 void (*free)(struct kvm_vcpu *vcpu);
259 gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva); 246 gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva, u32 access,
247 u32 *error);
260 void (*prefetch_page)(struct kvm_vcpu *vcpu, 248 void (*prefetch_page)(struct kvm_vcpu *vcpu,
261 struct kvm_mmu_page *page); 249 struct kvm_mmu_page *page);
262 int (*sync_page)(struct kvm_vcpu *vcpu, 250 int (*sync_page)(struct kvm_vcpu *vcpu,
@@ -282,13 +270,15 @@ struct kvm_vcpu_arch {
282 u32 regs_dirty; 270 u32 regs_dirty;
283 271
284 unsigned long cr0; 272 unsigned long cr0;
273 unsigned long cr0_guest_owned_bits;
285 unsigned long cr2; 274 unsigned long cr2;
286 unsigned long cr3; 275 unsigned long cr3;
287 unsigned long cr4; 276 unsigned long cr4;
277 unsigned long cr4_guest_owned_bits;
288 unsigned long cr8; 278 unsigned long cr8;
289 u32 hflags; 279 u32 hflags;
290 u64 pdptrs[4]; /* pae */ 280 u64 pdptrs[4]; /* pae */
291 u64 shadow_efer; 281 u64 efer;
292 u64 apic_base; 282 u64 apic_base;
293 struct kvm_lapic *apic; /* kernel irqchip context */ 283 struct kvm_lapic *apic; /* kernel irqchip context */
294 int32_t apic_arb_prio; 284 int32_t apic_arb_prio;
@@ -374,17 +364,27 @@ struct kvm_vcpu_arch {
374 /* used for guest single stepping over the given code position */ 364 /* used for guest single stepping over the given code position */
375 u16 singlestep_cs; 365 u16 singlestep_cs;
376 unsigned long singlestep_rip; 366 unsigned long singlestep_rip;
367 /* fields used by HYPER-V emulation */
368 u64 hv_vapic;
377}; 369};
378 370
379struct kvm_mem_alias { 371struct kvm_mem_alias {
380 gfn_t base_gfn; 372 gfn_t base_gfn;
381 unsigned long npages; 373 unsigned long npages;
382 gfn_t target_gfn; 374 gfn_t target_gfn;
375#define KVM_ALIAS_INVALID 1UL
376 unsigned long flags;
383}; 377};
384 378
385struct kvm_arch{ 379#define KVM_ARCH_HAS_UNALIAS_INSTANTIATION
386 int naliases; 380
381struct kvm_mem_aliases {
387 struct kvm_mem_alias aliases[KVM_ALIAS_SLOTS]; 382 struct kvm_mem_alias aliases[KVM_ALIAS_SLOTS];
383 int naliases;
384};
385
386struct kvm_arch {
387 struct kvm_mem_aliases *aliases;
388 388
389 unsigned int n_free_mmu_pages; 389 unsigned int n_free_mmu_pages;
390 unsigned int n_requested_mmu_pages; 390 unsigned int n_requested_mmu_pages;
@@ -416,6 +416,10 @@ struct kvm_arch{
416 s64 kvmclock_offset; 416 s64 kvmclock_offset;
417 417
418 struct kvm_xen_hvm_config xen_hvm_config; 418 struct kvm_xen_hvm_config xen_hvm_config;
419
420 /* fields used by HYPER-V emulation */
421 u64 hv_guest_os_id;
422 u64 hv_hypercall;
419}; 423};
420 424
421struct kvm_vm_stat { 425struct kvm_vm_stat {
@@ -471,6 +475,7 @@ struct kvm_x86_ops {
471 int (*hardware_setup)(void); /* __init */ 475 int (*hardware_setup)(void); /* __init */
472 void (*hardware_unsetup)(void); /* __exit */ 476 void (*hardware_unsetup)(void); /* __exit */
473 bool (*cpu_has_accelerated_tpr)(void); 477 bool (*cpu_has_accelerated_tpr)(void);
478 void (*cpuid_update)(struct kvm_vcpu *vcpu);
474 479
475 /* Create, but do not attach this VCPU */ 480 /* Create, but do not attach this VCPU */
476 struct kvm_vcpu *(*vcpu_create)(struct kvm *kvm, unsigned id); 481 struct kvm_vcpu *(*vcpu_create)(struct kvm *kvm, unsigned id);
@@ -492,6 +497,7 @@ struct kvm_x86_ops {
492 void (*set_segment)(struct kvm_vcpu *vcpu, 497 void (*set_segment)(struct kvm_vcpu *vcpu,
493 struct kvm_segment *var, int seg); 498 struct kvm_segment *var, int seg);
494 void (*get_cs_db_l_bits)(struct kvm_vcpu *vcpu, int *db, int *l); 499 void (*get_cs_db_l_bits)(struct kvm_vcpu *vcpu, int *db, int *l);
500 void (*decache_cr0_guest_bits)(struct kvm_vcpu *vcpu);
495 void (*decache_cr4_guest_bits)(struct kvm_vcpu *vcpu); 501 void (*decache_cr4_guest_bits)(struct kvm_vcpu *vcpu);
496 void (*set_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0); 502 void (*set_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0);
497 void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3); 503 void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);
@@ -501,12 +507,13 @@ struct kvm_x86_ops {
501 void (*set_idt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt); 507 void (*set_idt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
502 void (*get_gdt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt); 508 void (*get_gdt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
503 void (*set_gdt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt); 509 void (*set_gdt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
504 unsigned long (*get_dr)(struct kvm_vcpu *vcpu, int dr); 510 int (*get_dr)(struct kvm_vcpu *vcpu, int dr, unsigned long *dest);
505 void (*set_dr)(struct kvm_vcpu *vcpu, int dr, unsigned long value, 511 int (*set_dr)(struct kvm_vcpu *vcpu, int dr, unsigned long value);
506 int *exception);
507 void (*cache_reg)(struct kvm_vcpu *vcpu, enum kvm_reg reg); 512 void (*cache_reg)(struct kvm_vcpu *vcpu, enum kvm_reg reg);
508 unsigned long (*get_rflags)(struct kvm_vcpu *vcpu); 513 unsigned long (*get_rflags)(struct kvm_vcpu *vcpu);
509 void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags); 514 void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags);
515 void (*fpu_activate)(struct kvm_vcpu *vcpu);
516 void (*fpu_deactivate)(struct kvm_vcpu *vcpu);
510 517
511 void (*tlb_flush)(struct kvm_vcpu *vcpu); 518 void (*tlb_flush)(struct kvm_vcpu *vcpu);
512 519
@@ -531,7 +538,8 @@ struct kvm_x86_ops {
531 int (*set_tss_addr)(struct kvm *kvm, unsigned int addr); 538 int (*set_tss_addr)(struct kvm *kvm, unsigned int addr);
532 int (*get_tdp_level)(void); 539 int (*get_tdp_level)(void);
533 u64 (*get_mt_mask)(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio); 540 u64 (*get_mt_mask)(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio);
534 bool (*gb_page_enable)(void); 541 int (*get_lpage_level)(void);
542 bool (*rdtscp_supported)(void);
535 543
536 const struct trace_print_flags *exit_reasons_str; 544 const struct trace_print_flags *exit_reasons_str;
537}; 545};
@@ -606,8 +614,7 @@ int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr,
606 unsigned long value); 614 unsigned long value);
607 615
608void kvm_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg); 616void kvm_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
609int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, 617int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, int seg);
610 int type_bits, int seg);
611 618
612int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason); 619int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason);
613 620
@@ -653,6 +660,10 @@ void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu);
653int kvm_mmu_load(struct kvm_vcpu *vcpu); 660int kvm_mmu_load(struct kvm_vcpu *vcpu);
654void kvm_mmu_unload(struct kvm_vcpu *vcpu); 661void kvm_mmu_unload(struct kvm_vcpu *vcpu);
655void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu); 662void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu);
663gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva, u32 *error);
664gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva, u32 *error);
665gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva, u32 *error);
666gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva, u32 *error);
656 667
657int kvm_emulate_hypercall(struct kvm_vcpu *vcpu); 668int kvm_emulate_hypercall(struct kvm_vcpu *vcpu);
658 669
@@ -666,6 +677,7 @@ void kvm_disable_tdp(void);
666 677
667int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3); 678int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3);
668int complete_pio(struct kvm_vcpu *vcpu); 679int complete_pio(struct kvm_vcpu *vcpu);
680bool kvm_check_iopl(struct kvm_vcpu *vcpu);
669 681
670struct kvm_memory_slot *gfn_to_memslot_unaliased(struct kvm *kvm, gfn_t gfn); 682struct kvm_memory_slot *gfn_to_memslot_unaliased(struct kvm *kvm, gfn_t gfn);
671 683
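The kvm_mmu_gva_to_gpa_{read,fetch,write,system} helpers declared in this hunk wrap the new access-aware gva_to_gpa hook. A hedged usage sketch; the UNMAPPED_GVA convention comes from this header, while propagating 'error' through kvm_inject_page_fault() is an assumption about the callers:

	u32 error;
	gpa_t gpa;

	gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, &error);
	if (gpa == UNMAPPED_GVA) {
		/* translation failed: 'error' holds the #PF error code */
		kvm_inject_page_fault(vcpu, gva, error);
		return X86EMUL_PROPAGATE_FAULT;
	}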
diff --git a/arch/x86/include/asm/kvm_para.h b/arch/x86/include/asm/kvm_para.h
index c584076a47f4..ffae1420e7d7 100644
--- a/arch/x86/include/asm/kvm_para.h
+++ b/arch/x86/include/asm/kvm_para.h
@@ -2,6 +2,7 @@
2#define _ASM_X86_KVM_PARA_H 2#define _ASM_X86_KVM_PARA_H
3 3
4#include <linux/types.h> 4#include <linux/types.h>
5#include <asm/hyperv.h>
5 6
6/* This CPUID returns the signature 'KVMKVMKVM' in ebx, ecx, and edx. It 7/* This CPUID returns the signature 'KVMKVMKVM' in ebx, ecx, and edx. It
7 * should be used to determine that a VM is running under KVM. 8 * should be used to determine that a VM is running under KVM.
diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
index 47b9b6f19057..2e9972468a5d 100644
--- a/arch/x86/include/asm/local.h
+++ b/arch/x86/include/asm/local.h
@@ -195,41 +195,4 @@ static inline long local_sub_return(long i, local_t *l)
195#define __local_add(i, l) local_add((i), (l)) 195#define __local_add(i, l) local_add((i), (l))
196#define __local_sub(i, l) local_sub((i), (l)) 196#define __local_sub(i, l) local_sub((i), (l))
197 197
198/* Use these for per-cpu local_t variables: on some archs they are
199 * much more efficient than these naive implementations. Note they take
200 * a variable, not an address.
201 *
202 * X86_64: This could be done better if we moved the per cpu data directly
203 * after GS.
204 */
205
206/* Need to disable preemption for the cpu local counters otherwise we could
207 still access a variable of a previous CPU in a non atomic way. */
208#define cpu_local_wrap_v(l) \
209({ \
210 local_t res__; \
211 preempt_disable(); \
212 res__ = (l); \
213 preempt_enable(); \
214 res__; \
215})
216#define cpu_local_wrap(l) \
217({ \
218 preempt_disable(); \
219 (l); \
220 preempt_enable(); \
221}) \
222
223#define cpu_local_read(l) cpu_local_wrap_v(local_read(&__get_cpu_var((l))))
224#define cpu_local_set(l, i) cpu_local_wrap(local_set(&__get_cpu_var((l)), (i)))
225#define cpu_local_inc(l) cpu_local_wrap(local_inc(&__get_cpu_var((l))))
226#define cpu_local_dec(l) cpu_local_wrap(local_dec(&__get_cpu_var((l))))
227#define cpu_local_add(i, l) cpu_local_wrap(local_add((i), &__get_cpu_var((l))))
228#define cpu_local_sub(i, l) cpu_local_wrap(local_sub((i), &__get_cpu_var((l))))
229
230#define __cpu_local_inc(l) cpu_local_inc((l))
231#define __cpu_local_dec(l) cpu_local_dec((l))
232#define __cpu_local_add(i, l) cpu_local_add((i), (l))
233#define __cpu_local_sub(i, l) cpu_local_sub((i), (l))
234
235#endif /* _ASM_X86_LOCAL_H */ 198#endif /* _ASM_X86_LOCAL_H */
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index dd59a85a918f..5653f43d90e5 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -435,15 +435,6 @@ static inline void paravirt_release_pud(unsigned long pfn)
435 PVOP_VCALL1(pv_mmu_ops.release_pud, pfn); 435 PVOP_VCALL1(pv_mmu_ops.release_pud, pfn);
436} 436}
437 437
438#ifdef CONFIG_HIGHPTE
439static inline void *kmap_atomic_pte(struct page *page, enum km_type type)
440{
441 unsigned long ret;
442 ret = PVOP_CALL2(unsigned long, pv_mmu_ops.kmap_atomic_pte, page, type);
443 return (void *)ret;
444}
445#endif
446
447static inline void pte_update(struct mm_struct *mm, unsigned long addr, 438static inline void pte_update(struct mm_struct *mm, unsigned long addr,
448 pte_t *ptep) 439 pte_t *ptep)
449{ 440{
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index b1e70d51e40c..db9ef5532341 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -304,10 +304,6 @@ struct pv_mmu_ops {
304#endif /* PAGETABLE_LEVELS == 4 */ 304#endif /* PAGETABLE_LEVELS == 4 */
305#endif /* PAGETABLE_LEVELS >= 3 */ 305#endif /* PAGETABLE_LEVELS >= 3 */
306 306
307#ifdef CONFIG_HIGHPTE
308 void *(*kmap_atomic_pte)(struct page *page, enum km_type type);
309#endif
310
311 struct pv_lazy_ops lazy_mode; 307 struct pv_lazy_ops lazy_mode;
312 308
313 /* dom0 ops */ 309 /* dom0 ops */
diff --git a/arch/x86/include/asm/pci.h b/arch/x86/include/asm/pci.h
index ada8c201d513..b4a00dd4eed5 100644
--- a/arch/x86/include/asm/pci.h
+++ b/arch/x86/include/asm/pci.h
@@ -124,6 +124,8 @@ extern void pci_iommu_alloc(void);
124#include "pci_64.h" 124#include "pci_64.h"
125#endif 125#endif
126 126
127void dma32_reserve_bootmem(void);
128
127/* implement the pci_ DMA API in terms of the generic device dma_ one */ 129/* implement the pci_ DMA API in terms of the generic device dma_ one */
128#include <asm-generic/pci-dma-compat.h> 130#include <asm-generic/pci-dma-compat.h>
129 131
diff --git a/arch/x86/include/asm/pci_64.h b/arch/x86/include/asm/pci_64.h
index ae5e40f67daf..fe15cfb21b9b 100644
--- a/arch/x86/include/asm/pci_64.h
+++ b/arch/x86/include/asm/pci_64.h
@@ -22,8 +22,6 @@ extern int (*pci_config_read)(int seg, int bus, int dev, int fn,
22extern int (*pci_config_write)(int seg, int bus, int dev, int fn, 22extern int (*pci_config_write)(int seg, int bus, int dev, int fn,
23 int reg, int len, u32 value); 23 int reg, int len, u32 value);
24 24
25extern void dma32_reserve_bootmem(void);
26
27#endif /* __KERNEL__ */ 25#endif /* __KERNEL__ */
28 26
29#endif /* _ASM_X86_PCI_64_H */ 27#endif /* _ASM_X86_PCI_64_H */
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index 0c44196b78ac..66a272dfd8b8 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -25,19 +25,18 @@
25 */ 25 */
26#ifdef CONFIG_SMP 26#ifdef CONFIG_SMP
27#define PER_CPU(var, reg) \ 27#define PER_CPU(var, reg) \
28 __percpu_mov_op %__percpu_seg:per_cpu__this_cpu_off, reg; \ 28 __percpu_mov_op %__percpu_seg:this_cpu_off, reg; \
29 lea per_cpu__##var(reg), reg 29 lea var(reg), reg
30#define PER_CPU_VAR(var) %__percpu_seg:per_cpu__##var 30#define PER_CPU_VAR(var) %__percpu_seg:var
31#else /* ! SMP */ 31#else /* ! SMP */
32#define PER_CPU(var, reg) \ 32#define PER_CPU(var, reg) __percpu_mov_op $var, reg
33 __percpu_mov_op $per_cpu__##var, reg 33#define PER_CPU_VAR(var) var
34#define PER_CPU_VAR(var) per_cpu__##var
35#endif /* SMP */ 34#endif /* SMP */
36 35
37#ifdef CONFIG_X86_64_SMP 36#ifdef CONFIG_X86_64_SMP
38#define INIT_PER_CPU_VAR(var) init_per_cpu__##var 37#define INIT_PER_CPU_VAR(var) init_per_cpu__##var
39#else 38#else
40#define INIT_PER_CPU_VAR(var) per_cpu__##var 39#define INIT_PER_CPU_VAR(var) var
41#endif 40#endif
42 41
43#else /* ...!ASSEMBLY */ 42#else /* ...!ASSEMBLY */
@@ -60,12 +59,12 @@
60 * There also must be an entry in vmlinux_64.lds.S 59 * There also must be an entry in vmlinux_64.lds.S
61 */ 60 */
62#define DECLARE_INIT_PER_CPU(var) \ 61#define DECLARE_INIT_PER_CPU(var) \
63 extern typeof(per_cpu_var(var)) init_per_cpu_var(var) 62 extern typeof(var) init_per_cpu_var(var)
64 63
65#ifdef CONFIG_X86_64_SMP 64#ifdef CONFIG_X86_64_SMP
66#define init_per_cpu_var(var) init_per_cpu__##var 65#define init_per_cpu_var(var) init_per_cpu__##var
67#else 66#else
68#define init_per_cpu_var(var) per_cpu_var(var) 67#define init_per_cpu_var(var) var
69#endif 68#endif
70 69
71/* For arch-specific code, we can use direct single-insn ops (they 70/* For arch-specific code, we can use direct single-insn ops (they
@@ -104,6 +103,64 @@ do { \
104 } \ 103 } \
105} while (0) 104} while (0)
106 105
106/*
107 * Generate a percpu add-to-memory instruction, optimizing the code
108 * when one is added or subtracted.
109 */
110#define percpu_add_op(var, val) \
111do { \
112 typedef typeof(var) pao_T__; \
113 const int pao_ID__ = (__builtin_constant_p(val) && \
114 ((val) == 1 || (val) == -1)) ? (val) : 0; \
115 if (0) { \
116 pao_T__ pao_tmp__; \
117 pao_tmp__ = (val); \
118 } \
119 switch (sizeof(var)) { \
120 case 1: \
121 if (pao_ID__ == 1) \
122 asm("incb "__percpu_arg(0) : "+m" (var)); \
123 else if (pao_ID__ == -1) \
124 asm("decb "__percpu_arg(0) : "+m" (var)); \
125 else \
126 asm("addb %1, "__percpu_arg(0) \
127 : "+m" (var) \
128 : "qi" ((pao_T__)(val))); \
129 break; \
130 case 2: \
131 if (pao_ID__ == 1) \
132 asm("incw "__percpu_arg(0) : "+m" (var)); \
133 else if (pao_ID__ == -1) \
134 asm("decw "__percpu_arg(0) : "+m" (var)); \
135 else \
136 asm("addw %1, "__percpu_arg(0) \
137 : "+m" (var) \
138 : "ri" ((pao_T__)(val))); \
139 break; \
140 case 4: \
141 if (pao_ID__ == 1) \
142 asm("incl "__percpu_arg(0) : "+m" (var)); \
143 else if (pao_ID__ == -1) \
144 asm("decl "__percpu_arg(0) : "+m" (var)); \
145 else \
146 asm("addl %1, "__percpu_arg(0) \
147 : "+m" (var) \
148 : "ri" ((pao_T__)(val))); \
149 break; \
150 case 8: \
151 if (pao_ID__ == 1) \
152 asm("incq "__percpu_arg(0) : "+m" (var)); \
153 else if (pao_ID__ == -1) \
154 asm("decq "__percpu_arg(0) : "+m" (var)); \
155 else \
156 asm("addq %1, "__percpu_arg(0) \
157 : "+m" (var) \
158 : "re" ((pao_T__)(val))); \
159 break; \
160 default: __bad_percpu_size(); \
161 } \
162} while (0)
163
107#define percpu_from_op(op, var, constraint) \ 164#define percpu_from_op(op, var, constraint) \
108({ \ 165({ \
109 typeof(var) pfo_ret__; \ 166 typeof(var) pfo_ret__; \
@@ -142,16 +199,14 @@ do { \
142 * per-thread variables implemented as per-cpu variables and thus 199 * per-thread variables implemented as per-cpu variables and thus
143 * stable for the duration of the respective task. 200 * stable for the duration of the respective task.
144 */ 201 */
145#define percpu_read(var) percpu_from_op("mov", per_cpu__##var, \ 202#define percpu_read(var) percpu_from_op("mov", var, "m" (var))
146 "m" (per_cpu__##var)) 203#define percpu_read_stable(var) percpu_from_op("mov", var, "p" (&(var)))
147#define percpu_read_stable(var) percpu_from_op("mov", per_cpu__##var, \ 204#define percpu_write(var, val) percpu_to_op("mov", var, val)
148 "p" (&per_cpu__##var)) 205#define percpu_add(var, val) percpu_add_op(var, val)
149#define percpu_write(var, val) percpu_to_op("mov", per_cpu__##var, val) 206#define percpu_sub(var, val) percpu_add_op(var, -(val))
150#define percpu_add(var, val) percpu_to_op("add", per_cpu__##var, val) 207#define percpu_and(var, val) percpu_to_op("and", var, val)
151#define percpu_sub(var, val) percpu_to_op("sub", per_cpu__##var, val) 208#define percpu_or(var, val) percpu_to_op("or", var, val)
152#define percpu_and(var, val) percpu_to_op("and", per_cpu__##var, val) 209#define percpu_xor(var, val) percpu_to_op("xor", var, val)
153#define percpu_or(var, val) percpu_to_op("or", per_cpu__##var, val)
154#define percpu_xor(var, val) percpu_to_op("xor", per_cpu__##var, val)
155 210
156#define __this_cpu_read_1(pcp) percpu_from_op("mov", (pcp), "m"(pcp)) 211#define __this_cpu_read_1(pcp) percpu_from_op("mov", (pcp), "m"(pcp))
157#define __this_cpu_read_2(pcp) percpu_from_op("mov", (pcp), "m"(pcp)) 212#define __this_cpu_read_2(pcp) percpu_from_op("mov", (pcp), "m"(pcp))
@@ -160,9 +215,9 @@ do { \
160#define __this_cpu_write_1(pcp, val) percpu_to_op("mov", (pcp), val) 215#define __this_cpu_write_1(pcp, val) percpu_to_op("mov", (pcp), val)
161#define __this_cpu_write_2(pcp, val) percpu_to_op("mov", (pcp), val) 216#define __this_cpu_write_2(pcp, val) percpu_to_op("mov", (pcp), val)
162#define __this_cpu_write_4(pcp, val) percpu_to_op("mov", (pcp), val) 217#define __this_cpu_write_4(pcp, val) percpu_to_op("mov", (pcp), val)
163#define __this_cpu_add_1(pcp, val) percpu_to_op("add", (pcp), val) 218#define __this_cpu_add_1(pcp, val) percpu_add_op((pcp), val)
164#define __this_cpu_add_2(pcp, val) percpu_to_op("add", (pcp), val) 219#define __this_cpu_add_2(pcp, val) percpu_add_op((pcp), val)
165#define __this_cpu_add_4(pcp, val) percpu_to_op("add", (pcp), val) 220#define __this_cpu_add_4(pcp, val) percpu_add_op((pcp), val)
166#define __this_cpu_and_1(pcp, val) percpu_to_op("and", (pcp), val) 221#define __this_cpu_and_1(pcp, val) percpu_to_op("and", (pcp), val)
167#define __this_cpu_and_2(pcp, val) percpu_to_op("and", (pcp), val) 222#define __this_cpu_and_2(pcp, val) percpu_to_op("and", (pcp), val)
168#define __this_cpu_and_4(pcp, val) percpu_to_op("and", (pcp), val) 223#define __this_cpu_and_4(pcp, val) percpu_to_op("and", (pcp), val)
@@ -179,9 +234,9 @@ do { \
179#define this_cpu_write_1(pcp, val) percpu_to_op("mov", (pcp), val) 234#define this_cpu_write_1(pcp, val) percpu_to_op("mov", (pcp), val)
180#define this_cpu_write_2(pcp, val) percpu_to_op("mov", (pcp), val) 235#define this_cpu_write_2(pcp, val) percpu_to_op("mov", (pcp), val)
181#define this_cpu_write_4(pcp, val) percpu_to_op("mov", (pcp), val) 236#define this_cpu_write_4(pcp, val) percpu_to_op("mov", (pcp), val)
182#define this_cpu_add_1(pcp, val) percpu_to_op("add", (pcp), val) 237#define this_cpu_add_1(pcp, val) percpu_add_op((pcp), val)
183#define this_cpu_add_2(pcp, val) percpu_to_op("add", (pcp), val) 238#define this_cpu_add_2(pcp, val) percpu_add_op((pcp), val)
184#define this_cpu_add_4(pcp, val) percpu_to_op("add", (pcp), val) 239#define this_cpu_add_4(pcp, val) percpu_add_op((pcp), val)
185#define this_cpu_and_1(pcp, val) percpu_to_op("and", (pcp), val) 240#define this_cpu_and_1(pcp, val) percpu_to_op("and", (pcp), val)
186#define this_cpu_and_2(pcp, val) percpu_to_op("and", (pcp), val) 241#define this_cpu_and_2(pcp, val) percpu_to_op("and", (pcp), val)
187#define this_cpu_and_4(pcp, val) percpu_to_op("and", (pcp), val) 242#define this_cpu_and_4(pcp, val) percpu_to_op("and", (pcp), val)
@@ -192,9 +247,9 @@ do { \
192#define this_cpu_xor_2(pcp, val) percpu_to_op("xor", (pcp), val) 247#define this_cpu_xor_2(pcp, val) percpu_to_op("xor", (pcp), val)
193#define this_cpu_xor_4(pcp, val) percpu_to_op("xor", (pcp), val) 248#define this_cpu_xor_4(pcp, val) percpu_to_op("xor", (pcp), val)
194 249
195#define irqsafe_cpu_add_1(pcp, val) percpu_to_op("add", (pcp), val) 250#define irqsafe_cpu_add_1(pcp, val) percpu_add_op((pcp), val)
196#define irqsafe_cpu_add_2(pcp, val) percpu_to_op("add", (pcp), val) 251#define irqsafe_cpu_add_2(pcp, val) percpu_add_op((pcp), val)
197#define irqsafe_cpu_add_4(pcp, val) percpu_to_op("add", (pcp), val) 252#define irqsafe_cpu_add_4(pcp, val) percpu_add_op((pcp), val)
198#define irqsafe_cpu_and_1(pcp, val) percpu_to_op("and", (pcp), val) 253#define irqsafe_cpu_and_1(pcp, val) percpu_to_op("and", (pcp), val)
199#define irqsafe_cpu_and_2(pcp, val) percpu_to_op("and", (pcp), val) 254#define irqsafe_cpu_and_2(pcp, val) percpu_to_op("and", (pcp), val)
200#define irqsafe_cpu_and_4(pcp, val) percpu_to_op("and", (pcp), val) 255#define irqsafe_cpu_and_4(pcp, val) percpu_to_op("and", (pcp), val)
@@ -212,19 +267,19 @@ do { \
212#ifdef CONFIG_X86_64 267#ifdef CONFIG_X86_64
213#define __this_cpu_read_8(pcp) percpu_from_op("mov", (pcp), "m"(pcp)) 268#define __this_cpu_read_8(pcp) percpu_from_op("mov", (pcp), "m"(pcp))
214#define __this_cpu_write_8(pcp, val) percpu_to_op("mov", (pcp), val) 269#define __this_cpu_write_8(pcp, val) percpu_to_op("mov", (pcp), val)
215#define __this_cpu_add_8(pcp, val) percpu_to_op("add", (pcp), val) 270#define __this_cpu_add_8(pcp, val) percpu_add_op((pcp), val)
216#define __this_cpu_and_8(pcp, val) percpu_to_op("and", (pcp), val) 271#define __this_cpu_and_8(pcp, val) percpu_to_op("and", (pcp), val)
217#define __this_cpu_or_8(pcp, val) percpu_to_op("or", (pcp), val) 272#define __this_cpu_or_8(pcp, val) percpu_to_op("or", (pcp), val)
218#define __this_cpu_xor_8(pcp, val) percpu_to_op("xor", (pcp), val) 273#define __this_cpu_xor_8(pcp, val) percpu_to_op("xor", (pcp), val)
219 274
220#define this_cpu_read_8(pcp) percpu_from_op("mov", (pcp), "m"(pcp)) 275#define this_cpu_read_8(pcp) percpu_from_op("mov", (pcp), "m"(pcp))
221#define this_cpu_write_8(pcp, val) percpu_to_op("mov", (pcp), val) 276#define this_cpu_write_8(pcp, val) percpu_to_op("mov", (pcp), val)
222#define this_cpu_add_8(pcp, val) percpu_to_op("add", (pcp), val) 277#define this_cpu_add_8(pcp, val) percpu_add_op((pcp), val)
223#define this_cpu_and_8(pcp, val) percpu_to_op("and", (pcp), val) 278#define this_cpu_and_8(pcp, val) percpu_to_op("and", (pcp), val)
224#define this_cpu_or_8(pcp, val) percpu_to_op("or", (pcp), val) 279#define this_cpu_or_8(pcp, val) percpu_to_op("or", (pcp), val)
225#define this_cpu_xor_8(pcp, val) percpu_to_op("xor", (pcp), val) 280#define this_cpu_xor_8(pcp, val) percpu_to_op("xor", (pcp), val)
226 281
227#define irqsafe_cpu_add_8(pcp, val) percpu_to_op("add", (pcp), val) 282#define irqsafe_cpu_add_8(pcp, val) percpu_add_op((pcp), val)
228#define irqsafe_cpu_and_8(pcp, val) percpu_to_op("and", (pcp), val) 283#define irqsafe_cpu_and_8(pcp, val) percpu_to_op("and", (pcp), val)
229#define irqsafe_cpu_or_8(pcp, val) percpu_to_op("or", (pcp), val) 284#define irqsafe_cpu_or_8(pcp, val) percpu_to_op("or", (pcp), val)
230#define irqsafe_cpu_xor_8(pcp, val) percpu_to_op("xor", (pcp), val) 285#define irqsafe_cpu_xor_8(pcp, val) percpu_to_op("xor", (pcp), val)
@@ -236,7 +291,7 @@ do { \
236({ \ 291({ \
237 int old__; \ 292 int old__; \
238 asm volatile("btr %2,"__percpu_arg(1)"\n\tsbbl %0,%0" \ 293 asm volatile("btr %2,"__percpu_arg(1)"\n\tsbbl %0,%0" \
239 : "=r" (old__), "+m" (per_cpu__##var) \ 294 : "=r" (old__), "+m" (var) \
240 : "dIr" (bit)); \ 295 : "dIr" (bit)); \
241 old__; \ 296 old__; \
242}) 297})
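The point of percpu_add_op() is visible in the generated code: a constant ±1 collapses to a single inc/dec on the segment-based address, while anything else stays an add. A small sketch with a hypothetical counter:

	#include <linux/percpu.h>

	DEFINE_PER_CPU(int, nr_foo_events);	/* illustrative counter */

	static void foo_inc(void)
	{
		percpu_add(nr_foo_events, 1);	/* emits: incl %gs:nr_foo_events */
	}

	static void foo_account(int delta)
	{
		percpu_add(nr_foo_events, delta); /* emits: addl %reg, %gs:nr_foo_events */
	}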
diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
index a28668396508..47339a1ac7b6 100644
--- a/arch/x86/include/asm/pgtable_32.h
+++ b/arch/x86/include/asm/pgtable_32.h
@@ -54,10 +54,10 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
54 in_irq() ? KM_IRQ_PTE : \ 54 in_irq() ? KM_IRQ_PTE : \
55 KM_PTE0) 55 KM_PTE0)
56#define pte_offset_map(dir, address) \ 56#define pte_offset_map(dir, address) \
57 ((pte_t *)kmap_atomic_pte(pmd_page(*(dir)), __KM_PTE) + \ 57 ((pte_t *)kmap_atomic(pmd_page(*(dir)), __KM_PTE) + \
58 pte_index((address))) 58 pte_index((address)))
59#define pte_offset_map_nested(dir, address) \ 59#define pte_offset_map_nested(dir, address) \
60 ((pte_t *)kmap_atomic_pte(pmd_page(*(dir)), KM_PTE1) + \ 60 ((pte_t *)kmap_atomic(pmd_page(*(dir)), KM_PTE1) + \
61 pte_index((address))) 61 pte_index((address)))
62#define pte_unmap(pte) kunmap_atomic((pte), __KM_PTE) 62#define pte_unmap(pte) kunmap_atomic((pte), __KM_PTE)
63#define pte_unmap_nested(pte) kunmap_atomic((pte), KM_PTE1) 63#define pte_unmap_nested(pte) kunmap_atomic((pte), KM_PTE1)
diff --git a/arch/x86/include/asm/proto.h b/arch/x86/include/asm/proto.h
index 4009f6534f52..6f414ed88620 100644
--- a/arch/x86/include/asm/proto.h
+++ b/arch/x86/include/asm/proto.h
@@ -23,14 +23,4 @@ extern int reboot_force;
23 23
24long do_arch_prctl(struct task_struct *task, int code, unsigned long addr); 24long do_arch_prctl(struct task_struct *task, int code, unsigned long addr);
25 25
26/*
27 * This looks more complex than it should be. But we need to
28 * get the type for the ~ right in round_down (it needs to be
29 * as wide as the result!), and we want to evaluate the macro
30 * arguments just once each.
31 */
32#define __round_mask(x,y) ((__typeof__(x))((y)-1))
33#define round_up(x,y) ((((x)-1) | __round_mask(x,y))+1)
34#define round_down(x,y) ((x) & ~__round_mask(x,y))
35
36#endif /* _ASM_X86_PROTO_H */ 26#endif /* _ASM_X86_PROTO_H */
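The rounding helpers removed here operate on power-of-two multiples; a worked example: round_up(13, 8) = ((13 - 1) | 7) + 1 = (12 | 7) + 1 = 15 + 1 = 16, and round_down(13, 8) = 13 & ~7 = 8. The __round_mask() cast keeps the ~ as wide as x, so the macros remain correct for 64-bit arguments. This diff only shows their removal from the x86 header; their generic home is not visible in this section.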
diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h
index 1fecb7e61130..38638cd2fa4c 100644
--- a/arch/x86/include/asm/svm.h
+++ b/arch/x86/include/asm/svm.h
@@ -313,7 +313,7 @@ struct __attribute__ ((__packed__)) vmcb {
313 313
314#define SVM_EXIT_ERR -1 314#define SVM_EXIT_ERR -1
315 315
316#define SVM_CR0_SELECTIVE_MASK (1 << 3 | 1) /* TS and MP */ 316#define SVM_CR0_SELECTIVE_MASK (X86_CR0_TS | X86_CR0_MP)
317 317
318#define SVM_VMLOAD ".byte 0x0f, 0x01, 0xda" 318#define SVM_VMLOAD ".byte 0x0f, 0x01, 0xda"
319#define SVM_VMRUN ".byte 0x0f, 0x01, 0xd8" 319#define SVM_VMRUN ".byte 0x0f, 0x01, 0xd8"
diff --git a/arch/x86/include/asm/system.h b/arch/x86/include/asm/system.h
index e04740f7a0bb..b8fe48ee2ed9 100644
--- a/arch/x86/include/asm/system.h
+++ b/arch/x86/include/asm/system.h
@@ -32,7 +32,7 @@ extern void show_regs_common(void);
32 "movl %P[task_canary](%[next]), %%ebx\n\t" \ 32 "movl %P[task_canary](%[next]), %%ebx\n\t" \
33 "movl %%ebx, "__percpu_arg([stack_canary])"\n\t" 33 "movl %%ebx, "__percpu_arg([stack_canary])"\n\t"
34#define __switch_canary_oparam \ 34#define __switch_canary_oparam \
35 , [stack_canary] "=m" (per_cpu_var(stack_canary.canary)) 35 , [stack_canary] "=m" (stack_canary.canary)
36#define __switch_canary_iparam \ 36#define __switch_canary_iparam \
37 , [task_canary] "i" (offsetof(struct task_struct, stack_canary)) 37 , [task_canary] "i" (offsetof(struct task_struct, stack_canary))
38#else /* CC_STACKPROTECTOR */ 38#else /* CC_STACKPROTECTOR */
@@ -114,7 +114,7 @@ do { \
114 "movq %P[task_canary](%%rsi),%%r8\n\t" \ 114 "movq %P[task_canary](%%rsi),%%r8\n\t" \
115 "movq %%r8,"__percpu_arg([gs_canary])"\n\t" 115 "movq %%r8,"__percpu_arg([gs_canary])"\n\t"
116#define __switch_canary_oparam \ 116#define __switch_canary_oparam \
117 , [gs_canary] "=m" (per_cpu_var(irq_stack_union.stack_canary)) 117 , [gs_canary] "=m" (irq_stack_union.stack_canary)
118#define __switch_canary_iparam \ 118#define __switch_canary_iparam \
119 , [task_canary] "i" (offsetof(struct task_struct, stack_canary)) 119 , [task_canary] "i" (offsetof(struct task_struct, stack_canary))
120#else /* CC_STACKPROTECTOR */ 120#else /* CC_STACKPROTECTOR */
@@ -133,7 +133,7 @@ do { \
133 __switch_canary \ 133 __switch_canary \
134 "movq %P[thread_info](%%rsi),%%r8\n\t" \ 134 "movq %P[thread_info](%%rsi),%%r8\n\t" \
135 "movq %%rax,%%rdi\n\t" \ 135 "movq %%rax,%%rdi\n\t" \
136 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \ 136 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
137 "jnz ret_from_fork\n\t" \ 137 "jnz ret_from_fork\n\t" \
138 RESTORE_CONTEXT \ 138 RESTORE_CONTEXT \
139 : "=a" (last) \ 139 : "=a" (last) \
@@ -143,7 +143,7 @@ do { \
143 [ti_flags] "i" (offsetof(struct thread_info, flags)), \ 143 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
144 [_tif_fork] "i" (_TIF_FORK), \ 144 [_tif_fork] "i" (_TIF_FORK), \
145 [thread_info] "i" (offsetof(struct task_struct, stack)), \ 145 [thread_info] "i" (offsetof(struct task_struct, stack)), \
146 [current_task] "m" (per_cpu_var(current_task)) \ 146 [current_task] "m" (current_task) \
147 __switch_canary_iparam \ 147 __switch_canary_iparam \
148 : "memory", "cc" __EXTRA_CLOBBER) 148 : "memory", "cc" __EXTRA_CLOBBER)
149#endif 149#endif
diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h
index 2b4945419a84..fb9a080740ec 100644
--- a/arch/x86/include/asm/vmx.h
+++ b/arch/x86/include/asm/vmx.h
@@ -53,6 +53,7 @@
53 */ 53 */
54#define SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES 0x00000001 54#define SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES 0x00000001
55#define SECONDARY_EXEC_ENABLE_EPT 0x00000002 55#define SECONDARY_EXEC_ENABLE_EPT 0x00000002
56#define SECONDARY_EXEC_RDTSCP 0x00000008
56#define SECONDARY_EXEC_ENABLE_VPID 0x00000020 57#define SECONDARY_EXEC_ENABLE_VPID 0x00000020
57#define SECONDARY_EXEC_WBINVD_EXITING 0x00000040 58#define SECONDARY_EXEC_WBINVD_EXITING 0x00000040
58#define SECONDARY_EXEC_UNRESTRICTED_GUEST 0x00000080 59#define SECONDARY_EXEC_UNRESTRICTED_GUEST 0x00000080
@@ -251,6 +252,7 @@ enum vmcs_field {
251#define EXIT_REASON_MSR_READ 31 252#define EXIT_REASON_MSR_READ 31
252#define EXIT_REASON_MSR_WRITE 32 253#define EXIT_REASON_MSR_WRITE 32
253#define EXIT_REASON_MWAIT_INSTRUCTION 36 254#define EXIT_REASON_MWAIT_INSTRUCTION 36
255#define EXIT_REASON_MONITOR_INSTRUCTION 39
254#define EXIT_REASON_PAUSE_INSTRUCTION 40 256#define EXIT_REASON_PAUSE_INSTRUCTION 40
255#define EXIT_REASON_MCE_DURING_VMENTRY 41 257#define EXIT_REASON_MCE_DURING_VMENTRY 41
256#define EXIT_REASON_TPR_BELOW_THRESHOLD 43 258#define EXIT_REASON_TPR_BELOW_THRESHOLD 43
@@ -362,6 +364,7 @@ enum vmcs_field {
362#define VMX_EPTP_UC_BIT (1ull << 8) 364#define VMX_EPTP_UC_BIT (1ull << 8)
363#define VMX_EPTP_WB_BIT (1ull << 14) 365#define VMX_EPTP_WB_BIT (1ull << 14)
364#define VMX_EPT_2MB_PAGE_BIT (1ull << 16) 366#define VMX_EPT_2MB_PAGE_BIT (1ull << 16)
367#define VMX_EPT_1GB_PAGE_BIT (1ull << 17)
365#define VMX_EPT_EXTENT_INDIVIDUAL_BIT (1ull << 24) 368#define VMX_EPT_EXTENT_INDIVIDUAL_BIT (1ull << 24)
366#define VMX_EPT_EXTENT_CONTEXT_BIT (1ull << 25) 369#define VMX_EPT_EXTENT_CONTEXT_BIT (1ull << 25)
367#define VMX_EPT_EXTENT_GLOBAL_BIT (1ull << 26) 370#define VMX_EPT_EXTENT_GLOBAL_BIT (1ull << 26)
@@ -374,7 +377,7 @@ enum vmcs_field {
374#define VMX_EPT_READABLE_MASK 0x1ull 377#define VMX_EPT_READABLE_MASK 0x1ull
375#define VMX_EPT_WRITABLE_MASK 0x2ull 378#define VMX_EPT_WRITABLE_MASK 0x2ull
376#define VMX_EPT_EXECUTABLE_MASK 0x4ull 379#define VMX_EPT_EXECUTABLE_MASK 0x4ull
377#define VMX_EPT_IGMT_BIT (1ull << 6) 380#define VMX_EPT_IPAT_BIT (1ull << 6)
378 381
379#define VMX_EPT_IDENTITY_PAGETABLE_ADDR 0xfffbc000ul 382#define VMX_EPT_IDENTITY_PAGETABLE_ADDR 0xfffbc000ul
380 383
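
SECONDARY_EXEC_RDTSCP adds a secondary processor-based control for guest RDTSCP support. A hedged sketch of probing such a control, assuming the usual MSR_IA32_VMX_PROCBASED_CTLS2 layout with the allowed-1 settings in the high word; cpu_has_vmx_rdtscp_demo is an invented name, not part of the patch:

static inline bool cpu_has_vmx_rdtscp_demo(void)
{
	u32 low, high;

	rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2, low, high);
	return high & SECONDARY_EXEC_RDTSCP;	/* allowed-1 bit present? */
}
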
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index f95703098f8d..738fcb60e708 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -447,6 +447,12 @@ void __init acpi_pic_sci_set_trigger(unsigned int irq, u16 trigger)
447int acpi_gsi_to_irq(u32 gsi, unsigned int *irq) 447int acpi_gsi_to_irq(u32 gsi, unsigned int *irq)
448{ 448{
449 *irq = gsi; 449 *irq = gsi;
450
451#ifdef CONFIG_X86_IO_APIC
452 if (acpi_irq_model == ACPI_IRQ_MODEL_IOAPIC)
453 setup_IO_APIC_irq_extra(gsi);
454#endif
455
450 return 0; 456 return 0;
451} 457}
452 458
@@ -474,7 +480,8 @@ int acpi_register_gsi(struct device *dev, u32 gsi, int trigger, int polarity)
474 plat_gsi = mp_register_gsi(dev, gsi, trigger, polarity); 480 plat_gsi = mp_register_gsi(dev, gsi, trigger, polarity);
475 } 481 }
476#endif 482#endif
477 acpi_gsi_to_irq(plat_gsi, &irq); 483 irq = plat_gsi;
484
478 return irq; 485 return irq;
479} 486}
480 487
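
With the change above, acpi_gsi_to_irq() also programs extra IO-APIC pins for GSIs that never pass through acpi_register_gsi(). A hedged usage sketch; demo_handler and demo_attach are illustrative names, not part of the patch:

static irqreturn_t demo_handler(int irq, void *dev)
{
	return IRQ_HANDLED;
}

static int demo_attach(void *dev, u32 gsi)
{
	unsigned int irq;

	if (acpi_gsi_to_irq(gsi, &irq))
		return -ENXIO;
	return request_irq(irq, demo_handler, IRQF_SHARED, "demo", dev);
}
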
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index e6ea0342c8f8..3a4bf35c179b 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -7,6 +7,7 @@
7#include <linux/mm.h> 7#include <linux/mm.h>
8#include <linux/vmalloc.h> 8#include <linux/vmalloc.h>
9#include <linux/memory.h> 9#include <linux/memory.h>
10#include <linux/stop_machine.h>
10#include <asm/alternative.h> 11#include <asm/alternative.h>
11#include <asm/sections.h> 12#include <asm/sections.h>
12#include <asm/pgtable.h> 13#include <asm/pgtable.h>
@@ -572,3 +573,62 @@ void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
572 local_irq_restore(flags); 573 local_irq_restore(flags);
573 return addr; 574 return addr;
574} 575}
576
577/*
578 * Cross-modifying kernel text with stop_machine().
 579 * This code originally comes from the immediate values work.
580 */
581static atomic_t stop_machine_first;
582static int wrote_text;
583
584struct text_poke_params {
585 void *addr;
586 const void *opcode;
587 size_t len;
588};
589
590static int __kprobes stop_machine_text_poke(void *data)
591{
592 struct text_poke_params *tpp = data;
593
594 if (atomic_dec_and_test(&stop_machine_first)) {
595 text_poke(tpp->addr, tpp->opcode, tpp->len);
596 smp_wmb(); /* Make sure other cpus see that this has run */
597 wrote_text = 1;
598 } else {
599 while (!wrote_text)
600 cpu_relax();
601 smp_mb(); /* Load wrote_text before following execution */
602 }
603
604 flush_icache_range((unsigned long)tpp->addr,
605 (unsigned long)tpp->addr + tpp->len);
606 return 0;
607}
608
609/**
610 * text_poke_smp - Update instructions on a live kernel on SMP
611 * @addr: address to modify
612 * @opcode: source of the copy
613 * @len: length to copy
614 *
 615 * Modify a multi-byte instruction by using stop_machine() on SMP. This allows
 616 * users to poke/set multi-byte text on SMP. Only non-NMI/MCE code may be
 617 * modified this way, since stop_machine() does _not_ protect code against
 618 * NMI and MCE.
619 *
620 * Note: Must be called under get_online_cpus() and text_mutex.
621 */
622void *__kprobes text_poke_smp(void *addr, const void *opcode, size_t len)
623{
624 struct text_poke_params tpp;
625
626 tpp.addr = addr;
627 tpp.opcode = opcode;
628 tpp.len = len;
629 atomic_set(&stop_machine_first, 1);
630 wrote_text = 0;
631 stop_machine(stop_machine_text_poke, (void *)&tpp, NULL);
632 return addr;
633}
634
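
A hedged usage sketch for the new helper, not part of the patch: per the kernel-doc above, callers serialize against other code patchers with text_mutex and pin the CPU set with get_online_cpus(). demo_patch_site and the 5-byte length are illustrative:

static void demo_patch_site(void *site, const void *newinsn)
{
	get_online_cpus();
	mutex_lock(&text_mutex);
	text_poke_smp(site, newinsn, 5);	/* e.g. 5-byte nop -> jmp rel32 */
	mutex_unlock(&text_mutex);
	put_online_cpus();
}
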
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 6bdd2c7ead75..14862f11cc4a 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -73,8 +73,8 @@
73 */ 73 */
74int sis_apic_bug = -1; 74int sis_apic_bug = -1;
75 75
76static DEFINE_SPINLOCK(ioapic_lock); 76static DEFINE_RAW_SPINLOCK(ioapic_lock);
77static DEFINE_SPINLOCK(vector_lock); 77static DEFINE_RAW_SPINLOCK(vector_lock);
78 78
79/* 79/*
80 * # of IRQ routing registers 80 * # of IRQ routing registers
@@ -94,8 +94,6 @@ struct mpc_intsrc mp_irqs[MAX_IRQ_SOURCES];
94/* # of MP IRQ source entries */ 94/* # of MP IRQ source entries */
95int mp_irq_entries; 95int mp_irq_entries;
96 96
97/* Number of legacy interrupts */
98static int nr_legacy_irqs __read_mostly = NR_IRQS_LEGACY;
99/* GSI interrupts */ 97/* GSI interrupts */
100static int nr_irqs_gsi = NR_IRQS_LEGACY; 98static int nr_irqs_gsi = NR_IRQS_LEGACY;
101 99
@@ -140,27 +138,10 @@ static struct irq_pin_list *get_one_free_irq_2_pin(int node)
140 138
141/* irq_cfg is indexed by the sum of all RTEs in all I/O APICs. */ 139/* irq_cfg is indexed by the sum of all RTEs in all I/O APICs. */
142#ifdef CONFIG_SPARSE_IRQ 140#ifdef CONFIG_SPARSE_IRQ
143static struct irq_cfg irq_cfgx[] = { 141static struct irq_cfg irq_cfgx[NR_IRQS_LEGACY];
144#else 142#else
145static struct irq_cfg irq_cfgx[NR_IRQS] = { 143static struct irq_cfg irq_cfgx[NR_IRQS];
146#endif 144#endif
147 [0] = { .vector = IRQ0_VECTOR, },
148 [1] = { .vector = IRQ1_VECTOR, },
149 [2] = { .vector = IRQ2_VECTOR, },
150 [3] = { .vector = IRQ3_VECTOR, },
151 [4] = { .vector = IRQ4_VECTOR, },
152 [5] = { .vector = IRQ5_VECTOR, },
153 [6] = { .vector = IRQ6_VECTOR, },
154 [7] = { .vector = IRQ7_VECTOR, },
155 [8] = { .vector = IRQ8_VECTOR, },
156 [9] = { .vector = IRQ9_VECTOR, },
157 [10] = { .vector = IRQ10_VECTOR, },
158 [11] = { .vector = IRQ11_VECTOR, },
159 [12] = { .vector = IRQ12_VECTOR, },
160 [13] = { .vector = IRQ13_VECTOR, },
161 [14] = { .vector = IRQ14_VECTOR, },
162 [15] = { .vector = IRQ15_VECTOR, },
163};
164 145
165void __init io_apic_disable_legacy(void) 146void __init io_apic_disable_legacy(void)
166{ 147{
@@ -185,8 +166,14 @@ int __init arch_early_irq_init(void)
185 desc->chip_data = &cfg[i]; 166 desc->chip_data = &cfg[i];
186 zalloc_cpumask_var_node(&cfg[i].domain, GFP_NOWAIT, node); 167 zalloc_cpumask_var_node(&cfg[i].domain, GFP_NOWAIT, node);
187 zalloc_cpumask_var_node(&cfg[i].old_domain, GFP_NOWAIT, node); 168 zalloc_cpumask_var_node(&cfg[i].old_domain, GFP_NOWAIT, node);
188 if (i < nr_legacy_irqs) 169 /*
 189 cpumask_setall(cfg[i].domain); 170 * For legacy IRQs, start with assigning irq0 to irq15 to
171 * IRQ0_VECTOR to IRQ15_VECTOR on cpu 0.
172 */
173 if (i < nr_legacy_irqs) {
174 cfg[i].vector = IRQ0_VECTOR + i;
175 cpumask_set_cpu(0, cfg[i].domain);
176 }
190 } 177 }
191 178
192 return 0; 179 return 0;
@@ -406,7 +393,7 @@ static bool io_apic_level_ack_pending(struct irq_cfg *cfg)
406 struct irq_pin_list *entry; 393 struct irq_pin_list *entry;
407 unsigned long flags; 394 unsigned long flags;
408 395
409 spin_lock_irqsave(&ioapic_lock, flags); 396 raw_spin_lock_irqsave(&ioapic_lock, flags);
410 for_each_irq_pin(entry, cfg->irq_2_pin) { 397 for_each_irq_pin(entry, cfg->irq_2_pin) {
411 unsigned int reg; 398 unsigned int reg;
412 int pin; 399 int pin;
@@ -415,11 +402,11 @@ static bool io_apic_level_ack_pending(struct irq_cfg *cfg)
415 reg = io_apic_read(entry->apic, 0x10 + pin*2); 402 reg = io_apic_read(entry->apic, 0x10 + pin*2);
416 /* Is the remote IRR bit set? */ 403 /* Is the remote IRR bit set? */
417 if (reg & IO_APIC_REDIR_REMOTE_IRR) { 404 if (reg & IO_APIC_REDIR_REMOTE_IRR) {
418 spin_unlock_irqrestore(&ioapic_lock, flags); 405 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
419 return true; 406 return true;
420 } 407 }
421 } 408 }
422 spin_unlock_irqrestore(&ioapic_lock, flags); 409 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
423 410
424 return false; 411 return false;
425} 412}
@@ -433,10 +420,10 @@ static struct IO_APIC_route_entry ioapic_read_entry(int apic, int pin)
433{ 420{
434 union entry_union eu; 421 union entry_union eu;
435 unsigned long flags; 422 unsigned long flags;
436 spin_lock_irqsave(&ioapic_lock, flags); 423 raw_spin_lock_irqsave(&ioapic_lock, flags);
437 eu.w1 = io_apic_read(apic, 0x10 + 2 * pin); 424 eu.w1 = io_apic_read(apic, 0x10 + 2 * pin);
438 eu.w2 = io_apic_read(apic, 0x11 + 2 * pin); 425 eu.w2 = io_apic_read(apic, 0x11 + 2 * pin);
439 spin_unlock_irqrestore(&ioapic_lock, flags); 426 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
440 return eu.entry; 427 return eu.entry;
441} 428}
442 429
@@ -459,9 +446,9 @@ __ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
459void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e) 446void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
460{ 447{
461 unsigned long flags; 448 unsigned long flags;
462 spin_lock_irqsave(&ioapic_lock, flags); 449 raw_spin_lock_irqsave(&ioapic_lock, flags);
463 __ioapic_write_entry(apic, pin, e); 450 __ioapic_write_entry(apic, pin, e);
464 spin_unlock_irqrestore(&ioapic_lock, flags); 451 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
465} 452}
466 453
467/* 454/*
@@ -474,10 +461,10 @@ static void ioapic_mask_entry(int apic, int pin)
474 unsigned long flags; 461 unsigned long flags;
475 union entry_union eu = { .entry.mask = 1 }; 462 union entry_union eu = { .entry.mask = 1 };
476 463
477 spin_lock_irqsave(&ioapic_lock, flags); 464 raw_spin_lock_irqsave(&ioapic_lock, flags);
478 io_apic_write(apic, 0x10 + 2*pin, eu.w1); 465 io_apic_write(apic, 0x10 + 2*pin, eu.w1);
479 io_apic_write(apic, 0x11 + 2*pin, eu.w2); 466 io_apic_write(apic, 0x11 + 2*pin, eu.w2);
480 spin_unlock_irqrestore(&ioapic_lock, flags); 467 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
481} 468}
482 469
483/* 470/*
@@ -604,9 +591,9 @@ static void mask_IO_APIC_irq_desc(struct irq_desc *desc)
604 591
605 BUG_ON(!cfg); 592 BUG_ON(!cfg);
606 593
607 spin_lock_irqsave(&ioapic_lock, flags); 594 raw_spin_lock_irqsave(&ioapic_lock, flags);
608 __mask_IO_APIC_irq(cfg); 595 __mask_IO_APIC_irq(cfg);
609 spin_unlock_irqrestore(&ioapic_lock, flags); 596 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
610} 597}
611 598
612static void unmask_IO_APIC_irq_desc(struct irq_desc *desc) 599static void unmask_IO_APIC_irq_desc(struct irq_desc *desc)
@@ -614,9 +601,9 @@ static void unmask_IO_APIC_irq_desc(struct irq_desc *desc)
614 struct irq_cfg *cfg = desc->chip_data; 601 struct irq_cfg *cfg = desc->chip_data;
615 unsigned long flags; 602 unsigned long flags;
616 603
617 spin_lock_irqsave(&ioapic_lock, flags); 604 raw_spin_lock_irqsave(&ioapic_lock, flags);
618 __unmask_IO_APIC_irq(cfg); 605 __unmask_IO_APIC_irq(cfg);
619 spin_unlock_irqrestore(&ioapic_lock, flags); 606 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
620} 607}
621 608
622static void mask_IO_APIC_irq(unsigned int irq) 609static void mask_IO_APIC_irq(unsigned int irq)
@@ -1140,12 +1127,12 @@ void lock_vector_lock(void)
 1140 /* Used to ensure that the online set of cpus does not change 1127 /* Used to ensure that the online set of cpus does not change
1141 * during assign_irq_vector. 1128 * during assign_irq_vector.
1142 */ 1129 */
1143 spin_lock(&vector_lock); 1130 raw_spin_lock(&vector_lock);
1144} 1131}
1145 1132
1146void unlock_vector_lock(void) 1133void unlock_vector_lock(void)
1147{ 1134{
1148 spin_unlock(&vector_lock); 1135 raw_spin_unlock(&vector_lock);
1149} 1136}
1150 1137
1151static int 1138static int
@@ -1162,7 +1149,8 @@ __assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
1162 * Also, we've got to be careful not to trash gate 1149 * Also, we've got to be careful not to trash gate
1163 * 0x80, because int 0x80 is hm, kind of importantish. ;) 1150 * 0x80, because int 0x80 is hm, kind of importantish. ;)
1164 */ 1151 */
1165 static int current_vector = FIRST_DEVICE_VECTOR, current_offset = 0; 1152 static int current_vector = FIRST_EXTERNAL_VECTOR + VECTOR_OFFSET_START;
1153 static int current_offset = VECTOR_OFFSET_START % 8;
1166 unsigned int old_vector; 1154 unsigned int old_vector;
1167 int cpu, err; 1155 int cpu, err;
1168 cpumask_var_t tmp_mask; 1156 cpumask_var_t tmp_mask;
@@ -1198,7 +1186,7 @@ next:
1198 if (vector >= first_system_vector) { 1186 if (vector >= first_system_vector) {
1199 /* If out of vectors on large boxen, must share them. */ 1187 /* If out of vectors on large boxen, must share them. */
1200 offset = (offset + 1) % 8; 1188 offset = (offset + 1) % 8;
1201 vector = FIRST_DEVICE_VECTOR + offset; 1189 vector = FIRST_EXTERNAL_VECTOR + offset;
1202 } 1190 }
1203 if (unlikely(current_vector == vector)) 1191 if (unlikely(current_vector == vector))
1204 continue; 1192 continue;
@@ -1232,9 +1220,9 @@ int assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
1232 int err; 1220 int err;
1233 unsigned long flags; 1221 unsigned long flags;
1234 1222
1235 spin_lock_irqsave(&vector_lock, flags); 1223 raw_spin_lock_irqsave(&vector_lock, flags);
1236 err = __assign_irq_vector(irq, cfg, mask); 1224 err = __assign_irq_vector(irq, cfg, mask);
1237 spin_unlock_irqrestore(&vector_lock, flags); 1225 raw_spin_unlock_irqrestore(&vector_lock, flags);
1238 return err; 1226 return err;
1239} 1227}
1240 1228
@@ -1268,11 +1256,16 @@ static void __clear_irq_vector(int irq, struct irq_cfg *cfg)
1268void __setup_vector_irq(int cpu) 1256void __setup_vector_irq(int cpu)
1269{ 1257{
1270 /* Initialize vector_irq on a new cpu */ 1258 /* Initialize vector_irq on a new cpu */
1271 /* This function must be called with vector_lock held */
1272 int irq, vector; 1259 int irq, vector;
1273 struct irq_cfg *cfg; 1260 struct irq_cfg *cfg;
1274 struct irq_desc *desc; 1261 struct irq_desc *desc;
1275 1262
1263 /*
1264 * vector_lock will make sure that we don't run into irq vector
1265 * assignments that might be happening on another cpu in parallel,
 1266 * while we set up our initial vector-to-irq mappings.
1267 */
1268 raw_spin_lock(&vector_lock);
1276 /* Mark the inuse vectors */ 1269 /* Mark the inuse vectors */
1277 for_each_irq_desc(irq, desc) { 1270 for_each_irq_desc(irq, desc) {
1278 cfg = desc->chip_data; 1271 cfg = desc->chip_data;
@@ -1291,6 +1284,7 @@ void __setup_vector_irq(int cpu)
1291 if (!cpumask_test_cpu(cpu, cfg->domain)) 1284 if (!cpumask_test_cpu(cpu, cfg->domain))
1292 per_cpu(vector_irq, cpu)[vector] = -1; 1285 per_cpu(vector_irq, cpu)[vector] = -1;
1293 } 1286 }
1287 raw_spin_unlock(&vector_lock);
1294} 1288}
1295 1289
1296static struct irq_chip ioapic_chip; 1290static struct irq_chip ioapic_chip;
@@ -1440,6 +1434,14 @@ static void setup_IO_APIC_irq(int apic_id, int pin, unsigned int irq, struct irq
1440 1434
1441 cfg = desc->chip_data; 1435 cfg = desc->chip_data;
1442 1436
1437 /*
1438 * For legacy irqs, cfg->domain starts with cpu 0 for legacy
1439 * controllers like 8259. Now that IO-APIC can handle this irq, update
1440 * the cfg->domain.
1441 */
1442 if (irq < nr_legacy_irqs && cpumask_test_cpu(0, cfg->domain))
1443 apic->vector_allocation_domain(0, cfg->domain);
1444
1443 if (assign_irq_vector(irq, cfg, apic->target_cpus())) 1445 if (assign_irq_vector(irq, cfg, apic->target_cpus()))
1444 return; 1446 return;
1445 1447
@@ -1473,7 +1475,7 @@ static struct {
1473 1475
1474static void __init setup_IO_APIC_irqs(void) 1476static void __init setup_IO_APIC_irqs(void)
1475{ 1477{
1476 int apic_id = 0, pin, idx, irq; 1478 int apic_id, pin, idx, irq;
1477 int notcon = 0; 1479 int notcon = 0;
1478 struct irq_desc *desc; 1480 struct irq_desc *desc;
1479 struct irq_cfg *cfg; 1481 struct irq_cfg *cfg;
@@ -1481,14 +1483,7 @@ static void __init setup_IO_APIC_irqs(void)
1481 1483
1482 apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n"); 1484 apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n");
1483 1485
1484#ifdef CONFIG_ACPI 1486 for (apic_id = 0; apic_id < nr_ioapics; apic_id++)
1485 if (!acpi_disabled && acpi_ioapic) {
1486 apic_id = mp_find_ioapic(0);
1487 if (apic_id < 0)
1488 apic_id = 0;
1489 }
1490#endif
1491
1492 for (pin = 0; pin < nr_ioapic_registers[apic_id]; pin++) { 1487 for (pin = 0; pin < nr_ioapic_registers[apic_id]; pin++) {
1493 idx = find_irq_entry(apic_id, pin, mp_INT); 1488 idx = find_irq_entry(apic_id, pin, mp_INT);
1494 if (idx == -1) { 1489 if (idx == -1) {
@@ -1510,6 +1505,9 @@ static void __init setup_IO_APIC_irqs(void)
1510 1505
1511 irq = pin_2_irq(idx, apic_id, pin); 1506 irq = pin_2_irq(idx, apic_id, pin);
1512 1507
1508 if ((apic_id > 0) && (irq > 16))
1509 continue;
1510
1513 /* 1511 /*
1514 * Skip the timer IRQ if there's a quirk handler 1512 * Skip the timer IRQ if there's a quirk handler
1515 * installed and if it returns 1: 1513 * installed and if it returns 1:
@@ -1539,6 +1537,56 @@ static void __init setup_IO_APIC_irqs(void)
1539} 1537}
1540 1538
1541/* 1539/*
 1540 * for a gsi that is not on the first ioapic
 1541 * and could not be set up via acpi_register_gsi(),
 1542 * like the special sci on the IBM x3330
1543 */
1544void setup_IO_APIC_irq_extra(u32 gsi)
1545{
1546 int apic_id = 0, pin, idx, irq;
1547 int node = cpu_to_node(boot_cpu_id);
1548 struct irq_desc *desc;
1549 struct irq_cfg *cfg;
1550
1551 /*
1552 * Convert 'gsi' to 'ioapic.pin'.
1553 */
1554 apic_id = mp_find_ioapic(gsi);
1555 if (apic_id < 0)
1556 return;
1557
1558 pin = mp_find_ioapic_pin(apic_id, gsi);
1559 idx = find_irq_entry(apic_id, pin, mp_INT);
1560 if (idx == -1)
1561 return;
1562
1563 irq = pin_2_irq(idx, apic_id, pin);
1564#ifdef CONFIG_SPARSE_IRQ
1565 desc = irq_to_desc(irq);
1566 if (desc)
1567 return;
1568#endif
1569 desc = irq_to_desc_alloc_node(irq, node);
1570 if (!desc) {
1571 printk(KERN_INFO "can not get irq_desc for %d\n", irq);
1572 return;
1573 }
1574
1575 cfg = desc->chip_data;
1576 add_pin_to_irq_node(cfg, node, apic_id, pin);
1577
1578 if (test_bit(pin, mp_ioapic_routing[apic_id].pin_programmed)) {
1579 pr_debug("Pin %d-%d already programmed\n",
1580 mp_ioapics[apic_id].apicid, pin);
1581 return;
1582 }
1583 set_bit(pin, mp_ioapic_routing[apic_id].pin_programmed);
1584
1585 setup_IO_APIC_irq(apic_id, pin, irq, desc,
1586 irq_trigger(idx), irq_polarity(idx));
1587}
1588
1589/*
1542 * Set up the timer pin, possibly with the 8259A-master behind. 1590 * Set up the timer pin, possibly with the 8259A-master behind.
1543 */ 1591 */
1544static void __init setup_timer_IRQ0_pin(unsigned int apic_id, unsigned int pin, 1592static void __init setup_timer_IRQ0_pin(unsigned int apic_id, unsigned int pin,
@@ -1601,14 +1649,14 @@ __apicdebuginit(void) print_IO_APIC(void)
1601 1649
1602 for (apic = 0; apic < nr_ioapics; apic++) { 1650 for (apic = 0; apic < nr_ioapics; apic++) {
1603 1651
1604 spin_lock_irqsave(&ioapic_lock, flags); 1652 raw_spin_lock_irqsave(&ioapic_lock, flags);
1605 reg_00.raw = io_apic_read(apic, 0); 1653 reg_00.raw = io_apic_read(apic, 0);
1606 reg_01.raw = io_apic_read(apic, 1); 1654 reg_01.raw = io_apic_read(apic, 1);
1607 if (reg_01.bits.version >= 0x10) 1655 if (reg_01.bits.version >= 0x10)
1608 reg_02.raw = io_apic_read(apic, 2); 1656 reg_02.raw = io_apic_read(apic, 2);
1609 if (reg_01.bits.version >= 0x20) 1657 if (reg_01.bits.version >= 0x20)
1610 reg_03.raw = io_apic_read(apic, 3); 1658 reg_03.raw = io_apic_read(apic, 3);
1611 spin_unlock_irqrestore(&ioapic_lock, flags); 1659 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
1612 1660
1613 printk("\n"); 1661 printk("\n");
1614 printk(KERN_DEBUG "IO APIC #%d......\n", mp_ioapics[apic].apicid); 1662 printk(KERN_DEBUG "IO APIC #%d......\n", mp_ioapics[apic].apicid);
@@ -1830,7 +1878,7 @@ __apicdebuginit(void) print_PIC(void)
1830 1878
1831 printk(KERN_DEBUG "\nprinting PIC contents\n"); 1879 printk(KERN_DEBUG "\nprinting PIC contents\n");
1832 1880
1833 spin_lock_irqsave(&i8259A_lock, flags); 1881 raw_spin_lock_irqsave(&i8259A_lock, flags);
1834 1882
1835 v = inb(0xa1) << 8 | inb(0x21); 1883 v = inb(0xa1) << 8 | inb(0x21);
1836 printk(KERN_DEBUG "... PIC IMR: %04x\n", v); 1884 printk(KERN_DEBUG "... PIC IMR: %04x\n", v);
@@ -1844,7 +1892,7 @@ __apicdebuginit(void) print_PIC(void)
1844 outb(0x0a,0xa0); 1892 outb(0x0a,0xa0);
1845 outb(0x0a,0x20); 1893 outb(0x0a,0x20);
1846 1894
1847 spin_unlock_irqrestore(&i8259A_lock, flags); 1895 raw_spin_unlock_irqrestore(&i8259A_lock, flags);
1848 1896
1849 printk(KERN_DEBUG "... PIC ISR: %04x\n", v); 1897 printk(KERN_DEBUG "... PIC ISR: %04x\n", v);
1850 1898
@@ -1903,9 +1951,9 @@ void __init enable_IO_APIC(void)
1903 * The number of IO-APIC IRQ registers (== #pins): 1951 * The number of IO-APIC IRQ registers (== #pins):
1904 */ 1952 */
1905 for (apic = 0; apic < nr_ioapics; apic++) { 1953 for (apic = 0; apic < nr_ioapics; apic++) {
1906 spin_lock_irqsave(&ioapic_lock, flags); 1954 raw_spin_lock_irqsave(&ioapic_lock, flags);
1907 reg_01.raw = io_apic_read(apic, 1); 1955 reg_01.raw = io_apic_read(apic, 1);
1908 spin_unlock_irqrestore(&ioapic_lock, flags); 1956 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
1909 nr_ioapic_registers[apic] = reg_01.bits.entries+1; 1957 nr_ioapic_registers[apic] = reg_01.bits.entries+1;
1910 } 1958 }
1911 1959
@@ -2045,9 +2093,9 @@ void __init setup_ioapic_ids_from_mpc(void)
2045 for (apic_id = 0; apic_id < nr_ioapics; apic_id++) { 2093 for (apic_id = 0; apic_id < nr_ioapics; apic_id++) {
2046 2094
2047 /* Read the register 0 value */ 2095 /* Read the register 0 value */
2048 spin_lock_irqsave(&ioapic_lock, flags); 2096 raw_spin_lock_irqsave(&ioapic_lock, flags);
2049 reg_00.raw = io_apic_read(apic_id, 0); 2097 reg_00.raw = io_apic_read(apic_id, 0);
2050 spin_unlock_irqrestore(&ioapic_lock, flags); 2098 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
2051 2099
2052 old_id = mp_ioapics[apic_id].apicid; 2100 old_id = mp_ioapics[apic_id].apicid;
2053 2101
@@ -2106,16 +2154,16 @@ void __init setup_ioapic_ids_from_mpc(void)
2106 mp_ioapics[apic_id].apicid); 2154 mp_ioapics[apic_id].apicid);
2107 2155
2108 reg_00.bits.ID = mp_ioapics[apic_id].apicid; 2156 reg_00.bits.ID = mp_ioapics[apic_id].apicid;
2109 spin_lock_irqsave(&ioapic_lock, flags); 2157 raw_spin_lock_irqsave(&ioapic_lock, flags);
2110 io_apic_write(apic_id, 0, reg_00.raw); 2158 io_apic_write(apic_id, 0, reg_00.raw);
2111 spin_unlock_irqrestore(&ioapic_lock, flags); 2159 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
2112 2160
2113 /* 2161 /*
2114 * Sanity check 2162 * Sanity check
2115 */ 2163 */
2116 spin_lock_irqsave(&ioapic_lock, flags); 2164 raw_spin_lock_irqsave(&ioapic_lock, flags);
2117 reg_00.raw = io_apic_read(apic_id, 0); 2165 reg_00.raw = io_apic_read(apic_id, 0);
2118 spin_unlock_irqrestore(&ioapic_lock, flags); 2166 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
2119 if (reg_00.bits.ID != mp_ioapics[apic_id].apicid) 2167 if (reg_00.bits.ID != mp_ioapics[apic_id].apicid)
2120 printk("could not set ID!\n"); 2168 printk("could not set ID!\n");
2121 else 2169 else
@@ -2198,7 +2246,7 @@ static unsigned int startup_ioapic_irq(unsigned int irq)
2198 unsigned long flags; 2246 unsigned long flags;
2199 struct irq_cfg *cfg; 2247 struct irq_cfg *cfg;
2200 2248
2201 spin_lock_irqsave(&ioapic_lock, flags); 2249 raw_spin_lock_irqsave(&ioapic_lock, flags);
2202 if (irq < nr_legacy_irqs) { 2250 if (irq < nr_legacy_irqs) {
2203 disable_8259A_irq(irq); 2251 disable_8259A_irq(irq);
2204 if (i8259A_irq_pending(irq)) 2252 if (i8259A_irq_pending(irq))
@@ -2206,7 +2254,7 @@ static unsigned int startup_ioapic_irq(unsigned int irq)
2206 } 2254 }
2207 cfg = irq_cfg(irq); 2255 cfg = irq_cfg(irq);
2208 __unmask_IO_APIC_irq(cfg); 2256 __unmask_IO_APIC_irq(cfg);
2209 spin_unlock_irqrestore(&ioapic_lock, flags); 2257 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
2210 2258
2211 return was_pending; 2259 return was_pending;
2212} 2260}
@@ -2217,9 +2265,9 @@ static int ioapic_retrigger_irq(unsigned int irq)
2217 struct irq_cfg *cfg = irq_cfg(irq); 2265 struct irq_cfg *cfg = irq_cfg(irq);
2218 unsigned long flags; 2266 unsigned long flags;
2219 2267
2220 spin_lock_irqsave(&vector_lock, flags); 2268 raw_spin_lock_irqsave(&vector_lock, flags);
2221 apic->send_IPI_mask(cpumask_of(cpumask_first(cfg->domain)), cfg->vector); 2269 apic->send_IPI_mask(cpumask_of(cpumask_first(cfg->domain)), cfg->vector);
2222 spin_unlock_irqrestore(&vector_lock, flags); 2270 raw_spin_unlock_irqrestore(&vector_lock, flags);
2223 2271
2224 return 1; 2272 return 1;
2225} 2273}
@@ -2312,14 +2360,14 @@ set_ioapic_affinity_irq_desc(struct irq_desc *desc, const struct cpumask *mask)
2312 irq = desc->irq; 2360 irq = desc->irq;
2313 cfg = desc->chip_data; 2361 cfg = desc->chip_data;
2314 2362
2315 spin_lock_irqsave(&ioapic_lock, flags); 2363 raw_spin_lock_irqsave(&ioapic_lock, flags);
2316 ret = set_desc_affinity(desc, mask, &dest); 2364 ret = set_desc_affinity(desc, mask, &dest);
2317 if (!ret) { 2365 if (!ret) {
2318 /* Only the high 8 bits are valid. */ 2366 /* Only the high 8 bits are valid. */
2319 dest = SET_APIC_LOGICAL_ID(dest); 2367 dest = SET_APIC_LOGICAL_ID(dest);
2320 __target_IO_APIC_irq(irq, dest, cfg); 2368 __target_IO_APIC_irq(irq, dest, cfg);
2321 } 2369 }
2322 spin_unlock_irqrestore(&ioapic_lock, flags); 2370 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
2323 2371
2324 return ret; 2372 return ret;
2325} 2373}
@@ -2554,9 +2602,9 @@ static void eoi_ioapic_irq(struct irq_desc *desc)
2554 irq = desc->irq; 2602 irq = desc->irq;
2555 cfg = desc->chip_data; 2603 cfg = desc->chip_data;
2556 2604
2557 spin_lock_irqsave(&ioapic_lock, flags); 2605 raw_spin_lock_irqsave(&ioapic_lock, flags);
2558 __eoi_ioapic_irq(irq, cfg); 2606 __eoi_ioapic_irq(irq, cfg);
2559 spin_unlock_irqrestore(&ioapic_lock, flags); 2607 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
2560} 2608}
2561 2609
2562static void ack_apic_level(unsigned int irq) 2610static void ack_apic_level(unsigned int irq)
@@ -3138,13 +3186,13 @@ static int ioapic_resume(struct sys_device *dev)
3138 data = container_of(dev, struct sysfs_ioapic_data, dev); 3186 data = container_of(dev, struct sysfs_ioapic_data, dev);
3139 entry = data->entry; 3187 entry = data->entry;
3140 3188
3141 spin_lock_irqsave(&ioapic_lock, flags); 3189 raw_spin_lock_irqsave(&ioapic_lock, flags);
3142 reg_00.raw = io_apic_read(dev->id, 0); 3190 reg_00.raw = io_apic_read(dev->id, 0);
3143 if (reg_00.bits.ID != mp_ioapics[dev->id].apicid) { 3191 if (reg_00.bits.ID != mp_ioapics[dev->id].apicid) {
3144 reg_00.bits.ID = mp_ioapics[dev->id].apicid; 3192 reg_00.bits.ID = mp_ioapics[dev->id].apicid;
3145 io_apic_write(dev->id, 0, reg_00.raw); 3193 io_apic_write(dev->id, 0, reg_00.raw);
3146 } 3194 }
3147 spin_unlock_irqrestore(&ioapic_lock, flags); 3195 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
3148 for (i = 0; i < nr_ioapic_registers[dev->id]; i++) 3196 for (i = 0; i < nr_ioapic_registers[dev->id]; i++)
3149 ioapic_write_entry(dev->id, i, entry[i]); 3197 ioapic_write_entry(dev->id, i, entry[i]);
3150 3198
@@ -3207,7 +3255,7 @@ unsigned int create_irq_nr(unsigned int irq_want, int node)
3207 if (irq_want < nr_irqs_gsi) 3255 if (irq_want < nr_irqs_gsi)
3208 irq_want = nr_irqs_gsi; 3256 irq_want = nr_irqs_gsi;
3209 3257
3210 spin_lock_irqsave(&vector_lock, flags); 3258 raw_spin_lock_irqsave(&vector_lock, flags);
3211 for (new = irq_want; new < nr_irqs; new++) { 3259 for (new = irq_want; new < nr_irqs; new++) {
3212 desc_new = irq_to_desc_alloc_node(new, node); 3260 desc_new = irq_to_desc_alloc_node(new, node);
3213 if (!desc_new) { 3261 if (!desc_new) {
@@ -3226,14 +3274,11 @@ unsigned int create_irq_nr(unsigned int irq_want, int node)
3226 irq = new; 3274 irq = new;
3227 break; 3275 break;
3228 } 3276 }
3229 spin_unlock_irqrestore(&vector_lock, flags); 3277 raw_spin_unlock_irqrestore(&vector_lock, flags);
3278
3279 if (irq > 0)
3280 dynamic_irq_init_keep_chip_data(irq);
3230 3281
3231 if (irq > 0) {
3232 dynamic_irq_init(irq);
3233 /* restore it, in case dynamic_irq_init clear it */
3234 if (desc_new)
3235 desc_new->chip_data = cfg_new;
3236 }
3237 return irq; 3282 return irq;
3238} 3283}
3239 3284
@@ -3255,20 +3300,13 @@ int create_irq(void)
3255void destroy_irq(unsigned int irq) 3300void destroy_irq(unsigned int irq)
3256{ 3301{
3257 unsigned long flags; 3302 unsigned long flags;
3258 struct irq_cfg *cfg;
3259 struct irq_desc *desc;
3260 3303
3261 /* store it, in case dynamic_irq_cleanup clear it */ 3304 dynamic_irq_cleanup_keep_chip_data(irq);
3262 desc = irq_to_desc(irq);
3263 cfg = desc->chip_data;
3264 dynamic_irq_cleanup(irq);
3265 /* connect back irq_cfg */
3266 desc->chip_data = cfg;
3267 3305
3268 free_irte(irq); 3306 free_irte(irq);
3269 spin_lock_irqsave(&vector_lock, flags); 3307 raw_spin_lock_irqsave(&vector_lock, flags);
3270 __clear_irq_vector(irq, cfg); 3308 __clear_irq_vector(irq, get_irq_chip_data(irq));
3271 spin_unlock_irqrestore(&vector_lock, flags); 3309 raw_spin_unlock_irqrestore(&vector_lock, flags);
3272} 3310}
3273 3311
3274/* 3312/*
@@ -3805,9 +3843,9 @@ int __init io_apic_get_redir_entries (int ioapic)
3805 union IO_APIC_reg_01 reg_01; 3843 union IO_APIC_reg_01 reg_01;
3806 unsigned long flags; 3844 unsigned long flags;
3807 3845
3808 spin_lock_irqsave(&ioapic_lock, flags); 3846 raw_spin_lock_irqsave(&ioapic_lock, flags);
3809 reg_01.raw = io_apic_read(ioapic, 1); 3847 reg_01.raw = io_apic_read(ioapic, 1);
3810 spin_unlock_irqrestore(&ioapic_lock, flags); 3848 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
3811 3849
3812 return reg_01.bits.entries; 3850 return reg_01.bits.entries;
3813} 3851}
@@ -3969,9 +4007,9 @@ int __init io_apic_get_unique_id(int ioapic, int apic_id)
3969 if (physids_empty(apic_id_map)) 4007 if (physids_empty(apic_id_map))
3970 apic->ioapic_phys_id_map(&phys_cpu_present_map, &apic_id_map); 4008 apic->ioapic_phys_id_map(&phys_cpu_present_map, &apic_id_map);
3971 4009
3972 spin_lock_irqsave(&ioapic_lock, flags); 4010 raw_spin_lock_irqsave(&ioapic_lock, flags);
3973 reg_00.raw = io_apic_read(ioapic, 0); 4011 reg_00.raw = io_apic_read(ioapic, 0);
3974 spin_unlock_irqrestore(&ioapic_lock, flags); 4012 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
3975 4013
3976 if (apic_id >= get_physical_broadcast()) { 4014 if (apic_id >= get_physical_broadcast()) {
3977 printk(KERN_WARNING "IOAPIC[%d]: Invalid apic_id %d, trying " 4015 printk(KERN_WARNING "IOAPIC[%d]: Invalid apic_id %d, trying "
@@ -4005,10 +4043,10 @@ int __init io_apic_get_unique_id(int ioapic, int apic_id)
4005 if (reg_00.bits.ID != apic_id) { 4043 if (reg_00.bits.ID != apic_id) {
4006 reg_00.bits.ID = apic_id; 4044 reg_00.bits.ID = apic_id;
4007 4045
4008 spin_lock_irqsave(&ioapic_lock, flags); 4046 raw_spin_lock_irqsave(&ioapic_lock, flags);
4009 io_apic_write(ioapic, 0, reg_00.raw); 4047 io_apic_write(ioapic, 0, reg_00.raw);
4010 reg_00.raw = io_apic_read(ioapic, 0); 4048 reg_00.raw = io_apic_read(ioapic, 0);
4011 spin_unlock_irqrestore(&ioapic_lock, flags); 4049 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
4012 4050
4013 /* Sanity check */ 4051 /* Sanity check */
4014 if (reg_00.bits.ID != apic_id) { 4052 if (reg_00.bits.ID != apic_id) {
@@ -4029,9 +4067,9 @@ int __init io_apic_get_version(int ioapic)
4029 union IO_APIC_reg_01 reg_01; 4067 union IO_APIC_reg_01 reg_01;
4030 unsigned long flags; 4068 unsigned long flags;
4031 4069
4032 spin_lock_irqsave(&ioapic_lock, flags); 4070 raw_spin_lock_irqsave(&ioapic_lock, flags);
4033 reg_01.raw = io_apic_read(ioapic, 1); 4071 reg_01.raw = io_apic_read(ioapic, 1);
4034 spin_unlock_irqrestore(&ioapic_lock, flags); 4072 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
4035 4073
4036 return reg_01.bits.version; 4074 return reg_01.bits.version;
4037} 4075}
@@ -4063,27 +4101,23 @@ int acpi_get_override_irq(int bus_irq, int *trigger, int *polarity)
4063#ifdef CONFIG_SMP 4101#ifdef CONFIG_SMP
4064void __init setup_ioapic_dest(void) 4102void __init setup_ioapic_dest(void)
4065{ 4103{
4066 int pin, ioapic = 0, irq, irq_entry; 4104 int pin, ioapic, irq, irq_entry;
4067 struct irq_desc *desc; 4105 struct irq_desc *desc;
4068 const struct cpumask *mask; 4106 const struct cpumask *mask;
4069 4107
4070 if (skip_ioapic_setup == 1) 4108 if (skip_ioapic_setup == 1)
4071 return; 4109 return;
4072 4110
4073#ifdef CONFIG_ACPI 4111 for (ioapic = 0; ioapic < nr_ioapics; ioapic++)
4074 if (!acpi_disabled && acpi_ioapic) {
4075 ioapic = mp_find_ioapic(0);
4076 if (ioapic < 0)
4077 ioapic = 0;
4078 }
4079#endif
4080
4081 for (pin = 0; pin < nr_ioapic_registers[ioapic]; pin++) { 4112 for (pin = 0; pin < nr_ioapic_registers[ioapic]; pin++) {
4082 irq_entry = find_irq_entry(ioapic, pin, mp_INT); 4113 irq_entry = find_irq_entry(ioapic, pin, mp_INT);
4083 if (irq_entry == -1) 4114 if (irq_entry == -1)
4084 continue; 4115 continue;
4085 irq = pin_2_irq(irq_entry, ioapic, pin); 4116 irq = pin_2_irq(irq_entry, ioapic, pin);
4086 4117
4118 if ((ioapic > 0) && (irq > 16))
4119 continue;
4120
4087 desc = irq_to_desc(irq); 4121 desc = irq_to_desc(irq);
4088 4122
4089 /* 4123 /*
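
The pattern repeated throughout the io_apic.c hunks above: raw_spinlock_t keeps true spinning semantics even on preempt-rt, where a plain spinlock_t may become a sleeping lock, which the low-level IO-APIC register accesses cannot tolerate. A minimal sketch with a hypothetical demo_lock:

static DEFINE_RAW_SPINLOCK(demo_lock);

static void demo_poke_hw(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&demo_lock, flags);
	/* touch chip registers; this section must not sleep or be preempted */
	raw_spin_unlock_irqrestore(&demo_lock, flags);
}
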
diff --git a/arch/x86/kernel/apic/nmi.c b/arch/x86/kernel/apic/nmi.c
index 0159a69396cb..bd7c96b5e8d8 100644
--- a/arch/x86/kernel/apic/nmi.c
+++ b/arch/x86/kernel/apic/nmi.c
@@ -416,13 +416,13 @@ nmi_watchdog_tick(struct pt_regs *regs, unsigned reason)
416 416
417 /* We can be called before check_nmi_watchdog, hence NULL check. */ 417 /* We can be called before check_nmi_watchdog, hence NULL check. */
418 if (cpumask_test_cpu(cpu, to_cpumask(backtrace_mask))) { 418 if (cpumask_test_cpu(cpu, to_cpumask(backtrace_mask))) {
419 static DEFINE_SPINLOCK(lock); /* Serialise the printks */ 419 static DEFINE_RAW_SPINLOCK(lock); /* Serialise the printks */
420 420
421 spin_lock(&lock); 421 raw_spin_lock(&lock);
422 printk(KERN_WARNING "NMI backtrace for cpu %d\n", cpu); 422 printk(KERN_WARNING "NMI backtrace for cpu %d\n", cpu);
423 show_regs(regs); 423 show_regs(regs);
424 dump_stack(); 424 dump_stack();
425 spin_unlock(&lock); 425 raw_spin_unlock(&lock);
426 cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask)); 426 cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask));
427 427
428 rc = 1; 428 rc = 1;
@@ -438,8 +438,8 @@ nmi_watchdog_tick(struct pt_regs *regs, unsigned reason)
438 * Ayiee, looks like this CPU is stuck ... 438 * Ayiee, looks like this CPU is stuck ...
439 * wait a few IRQs (5 seconds) before doing the oops ... 439 * wait a few IRQs (5 seconds) before doing the oops ...
440 */ 440 */
441 __this_cpu_inc(per_cpu_var(alert_counter)); 441 __this_cpu_inc(alert_counter);
442 if (__this_cpu_read(per_cpu_var(alert_counter)) == 5 * nmi_hz) 442 if (__this_cpu_read(alert_counter) == 5 * nmi_hz)
443 /* 443 /*
444 * die_nmi will return ONLY if NOTIFY_STOP happens.. 444 * die_nmi will return ONLY if NOTIFY_STOP happens..
445 */ 445 */
@@ -447,7 +447,7 @@ nmi_watchdog_tick(struct pt_regs *regs, unsigned reason)
447 regs, panic_on_timeout); 447 regs, panic_on_timeout);
448 } else { 448 } else {
449 __get_cpu_var(last_irq_sum) = sum; 449 __get_cpu_var(last_irq_sum) = sum;
450 __this_cpu_write(per_cpu_var(alert_counter), 0); 450 __this_cpu_write(alert_counter, 0);
451 } 451 }
452 452
453 /* see if the nmi watchdog went off */ 453 /* see if the nmi watchdog went off */
diff --git a/arch/x86/kernel/cpu/mtrr/cleanup.c b/arch/x86/kernel/cpu/mtrr/cleanup.c
index 09b1698e0466..06130b52f012 100644
--- a/arch/x86/kernel/cpu/mtrr/cleanup.c
+++ b/arch/x86/kernel/cpu/mtrr/cleanup.c
@@ -22,10 +22,10 @@
22#include <linux/pci.h> 22#include <linux/pci.h>
23#include <linux/smp.h> 23#include <linux/smp.h>
24#include <linux/cpu.h> 24#include <linux/cpu.h>
25#include <linux/sort.h>
26#include <linux/mutex.h> 25#include <linux/mutex.h>
27#include <linux/uaccess.h> 26#include <linux/uaccess.h>
28#include <linux/kvm_para.h> 27#include <linux/kvm_para.h>
28#include <linux/range.h>
29 29
30#include <asm/processor.h> 30#include <asm/processor.h>
31#include <asm/e820.h> 31#include <asm/e820.h>
@@ -34,11 +34,6 @@
34 34
35#include "mtrr.h" 35#include "mtrr.h"
36 36
37struct res_range {
38 unsigned long start;
39 unsigned long end;
40};
41
42struct var_mtrr_range_state { 37struct var_mtrr_range_state {
43 unsigned long base_pfn; 38 unsigned long base_pfn;
44 unsigned long size_pfn; 39 unsigned long size_pfn;
@@ -56,7 +51,7 @@ struct var_mtrr_state {
56/* Should be related to MTRR_VAR_RANGES nums */ 51/* Should be related to MTRR_VAR_RANGES nums */
57#define RANGE_NUM 256 52#define RANGE_NUM 256
58 53
59static struct res_range __initdata range[RANGE_NUM]; 54static struct range __initdata range[RANGE_NUM];
60static int __initdata nr_range; 55static int __initdata nr_range;
61 56
62static struct var_mtrr_range_state __initdata range_state[RANGE_NUM]; 57static struct var_mtrr_range_state __initdata range_state[RANGE_NUM];
@@ -64,152 +59,11 @@ static struct var_mtrr_range_state __initdata range_state[RANGE_NUM];
64static int __initdata debug_print; 59static int __initdata debug_print;
65#define Dprintk(x...) do { if (debug_print) printk(KERN_DEBUG x); } while (0) 60#define Dprintk(x...) do { if (debug_print) printk(KERN_DEBUG x); } while (0)
66 61
67
68static int __init
69add_range(struct res_range *range, int nr_range,
70 unsigned long start, unsigned long end)
71{
72 /* Out of slots: */
73 if (nr_range >= RANGE_NUM)
74 return nr_range;
75
76 range[nr_range].start = start;
77 range[nr_range].end = end;
78
79 nr_range++;
80
81 return nr_range;
82}
83
84static int __init
85add_range_with_merge(struct res_range *range, int nr_range,
86 unsigned long start, unsigned long end)
87{
88 int i;
89
90 /* Try to merge it with old one: */
91 for (i = 0; i < nr_range; i++) {
92 unsigned long final_start, final_end;
93 unsigned long common_start, common_end;
94
95 if (!range[i].end)
96 continue;
97
98 common_start = max(range[i].start, start);
99 common_end = min(range[i].end, end);
100 if (common_start > common_end + 1)
101 continue;
102
103 final_start = min(range[i].start, start);
104 final_end = max(range[i].end, end);
105
106 range[i].start = final_start;
107 range[i].end = final_end;
108 return nr_range;
109 }
110
111 /* Need to add it: */
112 return add_range(range, nr_range, start, end);
113}
114
115static void __init
116subtract_range(struct res_range *range, unsigned long start, unsigned long end)
117{
118 int i, j;
119
120 for (j = 0; j < RANGE_NUM; j++) {
121 if (!range[j].end)
122 continue;
123
124 if (start <= range[j].start && end >= range[j].end) {
125 range[j].start = 0;
126 range[j].end = 0;
127 continue;
128 }
129
130 if (start <= range[j].start && end < range[j].end &&
131 range[j].start < end + 1) {
132 range[j].start = end + 1;
133 continue;
134 }
135
136
137 if (start > range[j].start && end >= range[j].end &&
138 range[j].end > start - 1) {
139 range[j].end = start - 1;
140 continue;
141 }
142
143 if (start > range[j].start && end < range[j].end) {
144 /* Find the new spare: */
145 for (i = 0; i < RANGE_NUM; i++) {
146 if (range[i].end == 0)
147 break;
148 }
149 if (i < RANGE_NUM) {
150 range[i].end = range[j].end;
151 range[i].start = end + 1;
152 } else {
153 printk(KERN_ERR "run of slot in ranges\n");
154 }
155 range[j].end = start - 1;
156 continue;
157 }
158 }
159}
160
161static int __init cmp_range(const void *x1, const void *x2)
162{
163 const struct res_range *r1 = x1;
164 const struct res_range *r2 = x2;
165 long start1, start2;
166
167 start1 = r1->start;
168 start2 = r2->start;
169
170 return start1 - start2;
171}
172
173static int __init clean_sort_range(struct res_range *range, int az)
174{
175 int i, j, k = az - 1, nr_range = 0;
176
177 for (i = 0; i < k; i++) {
178 if (range[i].end)
179 continue;
180 for (j = k; j > i; j--) {
181 if (range[j].end) {
182 k = j;
183 break;
184 }
185 }
186 if (j == i)
187 break;
188 range[i].start = range[k].start;
189 range[i].end = range[k].end;
190 range[k].start = 0;
191 range[k].end = 0;
192 k--;
193 }
194 /* count it */
195 for (i = 0; i < az; i++) {
196 if (!range[i].end) {
197 nr_range = i;
198 break;
199 }
200 }
201
202 /* sort them */
203 sort(range, nr_range, sizeof(struct res_range), cmp_range, NULL);
204
205 return nr_range;
206}
207
208#define BIOS_BUG_MSG KERN_WARNING \ 62#define BIOS_BUG_MSG KERN_WARNING \
209 "WARNING: BIOS bug: VAR MTRR %d contains strange UC entry under 1M, check with your system vendor!\n" 63 "WARNING: BIOS bug: VAR MTRR %d contains strange UC entry under 1M, check with your system vendor!\n"
210 64
211static int __init 65static int __init
212x86_get_mtrr_mem_range(struct res_range *range, int nr_range, 66x86_get_mtrr_mem_range(struct range *range, int nr_range,
213 unsigned long extra_remove_base, 67 unsigned long extra_remove_base,
214 unsigned long extra_remove_size) 68 unsigned long extra_remove_size)
215{ 69{
@@ -223,14 +77,14 @@ x86_get_mtrr_mem_range(struct res_range *range, int nr_range,
223 continue; 77 continue;
224 base = range_state[i].base_pfn; 78 base = range_state[i].base_pfn;
225 size = range_state[i].size_pfn; 79 size = range_state[i].size_pfn;
226 nr_range = add_range_with_merge(range, nr_range, base, 80 nr_range = add_range_with_merge(range, RANGE_NUM, nr_range,
227 base + size - 1); 81 base, base + size);
228 } 82 }
229 if (debug_print) { 83 if (debug_print) {
230 printk(KERN_DEBUG "After WB checking\n"); 84 printk(KERN_DEBUG "After WB checking\n");
231 for (i = 0; i < nr_range; i++) 85 for (i = 0; i < nr_range; i++)
232 printk(KERN_DEBUG "MTRR MAP PFN: %016lx - %016lx\n", 86 printk(KERN_DEBUG "MTRR MAP PFN: %016llx - %016llx\n",
233 range[i].start, range[i].end + 1); 87 range[i].start, range[i].end);
234 } 88 }
235 89
236 /* Take out UC ranges: */ 90 /* Take out UC ranges: */
@@ -252,19 +106,19 @@ x86_get_mtrr_mem_range(struct res_range *range, int nr_range,
252 size -= (1<<(20-PAGE_SHIFT)) - base; 106 size -= (1<<(20-PAGE_SHIFT)) - base;
253 base = 1<<(20-PAGE_SHIFT); 107 base = 1<<(20-PAGE_SHIFT);
254 } 108 }
255 subtract_range(range, base, base + size - 1); 109 subtract_range(range, RANGE_NUM, base, base + size);
256 } 110 }
257 if (extra_remove_size) 111 if (extra_remove_size)
258 subtract_range(range, extra_remove_base, 112 subtract_range(range, RANGE_NUM, extra_remove_base,
259 extra_remove_base + extra_remove_size - 1); 113 extra_remove_base + extra_remove_size);
260 114
261 if (debug_print) { 115 if (debug_print) {
262 printk(KERN_DEBUG "After UC checking\n"); 116 printk(KERN_DEBUG "After UC checking\n");
263 for (i = 0; i < RANGE_NUM; i++) { 117 for (i = 0; i < RANGE_NUM; i++) {
264 if (!range[i].end) 118 if (!range[i].end)
265 continue; 119 continue;
266 printk(KERN_DEBUG "MTRR MAP PFN: %016lx - %016lx\n", 120 printk(KERN_DEBUG "MTRR MAP PFN: %016llx - %016llx\n",
267 range[i].start, range[i].end + 1); 121 range[i].start, range[i].end);
268 } 122 }
269 } 123 }
270 124
@@ -273,26 +127,22 @@ x86_get_mtrr_mem_range(struct res_range *range, int nr_range,
273 if (debug_print) { 127 if (debug_print) {
274 printk(KERN_DEBUG "After sorting\n"); 128 printk(KERN_DEBUG "After sorting\n");
275 for (i = 0; i < nr_range; i++) 129 for (i = 0; i < nr_range; i++)
276 printk(KERN_DEBUG "MTRR MAP PFN: %016lx - %016lx\n", 130 printk(KERN_DEBUG "MTRR MAP PFN: %016llx - %016llx\n",
277 range[i].start, range[i].end + 1); 131 range[i].start, range[i].end);
278 } 132 }
279 133
 280 /* clear those that are not used */
281 for (i = nr_range; i < RANGE_NUM; i++)
282 memset(&range[i], 0, sizeof(range[i]));
283
284 return nr_range; 134 return nr_range;
285} 135}
286 136
287#ifdef CONFIG_MTRR_SANITIZER 137#ifdef CONFIG_MTRR_SANITIZER
288 138
289static unsigned long __init sum_ranges(struct res_range *range, int nr_range) 139static unsigned long __init sum_ranges(struct range *range, int nr_range)
290{ 140{
291 unsigned long sum = 0; 141 unsigned long sum = 0;
292 int i; 142 int i;
293 143
294 for (i = 0; i < nr_range; i++) 144 for (i = 0; i < nr_range; i++)
295 sum += range[i].end + 1 - range[i].start; 145 sum += range[i].end - range[i].start;
296 146
297 return sum; 147 return sum;
298} 148}
@@ -621,7 +471,7 @@ static int __init parse_mtrr_spare_reg(char *arg)
621early_param("mtrr_spare_reg_nr", parse_mtrr_spare_reg); 471early_param("mtrr_spare_reg_nr", parse_mtrr_spare_reg);
622 472
623static int __init 473static int __init
624x86_setup_var_mtrrs(struct res_range *range, int nr_range, 474x86_setup_var_mtrrs(struct range *range, int nr_range,
625 u64 chunk_size, u64 gran_size) 475 u64 chunk_size, u64 gran_size)
626{ 476{
627 struct var_mtrr_state var_state; 477 struct var_mtrr_state var_state;
@@ -639,7 +489,7 @@ x86_setup_var_mtrrs(struct res_range *range, int nr_range,
639 /* Write the range: */ 489 /* Write the range: */
640 for (i = 0; i < nr_range; i++) { 490 for (i = 0; i < nr_range; i++) {
641 set_var_mtrr_range(&var_state, range[i].start, 491 set_var_mtrr_range(&var_state, range[i].start,
642 range[i].end - range[i].start + 1); 492 range[i].end - range[i].start);
643 } 493 }
644 494
645 /* Write the last range: */ 495 /* Write the last range: */
@@ -742,7 +592,7 @@ mtrr_calc_range_state(u64 chunk_size, u64 gran_size,
742 unsigned long x_remove_base, 592 unsigned long x_remove_base,
743 unsigned long x_remove_size, int i) 593 unsigned long x_remove_size, int i)
744{ 594{
745 static struct res_range range_new[RANGE_NUM]; 595 static struct range range_new[RANGE_NUM];
746 unsigned long range_sums_new; 596 unsigned long range_sums_new;
747 static int nr_range_new; 597 static int nr_range_new;
748 int num_reg; 598 int num_reg;
@@ -869,10 +719,10 @@ int __init mtrr_cleanup(unsigned address_bits)
869 * [0, 1M) should always be covered by var mtrr with WB 719 * [0, 1M) should always be covered by var mtrr with WB
870 * and fixed mtrrs should take effect before var mtrr for it: 720 * and fixed mtrrs should take effect before var mtrr for it:
871 */ 721 */
872 nr_range = add_range_with_merge(range, nr_range, 0, 722 nr_range = add_range_with_merge(range, RANGE_NUM, nr_range, 0,
873 (1ULL<<(20 - PAGE_SHIFT)) - 1); 723 1ULL<<(20 - PAGE_SHIFT));
874 /* Sort the ranges: */ 724 /* Sort the ranges: */
875 sort(range, nr_range, sizeof(struct res_range), cmp_range, NULL); 725 sort_range(range, nr_range);
876 726
877 range_sums = sum_ranges(range, nr_range); 727 range_sums = sum_ranges(range, nr_range);
878 printk(KERN_INFO "total RAM covered: %ldM\n", 728 printk(KERN_INFO "total RAM covered: %ldM\n",
@@ -1089,9 +939,9 @@ int __init mtrr_trim_uncached_memory(unsigned long end_pfn)
1089 nr_range = 0; 939 nr_range = 0;
1090 if (mtrr_tom2) { 940 if (mtrr_tom2) {
1091 range[nr_range].start = (1ULL<<(32 - PAGE_SHIFT)); 941 range[nr_range].start = (1ULL<<(32 - PAGE_SHIFT));
1092 range[nr_range].end = (mtrr_tom2 >> PAGE_SHIFT) - 1; 942 range[nr_range].end = mtrr_tom2 >> PAGE_SHIFT;
1093 if (highest_pfn < range[nr_range].end + 1) 943 if (highest_pfn < range[nr_range].end)
1094 highest_pfn = range[nr_range].end + 1; 944 highest_pfn = range[nr_range].end;
1095 nr_range++; 945 nr_range++;
1096 } 946 }
1097 nr_range = x86_get_mtrr_mem_range(range, nr_range, 0, 0); 947 nr_range = x86_get_mtrr_mem_range(range, nr_range, 0, 0);
@@ -1103,15 +953,15 @@ int __init mtrr_trim_uncached_memory(unsigned long end_pfn)
1103 953
1104 /* Check the holes: */ 954 /* Check the holes: */
1105 for (i = 0; i < nr_range - 1; i++) { 955 for (i = 0; i < nr_range - 1; i++) {
1106 if (range[i].end + 1 < range[i+1].start) 956 if (range[i].end < range[i+1].start)
1107 total_trim_size += real_trim_memory(range[i].end + 1, 957 total_trim_size += real_trim_memory(range[i].end,
1108 range[i+1].start); 958 range[i+1].start);
1109 } 959 }
1110 960
1111 /* Check the top: */ 961 /* Check the top: */
1112 i = nr_range - 1; 962 i = nr_range - 1;
1113 if (range[i].end + 1 < end_pfn) 963 if (range[i].end < end_pfn)
1114 total_trim_size += real_trim_memory(range[i].end + 1, 964 total_trim_size += real_trim_memory(range[i].end,
1115 end_pfn); 965 end_pfn);
1116 966
1117 if (total_trim_size) { 967 if (total_trim_size) {
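
mtrr/cleanup.c now reuses the generic struct range helpers from <linux/range.h>, which treat end as exclusive; that is why the hunks above drop the old "+ 1"/"- 1" fixups. A hedged sketch of the calling convention, with demo_* names invented for illustration:

static struct range demo_range[RANGE_NUM] __initdata;
static int demo_nr __initdata;

static void __init demo_add(unsigned long base_pfn, unsigned long size_pfn)
{
	/* the region is [base, base + size); end is exclusive */
	demo_nr = add_range_with_merge(demo_range, RANGE_NUM, demo_nr,
				       base_pfn, base_pfn + size_pfn);
	sort_range(demo_range, demo_nr);
}
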
diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
index fe4622e8c837..79556bd9b602 100644
--- a/arch/x86/kernel/cpu/mtrr/main.c
+++ b/arch/x86/kernel/cpu/mtrr/main.c
@@ -145,6 +145,7 @@ struct set_mtrr_data {
145 145
146/** 146/**
147 * ipi_handler - Synchronisation handler. Executed by "other" CPUs. 147 * ipi_handler - Synchronisation handler. Executed by "other" CPUs.
148 * @info: pointer to mtrr configuration data
148 * 149 *
149 * Returns nothing. 150 * Returns nothing.
150 */ 151 */
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index a966b753e496..740b440fbd73 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -12,21 +12,13 @@
12#include <linux/types.h> 12#include <linux/types.h>
13#include <linux/init.h> 13#include <linux/init.h>
14#include <linux/bootmem.h> 14#include <linux/bootmem.h>
15#include <linux/ioport.h>
16#include <linux/string.h>
17#include <linux/kexec.h>
18#include <linux/module.h>
19#include <linux/mm.h>
20#include <linux/pfn.h> 15#include <linux/pfn.h>
21#include <linux/suspend.h> 16#include <linux/suspend.h>
22#include <linux/firmware-map.h> 17#include <linux/firmware-map.h>
23 18
24#include <asm/pgtable.h>
25#include <asm/page.h>
26#include <asm/e820.h> 19#include <asm/e820.h>
27#include <asm/proto.h> 20#include <asm/proto.h>
28#include <asm/setup.h> 21#include <asm/setup.h>
29#include <asm/trampoline.h>
30 22
31/* 23/*
32 * The e820 map is the map that gets modified e.g. with command line parameters 24 * The e820 map is the map that gets modified e.g. with command line parameters
@@ -730,319 +722,44 @@ core_initcall(e820_mark_nvs_memory);
730#endif 722#endif
731 723
732/* 724/*
733 * Early reserved memory areas. 725 * Find a free area with specified alignment in a specific range.
734 */
735#define MAX_EARLY_RES 32
736
737struct early_res {
738 u64 start, end;
739 char name[16];
740 char overlap_ok;
741};
742static struct early_res early_res[MAX_EARLY_RES] __initdata = {
743 { 0, PAGE_SIZE, "BIOS data page", 1 }, /* BIOS data page */
744#if defined(CONFIG_X86_32) && defined(CONFIG_X86_TRAMPOLINE)
745 /*
746 * But first pinch a few for the stack/trampoline stuff
747 * FIXME: Don't need the extra page at 4K, but need to fix
748 * trampoline before removing it. (see the GDT stuff)
749 */
750 { PAGE_SIZE, PAGE_SIZE + PAGE_SIZE, "EX TRAMPOLINE", 1 },
751#endif
752
753 {}
754};
755
756static int __init find_overlapped_early(u64 start, u64 end)
757{
758 int i;
759 struct early_res *r;
760
761 for (i = 0; i < MAX_EARLY_RES && early_res[i].end; i++) {
762 r = &early_res[i];
763 if (end > r->start && start < r->end)
764 break;
765 }
766
767 return i;
768}
769
770/*
771 * Drop the i-th range from the early reservation map,
772 * by copying any higher ranges down one over it, and
773 * clearing what had been the last slot.
774 */
775static void __init drop_range(int i)
776{
777 int j;
778
779 for (j = i + 1; j < MAX_EARLY_RES && early_res[j].end; j++)
780 ;
781
782 memmove(&early_res[i], &early_res[i + 1],
783 (j - 1 - i) * sizeof(struct early_res));
784
785 early_res[j - 1].end = 0;
786}
787
788/*
789 * Split any existing ranges that:
790 * 1) are marked 'overlap_ok', and
791 * 2) overlap with the stated range [start, end)
792 * into whatever portion (if any) of the existing range is entirely
793 * below or entirely above the stated range. Drop the portion
794 * of the existing range that overlaps with the stated range,
795 * which will allow the caller of this routine to then add that
796 * stated range without conflicting with any existing range.
797 */ 726 */
798static void __init drop_overlaps_that_are_ok(u64 start, u64 end) 727u64 __init find_e820_area(u64 start, u64 end, u64 size, u64 align)
799{ 728{
800 int i; 729 int i;
801 struct early_res *r;
802 u64 lower_start, lower_end;
803 u64 upper_start, upper_end;
804 char name[16];
805 730
806 for (i = 0; i < MAX_EARLY_RES && early_res[i].end; i++) { 731 for (i = 0; i < e820.nr_map; i++) {
807 r = &early_res[i]; 732 struct e820entry *ei = &e820.map[i];
733 u64 addr;
734 u64 ei_start, ei_last;
808 735
809 /* Continue past non-overlapping ranges */ 736 if (ei->type != E820_RAM)
810 if (end <= r->start || start >= r->end)
811 continue; 737 continue;
812 738
813 /* 739 ei_last = ei->addr + ei->size;
814 * Leave non-ok overlaps as is; let caller 740 ei_start = ei->addr;
815 * panic "Overlapping early reservations" 741 addr = find_early_area(ei_start, ei_last, start, end,
816 * when it hits this overlap. 742 size, align);
817 */
818 if (!r->overlap_ok)
819 return;
820
821 /*
822 * We have an ok overlap. We will drop it from the early
823 * reservation map, and add back in any non-overlapping
824 * portions (lower or upper) as separate, overlap_ok,
825 * non-overlapping ranges.
826 */
827
828 /* 1. Note any non-overlapping (lower or upper) ranges. */
829 strncpy(name, r->name, sizeof(name) - 1);
830
831 lower_start = lower_end = 0;
832 upper_start = upper_end = 0;
833 if (r->start < start) {
834 lower_start = r->start;
835 lower_end = start;
836 }
837 if (r->end > end) {
838 upper_start = end;
839 upper_end = r->end;
840 }
841
842 /* 2. Drop the original ok overlapping range */
843 drop_range(i);
844
845 i--; /* resume for-loop on copied down entry */
846
847 /* 3. Add back in any non-overlapping ranges. */
848 if (lower_end)
849 reserve_early_overlap_ok(lower_start, lower_end, name);
850 if (upper_end)
851 reserve_early_overlap_ok(upper_start, upper_end, name);
852 }
853}
854
855static void __init __reserve_early(u64 start, u64 end, char *name,
856 int overlap_ok)
857{
858 int i;
859 struct early_res *r;
860
861 i = find_overlapped_early(start, end);
862 if (i >= MAX_EARLY_RES)
863 panic("Too many early reservations");
864 r = &early_res[i];
865 if (r->end)
866 panic("Overlapping early reservations "
867 "%llx-%llx %s to %llx-%llx %s\n",
868 start, end - 1, name?name:"", r->start,
869 r->end - 1, r->name);
870 r->start = start;
871 r->end = end;
872 r->overlap_ok = overlap_ok;
873 if (name)
874 strncpy(r->name, name, sizeof(r->name) - 1);
875}
876
877/*
 878 * A few early reservations come here.
879 *
880 * The 'overlap_ok' in the name of this routine does -not- mean it
881 * is ok for these reservations to overlap an earlier reservation.
882 * Rather it means that it is ok for subsequent reservations to
883 * overlap this one.
884 *
885 * Use this entry point to reserve early ranges when you are doing
886 * so out of "Paranoia", reserving perhaps more memory than you need,
887 * just in case, and don't mind a subsequent overlapping reservation
888 * that is known to be needed.
889 *
890 * The drop_overlaps_that_are_ok() call here isn't really needed.
891 * It would be needed if we had two colliding 'overlap_ok'
892 * reservations, so that the second such would not panic on the
893 * overlap with the first. We don't have any such as of this
894 * writing, but might as well tolerate such if it happens in
895 * the future.
896 */
897void __init reserve_early_overlap_ok(u64 start, u64 end, char *name)
898{
899 drop_overlaps_that_are_ok(start, end);
900 __reserve_early(start, end, name, 1);
901}
902
903/*
904 * Most early reservations come here.
905 *
906 * We first have drop_overlaps_that_are_ok() drop any pre-existing
907 * 'overlap_ok' ranges, so that we can then reserve this memory
908 * range without risk of panic'ing on an overlapping overlap_ok
909 * early reservation.
910 */
911void __init reserve_early(u64 start, u64 end, char *name)
912{
913 if (start >= end)
914 return;
915
916 drop_overlaps_that_are_ok(start, end);
917 __reserve_early(start, end, name, 0);
918}
919
920void __init free_early(u64 start, u64 end)
921{
922 struct early_res *r;
923 int i;
924
925 i = find_overlapped_early(start, end);
926 r = &early_res[i];
927 if (i >= MAX_EARLY_RES || r->end != end || r->start != start)
928 panic("free_early on not reserved area: %llx-%llx!",
929 start, end - 1);
930
931 drop_range(i);
932}
933
934void __init early_res_to_bootmem(u64 start, u64 end)
935{
936 int i, count;
937 u64 final_start, final_end;
938
939 count = 0;
940 for (i = 0; i < MAX_EARLY_RES && early_res[i].end; i++)
941 count++;
942
943 printk(KERN_INFO "(%d early reservations) ==> bootmem [%010llx - %010llx]\n",
944 count, start, end);
945 for (i = 0; i < count; i++) {
946 struct early_res *r = &early_res[i];
947 printk(KERN_INFO " #%d [%010llx - %010llx] %16s", i,
948 r->start, r->end, r->name);
949 final_start = max(start, r->start);
950 final_end = min(end, r->end);
951 if (final_start >= final_end) {
952 printk(KERN_CONT "\n");
953 continue;
954 }
955 printk(KERN_CONT " ==> [%010llx - %010llx]\n",
956 final_start, final_end);
957 reserve_bootmem_generic(final_start, final_end - final_start,
958 BOOTMEM_DEFAULT);
959 }
960}
961 743
962/* Check for already reserved areas */ 744 if (addr != -1ULL)
963static inline int __init bad_addr(u64 *addrp, u64 size, u64 align) 745 return addr;
964{
965 int i;
966 u64 addr = *addrp;
967 int changed = 0;
968 struct early_res *r;
969again:
970 i = find_overlapped_early(addr, addr + size);
971 r = &early_res[i];
972 if (i < MAX_EARLY_RES && r->end) {
973 *addrp = addr = round_up(r->end, align);
974 changed = 1;
975 goto again;
976 } 746 }
977 return changed; 747 return -1ULL;
978} 748}
979 749
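A user-space model of the stepping logic in bad_addr() above, with hypothetical values: when a candidate address overlaps a reservation, it is rounded up past the reservation's end and the scan restarts.

#include <stdio.h>
#include <stdint.h>

/* round_up for power-of-two alignments, as bad_addr() uses it */
#define ROUND_UP(x, a) (((x) + (a) - 1) & ~((uint64_t)(a) - 1))

int main(void)
{
	uint64_t addr = 0x1f00;    /* candidate overlapping a reservation */
	uint64_t res_end = 0x2345; /* end of the overlapping early_res entry */
	uint64_t align = 0x1000;

	addr = ROUND_UP(res_end, align); /* skip past it, keeping alignment */
	printf("next candidate: %#llx\n", (unsigned long long)addr); /* 0x3000 */
	return 0;
}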
980/* Check for already reserved areas */ 750u64 __init find_fw_memmap_area(u64 start, u64 end, u64 size, u64 align)
981static inline int __init bad_addr_size(u64 *addrp, u64 *sizep, u64 align)
982{ 751{
983 int i; 752 return find_e820_area(start, end, size, align);
984 u64 addr = *addrp, last;
985 u64 size = *sizep;
986 int changed = 0;
987again:
988 last = addr + size;
989 for (i = 0; i < MAX_EARLY_RES && early_res[i].end; i++) {
990 struct early_res *r = &early_res[i];
991 if (last > r->start && addr < r->start) {
992 size = r->start - addr;
993 changed = 1;
994 goto again;
995 }
996 if (last > r->end && addr < r->end) {
997 addr = round_up(r->end, align);
998 size = last - addr;
999 changed = 1;
1000 goto again;
1001 }
1002 if (last <= r->end && addr >= r->start) {
1003 (*sizep)++;
1004 return 0;
1005 }
1006 }
1007 if (changed) {
1008 *addrp = addr;
1009 *sizep = size;
1010 }
1011 return changed;
1012} 753}
1013 754
1014/* 755u64 __init get_max_mapped(void)
1015 * Find a free area with specified alignment in a specific range.
1016 */
1017u64 __init find_e820_area(u64 start, u64 end, u64 size, u64 align)
1018{ 756{
1019 int i; 757 u64 end = max_pfn_mapped;
1020 758
1021 for (i = 0; i < e820.nr_map; i++) { 759 end <<= PAGE_SHIFT;
1022 struct e820entry *ei = &e820.map[i];
1023 u64 addr, last;
1024 u64 ei_last;
1025 760
1026 if (ei->type != E820_RAM) 761 return end;
1027 continue;
1028 addr = round_up(ei->addr, align);
1029 ei_last = ei->addr + ei->size;
1030 if (addr < start)
1031 addr = round_up(start, align);
1032 if (addr >= ei_last)
1033 continue;
1034 while (bad_addr(&addr, size, align) && addr+size <= ei_last)
1035 ;
1036 last = addr + size;
1037 if (last > ei_last)
1038 continue;
1039 if (last > end)
1040 continue;
1041 return addr;
1042 }
1043 return -1ULL;
1044} 762}
1045
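A hedged usage sketch for find_e820_area(); the search window, size, and failure policy below are made up for illustration:

	/* Carve 16 MB aligned to 2 MB out of RAM below 512 MB, skipping
	 * anything already recorded in early_res: */
	u64 addr = find_e820_area(0, 512ULL << 20, 16ULL << 20, 2ULL << 20);
	if (addr == -1ULL)
		panic("cannot find a free area"); /* caller picks the policy */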
1046/* 763/*
1047 * Find next free range after *start 764 * Find next free range after *start
1048 */ 765 */
@@ -1052,25 +769,19 @@ u64 __init find_e820_area_size(u64 start, u64 *sizep, u64 align)
1052 769
1053 for (i = 0; i < e820.nr_map; i++) { 770 for (i = 0; i < e820.nr_map; i++) {
1054 struct e820entry *ei = &e820.map[i]; 771 struct e820entry *ei = &e820.map[i];
1055 u64 addr, last; 772 u64 addr;
1056 u64 ei_last; 773 u64 ei_start, ei_last;
1057 774
1058 if (ei->type != E820_RAM) 775 if (ei->type != E820_RAM)
1059 continue; 776 continue;
1060 addr = round_up(ei->addr, align); 777
1061 ei_last = ei->addr + ei->size; 778 ei_last = ei->addr + ei->size;
1062 if (addr < start) 779 ei_start = ei->addr;
1063 addr = round_up(start, align); 780 addr = find_early_area_size(ei_start, ei_last, start,
1064 if (addr >= ei_last) 781 sizep, align);
1065 continue; 782
1066 *sizep = ei_last - addr; 783 if (addr != -1ULL)
1067 while (bad_addr_size(&addr, sizep, align) && 784 return addr;
1068 addr + *sizep <= ei_last)
1069 ;
1070 last = addr + *sizep;
1071 if (last > ei_last)
1072 continue;
1073 return addr;
1074 } 785 }
1075 786
1076 return -1ULL; 787 return -1ULL;
@@ -1429,6 +1140,8 @@ void __init e820_reserve_resources_late(void)
1429 end = MAX_RESOURCE_SIZE; 1140 end = MAX_RESOURCE_SIZE;
1430 if (start >= end) 1141 if (start >= end)
1431 continue; 1142 continue;
1143 printk(KERN_DEBUG "reserve RAM buffer: %016llx - %016llx ",
1144 start, end);
1432 reserve_region_with_split(&iomem_resource, start, end, 1145 reserve_region_with_split(&iomem_resource, start, end,
1433 "RAM buffer"); 1146 "RAM buffer");
1434 } 1147 }
diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
index 5051b94c9069..adedeef1dedc 100644
--- a/arch/x86/kernel/head32.c
+++ b/arch/x86/kernel/head32.c
@@ -29,6 +29,16 @@ static void __init i386_default_early_setup(void)
29 29
30void __init i386_start_kernel(void) 30void __init i386_start_kernel(void)
31{ 31{
32#ifdef CONFIG_X86_TRAMPOLINE
33 /*
34 * But first pinch a few for the stack/trampoline stuff
35 * FIXME: Don't need the extra page at 4K, but need to fix
36 * trampoline before removing it. (see the GDT stuff)
37 */
38 reserve_early_overlap_ok(PAGE_SIZE, PAGE_SIZE + PAGE_SIZE,
39 "EX TRAMPOLINE");
40#endif
41
32 reserve_early(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS"); 42 reserve_early(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
33 43
34#ifdef CONFIG_BLK_DEV_INITRD 44#ifdef CONFIG_BLK_DEV_INITRD
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index 7fd318bac59c..37c3d4b17d85 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -442,8 +442,8 @@ is386: movl $2,%ecx # set MP
442 */ 442 */
443 cmpb $0,ready 443 cmpb $0,ready
444 jne 1f 444 jne 1f
445 movl $per_cpu__gdt_page,%eax 445 movl $gdt_page,%eax
446 movl $per_cpu__stack_canary,%ecx 446 movl $stack_canary,%ecx
447 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax) 447 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
448 shrl $16, %ecx 448 shrl $16, %ecx
449 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax) 449 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
@@ -706,7 +706,7 @@ idt_descr:
706 .word 0 # 32 bit align gdt_desc.address 706 .word 0 # 32 bit align gdt_desc.address
707ENTRY(early_gdt_descr) 707ENTRY(early_gdt_descr)
708 .word GDT_ENTRIES*8-1 708 .word GDT_ENTRIES*8-1
709 .long per_cpu__gdt_page /* Overwritten for secondary CPUs */ 709 .long gdt_page /* Overwritten for secondary CPUs */
710 710
711/* 711/*
712 * The boot_gdt must mirror the equivalent in setup.S and is 712 * The boot_gdt must mirror the equivalent in setup.S and is
diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
index df89102bef80..8c93a84bb627 100644
--- a/arch/x86/kernel/i8259.c
+++ b/arch/x86/kernel/i8259.c
@@ -32,7 +32,7 @@
32 */ 32 */
33 33
34static int i8259A_auto_eoi; 34static int i8259A_auto_eoi;
35DEFINE_SPINLOCK(i8259A_lock); 35DEFINE_RAW_SPINLOCK(i8259A_lock);
36static void mask_and_ack_8259A(unsigned int); 36static void mask_and_ack_8259A(unsigned int);
37 37
38struct irq_chip i8259A_chip = { 38struct irq_chip i8259A_chip = {
@@ -68,13 +68,13 @@ void disable_8259A_irq(unsigned int irq)
68 unsigned int mask = 1 << irq; 68 unsigned int mask = 1 << irq;
69 unsigned long flags; 69 unsigned long flags;
70 70
71 spin_lock_irqsave(&i8259A_lock, flags); 71 raw_spin_lock_irqsave(&i8259A_lock, flags);
72 cached_irq_mask |= mask; 72 cached_irq_mask |= mask;
73 if (irq & 8) 73 if (irq & 8)
74 outb(cached_slave_mask, PIC_SLAVE_IMR); 74 outb(cached_slave_mask, PIC_SLAVE_IMR);
75 else 75 else
76 outb(cached_master_mask, PIC_MASTER_IMR); 76 outb(cached_master_mask, PIC_MASTER_IMR);
77 spin_unlock_irqrestore(&i8259A_lock, flags); 77 raw_spin_unlock_irqrestore(&i8259A_lock, flags);
78} 78}
79 79
80void enable_8259A_irq(unsigned int irq) 80void enable_8259A_irq(unsigned int irq)
@@ -82,13 +82,13 @@ void enable_8259A_irq(unsigned int irq)
82 unsigned int mask = ~(1 << irq); 82 unsigned int mask = ~(1 << irq);
83 unsigned long flags; 83 unsigned long flags;
84 84
85 spin_lock_irqsave(&i8259A_lock, flags); 85 raw_spin_lock_irqsave(&i8259A_lock, flags);
86 cached_irq_mask &= mask; 86 cached_irq_mask &= mask;
87 if (irq & 8) 87 if (irq & 8)
88 outb(cached_slave_mask, PIC_SLAVE_IMR); 88 outb(cached_slave_mask, PIC_SLAVE_IMR);
89 else 89 else
90 outb(cached_master_mask, PIC_MASTER_IMR); 90 outb(cached_master_mask, PIC_MASTER_IMR);
91 spin_unlock_irqrestore(&i8259A_lock, flags); 91 raw_spin_unlock_irqrestore(&i8259A_lock, flags);
92} 92}
93 93
94int i8259A_irq_pending(unsigned int irq) 94int i8259A_irq_pending(unsigned int irq)
@@ -97,12 +97,12 @@ int i8259A_irq_pending(unsigned int irq)
97 unsigned long flags; 97 unsigned long flags;
98 int ret; 98 int ret;
99 99
100 spin_lock_irqsave(&i8259A_lock, flags); 100 raw_spin_lock_irqsave(&i8259A_lock, flags);
101 if (irq < 8) 101 if (irq < 8)
102 ret = inb(PIC_MASTER_CMD) & mask; 102 ret = inb(PIC_MASTER_CMD) & mask;
103 else 103 else
104 ret = inb(PIC_SLAVE_CMD) & (mask >> 8); 104 ret = inb(PIC_SLAVE_CMD) & (mask >> 8);
105 spin_unlock_irqrestore(&i8259A_lock, flags); 105 raw_spin_unlock_irqrestore(&i8259A_lock, flags);
106 106
107 return ret; 107 return ret;
108} 108}
@@ -150,7 +150,7 @@ static void mask_and_ack_8259A(unsigned int irq)
150 unsigned int irqmask = 1 << irq; 150 unsigned int irqmask = 1 << irq;
151 unsigned long flags; 151 unsigned long flags;
152 152
153 spin_lock_irqsave(&i8259A_lock, flags); 153 raw_spin_lock_irqsave(&i8259A_lock, flags);
154 /* 154 /*
155 * Lightweight spurious IRQ detection. We do not want 155 * Lightweight spurious IRQ detection. We do not want
156 * to overdo spurious IRQ handling - it's usually a sign 156 * to overdo spurious IRQ handling - it's usually a sign
@@ -183,7 +183,7 @@ handle_real_irq:
183 outb(cached_master_mask, PIC_MASTER_IMR); 183 outb(cached_master_mask, PIC_MASTER_IMR);
184 outb(0x60+irq, PIC_MASTER_CMD); /* 'Specific EOI to master */ 184 outb(0x60+irq, PIC_MASTER_CMD); /* 'Specific EOI to master */
185 } 185 }
186 spin_unlock_irqrestore(&i8259A_lock, flags); 186 raw_spin_unlock_irqrestore(&i8259A_lock, flags);
187 return; 187 return;
188 188
189spurious_8259A_irq: 189spurious_8259A_irq:
@@ -285,24 +285,24 @@ void mask_8259A(void)
285{ 285{
286 unsigned long flags; 286 unsigned long flags;
287 287
288 spin_lock_irqsave(&i8259A_lock, flags); 288 raw_spin_lock_irqsave(&i8259A_lock, flags);
289 289
290 outb(0xff, PIC_MASTER_IMR); /* mask all of 8259A-1 */ 290 outb(0xff, PIC_MASTER_IMR); /* mask all of 8259A-1 */
291 outb(0xff, PIC_SLAVE_IMR); /* mask all of 8259A-2 */ 291 outb(0xff, PIC_SLAVE_IMR); /* mask all of 8259A-2 */
292 292
293 spin_unlock_irqrestore(&i8259A_lock, flags); 293 raw_spin_unlock_irqrestore(&i8259A_lock, flags);
294} 294}
295 295
296void unmask_8259A(void) 296void unmask_8259A(void)
297{ 297{
298 unsigned long flags; 298 unsigned long flags;
299 299
300 spin_lock_irqsave(&i8259A_lock, flags); 300 raw_spin_lock_irqsave(&i8259A_lock, flags);
301 301
302 outb(cached_master_mask, PIC_MASTER_IMR); /* restore master IRQ mask */ 302 outb(cached_master_mask, PIC_MASTER_IMR); /* restore master IRQ mask */
303 outb(cached_slave_mask, PIC_SLAVE_IMR); /* restore slave IRQ mask */ 303 outb(cached_slave_mask, PIC_SLAVE_IMR); /* restore slave IRQ mask */
304 304
305 spin_unlock_irqrestore(&i8259A_lock, flags); 305 raw_spin_unlock_irqrestore(&i8259A_lock, flags);
306} 306}
307 307
308void init_8259A(int auto_eoi) 308void init_8259A(int auto_eoi)
@@ -311,7 +311,7 @@ void init_8259A(int auto_eoi)
311 311
312 i8259A_auto_eoi = auto_eoi; 312 i8259A_auto_eoi = auto_eoi;
313 313
314 spin_lock_irqsave(&i8259A_lock, flags); 314 raw_spin_lock_irqsave(&i8259A_lock, flags);
315 315
316 outb(0xff, PIC_MASTER_IMR); /* mask all of 8259A-1 */ 316 outb(0xff, PIC_MASTER_IMR); /* mask all of 8259A-1 */
317 outb(0xff, PIC_SLAVE_IMR); /* mask all of 8259A-2 */ 317 outb(0xff, PIC_SLAVE_IMR); /* mask all of 8259A-2 */
@@ -356,5 +356,5 @@ void init_8259A(int auto_eoi)
356 outb(cached_master_mask, PIC_MASTER_IMR); /* restore master IRQ mask */ 356 outb(cached_master_mask, PIC_MASTER_IMR); /* restore master IRQ mask */
357 outb(cached_slave_mask, PIC_SLAVE_IMR); /* restore slave IRQ mask */ 357 outb(cached_slave_mask, PIC_SLAVE_IMR); /* restore slave IRQ mask */
358 358
359 spin_unlock_irqrestore(&i8259A_lock, flags); 359 raw_spin_unlock_irqrestore(&i8259A_lock, flags);
360} 360}
diff --git a/arch/x86/kernel/irqinit.c b/arch/x86/kernel/irqinit.c
index d5932226614f..fce55d532631 100644
--- a/arch/x86/kernel/irqinit.c
+++ b/arch/x86/kernel/irqinit.c
@@ -84,24 +84,7 @@ static struct irqaction irq2 = {
84}; 84};
85 85
86DEFINE_PER_CPU(vector_irq_t, vector_irq) = { 86DEFINE_PER_CPU(vector_irq_t, vector_irq) = {
87 [0 ... IRQ0_VECTOR - 1] = -1, 87 [0 ... NR_VECTORS - 1] = -1,
88 [IRQ0_VECTOR] = 0,
89 [IRQ1_VECTOR] = 1,
90 [IRQ2_VECTOR] = 2,
91 [IRQ3_VECTOR] = 3,
92 [IRQ4_VECTOR] = 4,
93 [IRQ5_VECTOR] = 5,
94 [IRQ6_VECTOR] = 6,
95 [IRQ7_VECTOR] = 7,
96 [IRQ8_VECTOR] = 8,
97 [IRQ9_VECTOR] = 9,
98 [IRQ10_VECTOR] = 10,
99 [IRQ11_VECTOR] = 11,
100 [IRQ12_VECTOR] = 12,
101 [IRQ13_VECTOR] = 13,
102 [IRQ14_VECTOR] = 14,
103 [IRQ15_VECTOR] = 15,
104 [IRQ15_VECTOR + 1 ... NR_VECTORS - 1] = -1
105}; 88};
106 89
107int vector_used_by_percpu_irq(unsigned int vector) 90int vector_used_by_percpu_irq(unsigned int vector)
@@ -116,6 +99,9 @@ int vector_used_by_percpu_irq(unsigned int vector)
116 return 0; 99 return 0;
117} 100}
118 101
102/* Number of legacy interrupts */
103int nr_legacy_irqs __read_mostly = NR_IRQS_LEGACY;
104
119void __init init_ISA_irqs(void) 105void __init init_ISA_irqs(void)
120{ 106{
121 int i; 107 int i;
@@ -142,6 +128,19 @@ void __init init_ISA_irqs(void)
142 128
143void __init init_IRQ(void) 129void __init init_IRQ(void)
144{ 130{
131 int i;
132
133 /*
 134 * On CPU 0, assign IRQ0_VECTOR..IRQ15_VECTOR to IRQs 0..15.
 135 * If these IRQs are handled by a legacy interrupt controller
 136 * like the PIC, this configuration will likely be static after
 137 * boot. If they are handled by a more modern controller like the
 138 * IO-APIC, this vector space can be freed and re-used dynamically
 139 * as the IRQs migrate.
140 */
141 for (i = 0; i < nr_legacy_irqs; i++)
142 per_cpu(vector_irq, 0)[IRQ0_VECTOR + i] = i;
143
145 x86_init.irqs.intr_init(); 144 x86_init.irqs.intr_init();
146} 145}
147 146
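A user-space model of the initializer change together with the new init_IRQ() loop; IRQ0_VECTOR is taken as 0x30 here, but the real value comes from the arch headers:

#include <stdio.h>

#define NR_VECTORS  256
#define IRQ0_VECTOR 0x30 /* hypothetical; arch-defined in reality */

int main(void)
{
	int vector_irq[NR_VECTORS], i;

	for (i = 0; i < NR_VECTORS; i++) /* the new static initializer */
		vector_irq[i] = -1;
	for (i = 0; i < 16; i++)         /* nr_legacy_irqs, done in init_IRQ() */
		vector_irq[IRQ0_VECTOR + i] = i;

	printf("vector 0x31 -> IRQ %d\n", vector_irq[0x31]); /* prints IRQ 1 */
	return 0;
}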
diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
index 5de9f4a9c3fd..b43bbaebe2c0 100644
--- a/arch/x86/kernel/kprobes.c
+++ b/arch/x86/kernel/kprobes.c
@@ -49,6 +49,7 @@
49#include <linux/module.h> 49#include <linux/module.h>
50#include <linux/kdebug.h> 50#include <linux/kdebug.h>
51#include <linux/kallsyms.h> 51#include <linux/kallsyms.h>
52#include <linux/ftrace.h>
52 53
53#include <asm/cacheflush.h> 54#include <asm/cacheflush.h>
54#include <asm/desc.h> 55#include <asm/desc.h>
@@ -106,16 +107,22 @@ struct kretprobe_blackpoint kretprobe_blacklist[] = {
106}; 107};
107const int kretprobe_blacklist_size = ARRAY_SIZE(kretprobe_blacklist); 108const int kretprobe_blacklist_size = ARRAY_SIZE(kretprobe_blacklist);
108 109
109/* Insert a jump instruction at address 'from', which jumps to address 'to'.*/ 110static void __kprobes __synthesize_relative_insn(void *from, void *to, u8 op)
110static void __kprobes set_jmp_op(void *from, void *to)
111{ 111{
112 struct __arch_jmp_op { 112 struct __arch_relative_insn {
113 char op; 113 u8 op;
114 s32 raddr; 114 s32 raddr;
115 } __attribute__((packed)) * jop; 115 } __attribute__((packed)) *insn;
116 jop = (struct __arch_jmp_op *)from; 116
117 jop->raddr = (s32)((long)(to) - ((long)(from) + 5)); 117 insn = (struct __arch_relative_insn *)from;
118 jop->op = RELATIVEJUMP_INSTRUCTION; 118 insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
119 insn->op = op;
120}
121
122/* Insert a jump instruction at address 'from', which jumps to address 'to'. */
123static void __kprobes synthesize_reljump(void *from, void *to)
124{
125 __synthesize_relative_insn(from, to, RELATIVEJUMP_OPCODE);
119} 126}
120 127
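The displacement arithmetic behind __synthesize_relative_insn(), reduced to a user-space sketch with hypothetical addresses: a 5-byte jmp or call stores its target relative to the end of the instruction.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	long from = 0x1000, to = 0x1840; /* hypothetical addresses */
	int32_t raddr = (int32_t)(to - (from + 5)); /* 5 = opcode + rel32 */

	/* 0xe9 is the relative-jump opcode; raddr is stored little-endian */
	printf("jmp rel32: opcode e9, raddr %#x\n", (uint32_t)raddr);
	return 0;
}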
121/* 128/*
@@ -202,7 +209,7 @@ static int recover_probed_instruction(kprobe_opcode_t *buf, unsigned long addr)
202 /* 209 /*
203 * Basically, kp->ainsn.insn has an original instruction. 210 * Basically, kp->ainsn.insn has an original instruction.
 204 * However, a RIP-relative instruction cannot be single-stepped 211 * However, a RIP-relative instruction cannot be single-stepped
 205 * at a different place, so fix_riprel() tweaks the displacement of 212 * at a different place, so __copy_instruction() tweaks the displacement of
206 * that instruction. In that case, we can't recover the instruction 213 * that instruction. In that case, we can't recover the instruction
207 * from the kp->ainsn.insn. 214 * from the kp->ainsn.insn.
208 * 215 *
@@ -284,21 +291,37 @@ static int __kprobes is_IF_modifier(kprobe_opcode_t *insn)
284} 291}
285 292
286/* 293/*
287 * Adjust the displacement if the instruction uses the %rip-relative 294 * Copy an instruction and adjust the displacement if the instruction
288 * addressing mode. 295 * uses the %rip-relative addressing mode.
 289 * If it does, return the address of the 32-bit displacement word. 296 * If it does, return the address of the 32-bit displacement word.
290 * If not, return null. 297 * If not, return null.
291 * Only applicable to 64-bit x86. 298 * Only applicable to 64-bit x86.
292 */ 299 */
293static void __kprobes fix_riprel(struct kprobe *p) 300static int __kprobes __copy_instruction(u8 *dest, u8 *src, int recover)
294{ 301{
295#ifdef CONFIG_X86_64
296 struct insn insn; 302 struct insn insn;
297 kernel_insn_init(&insn, p->ainsn.insn); 303 int ret;
304 kprobe_opcode_t buf[MAX_INSN_SIZE];
298 305
306 kernel_insn_init(&insn, src);
307 if (recover) {
308 insn_get_opcode(&insn);
309 if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION) {
310 ret = recover_probed_instruction(buf,
311 (unsigned long)src);
312 if (ret)
313 return 0;
314 kernel_insn_init(&insn, buf);
315 }
316 }
317 insn_get_length(&insn);
318 memcpy(dest, insn.kaddr, insn.length);
319
320#ifdef CONFIG_X86_64
299 if (insn_rip_relative(&insn)) { 321 if (insn_rip_relative(&insn)) {
300 s64 newdisp; 322 s64 newdisp;
301 u8 *disp; 323 u8 *disp;
324 kernel_insn_init(&insn, dest);
302 insn_get_displacement(&insn); 325 insn_get_displacement(&insn);
303 /* 326 /*
304 * The copied instruction uses the %rip-relative addressing 327 * The copied instruction uses the %rip-relative addressing
@@ -312,20 +335,23 @@ static void __kprobes fix_riprel(struct kprobe *p)
312 * extension of the original signed 32-bit displacement would 335 * extension of the original signed 32-bit displacement would
313 * have given. 336 * have given.
314 */ 337 */
315 newdisp = (u8 *) p->addr + (s64) insn.displacement.value - 338 newdisp = (u8 *) src + (s64) insn.displacement.value -
316 (u8 *) p->ainsn.insn; 339 (u8 *) dest;
317 BUG_ON((s64) (s32) newdisp != newdisp); /* Sanity check. */ 340 BUG_ON((s64) (s32) newdisp != newdisp); /* Sanity check. */
318 disp = (u8 *) p->ainsn.insn + insn_offset_displacement(&insn); 341 disp = (u8 *) dest + insn_offset_displacement(&insn);
319 *(s32 *) disp = (s32) newdisp; 342 *(s32 *) disp = (s32) newdisp;
320 } 343 }
321#endif 344#endif
345 return insn.length;
322} 346}
323 347
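The fix-up rule in the CONFIG_X86_64 block above, reduced to its arithmetic (addresses hypothetical): the copy at dest must still reach the absolute target that the original at src referenced, so the same delta the kernel computes is applied here.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int64_t src = 0x400000, dest = 0x300000; /* hypothetical */
	int64_t disp = 0x1234;                   /* original displacement */

	int64_t target  = src + disp;        /* absolute target (simplified) */
	int64_t newdisp = src + disp - dest; /* same target, seen from dest */

	printf("target %#llx, newdisp %#llx\n",
	       (long long)target, (long long)newdisp);
	return 0;
}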
324static void __kprobes arch_copy_kprobe(struct kprobe *p) 348static void __kprobes arch_copy_kprobe(struct kprobe *p)
325{ 349{
326 memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t)); 350 /*
327 351 * Copy an instruction without recovering int3, because it will be
328 fix_riprel(p); 352 * put by another subsystem.
353 */
354 __copy_instruction(p->ainsn.insn, p->addr, 0);
329 355
330 if (can_boost(p->addr)) 356 if (can_boost(p->addr))
331 p->ainsn.boostable = 0; 357 p->ainsn.boostable = 0;
@@ -406,18 +432,6 @@ static void __kprobes restore_btf(void)
406 update_debugctlmsr(current->thread.debugctlmsr); 432 update_debugctlmsr(current->thread.debugctlmsr);
407} 433}
408 434
409static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
410{
411 clear_btf();
412 regs->flags |= X86_EFLAGS_TF;
413 regs->flags &= ~X86_EFLAGS_IF;
414 /* single step inline if the instruction is an int3 */
415 if (p->opcode == BREAKPOINT_INSTRUCTION)
416 regs->ip = (unsigned long)p->addr;
417 else
418 regs->ip = (unsigned long)p->ainsn.insn;
419}
420
421void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri, 435void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
422 struct pt_regs *regs) 436 struct pt_regs *regs)
423{ 437{
@@ -429,20 +443,50 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
429 *sara = (unsigned long) &kretprobe_trampoline; 443 *sara = (unsigned long) &kretprobe_trampoline;
430} 444}
431 445
446#ifdef CONFIG_OPTPROBES
447static int __kprobes setup_detour_execution(struct kprobe *p,
448 struct pt_regs *regs,
449 int reenter);
450#else
451#define setup_detour_execution(p, regs, reenter) (0)
452#endif
453
432static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs, 454static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs,
433 struct kprobe_ctlblk *kcb) 455 struct kprobe_ctlblk *kcb, int reenter)
434{ 456{
457 if (setup_detour_execution(p, regs, reenter))
458 return;
459
435#if !defined(CONFIG_PREEMPT) 460#if !defined(CONFIG_PREEMPT)
436 if (p->ainsn.boostable == 1 && !p->post_handler) { 461 if (p->ainsn.boostable == 1 && !p->post_handler) {
437 /* Boost up -- we can execute copied instructions directly */ 462 /* Boost up -- we can execute copied instructions directly */
438 reset_current_kprobe(); 463 if (!reenter)
464 reset_current_kprobe();
465 /*
 466 * Reentering a boosted probe doesn't reset current_kprobe,
 467 * nor set it, because a reentered boosted probe doesn't use
 468 * single-stepping.
469 */
439 regs->ip = (unsigned long)p->ainsn.insn; 470 regs->ip = (unsigned long)p->ainsn.insn;
440 preempt_enable_no_resched(); 471 preempt_enable_no_resched();
441 return; 472 return;
442 } 473 }
443#endif 474#endif
444 prepare_singlestep(p, regs); 475 if (reenter) {
445 kcb->kprobe_status = KPROBE_HIT_SS; 476 save_previous_kprobe(kcb);
477 set_current_kprobe(p, regs, kcb);
478 kcb->kprobe_status = KPROBE_REENTER;
479 } else
480 kcb->kprobe_status = KPROBE_HIT_SS;
481 /* Prepare real single stepping */
482 clear_btf();
483 regs->flags |= X86_EFLAGS_TF;
484 regs->flags &= ~X86_EFLAGS_IF;
485 /* single step inline if the instruction is an int3 */
486 if (p->opcode == BREAKPOINT_INSTRUCTION)
487 regs->ip = (unsigned long)p->addr;
488 else
489 regs->ip = (unsigned long)p->ainsn.insn;
446} 490}
447 491
448/* 492/*
@@ -456,11 +500,8 @@ static int __kprobes reenter_kprobe(struct kprobe *p, struct pt_regs *regs,
456 switch (kcb->kprobe_status) { 500 switch (kcb->kprobe_status) {
457 case KPROBE_HIT_SSDONE: 501 case KPROBE_HIT_SSDONE:
458 case KPROBE_HIT_ACTIVE: 502 case KPROBE_HIT_ACTIVE:
459 save_previous_kprobe(kcb);
460 set_current_kprobe(p, regs, kcb);
461 kprobes_inc_nmissed_count(p); 503 kprobes_inc_nmissed_count(p);
462 prepare_singlestep(p, regs); 504 setup_singlestep(p, regs, kcb, 1);
463 kcb->kprobe_status = KPROBE_REENTER;
464 break; 505 break;
465 case KPROBE_HIT_SS: 506 case KPROBE_HIT_SS:
466 /* A probe has been hit in the codepath leading up to, or just 507 /* A probe has been hit in the codepath leading up to, or just
@@ -535,13 +576,13 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
535 * more here. 576 * more here.
536 */ 577 */
537 if (!p->pre_handler || !p->pre_handler(p, regs)) 578 if (!p->pre_handler || !p->pre_handler(p, regs))
538 setup_singlestep(p, regs, kcb); 579 setup_singlestep(p, regs, kcb, 0);
539 return 1; 580 return 1;
540 } 581 }
541 } else if (kprobe_running()) { 582 } else if (kprobe_running()) {
542 p = __get_cpu_var(current_kprobe); 583 p = __get_cpu_var(current_kprobe);
543 if (p->break_handler && p->break_handler(p, regs)) { 584 if (p->break_handler && p->break_handler(p, regs)) {
544 setup_singlestep(p, regs, kcb); 585 setup_singlestep(p, regs, kcb, 0);
545 return 1; 586 return 1;
546 } 587 }
547 } /* else: not a kprobe fault; let the kernel handle it */ 588 } /* else: not a kprobe fault; let the kernel handle it */
@@ -550,6 +591,69 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
550 return 0; 591 return 0;
551} 592}
552 593
594#ifdef CONFIG_X86_64
595#define SAVE_REGS_STRING \
596 /* Skip cs, ip, orig_ax. */ \
597 " subq $24, %rsp\n" \
598 " pushq %rdi\n" \
599 " pushq %rsi\n" \
600 " pushq %rdx\n" \
601 " pushq %rcx\n" \
602 " pushq %rax\n" \
603 " pushq %r8\n" \
604 " pushq %r9\n" \
605 " pushq %r10\n" \
606 " pushq %r11\n" \
607 " pushq %rbx\n" \
608 " pushq %rbp\n" \
609 " pushq %r12\n" \
610 " pushq %r13\n" \
611 " pushq %r14\n" \
612 " pushq %r15\n"
613#define RESTORE_REGS_STRING \
614 " popq %r15\n" \
615 " popq %r14\n" \
616 " popq %r13\n" \
617 " popq %r12\n" \
618 " popq %rbp\n" \
619 " popq %rbx\n" \
620 " popq %r11\n" \
621 " popq %r10\n" \
622 " popq %r9\n" \
623 " popq %r8\n" \
624 " popq %rax\n" \
625 " popq %rcx\n" \
626 " popq %rdx\n" \
627 " popq %rsi\n" \
628 " popq %rdi\n" \
629 /* Skip orig_ax, ip, cs */ \
630 " addq $24, %rsp\n"
631#else
632#define SAVE_REGS_STRING \
633 /* Skip cs, ip, orig_ax and gs. */ \
634 " subl $16, %esp\n" \
635 " pushl %fs\n" \
636 " pushl %ds\n" \
637 " pushl %es\n" \
638 " pushl %eax\n" \
639 " pushl %ebp\n" \
640 " pushl %edi\n" \
641 " pushl %esi\n" \
642 " pushl %edx\n" \
643 " pushl %ecx\n" \
644 " pushl %ebx\n"
645#define RESTORE_REGS_STRING \
646 " popl %ebx\n" \
647 " popl %ecx\n" \
648 " popl %edx\n" \
649 " popl %esi\n" \
650 " popl %edi\n" \
651 " popl %ebp\n" \
652 " popl %eax\n" \
 653 /* Skip ds, es, fs, gs, orig_ax, and ip. Note: don't pop cs here */\
654 " addl $24, %esp\n"
655#endif
656
553/* 657/*
554 * When a retprobed function returns, this code saves registers and 658 * When a retprobed function returns, this code saves registers and
 555 * calls trampoline_handler(), which calls the kretprobe's handler. 659 * calls trampoline_handler(), which calls the kretprobe's handler.
@@ -563,65 +667,16 @@ static void __used __kprobes kretprobe_trampoline_holder(void)
563 /* We don't bother saving the ss register */ 667 /* We don't bother saving the ss register */
564 " pushq %rsp\n" 668 " pushq %rsp\n"
565 " pushfq\n" 669 " pushfq\n"
566 /* 670 SAVE_REGS_STRING
567 * Skip cs, ip, orig_ax.
568 * trampoline_handler() will plug in these values
569 */
570 " subq $24, %rsp\n"
571 " pushq %rdi\n"
572 " pushq %rsi\n"
573 " pushq %rdx\n"
574 " pushq %rcx\n"
575 " pushq %rax\n"
576 " pushq %r8\n"
577 " pushq %r9\n"
578 " pushq %r10\n"
579 " pushq %r11\n"
580 " pushq %rbx\n"
581 " pushq %rbp\n"
582 " pushq %r12\n"
583 " pushq %r13\n"
584 " pushq %r14\n"
585 " pushq %r15\n"
586 " movq %rsp, %rdi\n" 671 " movq %rsp, %rdi\n"
587 " call trampoline_handler\n" 672 " call trampoline_handler\n"
588 /* Replace saved sp with true return address. */ 673 /* Replace saved sp with true return address. */
589 " movq %rax, 152(%rsp)\n" 674 " movq %rax, 152(%rsp)\n"
590 " popq %r15\n" 675 RESTORE_REGS_STRING
591 " popq %r14\n"
592 " popq %r13\n"
593 " popq %r12\n"
594 " popq %rbp\n"
595 " popq %rbx\n"
596 " popq %r11\n"
597 " popq %r10\n"
598 " popq %r9\n"
599 " popq %r8\n"
600 " popq %rax\n"
601 " popq %rcx\n"
602 " popq %rdx\n"
603 " popq %rsi\n"
604 " popq %rdi\n"
605 /* Skip orig_ax, ip, cs */
606 " addq $24, %rsp\n"
607 " popfq\n" 676 " popfq\n"
608#else 677#else
609 " pushf\n" 678 " pushf\n"
610 /* 679 SAVE_REGS_STRING
611 * Skip cs, ip, orig_ax and gs.
612 * trampoline_handler() will plug in these values
613 */
614 " subl $16, %esp\n"
615 " pushl %fs\n"
616 " pushl %es\n"
617 " pushl %ds\n"
618 " pushl %eax\n"
619 " pushl %ebp\n"
620 " pushl %edi\n"
621 " pushl %esi\n"
622 " pushl %edx\n"
623 " pushl %ecx\n"
624 " pushl %ebx\n"
625 " movl %esp, %eax\n" 680 " movl %esp, %eax\n"
626 " call trampoline_handler\n" 681 " call trampoline_handler\n"
627 /* Move flags to cs */ 682 /* Move flags to cs */
@@ -629,15 +684,7 @@ static void __used __kprobes kretprobe_trampoline_holder(void)
629 " movl %edx, 52(%esp)\n" 684 " movl %edx, 52(%esp)\n"
630 /* Replace saved flags with true return address. */ 685 /* Replace saved flags with true return address. */
631 " movl %eax, 56(%esp)\n" 686 " movl %eax, 56(%esp)\n"
632 " popl %ebx\n" 687 RESTORE_REGS_STRING
633 " popl %ecx\n"
634 " popl %edx\n"
635 " popl %esi\n"
636 " popl %edi\n"
637 " popl %ebp\n"
638 " popl %eax\n"
639 /* Skip ds, es, fs, gs, orig_ax and ip */
640 " addl $24, %esp\n"
641 " popf\n" 688 " popf\n"
642#endif 689#endif
643 " ret\n"); 690 " ret\n");
@@ -805,8 +852,8 @@ static void __kprobes resume_execution(struct kprobe *p,
805 * These instructions can be executed directly if it 852 * These instructions can be executed directly if it
806 * jumps back to correct address. 853 * jumps back to correct address.
807 */ 854 */
808 set_jmp_op((void *)regs->ip, 855 synthesize_reljump((void *)regs->ip,
809 (void *)orig_ip + (regs->ip - copy_ip)); 856 (void *)orig_ip + (regs->ip - copy_ip));
810 p->ainsn.boostable = 1; 857 p->ainsn.boostable = 1;
811 } else { 858 } else {
812 p->ainsn.boostable = -1; 859 p->ainsn.boostable = -1;
@@ -1033,6 +1080,358 @@ int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
1033 return 0; 1080 return 0;
1034} 1081}
1035 1082
1083
1084#ifdef CONFIG_OPTPROBES
1085
1086/* Insert a call instruction at address 'from', which calls address 'to'. */
1087static void __kprobes synthesize_relcall(void *from, void *to)
1088{
1089 __synthesize_relative_insn(from, to, RELATIVECALL_OPCODE);
1090}
1091
1092/* Insert a move instruction which sets a pointer to eax/rdi (1st arg). */
1093static void __kprobes synthesize_set_arg1(kprobe_opcode_t *addr,
1094 unsigned long val)
1095{
1096#ifdef CONFIG_X86_64
1097 *addr++ = 0x48;
1098 *addr++ = 0xbf;
1099#else
1100 *addr++ = 0xb8;
1101#endif
1102 *(unsigned long *)addr = val;
1103}
1104
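For reference, the bytes emitted by synthesize_set_arg1() decode as follows; the regparm(3) note is an assumption about the 32-bit kernel's calling convention:

	/*
	 * 0x48 0xbf imm64 -> movabs $imm64, %rdi (x86-64: 1st argument)
	 * 0xb8      imm32 -> mov    $imm32, %eax (i386, regparm(3):
	 *                                         1st argument)
	 */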
1105void __kprobes kprobes_optinsn_template_holder(void)
1106{
1107 asm volatile (
1108 ".global optprobe_template_entry\n"
1109 "optprobe_template_entry: \n"
1110#ifdef CONFIG_X86_64
1111 /* We don't bother saving the ss register */
1112 " pushq %rsp\n"
1113 " pushfq\n"
1114 SAVE_REGS_STRING
1115 " movq %rsp, %rsi\n"
1116 ".global optprobe_template_val\n"
1117 "optprobe_template_val: \n"
1118 ASM_NOP5
1119 ASM_NOP5
1120 ".global optprobe_template_call\n"
1121 "optprobe_template_call: \n"
1122 ASM_NOP5
1123 /* Move flags to rsp */
1124 " movq 144(%rsp), %rdx\n"
1125 " movq %rdx, 152(%rsp)\n"
1126 RESTORE_REGS_STRING
1127 /* Skip flags entry */
1128 " addq $8, %rsp\n"
1129 " popfq\n"
1130#else /* CONFIG_X86_32 */
1131 " pushf\n"
1132 SAVE_REGS_STRING
1133 " movl %esp, %edx\n"
1134 ".global optprobe_template_val\n"
1135 "optprobe_template_val: \n"
1136 ASM_NOP5
1137 ".global optprobe_template_call\n"
1138 "optprobe_template_call: \n"
1139 ASM_NOP5
1140 RESTORE_REGS_STRING
1141 " addl $4, %esp\n" /* skip cs */
1142 " popf\n"
1143#endif
1144 ".global optprobe_template_end\n"
1145 "optprobe_template_end: \n");
1146}
1147
1148#define TMPL_MOVE_IDX \
1149 ((long)&optprobe_template_val - (long)&optprobe_template_entry)
1150#define TMPL_CALL_IDX \
1151 ((long)&optprobe_template_call - (long)&optprobe_template_entry)
1152#define TMPL_END_IDX \
1153 ((long)&optprobe_template_end - (long)&optprobe_template_entry)
1154
1155#define INT3_SIZE sizeof(kprobe_opcode_t)
1156
1157/* Optimized kprobe callback function: called from optinsn */
1158static void __kprobes optimized_callback(struct optimized_kprobe *op,
1159 struct pt_regs *regs)
1160{
1161 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
1162
1163 preempt_disable();
1164 if (kprobe_running()) {
1165 kprobes_inc_nmissed_count(&op->kp);
1166 } else {
1167 /* Save skipped registers */
1168#ifdef CONFIG_X86_64
1169 regs->cs = __KERNEL_CS;
1170#else
1171 regs->cs = __KERNEL_CS | get_kernel_rpl();
1172 regs->gs = 0;
1173#endif
1174 regs->ip = (unsigned long)op->kp.addr + INT3_SIZE;
1175 regs->orig_ax = ~0UL;
1176
1177 __get_cpu_var(current_kprobe) = &op->kp;
1178 kcb->kprobe_status = KPROBE_HIT_ACTIVE;
1179 opt_pre_handler(&op->kp, regs);
1180 __get_cpu_var(current_kprobe) = NULL;
1181 }
1182 preempt_enable_no_resched();
1183}
1184
1185static int __kprobes copy_optimized_instructions(u8 *dest, u8 *src)
1186{
1187 int len = 0, ret;
1188
1189 while (len < RELATIVEJUMP_SIZE) {
1190 ret = __copy_instruction(dest + len, src + len, 1);
1191 if (!ret || !can_boost(dest + len))
1192 return -EINVAL;
1193 len += ret;
1194 }
1195 /* Check whether the address range is reserved */
1196 if (ftrace_text_reserved(src, src + len - 1) ||
1197 alternatives_text_reserved(src, src + len - 1))
1198 return -EBUSY;
1199
1200 return len;
1201}
1202
1203/* Check whether insn is indirect jump */
1204static int __kprobes insn_is_indirect_jump(struct insn *insn)
1205{
1206 return ((insn->opcode.bytes[0] == 0xff &&
1207 (X86_MODRM_REG(insn->modrm.value) & 6) == 4) || /* Jump */
1208 insn->opcode.bytes[0] == 0xea); /* Segment based jump */
1209}
1210
1211/* Check whether insn jumps into specified address range */
1212static int insn_jump_into_range(struct insn *insn, unsigned long start, int len)
1213{
1214 unsigned long target = 0;
1215
1216 switch (insn->opcode.bytes[0]) {
1217 case 0xe0: /* loopne */
1218 case 0xe1: /* loope */
1219 case 0xe2: /* loop */
1220 case 0xe3: /* jcxz */
1221 case 0xe9: /* near relative jump */
1222 case 0xeb: /* short relative jump */
1223 break;
1224 case 0x0f:
1225 if ((insn->opcode.bytes[1] & 0xf0) == 0x80) /* jcc near */
1226 break;
1227 return 0;
1228 default:
1229 if ((insn->opcode.bytes[0] & 0xf0) == 0x70) /* jcc short */
1230 break;
1231 return 0;
1232 }
1233 target = (unsigned long)insn->next_byte + insn->immediate.value;
1234
1235 return (start <= target && target <= start + len);
1236}
1237
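A quick user-space check of the target arithmetic above, with made-up numbers: a relative jump lands at the address of the next instruction plus the signed immediate.

#include <stdio.h>

int main(void)
{
	unsigned long next_byte = 0x1005; /* address after the jump insn */
	long imm = 0x10;                  /* signed displacement */
	unsigned long start = 0x1000, len = 0x20;

	unsigned long target = next_byte + imm; /* 0x1015 */
	printf("jumps into range: %d\n",
	       start <= target && target <= start + len); /* prints 1 */
	return 0;
}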
1238/* Decode the whole function to ensure no instruction jumps into the target */
1239static int __kprobes can_optimize(unsigned long paddr)
1240{
1241 int ret;
1242 unsigned long addr, size = 0, offset = 0;
1243 struct insn insn;
1244 kprobe_opcode_t buf[MAX_INSN_SIZE];
1245 /* Dummy buffer for kallsyms_lookup() */
1246 static char __dummy_buf[KSYM_NAME_LEN];
1247
1248 /* Look up the symbol that contains addr */
1249 if (!kallsyms_lookup(paddr, &size, &offset, NULL, __dummy_buf))
1250 return 0;
1251
1252 /* Check there is enough space for a relative jump. */
1253 if (size - offset < RELATIVEJUMP_SIZE)
1254 return 0;
1255
1256 /* Decode instructions */
1257 addr = paddr - offset;
1258 while (addr < paddr - offset + size) { /* Decode until function end */
1259 if (search_exception_tables(addr))
1260 /*
1261 * Since some fixup code may jump into this function,
1262 * we can't optimize a kprobe in this function.
1263 */
1264 return 0;
1265 kernel_insn_init(&insn, (void *)addr);
1266 insn_get_opcode(&insn);
1267 if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION) {
1268 ret = recover_probed_instruction(buf, addr);
1269 if (ret)
1270 return 0;
1271 kernel_insn_init(&insn, buf);
1272 }
1273 insn_get_length(&insn);
1274 /* Recover address */
1275 insn.kaddr = (void *)addr;
1276 insn.next_byte = (void *)(addr + insn.length);
1277 /* Check that no instruction jumps into the target */
1278 if (insn_is_indirect_jump(&insn) ||
1279 insn_jump_into_range(&insn, paddr + INT3_SIZE,
1280 RELATIVE_ADDR_SIZE))
1281 return 0;
1282 addr += insn.length;
1283 }
1284
1285 return 1;
1286}
1287
1288/* Check optimized_kprobe can actually be optimized. */
1289int __kprobes arch_check_optimized_kprobe(struct optimized_kprobe *op)
1290{
1291 int i;
1292 struct kprobe *p;
1293
1294 for (i = 1; i < op->optinsn.size; i++) {
1295 p = get_kprobe(op->kp.addr + i);
1296 if (p && !kprobe_disabled(p))
1297 return -EEXIST;
1298 }
1299
1300 return 0;
1301}
1302
1303/* Check the addr is within the optimized instructions. */
1304int __kprobes arch_within_optimized_kprobe(struct optimized_kprobe *op,
1305 unsigned long addr)
1306{
1307 return ((unsigned long)op->kp.addr <= addr &&
1308 (unsigned long)op->kp.addr + op->optinsn.size > addr);
1309}
1310
1311/* Free optimized instruction slot */
1312static __kprobes
1313void __arch_remove_optimized_kprobe(struct optimized_kprobe *op, int dirty)
1314{
1315 if (op->optinsn.insn) {
1316 free_optinsn_slot(op->optinsn.insn, dirty);
1317 op->optinsn.insn = NULL;
1318 op->optinsn.size = 0;
1319 }
1320}
1321
1322void __kprobes arch_remove_optimized_kprobe(struct optimized_kprobe *op)
1323{
1324 __arch_remove_optimized_kprobe(op, 1);
1325}
1326
1327/*
1328 * Copy the instructions that the jump will replace.
1329 * Target instructions MUST be relocatable (checked inside).
1330 */
1331int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
1332{
1333 u8 *buf;
1334 int ret;
1335 long rel;
1336
1337 if (!can_optimize((unsigned long)op->kp.addr))
1338 return -EILSEQ;
1339
1340 op->optinsn.insn = get_optinsn_slot();
1341 if (!op->optinsn.insn)
1342 return -ENOMEM;
1343
1344 /*
1345 * Verify that the address gap is within the 2GB range that a
1346 * relative jump can cover.
1347 */
1348 rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
1349 if (abs(rel) > 0x7fffffff)
1350 return -ERANGE;
1351
1352 buf = (u8 *)op->optinsn.insn;
1353
1354 /* Copy instructions into the out-of-line buffer */
1355 ret = copy_optimized_instructions(buf + TMPL_END_IDX, op->kp.addr);
1356 if (ret < 0) {
1357 __arch_remove_optimized_kprobe(op, 0);
1358 return ret;
1359 }
1360 op->optinsn.size = ret;
1361
1362 /* Copy arch-dep-instance from template */
1363 memcpy(buf, &optprobe_template_entry, TMPL_END_IDX);
1364
1365 /* Set probe information */
1366 synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
1367
1368 /* Set probe function call */
1369 synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
1370
1371 /* Set returning jmp instruction at the tail of out-of-line buffer */
1372 synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
1373 (u8 *)op->kp.addr + op->optinsn.size);
1374
1375 flush_icache_range((unsigned long) buf,
1376 (unsigned long) buf + TMPL_END_IDX +
1377 op->optinsn.size + RELATIVEJUMP_SIZE);
1378 return 0;
1379}
1380
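Putting the steps of arch_prepare_optimized_kprobe() together, the out-of-line buffer ends up laid out as sketched below; exact offsets depend on the template and on the probed instructions:

	/*
	 * op->optinsn.insn:
	 *
	 *   +0                   template: save regs, set arg1 to op,
	 *                        call optimized_callback()
	 *   +TMPL_END_IDX        copied original instructions
	 *                        (op->optinsn.size bytes)
	 *   +TMPL_END_IDX+size   jmp back to op->kp.addr + op->optinsn.size
	 *                        (RELATIVEJUMP_SIZE bytes)
	 */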
1381/* Replace a breakpoint (int3) with a relative jump. */
1382int __kprobes arch_optimize_kprobe(struct optimized_kprobe *op)
1383{
1384 unsigned char jmp_code[RELATIVEJUMP_SIZE];
1385 s32 rel = (s32)((long)op->optinsn.insn -
1386 ((long)op->kp.addr + RELATIVEJUMP_SIZE));
1387
1388 /* Back up the instructions that will be replaced by the jump */
1389 memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
1390 RELATIVE_ADDR_SIZE);
1391
1392 jmp_code[0] = RELATIVEJUMP_OPCODE;
1393 *(s32 *)(&jmp_code[1]) = rel;
1394
1395 /*
1396 * text_poke_smp doesn't support modifying code that may run in
1397 * NMI/MCE context. However, since kprobes itself doesn't support
1398 * probing NMI/MCE code either, this is not a problem.
1399 */
1400 text_poke_smp(op->kp.addr, jmp_code, RELATIVEJUMP_SIZE);
1401 return 0;
1402}
1403
1404/* Replace a relative jump with a breakpoint (int3). */
1405void __kprobes arch_unoptimize_kprobe(struct optimized_kprobe *op)
1406{
1407 u8 buf[RELATIVEJUMP_SIZE];
1408
1409 /* Set int3 to first byte for kprobes */
1410 buf[0] = BREAKPOINT_INSTRUCTION;
1411 memcpy(buf + 1, op->optinsn.copied_insn, RELATIVE_ADDR_SIZE);
1412 text_poke_smp(op->kp.addr, buf, RELATIVEJUMP_SIZE);
1413}
1414
1415static int __kprobes setup_detour_execution(struct kprobe *p,
1416 struct pt_regs *regs,
1417 int reenter)
1418{
1419 struct optimized_kprobe *op;
1420
1421 if (p->flags & KPROBE_FLAG_OPTIMIZED) {
1422 /* This kprobe is really able to run optimized path. */
1423 op = container_of(p, struct optimized_kprobe, kp);
1424 /* Detour through copied instructions */
1425 regs->ip = (unsigned long)op->optinsn.insn + TMPL_END_IDX;
1426 if (!reenter)
1427 reset_current_kprobe();
1428 preempt_enable_no_resched();
1429 return 1;
1430 }
1431 return 0;
1432}
1433#endif
1434
1036int __init arch_init_kprobes(void) 1435int __init arch_init_kprobes(void)
1037{ 1436{
1038 return 0; 1437 return 0;
diff --git a/arch/x86/kernel/mmconf-fam10h_64.c b/arch/x86/kernel/mmconf-fam10h_64.c
index 712d15fdc416..71825806cd44 100644
--- a/arch/x86/kernel/mmconf-fam10h_64.c
+++ b/arch/x86/kernel/mmconf-fam10h_64.c
@@ -7,6 +7,8 @@
7#include <linux/string.h> 7#include <linux/string.h>
8#include <linux/pci.h> 8#include <linux/pci.h>
9#include <linux/dmi.h> 9#include <linux/dmi.h>
10#include <linux/range.h>
11
10#include <asm/pci-direct.h> 12#include <asm/pci-direct.h>
11#include <linux/sort.h> 13#include <linux/sort.h>
12#include <asm/io.h> 14#include <asm/io.h>
@@ -30,11 +32,6 @@ static struct pci_hostbridge_probe pci_probes[] __cpuinitdata = {
30 { 0xff, 0, PCI_VENDOR_ID_AMD, 0x1200 }, 32 { 0xff, 0, PCI_VENDOR_ID_AMD, 0x1200 },
31}; 33};
32 34
33struct range {
34 u64 start;
35 u64 end;
36};
37
38static int __cpuinit cmp_range(const void *x1, const void *x2) 35static int __cpuinit cmp_range(const void *x1, const void *x2)
39{ 36{
40 const struct range *r1 = x1; 37 const struct range *r1 = x1;
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index 1b1739d16310..1db183ed7c01 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -428,10 +428,6 @@ struct pv_mmu_ops pv_mmu_ops = {
428 .ptep_modify_prot_start = __ptep_modify_prot_start, 428 .ptep_modify_prot_start = __ptep_modify_prot_start,
429 .ptep_modify_prot_commit = __ptep_modify_prot_commit, 429 .ptep_modify_prot_commit = __ptep_modify_prot_commit,
430 430
431#ifdef CONFIG_HIGHPTE
432 .kmap_atomic_pte = kmap_atomic,
433#endif
434
435#if PAGETABLE_LEVELS >= 3 431#if PAGETABLE_LEVELS >= 3
436#ifdef CONFIG_X86_PAE 432#ifdef CONFIG_X86_PAE
437 .set_pte_atomic = native_set_pte_atomic, 433 .set_pte_atomic = native_set_pte_atomic,
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
index 75e14e21f61a..1aa966c565f9 100644
--- a/arch/x86/kernel/pci-dma.c
+++ b/arch/x86/kernel/pci-dma.c
@@ -65,7 +65,7 @@ int dma_set_mask(struct device *dev, u64 mask)
65} 65}
66EXPORT_SYMBOL(dma_set_mask); 66EXPORT_SYMBOL(dma_set_mask);
67 67
68#ifdef CONFIG_X86_64 68#if defined(CONFIG_X86_64) && !defined(CONFIG_NUMA)
69static __initdata void *dma32_bootmem_ptr; 69static __initdata void *dma32_bootmem_ptr;
70static unsigned long dma32_bootmem_size __initdata = (128ULL<<20); 70static unsigned long dma32_bootmem_size __initdata = (128ULL<<20);
71 71
@@ -116,14 +116,21 @@ static void __init dma32_free_bootmem(void)
116 dma32_bootmem_ptr = NULL; 116 dma32_bootmem_ptr = NULL;
117 dma32_bootmem_size = 0; 117 dma32_bootmem_size = 0;
118} 118}
119#else
120void __init dma32_reserve_bootmem(void)
121{
122}
123static void __init dma32_free_bootmem(void)
124{
125}
126
119#endif 127#endif
120 128
121void __init pci_iommu_alloc(void) 129void __init pci_iommu_alloc(void)
122{ 130{
123#ifdef CONFIG_X86_64
124 /* free the range so iommu could get some range less than 4G */ 131 /* free the range so iommu could get some range less than 4G */
125 dma32_free_bootmem(); 132 dma32_free_bootmem();
126#endif 133
127 if (pci_swiotlb_detect()) 134 if (pci_swiotlb_detect())
128 goto out; 135 goto out;
129 136
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index 704bddcdf64d..8e1aac86b50c 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -461,6 +461,14 @@ static struct dmi_system_id __initdata pci_reboot_dmi_table[] = {
461 DMI_MATCH(DMI_PRODUCT_NAME, "Macmini3,1"), 461 DMI_MATCH(DMI_PRODUCT_NAME, "Macmini3,1"),
462 }, 462 },
463 }, 463 },
464 { /* Handle problems with rebooting on the iMac9,1. */
465 .callback = set_pci_reboot,
466 .ident = "Apple iMac9,1",
467 .matches = {
468 DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
469 DMI_MATCH(DMI_PRODUCT_NAME, "iMac9,1"),
470 },
471 },
464 { } 472 { }
465}; 473};
466 474
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index cb42109a55b4..5d7ba1a449bd 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -969,15 +969,11 @@ void __init setup_arch(char **cmdline_p)
969#endif 969#endif
970 970
971 initmem_init(0, max_pfn, acpi, k8); 971 initmem_init(0, max_pfn, acpi, k8);
972#ifndef CONFIG_NO_BOOTMEM
973 early_res_to_bootmem(0, max_low_pfn<<PAGE_SHIFT);
974#endif
972 975
973#ifdef CONFIG_X86_64
974 /*
975 * dma32_reserve_bootmem() allocates bootmem which may conflict
976 * with the crashkernel command line, so do that after
977 * reserve_crashkernel()
978 */
979 dma32_reserve_bootmem(); 976 dma32_reserve_bootmem();
980#endif
981 977
982 reserve_ibft_region(); 978 reserve_ibft_region();
983 979
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index 35abcb8b00e9..ef6370b00e70 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -137,7 +137,13 @@ static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align)
137 137
138static void __init pcpu_fc_free(void *ptr, size_t size) 138static void __init pcpu_fc_free(void *ptr, size_t size)
139{ 139{
140#ifdef CONFIG_NO_BOOTMEM
141 u64 start = __pa(ptr);
142 u64 end = start + size;
143 free_early_partial(start, end);
144#else
140 free_bootmem(__pa(ptr), size); 145 free_bootmem(__pa(ptr), size);
146#endif
141} 147}
142 148
143static int __init pcpu_cpu_distance(unsigned int from, unsigned int to) 149static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 9b4401115ea1..a435c76d714e 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -241,6 +241,11 @@ static void __cpuinit smp_callin(void)
241 map_cpu_to_logical_apicid(); 241 map_cpu_to_logical_apicid();
242 242
243 notify_cpu_starting(cpuid); 243 notify_cpu_starting(cpuid);
244
245 /*
246 * Need to setup vector mappings before we enable interrupts.
247 */
248 __setup_vector_irq(smp_processor_id());
244 /* 249 /*
245 * Get our bogomips. 250 * Get our bogomips.
246 * 251 *
@@ -315,7 +320,6 @@ notrace static void __cpuinit start_secondary(void *unused)
315 */ 320 */
316 ipi_call_lock(); 321 ipi_call_lock();
317 lock_vector_lock(); 322 lock_vector_lock();
318 __setup_vector_irq(smp_processor_id());
319 set_cpu_online(smp_processor_id(), true); 323 set_cpu_online(smp_processor_id(), true);
320 unlock_vector_lock(); 324 unlock_vector_lock();
321 ipi_call_unlock(); 325 ipi_call_unlock();
@@ -1212,11 +1216,12 @@ __init void prefill_possible_map(void)
1212 1216
1213 total_cpus = max_t(int, possible, num_processors + disabled_cpus); 1217 total_cpus = max_t(int, possible, num_processors + disabled_cpus);
1214 1218
1215 if (possible > CONFIG_NR_CPUS) { 1219 /* nr_cpu_ids could be reduced via nr_cpus= */
1220 if (possible > nr_cpu_ids) {
1216 printk(KERN_WARNING 1221 printk(KERN_WARNING
1217 "%d Processors exceeds NR_CPUS limit of %d\n", 1222 "%d Processors exceeds NR_CPUS limit of %d\n",
1218 possible, CONFIG_NR_CPUS); 1223 possible, nr_cpu_ids);
1219 possible = CONFIG_NR_CPUS; 1224 possible = nr_cpu_ids;
1220 } 1225 }
1221 1226
1222 printk(KERN_INFO "SMP: Allowing %d CPUs, %d hotplug CPUs\n", 1227 printk(KERN_INFO "SMP: Allowing %d CPUs, %d hotplug CPUs\n",
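A worked example of the new clamp, with hypothetical numbers:

	/*
	 * CONFIG_NR_CPUS=64, firmware tables advertise possible=16,
	 * booted with nr_cpus=4 (which already reduced nr_cpu_ids to 4):
	 *   before this change: possible clamped to CONFIG_NR_CPUS -> 16
	 *   after this change:  possible clamped to nr_cpu_ids     -> 4
	 */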
diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
index be2573448ed9..fb5cc5e14cfa 100644
--- a/arch/x86/kernel/time.c
+++ b/arch/x86/kernel/time.c
@@ -70,11 +70,11 @@ static irqreturn_t timer_interrupt(int irq, void *dev_id)
70 * manually to deassert NMI lines for the watchdog if run 70 * manually to deassert NMI lines for the watchdog if run
71 * on an 82489DX-based system. 71 * on an 82489DX-based system.
72 */ 72 */
73 spin_lock(&i8259A_lock); 73 raw_spin_lock(&i8259A_lock);
74 outb(0x0c, PIC_MASTER_OCW3); 74 outb(0x0c, PIC_MASTER_OCW3);
75 /* Ack the IRQ; AEOI will end it automatically. */ 75 /* Ack the IRQ; AEOI will end it automatically. */
76 inb(PIC_MASTER_POLL); 76 inb(PIC_MASTER_POLL);
77 spin_unlock(&i8259A_lock); 77 raw_spin_unlock(&i8259A_lock);
78 } 78 }
79 79
80 global_clock_event->event_handler(global_clock_event); 80 global_clock_event->event_handler(global_clock_event);
diff --git a/arch/x86/kernel/visws_quirks.c b/arch/x86/kernel/visws_quirks.c
index 34a279a7471d..ab38ce0984fa 100644
--- a/arch/x86/kernel/visws_quirks.c
+++ b/arch/x86/kernel/visws_quirks.c
@@ -559,7 +559,7 @@ static irqreturn_t piix4_master_intr(int irq, void *dev_id)
559 struct irq_desc *desc; 559 struct irq_desc *desc;
560 unsigned long flags; 560 unsigned long flags;
561 561
562 spin_lock_irqsave(&i8259A_lock, flags); 562 raw_spin_lock_irqsave(&i8259A_lock, flags);
563 563
564 /* Find out what's interrupting in the PIIX4 master 8259 */ 564 /* Find out what's interrupting in the PIIX4 master 8259 */
565 outb(0x0c, 0x20); /* OCW3 Poll command */ 565 outb(0x0c, 0x20); /* OCW3 Poll command */
@@ -596,7 +596,7 @@ static irqreturn_t piix4_master_intr(int irq, void *dev_id)
596 outb(0x60 + realirq, 0x20); 596 outb(0x60 + realirq, 0x20);
597 } 597 }
598 598
599 spin_unlock_irqrestore(&i8259A_lock, flags); 599 raw_spin_unlock_irqrestore(&i8259A_lock, flags);
600 600
601 desc = irq_to_desc(realirq); 601 desc = irq_to_desc(realirq);
602 602
@@ -614,7 +614,7 @@ static irqreturn_t piix4_master_intr(int irq, void *dev_id)
614 return IRQ_HANDLED; 614 return IRQ_HANDLED;
615 615
616out_unlock: 616out_unlock:
617 spin_unlock_irqrestore(&i8259A_lock, flags); 617 raw_spin_unlock_irqrestore(&i8259A_lock, flags);
618 return IRQ_NONE; 618 return IRQ_NONE;
619} 619}
620 620
diff --git a/arch/x86/kernel/vmi_32.c b/arch/x86/kernel/vmi_32.c
index d430e4c30193..7dd599deca4a 100644
--- a/arch/x86/kernel/vmi_32.c
+++ b/arch/x86/kernel/vmi_32.c
@@ -33,6 +33,7 @@
33#include <asm/fixmap.h> 33#include <asm/fixmap.h>
34#include <asm/apicdef.h> 34#include <asm/apicdef.h>
35#include <asm/apic.h> 35#include <asm/apic.h>
36#include <asm/pgalloc.h>
36#include <asm/processor.h> 37#include <asm/processor.h>
37#include <asm/timer.h> 38#include <asm/timer.h>
38#include <asm/vmi_time.h> 39#include <asm/vmi_time.h>
@@ -266,30 +267,6 @@ static void vmi_nop(void)
266{ 267{
267} 268}
268 269
269#ifdef CONFIG_HIGHPTE
270static void *vmi_kmap_atomic_pte(struct page *page, enum km_type type)
271{
272 void *va = kmap_atomic(page, type);
273
274 /*
275 * Internally, the VMI ROM must map virtual addresses to physical
276 * addresses for processing MMU updates. By the time MMU updates
277 * are issued, this information is typically already lost.
278 * Fortunately, the VMI provides a cache of mapping slots for active
279 * page tables.
280 *
281 * We use slot zero for the linear mapping of physical memory, and
282 * in HIGHPTE kernels, slot 1 and 2 for KM_PTE0 and KM_PTE1.
283 *
284 * args: SLOT VA COUNT PFN
285 */
286 BUG_ON(type != KM_PTE0 && type != KM_PTE1);
287 vmi_ops.set_linear_mapping((type - KM_PTE0)+1, va, 1, page_to_pfn(page));
288
289 return va;
290}
291#endif
292
293static void vmi_allocate_pte(struct mm_struct *mm, unsigned long pfn) 270static void vmi_allocate_pte(struct mm_struct *mm, unsigned long pfn)
294{ 271{
295 vmi_ops.allocate_page(pfn, VMI_PAGE_L1, 0, 0, 0); 272 vmi_ops.allocate_page(pfn, VMI_PAGE_L1, 0, 0, 0);
@@ -640,6 +617,12 @@ static inline int __init activate_vmi(void)
640 u64 reloc; 617 u64 reloc;
641 const struct vmi_relocation_info *rel = (struct vmi_relocation_info *)&reloc; 618 const struct vmi_relocation_info *rel = (struct vmi_relocation_info *)&reloc;
642 619
620 /*
621 * Prevent page tables from being allocated in highmem, even if
622 * CONFIG_HIGHPTE is enabled.
623 */
624 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
625
643 if (call_vrom_func(vmi_rom, vmi_init) != 0) { 626 if (call_vrom_func(vmi_rom, vmi_init) != 0) {
644 printk(KERN_ERR "VMI ROM failed to initialize!"); 627 printk(KERN_ERR "VMI ROM failed to initialize!");
645 return 0; 628 return 0;
@@ -778,10 +761,6 @@ static inline int __init activate_vmi(void)
778 761
779 /* Set linear is needed in all cases */ 762 /* Set linear is needed in all cases */
780 vmi_ops.set_linear_mapping = vmi_get_function(VMI_CALL_SetLinearMapping); 763 vmi_ops.set_linear_mapping = vmi_get_function(VMI_CALL_SetLinearMapping);
781#ifdef CONFIG_HIGHPTE
782 if (vmi_ops.set_linear_mapping)
783 pv_mmu_ops.kmap_atomic_pte = vmi_kmap_atomic_pte;
784#endif
785 764
786 /* 765 /*
787 * These MUST always be patched. Don't support indirect jumps 766 * These MUST always be patched. Don't support indirect jumps
diff --git a/arch/x86/kernel/vmiclock_32.c b/arch/x86/kernel/vmiclock_32.c
index 74c92bb194df..2f1ca5614292 100644
--- a/arch/x86/kernel/vmiclock_32.c
+++ b/arch/x86/kernel/vmiclock_32.c
@@ -79,11 +79,7 @@ unsigned long vmi_tsc_khz(void)
79 79
80static inline unsigned int vmi_get_timer_vector(void) 80static inline unsigned int vmi_get_timer_vector(void)
81{ 81{
82#ifdef CONFIG_X86_IO_APIC 82 return IRQ0_VECTOR;
83 return FIRST_DEVICE_VECTOR;
84#else
85 return FIRST_EXTERNAL_VECTOR;
86#endif
87} 83}
88 84
89/** vmi clockchip */ 85/** vmi clockchip */
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index f92a0da608cb..44879df55696 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -341,7 +341,7 @@ SECTIONS
341 * Per-cpu symbols which need to be offset from __per_cpu_load 341 * Per-cpu symbols which need to be offset from __per_cpu_load
342 * for the boot processor. 342 * for the boot processor.
343 */ 343 */
344#define INIT_PER_CPU(x) init_per_cpu__##x = per_cpu__##x + __per_cpu_load 344#define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
345INIT_PER_CPU(gdt_page); 345INIT_PER_CPU(gdt_page);
346INIT_PER_CPU(irq_stack_union); 346INIT_PER_CPU(irq_stack_union);
347 347
@@ -352,7 +352,7 @@ INIT_PER_CPU(irq_stack_union);
352 "kernel image bigger than KERNEL_IMAGE_SIZE"); 352 "kernel image bigger than KERNEL_IMAGE_SIZE");
353 353
354#ifdef CONFIG_SMP 354#ifdef CONFIG_SMP
355. = ASSERT((per_cpu__irq_stack_union == 0), 355. = ASSERT((irq_stack_union == 0),
356 "irq_stack_union is not at start of per-cpu area"); 356 "irq_stack_union is not at start of per-cpu area");
357#endif 357#endif
358 358
diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
index 9055e5872ff0..1c0c6ab9c60f 100644
--- a/arch/x86/kernel/vsyscall_64.c
+++ b/arch/x86/kernel/vsyscall_64.c
@@ -301,7 +301,8 @@ static int __init vsyscall_init(void)
301 register_sysctl_table(kernel_root_table2); 301 register_sysctl_table(kernel_root_table2);
302#endif 302#endif
303 on_each_cpu(cpu_vsyscall_init, NULL, 1); 303 on_each_cpu(cpu_vsyscall_init, NULL, 1);
304 hotcpu_notifier(cpu_vsyscall_notifier, 0); 304 /* notifier priority > KVM */
305 hotcpu_notifier(cpu_vsyscall_notifier, 30);
305 return 0; 306 return 0;
306} 307}
307 308
diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig
index 3c4d0109ad20..970bbd479516 100644
--- a/arch/x86/kvm/Kconfig
+++ b/arch/x86/kvm/Kconfig
@@ -29,6 +29,7 @@ config KVM
29 select HAVE_KVM_EVENTFD 29 select HAVE_KVM_EVENTFD
30 select KVM_APIC_ARCHITECTURE 30 select KVM_APIC_ARCHITECTURE
31 select USER_RETURN_NOTIFIER 31 select USER_RETURN_NOTIFIER
32 select KVM_MMIO
32 ---help--- 33 ---help---
33 Support hosting fully virtualized guest machines using hardware 34 Support hosting fully virtualized guest machines using hardware
34 virtualization extensions. You will need a fairly recent 35 virtualization extensions. You will need a fairly recent
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 7e8faea4651e..4dade6ac0827 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -32,7 +32,7 @@
32#include <linux/module.h> 32#include <linux/module.h>
33#include <asm/kvm_emulate.h> 33#include <asm/kvm_emulate.h>
34 34
35#include "mmu.h" /* for is_long_mode() */ 35#include "x86.h"
36 36
37/* 37/*
38 * Opcode effective-address decode tables. 38 * Opcode effective-address decode tables.
@@ -76,6 +76,8 @@
76#define GroupDual (1<<15) /* Alternate decoding of mod == 3 */ 76#define GroupDual (1<<15) /* Alternate decoding of mod == 3 */
77#define GroupMask 0xff /* Group number stored in bits 0:7 */ 77#define GroupMask 0xff /* Group number stored in bits 0:7 */
78/* Misc flags */ 78/* Misc flags */
79#define Lock (1<<26) /* lock prefix is allowed for the instruction */
80#define Priv (1<<27) /* instruction generates #GP if current CPL != 0 */
79#define No64 (1<<28) 81#define No64 (1<<28)
80/* Source 2 operand type */ 82/* Source 2 operand type */
81#define Src2None (0<<29) 83#define Src2None (0<<29)
@@ -88,39 +90,40 @@
88enum { 90enum {
89 Group1_80, Group1_81, Group1_82, Group1_83, 91 Group1_80, Group1_81, Group1_82, Group1_83,
90 Group1A, Group3_Byte, Group3, Group4, Group5, Group7, 92 Group1A, Group3_Byte, Group3, Group4, Group5, Group7,
93 Group8, Group9,
91}; 94};
92 95
93static u32 opcode_table[256] = { 96static u32 opcode_table[256] = {
94 /* 0x00 - 0x07 */ 97 /* 0x00 - 0x07 */
95 ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM, 98 ByteOp | DstMem | SrcReg | ModRM | Lock, DstMem | SrcReg | ModRM | Lock,
96 ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM, 99 ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
97 ByteOp | DstAcc | SrcImm, DstAcc | SrcImm, 100 ByteOp | DstAcc | SrcImm, DstAcc | SrcImm,
98 ImplicitOps | Stack | No64, ImplicitOps | Stack | No64, 101 ImplicitOps | Stack | No64, ImplicitOps | Stack | No64,
99 /* 0x08 - 0x0F */ 102 /* 0x08 - 0x0F */
100 ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM, 103 ByteOp | DstMem | SrcReg | ModRM | Lock, DstMem | SrcReg | ModRM | Lock,
101 ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM, 104 ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
102 ByteOp | DstAcc | SrcImm, DstAcc | SrcImm, 105 ByteOp | DstAcc | SrcImm, DstAcc | SrcImm,
103 ImplicitOps | Stack | No64, 0, 106 ImplicitOps | Stack | No64, 0,
104 /* 0x10 - 0x17 */ 107 /* 0x10 - 0x17 */
105 ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM, 108 ByteOp | DstMem | SrcReg | ModRM | Lock, DstMem | SrcReg | ModRM | Lock,
106 ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM, 109 ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
107 ByteOp | DstAcc | SrcImm, DstAcc | SrcImm, 110 ByteOp | DstAcc | SrcImm, DstAcc | SrcImm,
108 ImplicitOps | Stack | No64, ImplicitOps | Stack | No64, 111 ImplicitOps | Stack | No64, ImplicitOps | Stack | No64,
109 /* 0x18 - 0x1F */ 112 /* 0x18 - 0x1F */
110 ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM, 113 ByteOp | DstMem | SrcReg | ModRM | Lock, DstMem | SrcReg | ModRM | Lock,
111 ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM, 114 ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
112 ByteOp | DstAcc | SrcImm, DstAcc | SrcImm, 115 ByteOp | DstAcc | SrcImm, DstAcc | SrcImm,
113 ImplicitOps | Stack | No64, ImplicitOps | Stack | No64, 116 ImplicitOps | Stack | No64, ImplicitOps | Stack | No64,
114 /* 0x20 - 0x27 */ 117 /* 0x20 - 0x27 */
115 ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM, 118 ByteOp | DstMem | SrcReg | ModRM | Lock, DstMem | SrcReg | ModRM | Lock,
116 ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM, 119 ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
117 DstAcc | SrcImmByte, DstAcc | SrcImm, 0, 0, 120 DstAcc | SrcImmByte, DstAcc | SrcImm, 0, 0,
118 /* 0x28 - 0x2F */ 121 /* 0x28 - 0x2F */
119 ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM, 122 ByteOp | DstMem | SrcReg | ModRM | Lock, DstMem | SrcReg | ModRM | Lock,
120 ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM, 123 ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
121 0, 0, 0, 0, 124 0, 0, 0, 0,
122 /* 0x30 - 0x37 */ 125 /* 0x30 - 0x37 */
123 ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM, 126 ByteOp | DstMem | SrcReg | ModRM | Lock, DstMem | SrcReg | ModRM | Lock,
124 ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM, 127 ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
125 0, 0, 0, 0, 128 0, 0, 0, 0,
126 /* 0x38 - 0x3F */ 129 /* 0x38 - 0x3F */
@@ -156,7 +159,7 @@ static u32 opcode_table[256] = {
156 Group | Group1_80, Group | Group1_81, 159 Group | Group1_80, Group | Group1_81,
157 Group | Group1_82, Group | Group1_83, 160 Group | Group1_82, Group | Group1_83,
158 ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM, 161 ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM,
159 ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM, 162 ByteOp | DstMem | SrcReg | ModRM | Lock, DstMem | SrcReg | ModRM | Lock,
160 /* 0x88 - 0x8F */ 163 /* 0x88 - 0x8F */
161 ByteOp | DstMem | SrcReg | ModRM | Mov, DstMem | SrcReg | ModRM | Mov, 164 ByteOp | DstMem | SrcReg | ModRM | Mov, DstMem | SrcReg | ModRM | Mov,
162 ByteOp | DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov, 165 ByteOp | DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
@@ -210,7 +213,7 @@ static u32 opcode_table[256] = {
210 SrcNone | ByteOp | ImplicitOps, SrcNone | ImplicitOps, 213 SrcNone | ByteOp | ImplicitOps, SrcNone | ImplicitOps,
211 /* 0xF0 - 0xF7 */ 214 /* 0xF0 - 0xF7 */
212 0, 0, 0, 0, 215 0, 0, 0, 0,
213 ImplicitOps, ImplicitOps, Group | Group3_Byte, Group | Group3, 216 ImplicitOps | Priv, ImplicitOps, Group | Group3_Byte, Group | Group3,
214 /* 0xF8 - 0xFF */ 217 /* 0xF8 - 0xFF */
215 ImplicitOps, 0, ImplicitOps, ImplicitOps, 218 ImplicitOps, 0, ImplicitOps, ImplicitOps,
216 ImplicitOps, ImplicitOps, Group | Group4, Group | Group5, 219 ImplicitOps, ImplicitOps, Group | Group4, Group | Group5,
@@ -218,16 +221,20 @@ static u32 opcode_table[256] = {
218 221
219static u32 twobyte_table[256] = { 222static u32 twobyte_table[256] = {
220 /* 0x00 - 0x0F */ 223 /* 0x00 - 0x0F */
221 0, Group | GroupDual | Group7, 0, 0, 0, ImplicitOps, ImplicitOps, 0, 224 0, Group | GroupDual | Group7, 0, 0,
222 ImplicitOps, ImplicitOps, 0, 0, 0, ImplicitOps | ModRM, 0, 0, 225 0, ImplicitOps, ImplicitOps | Priv, 0,
226 ImplicitOps | Priv, ImplicitOps | Priv, 0, 0,
227 0, ImplicitOps | ModRM, 0, 0,
223 /* 0x10 - 0x1F */ 228 /* 0x10 - 0x1F */
224 0, 0, 0, 0, 0, 0, 0, 0, ImplicitOps | ModRM, 0, 0, 0, 0, 0, 0, 0, 229 0, 0, 0, 0, 0, 0, 0, 0, ImplicitOps | ModRM, 0, 0, 0, 0, 0, 0, 0,
225 /* 0x20 - 0x2F */ 230 /* 0x20 - 0x2F */
226 ModRM | ImplicitOps, ModRM, ModRM | ImplicitOps, ModRM, 0, 0, 0, 0, 231 ModRM | ImplicitOps | Priv, ModRM | Priv,
232 ModRM | ImplicitOps | Priv, ModRM | Priv,
233 0, 0, 0, 0,
227 0, 0, 0, 0, 0, 0, 0, 0, 234 0, 0, 0, 0, 0, 0, 0, 0,
228 /* 0x30 - 0x3F */ 235 /* 0x30 - 0x3F */
229 ImplicitOps, 0, ImplicitOps, 0, 236 ImplicitOps | Priv, 0, ImplicitOps | Priv, 0,
230 ImplicitOps, ImplicitOps, 0, 0, 237 ImplicitOps, ImplicitOps | Priv, 0, 0,
231 0, 0, 0, 0, 0, 0, 0, 0, 238 0, 0, 0, 0, 0, 0, 0, 0,
232 /* 0x40 - 0x47 */ 239 /* 0x40 - 0x47 */
233 DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov, 240 DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
@@ -257,21 +264,23 @@ static u32 twobyte_table[256] = {
257 DstMem | SrcReg | Src2CL | ModRM, 0, 0, 264 DstMem | SrcReg | Src2CL | ModRM, 0, 0,
258 /* 0xA8 - 0xAF */ 265 /* 0xA8 - 0xAF */
259 ImplicitOps | Stack, ImplicitOps | Stack, 266 ImplicitOps | Stack, ImplicitOps | Stack,
260 0, DstMem | SrcReg | ModRM | BitOp, 267 0, DstMem | SrcReg | ModRM | BitOp | Lock,
261 DstMem | SrcReg | Src2ImmByte | ModRM, 268 DstMem | SrcReg | Src2ImmByte | ModRM,
262 DstMem | SrcReg | Src2CL | ModRM, 269 DstMem | SrcReg | Src2CL | ModRM,
263 ModRM, 0, 270 ModRM, 0,
264 /* 0xB0 - 0xB7 */ 271 /* 0xB0 - 0xB7 */
265 ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM, 0, 272 ByteOp | DstMem | SrcReg | ModRM | Lock, DstMem | SrcReg | ModRM | Lock,
266 DstMem | SrcReg | ModRM | BitOp, 273 0, DstMem | SrcReg | ModRM | BitOp | Lock,
267 0, 0, ByteOp | DstReg | SrcMem | ModRM | Mov, 274 0, 0, ByteOp | DstReg | SrcMem | ModRM | Mov,
268 DstReg | SrcMem16 | ModRM | Mov, 275 DstReg | SrcMem16 | ModRM | Mov,
269 /* 0xB8 - 0xBF */ 276 /* 0xB8 - 0xBF */
270 0, 0, DstMem | SrcImmByte | ModRM, DstMem | SrcReg | ModRM | BitOp, 277 0, 0,
278 Group | Group8, DstMem | SrcReg | ModRM | BitOp | Lock,
271 0, 0, ByteOp | DstReg | SrcMem | ModRM | Mov, 279 0, 0, ByteOp | DstReg | SrcMem | ModRM | Mov,
272 DstReg | SrcMem16 | ModRM | Mov, 280 DstReg | SrcMem16 | ModRM | Mov,
273 /* 0xC0 - 0xCF */ 281 /* 0xC0 - 0xCF */
274 0, 0, 0, DstMem | SrcReg | ModRM | Mov, 0, 0, 0, ImplicitOps | ModRM, 282 0, 0, 0, DstMem | SrcReg | ModRM | Mov,
283 0, 0, 0, Group | GroupDual | Group9,
275 0, 0, 0, 0, 0, 0, 0, 0, 284 0, 0, 0, 0, 0, 0, 0, 0,
276 /* 0xD0 - 0xDF */ 285 /* 0xD0 - 0xDF */
277 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 286 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
@@ -283,25 +292,41 @@ static u32 twobyte_table[256] = {
283 292
284static u32 group_table[] = { 293static u32 group_table[] = {
285 [Group1_80*8] = 294 [Group1_80*8] =
286 ByteOp | DstMem | SrcImm | ModRM, ByteOp | DstMem | SrcImm | ModRM, 295 ByteOp | DstMem | SrcImm | ModRM | Lock,
287 ByteOp | DstMem | SrcImm | ModRM, ByteOp | DstMem | SrcImm | ModRM, 296 ByteOp | DstMem | SrcImm | ModRM | Lock,
288 ByteOp | DstMem | SrcImm | ModRM, ByteOp | DstMem | SrcImm | ModRM, 297 ByteOp | DstMem | SrcImm | ModRM | Lock,
289 ByteOp | DstMem | SrcImm | ModRM, ByteOp | DstMem | SrcImm | ModRM, 298 ByteOp | DstMem | SrcImm | ModRM | Lock,
299 ByteOp | DstMem | SrcImm | ModRM | Lock,
300 ByteOp | DstMem | SrcImm | ModRM | Lock,
301 ByteOp | DstMem | SrcImm | ModRM | Lock,
302 ByteOp | DstMem | SrcImm | ModRM,
290 [Group1_81*8] = 303 [Group1_81*8] =
291 DstMem | SrcImm | ModRM, DstMem | SrcImm | ModRM, 304 DstMem | SrcImm | ModRM | Lock,
292 DstMem | SrcImm | ModRM, DstMem | SrcImm | ModRM, 305 DstMem | SrcImm | ModRM | Lock,
293 DstMem | SrcImm | ModRM, DstMem | SrcImm | ModRM, 306 DstMem | SrcImm | ModRM | Lock,
294 DstMem | SrcImm | ModRM, DstMem | SrcImm | ModRM, 307 DstMem | SrcImm | ModRM | Lock,
308 DstMem | SrcImm | ModRM | Lock,
309 DstMem | SrcImm | ModRM | Lock,
310 DstMem | SrcImm | ModRM | Lock,
311 DstMem | SrcImm | ModRM,
295 [Group1_82*8] = 312 [Group1_82*8] =
296 ByteOp | DstMem | SrcImm | ModRM, ByteOp | DstMem | SrcImm | ModRM, 313 ByteOp | DstMem | SrcImm | ModRM | No64 | Lock,
297 ByteOp | DstMem | SrcImm | ModRM, ByteOp | DstMem | SrcImm | ModRM, 314 ByteOp | DstMem | SrcImm | ModRM | No64 | Lock,
298 ByteOp | DstMem | SrcImm | ModRM, ByteOp | DstMem | SrcImm | ModRM, 315 ByteOp | DstMem | SrcImm | ModRM | No64 | Lock,
299 ByteOp | DstMem | SrcImm | ModRM, ByteOp | DstMem | SrcImm | ModRM, 316 ByteOp | DstMem | SrcImm | ModRM | No64 | Lock,
317 ByteOp | DstMem | SrcImm | ModRM | No64 | Lock,
318 ByteOp | DstMem | SrcImm | ModRM | No64 | Lock,
319 ByteOp | DstMem | SrcImm | ModRM | No64 | Lock,
320 ByteOp | DstMem | SrcImm | ModRM | No64,
300 [Group1_83*8] = 321 [Group1_83*8] =
301 DstMem | SrcImmByte | ModRM, DstMem | SrcImmByte | ModRM, 322 DstMem | SrcImmByte | ModRM | Lock,
302 DstMem | SrcImmByte | ModRM, DstMem | SrcImmByte | ModRM, 323 DstMem | SrcImmByte | ModRM | Lock,
303 DstMem | SrcImmByte | ModRM, DstMem | SrcImmByte | ModRM, 324 DstMem | SrcImmByte | ModRM | Lock,
304 DstMem | SrcImmByte | ModRM, DstMem | SrcImmByte | ModRM, 325 DstMem | SrcImmByte | ModRM | Lock,
326 DstMem | SrcImmByte | ModRM | Lock,
327 DstMem | SrcImmByte | ModRM | Lock,
328 DstMem | SrcImmByte | ModRM | Lock,
329 DstMem | SrcImmByte | ModRM,
305 [Group1A*8] = 330 [Group1A*8] =
306 DstMem | SrcNone | ModRM | Mov | Stack, 0, 0, 0, 0, 0, 0, 0, 331 DstMem | SrcNone | ModRM | Mov | Stack, 0, 0, 0, 0, 0, 0, 0,
307 [Group3_Byte*8] = 332 [Group3_Byte*8] =
@@ -320,24 +345,39 @@ static u32 group_table[] = {
320 SrcMem | ModRM | Stack, 0, 345 SrcMem | ModRM | Stack, 0,
321 SrcMem | ModRM | Stack, 0, SrcMem | ModRM | Stack, 0, 346 SrcMem | ModRM | Stack, 0, SrcMem | ModRM | Stack, 0,
322 [Group7*8] = 347 [Group7*8] =
323 0, 0, ModRM | SrcMem, ModRM | SrcMem, 348 0, 0, ModRM | SrcMem | Priv, ModRM | SrcMem | Priv,
324 SrcNone | ModRM | DstMem | Mov, 0, 349 SrcNone | ModRM | DstMem | Mov, 0,
325 SrcMem16 | ModRM | Mov, SrcMem | ModRM | ByteOp, 350 SrcMem16 | ModRM | Mov | Priv, SrcMem | ModRM | ByteOp | Priv,
351 [Group8*8] =
352 0, 0, 0, 0,
353 DstMem | SrcImmByte | ModRM, DstMem | SrcImmByte | ModRM | Lock,
354 DstMem | SrcImmByte | ModRM | Lock, DstMem | SrcImmByte | ModRM | Lock,
355 [Group9*8] =
356 0, ImplicitOps | ModRM | Lock, 0, 0, 0, 0, 0, 0,
326}; 357};
327 358
328static u32 group2_table[] = { 359static u32 group2_table[] = {
329 [Group7*8] = 360 [Group7*8] =
330 SrcNone | ModRM, 0, 0, SrcNone | ModRM, 361 SrcNone | ModRM | Priv, 0, 0, SrcNone | ModRM,
331 SrcNone | ModRM | DstMem | Mov, 0, 362 SrcNone | ModRM | DstMem | Mov, 0,
332 SrcMem16 | ModRM | Mov, 0, 363 SrcMem16 | ModRM | Mov, 0,
364 [Group9*8] =
365 0, 0, 0, 0, 0, 0, 0, 0,
333}; 366};
334 367
335/* EFLAGS bit definitions. */ 368/* EFLAGS bit definitions. */
369#define EFLG_ID (1<<21)
370#define EFLG_VIP (1<<20)
371#define EFLG_VIF (1<<19)
372#define EFLG_AC (1<<18)
336#define EFLG_VM (1<<17) 373#define EFLG_VM (1<<17)
337#define EFLG_RF (1<<16) 374#define EFLG_RF (1<<16)
375#define EFLG_IOPL (3<<12)
376#define EFLG_NT (1<<14)
338#define EFLG_OF (1<<11) 377#define EFLG_OF (1<<11)
339#define EFLG_DF (1<<10) 378#define EFLG_DF (1<<10)
340#define EFLG_IF (1<<9) 379#define EFLG_IF (1<<9)
380#define EFLG_TF (1<<8)
341#define EFLG_SF (1<<7) 381#define EFLG_SF (1<<7)
342#define EFLG_ZF (1<<6) 382#define EFLG_ZF (1<<6)
343#define EFLG_AF (1<<4) 383#define EFLG_AF (1<<4)
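These masks complete the EFLAGS bit map used by the POPF and IOPL logic below; for reference, the 2-bit IOPL field is extracted as (assuming IOPL_SHIFT is 12, per <asm/processor-flags.h>):

    /* EFLG_IOPL is (3 << 12): bits 12-13 hold the I/O privilege level. */
    int iopl = (ctxt->eflags & EFLG_IOPL) >> IOPL_SHIFT;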
@@ -606,7 +646,7 @@ static int do_fetch_insn_byte(struct x86_emulate_ctxt *ctxt,
606 646
607 if (linear < fc->start || linear >= fc->end) { 647 if (linear < fc->start || linear >= fc->end) {
608 size = min(15UL, PAGE_SIZE - offset_in_page(linear)); 648 size = min(15UL, PAGE_SIZE - offset_in_page(linear));
609 rc = ops->read_std(linear, fc->data, size, ctxt->vcpu); 649 rc = ops->fetch(linear, fc->data, size, ctxt->vcpu, NULL);
610 if (rc) 650 if (rc)
611 return rc; 651 return rc;
612 fc->start = linear; 652 fc->start = linear;
@@ -661,11 +701,11 @@ static int read_descriptor(struct x86_emulate_ctxt *ctxt,
661 op_bytes = 3; 701 op_bytes = 3;
662 *address = 0; 702 *address = 0;
663 rc = ops->read_std((unsigned long)ptr, (unsigned long *)size, 2, 703 rc = ops->read_std((unsigned long)ptr, (unsigned long *)size, 2,
664 ctxt->vcpu); 704 ctxt->vcpu, NULL);
665 if (rc) 705 if (rc)
666 return rc; 706 return rc;
667 rc = ops->read_std((unsigned long)ptr + 2, address, op_bytes, 707 rc = ops->read_std((unsigned long)ptr + 2, address, op_bytes,
668 ctxt->vcpu); 708 ctxt->vcpu, NULL);
669 return rc; 709 return rc;
670} 710}
671 711
@@ -889,6 +929,7 @@ x86_decode_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
889 929
890 switch (mode) { 930 switch (mode) {
891 case X86EMUL_MODE_REAL: 931 case X86EMUL_MODE_REAL:
932 case X86EMUL_MODE_VM86:
892 case X86EMUL_MODE_PROT16: 933 case X86EMUL_MODE_PROT16:
893 def_op_bytes = def_ad_bytes = 2; 934 def_op_bytes = def_ad_bytes = 2;
894 break; 935 break;
@@ -975,7 +1016,7 @@ done_prefixes:
975 } 1016 }
976 1017
977 if (mode == X86EMUL_MODE_PROT64 && (c->d & No64)) { 1018 if (mode == X86EMUL_MODE_PROT64 && (c->d & No64)) {
978 kvm_report_emulation_failure(ctxt->vcpu, "invalid x86/64 instruction");; 1019 kvm_report_emulation_failure(ctxt->vcpu, "invalid x86/64 instruction");
979 return -1; 1020 return -1;
980 } 1021 }
981 1022
@@ -1196,13 +1237,56 @@ static int emulate_pop(struct x86_emulate_ctxt *ctxt,
1196 rc = ops->read_emulated(register_address(c, ss_base(ctxt), 1237 rc = ops->read_emulated(register_address(c, ss_base(ctxt),
1197 c->regs[VCPU_REGS_RSP]), 1238 c->regs[VCPU_REGS_RSP]),
1198 dest, len, ctxt->vcpu); 1239 dest, len, ctxt->vcpu);
1199 if (rc != 0) 1240 if (rc != X86EMUL_CONTINUE)
1200 return rc; 1241 return rc;
1201 1242
1202 register_address_increment(c, &c->regs[VCPU_REGS_RSP], len); 1243 register_address_increment(c, &c->regs[VCPU_REGS_RSP], len);
1203 return rc; 1244 return rc;
1204} 1245}
1205 1246
1247static int emulate_popf(struct x86_emulate_ctxt *ctxt,
1248 struct x86_emulate_ops *ops,
1249 void *dest, int len)
1250{
1251 int rc;
1252 unsigned long val, change_mask;
1253 int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
1254 int cpl = kvm_x86_ops->get_cpl(ctxt->vcpu);
1255
1256 rc = emulate_pop(ctxt, ops, &val, len);
1257 if (rc != X86EMUL_CONTINUE)
1258 return rc;
1259
1260 change_mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_OF
1261 | EFLG_TF | EFLG_DF | EFLG_NT | EFLG_RF | EFLG_AC | EFLG_ID;
1262
1263 switch(ctxt->mode) {
1264 case X86EMUL_MODE_PROT64:
1265 case X86EMUL_MODE_PROT32:
1266 case X86EMUL_MODE_PROT16:
1267 if (cpl == 0)
1268 change_mask |= EFLG_IOPL;
1269 if (cpl <= iopl)
1270 change_mask |= EFLG_IF;
1271 break;
1272 case X86EMUL_MODE_VM86:
1273 if (iopl < 3) {
1274 kvm_inject_gp(ctxt->vcpu, 0);
1275 return X86EMUL_PROPAGATE_FAULT;
1276 }
1277 change_mask |= EFLG_IF;
1278 break;
1279 default: /* real mode */
1280 change_mask |= (EFLG_IOPL | EFLG_IF);
1281 break;
1282 }
1283
1284 *(unsigned long *)dest =
1285 (ctxt->eflags & ~change_mask) | (val & change_mask);
1286
1287 return rc;
1288}
1289
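A worked example of the masking in emulate_popf(), with hypothetical values: at CPL 3 with IOPL 0 in protected mode, neither conditional widens change_mask, so a guest POPF can neither set IF nor raise its own IOPL:

    /* Hypothetical protected-mode case: cpl = 3, iopl = 0. */
    unsigned long change_mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF
            | EFLG_SF | EFLG_OF | EFLG_TF | EFLG_DF | EFLG_NT | EFLG_RF
            | EFLG_AC | EFLG_ID;            /* no EFLG_IF, no EFLG_IOPL  */
    unsigned long eflags = 0x00000002;      /* IF = 0, IOPL = 0          */
    unsigned long val = eflags | EFLG_IF | EFLG_IOPL;   /* popped image  */
    unsigned long out = (eflags & ~change_mask) | (val & change_mask);
    /* out still has IF = 0 and IOPL = 0: both bits are silently dropped. */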
1206static void emulate_push_sreg(struct x86_emulate_ctxt *ctxt, int seg) 1290static void emulate_push_sreg(struct x86_emulate_ctxt *ctxt, int seg)
1207{ 1291{
1208 struct decode_cache *c = &ctxt->decode; 1292 struct decode_cache *c = &ctxt->decode;
@@ -1225,7 +1309,7 @@ static int emulate_pop_sreg(struct x86_emulate_ctxt *ctxt,
1225 if (rc != 0) 1309 if (rc != 0)
1226 return rc; 1310 return rc;
1227 1311
1228 rc = kvm_load_segment_descriptor(ctxt->vcpu, (u16)selector, 1, seg); 1312 rc = kvm_load_segment_descriptor(ctxt->vcpu, (u16)selector, seg);
1229 return rc; 1313 return rc;
1230} 1314}
1231 1315
@@ -1370,7 +1454,7 @@ static inline int emulate_grp9(struct x86_emulate_ctxt *ctxt,
1370 int rc; 1454 int rc;
1371 1455
1372 rc = ops->read_emulated(memop, &old, 8, ctxt->vcpu); 1456 rc = ops->read_emulated(memop, &old, 8, ctxt->vcpu);
1373 if (rc != 0) 1457 if (rc != X86EMUL_CONTINUE)
1374 return rc; 1458 return rc;
1375 1459
1376 if (((u32) (old >> 0) != (u32) c->regs[VCPU_REGS_RAX]) || 1460 if (((u32) (old >> 0) != (u32) c->regs[VCPU_REGS_RAX]) ||
@@ -1385,7 +1469,7 @@ static inline int emulate_grp9(struct x86_emulate_ctxt *ctxt,
1385 (u32) c->regs[VCPU_REGS_RBX]; 1469 (u32) c->regs[VCPU_REGS_RBX];
1386 1470
1387 rc = ops->cmpxchg_emulated(memop, &old, &new, 8, ctxt->vcpu); 1471 rc = ops->cmpxchg_emulated(memop, &old, &new, 8, ctxt->vcpu);
1388 if (rc != 0) 1472 if (rc != X86EMUL_CONTINUE)
1389 return rc; 1473 return rc;
1390 ctxt->eflags |= EFLG_ZF; 1474 ctxt->eflags |= EFLG_ZF;
1391 } 1475 }
@@ -1407,7 +1491,7 @@ static int emulate_ret_far(struct x86_emulate_ctxt *ctxt,
1407 rc = emulate_pop(ctxt, ops, &cs, c->op_bytes); 1491 rc = emulate_pop(ctxt, ops, &cs, c->op_bytes);
1408 if (rc) 1492 if (rc)
1409 return rc; 1493 return rc;
1410 rc = kvm_load_segment_descriptor(ctxt->vcpu, (u16)cs, 1, VCPU_SREG_CS); 1494 rc = kvm_load_segment_descriptor(ctxt->vcpu, (u16)cs, VCPU_SREG_CS);
1411 return rc; 1495 return rc;
1412} 1496}
1413 1497
@@ -1451,7 +1535,7 @@ static inline int writeback(struct x86_emulate_ctxt *ctxt,
1451 &c->dst.val, 1535 &c->dst.val,
1452 c->dst.bytes, 1536 c->dst.bytes,
1453 ctxt->vcpu); 1537 ctxt->vcpu);
1454 if (rc != 0) 1538 if (rc != X86EMUL_CONTINUE)
1455 return rc; 1539 return rc;
1456 break; 1540 break;
1457 case OP_NONE: 1541 case OP_NONE:
@@ -1514,9 +1598,8 @@ emulate_syscall(struct x86_emulate_ctxt *ctxt)
1514 u64 msr_data; 1598 u64 msr_data;
1515 1599
1516 /* syscall is not available in real mode */ 1600 /* syscall is not available in real mode */
1517 if (c->lock_prefix || ctxt->mode == X86EMUL_MODE_REAL 1601 if (ctxt->mode == X86EMUL_MODE_REAL || ctxt->mode == X86EMUL_MODE_VM86)
1518 || !(ctxt->vcpu->arch.cr0 & X86_CR0_PE)) 1602 return X86EMUL_UNHANDLEABLE;
1519 return -1;
1520 1603
1521 setup_syscalls_segments(ctxt, &cs, &ss); 1604 setup_syscalls_segments(ctxt, &cs, &ss);
1522 1605
@@ -1553,7 +1636,7 @@ emulate_syscall(struct x86_emulate_ctxt *ctxt)
1553 ctxt->eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF); 1636 ctxt->eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF);
1554 } 1637 }
1555 1638
1556 return 0; 1639 return X86EMUL_CONTINUE;
1557} 1640}
1558 1641
1559static int 1642static int
@@ -1563,22 +1646,17 @@ emulate_sysenter(struct x86_emulate_ctxt *ctxt)
1563 struct kvm_segment cs, ss; 1646 struct kvm_segment cs, ss;
1564 u64 msr_data; 1647 u64 msr_data;
1565 1648
1566 /* inject #UD if LOCK prefix is used */ 1649 /* inject #GP if in real mode */
1567 if (c->lock_prefix) 1650 if (ctxt->mode == X86EMUL_MODE_REAL) {
1568 return -1;
1569
1570 /* inject #GP if in real mode or paging is disabled */
1571 if (ctxt->mode == X86EMUL_MODE_REAL ||
1572 !(ctxt->vcpu->arch.cr0 & X86_CR0_PE)) {
1573 kvm_inject_gp(ctxt->vcpu, 0); 1651 kvm_inject_gp(ctxt->vcpu, 0);
1574 return -1; 1652 return X86EMUL_UNHANDLEABLE;
1575 } 1653 }
1576 1654
1577 /* XXX sysenter/sysexit have not been tested in 64bit mode. 1655 /* XXX sysenter/sysexit have not been tested in 64bit mode.
1578 * Therefore, we inject an #UD. 1656 * Therefore, we inject an #UD.
1579 */ 1657 */
1580 if (ctxt->mode == X86EMUL_MODE_PROT64) 1658 if (ctxt->mode == X86EMUL_MODE_PROT64)
1581 return -1; 1659 return X86EMUL_UNHANDLEABLE;
1582 1660
1583 setup_syscalls_segments(ctxt, &cs, &ss); 1661 setup_syscalls_segments(ctxt, &cs, &ss);
1584 1662
@@ -1587,13 +1665,13 @@ emulate_sysenter(struct x86_emulate_ctxt *ctxt)
1587 case X86EMUL_MODE_PROT32: 1665 case X86EMUL_MODE_PROT32:
1588 if ((msr_data & 0xfffc) == 0x0) { 1666 if ((msr_data & 0xfffc) == 0x0) {
1589 kvm_inject_gp(ctxt->vcpu, 0); 1667 kvm_inject_gp(ctxt->vcpu, 0);
1590 return -1; 1668 return X86EMUL_PROPAGATE_FAULT;
1591 } 1669 }
1592 break; 1670 break;
1593 case X86EMUL_MODE_PROT64: 1671 case X86EMUL_MODE_PROT64:
1594 if (msr_data == 0x0) { 1672 if (msr_data == 0x0) {
1595 kvm_inject_gp(ctxt->vcpu, 0); 1673 kvm_inject_gp(ctxt->vcpu, 0);
1596 return -1; 1674 return X86EMUL_PROPAGATE_FAULT;
1597 } 1675 }
1598 break; 1676 break;
1599 } 1677 }
@@ -1618,7 +1696,7 @@ emulate_sysenter(struct x86_emulate_ctxt *ctxt)
1618 kvm_x86_ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_ESP, &msr_data); 1696 kvm_x86_ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_ESP, &msr_data);
1619 c->regs[VCPU_REGS_RSP] = msr_data; 1697 c->regs[VCPU_REGS_RSP] = msr_data;
1620 1698
1621 return 0; 1699 return X86EMUL_CONTINUE;
1622} 1700}
1623 1701
1624static int 1702static int
@@ -1629,21 +1707,11 @@ emulate_sysexit(struct x86_emulate_ctxt *ctxt)
1629 u64 msr_data; 1707 u64 msr_data;
1630 int usermode; 1708 int usermode;
1631 1709
1632 /* inject #UD if LOCK prefix is used */ 1710 /* inject #GP if in real mode or Virtual 8086 mode */
1633 if (c->lock_prefix) 1711 if (ctxt->mode == X86EMUL_MODE_REAL ||
1634 return -1; 1712 ctxt->mode == X86EMUL_MODE_VM86) {
1635
1636 /* inject #GP if in real mode or paging is disabled */
1637 if (ctxt->mode == X86EMUL_MODE_REAL
1638 || !(ctxt->vcpu->arch.cr0 & X86_CR0_PE)) {
1639 kvm_inject_gp(ctxt->vcpu, 0);
1640 return -1;
1641 }
1642
1643 /* sysexit must be called from CPL 0 */
1644 if (kvm_x86_ops->get_cpl(ctxt->vcpu) != 0) {
1645 kvm_inject_gp(ctxt->vcpu, 0); 1713 kvm_inject_gp(ctxt->vcpu, 0);
1646 return -1; 1714 return X86EMUL_UNHANDLEABLE;
1647 } 1715 }
1648 1716
1649 setup_syscalls_segments(ctxt, &cs, &ss); 1717 setup_syscalls_segments(ctxt, &cs, &ss);
@@ -1661,7 +1729,7 @@ emulate_sysexit(struct x86_emulate_ctxt *ctxt)
1661 cs.selector = (u16)(msr_data + 16); 1729 cs.selector = (u16)(msr_data + 16);
1662 if ((msr_data & 0xfffc) == 0x0) { 1730 if ((msr_data & 0xfffc) == 0x0) {
1663 kvm_inject_gp(ctxt->vcpu, 0); 1731 kvm_inject_gp(ctxt->vcpu, 0);
1664 return -1; 1732 return X86EMUL_PROPAGATE_FAULT;
1665 } 1733 }
1666 ss.selector = (u16)(msr_data + 24); 1734 ss.selector = (u16)(msr_data + 24);
1667 break; 1735 break;
@@ -1669,7 +1737,7 @@ emulate_sysexit(struct x86_emulate_ctxt *ctxt)
1669 cs.selector = (u16)(msr_data + 32); 1737 cs.selector = (u16)(msr_data + 32);
1670 if (msr_data == 0x0) { 1738 if (msr_data == 0x0) {
1671 kvm_inject_gp(ctxt->vcpu, 0); 1739 kvm_inject_gp(ctxt->vcpu, 0);
1672 return -1; 1740 return X86EMUL_PROPAGATE_FAULT;
1673 } 1741 }
1674 ss.selector = cs.selector + 8; 1742 ss.selector = cs.selector + 8;
1675 cs.db = 0; 1743 cs.db = 0;
@@ -1685,7 +1753,58 @@ emulate_sysexit(struct x86_emulate_ctxt *ctxt)
1685 c->eip = ctxt->vcpu->arch.regs[VCPU_REGS_RDX]; 1753 c->eip = ctxt->vcpu->arch.regs[VCPU_REGS_RDX];
1686 c->regs[VCPU_REGS_RSP] = ctxt->vcpu->arch.regs[VCPU_REGS_RCX]; 1754 c->regs[VCPU_REGS_RSP] = ctxt->vcpu->arch.regs[VCPU_REGS_RCX];
1687 1755
1688 return 0; 1756 return X86EMUL_CONTINUE;
1757}
1758
1759static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt)
1760{
1761 int iopl;
1762 if (ctxt->mode == X86EMUL_MODE_REAL)
1763 return false;
1764 if (ctxt->mode == X86EMUL_MODE_VM86)
1765 return true;
1766 iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
1767 return kvm_x86_ops->get_cpl(ctxt->vcpu) > iopl;
1768}
1769
1770static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
1771 struct x86_emulate_ops *ops,
1772 u16 port, u16 len)
1773{
1774 struct kvm_segment tr_seg;
1775 int r;
1776 u16 io_bitmap_ptr;
1777 u8 perm, bit_idx = port & 0x7;
1778 unsigned mask = (1 << len) - 1;
1779
1780 kvm_get_segment(ctxt->vcpu, &tr_seg, VCPU_SREG_TR);
1781 if (tr_seg.unusable)
1782 return false;
1783 if (tr_seg.limit < 103)
1784 return false;
1785 r = ops->read_std(tr_seg.base + 102, &io_bitmap_ptr, 2, ctxt->vcpu,
1786 NULL);
1787 if (r != X86EMUL_CONTINUE)
1788 return false;
1789 if (io_bitmap_ptr + port/8 > tr_seg.limit)
1790 return false;
1791 r = ops->read_std(tr_seg.base + io_bitmap_ptr + port/8, &perm, 1,
1792 ctxt->vcpu, NULL);
1793 if (r != X86EMUL_CONTINUE)
1794 return false;
1795 if ((perm >> bit_idx) & mask)
1796 return false;
1797 return true;
1798}
1799
1800static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt,
1801 struct x86_emulate_ops *ops,
1802 u16 port, u16 len)
1803{
1804 if (emulator_bad_iopl(ctxt))
1805 if (!emulator_io_port_access_allowed(ctxt, ops, port, len))
1806 return false;
1807 return true;
1689} 1808}
1690 1809
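The bitmap walk above follows the hardware TSS layout: offset 102 of the 32-bit TSS holds the 16-bit I/O-map base, each port owns one bit in the map, and any set bit in [port, port + len) denies access. (Note the single-byte read: a multi-byte access whose bits straddle a byte boundary is only partially checked by this version.) In outline:

    /* Outline of emulator_io_port_access_allowed()'s lookup. */
    u16 io_base;    /* read_std(tr_seg.base + 102, ...)           */
    u8 perm;        /* read_std(tr_seg.base + io_base + port / 8) */
    unsigned mask = (1 << len) - 1;         /* one bit per port   */
    bool denied = (perm >> (port & 0x7)) & mask;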
1691int 1810int
@@ -1709,6 +1828,18 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
1709 memcpy(c->regs, ctxt->vcpu->arch.regs, sizeof c->regs); 1828 memcpy(c->regs, ctxt->vcpu->arch.regs, sizeof c->regs);
1710 saved_eip = c->eip; 1829 saved_eip = c->eip;
1711 1830
1831 /* LOCK prefix is allowed only with some instructions */
1832 if (c->lock_prefix && !(c->d & Lock)) {
1833 kvm_queue_exception(ctxt->vcpu, UD_VECTOR);
1834 goto done;
1835 }
1836
 1837	/* Privileged instructions can be executed only at CPL 0 */
1838 if ((c->d & Priv) && kvm_x86_ops->get_cpl(ctxt->vcpu)) {
1839 kvm_inject_gp(ctxt->vcpu, 0);
1840 goto done;
1841 }
1842
1712 if (((c->d & ModRM) && (c->modrm_mod != 3)) || (c->d & MemAbs)) 1843 if (((c->d & ModRM) && (c->modrm_mod != 3)) || (c->d & MemAbs))
1713 memop = c->modrm_ea; 1844 memop = c->modrm_ea;
1714 1845
@@ -1749,7 +1880,7 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
1749 &c->src.val, 1880 &c->src.val,
1750 c->src.bytes, 1881 c->src.bytes,
1751 ctxt->vcpu); 1882 ctxt->vcpu);
1752 if (rc != 0) 1883 if (rc != X86EMUL_CONTINUE)
1753 goto done; 1884 goto done;
1754 c->src.orig_val = c->src.val; 1885 c->src.orig_val = c->src.val;
1755 } 1886 }
@@ -1768,12 +1899,15 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
1768 c->dst.ptr = (void *)c->dst.ptr + 1899 c->dst.ptr = (void *)c->dst.ptr +
1769 (c->src.val & mask) / 8; 1900 (c->src.val & mask) / 8;
1770 } 1901 }
1771 if (!(c->d & Mov) && 1902 if (!(c->d & Mov)) {
1772 /* optimisation - avoid slow emulated read */ 1903 /* optimisation - avoid slow emulated read */
1773 ((rc = ops->read_emulated((unsigned long)c->dst.ptr, 1904 rc = ops->read_emulated((unsigned long)c->dst.ptr,
1774 &c->dst.val, 1905 &c->dst.val,
1775 c->dst.bytes, ctxt->vcpu)) != 0)) 1906 c->dst.bytes,
1776 goto done; 1907 ctxt->vcpu);
1908 if (rc != X86EMUL_CONTINUE)
1909 goto done;
1910 }
1777 } 1911 }
1778 c->dst.orig_val = c->dst.val; 1912 c->dst.orig_val = c->dst.val;
1779 1913
@@ -1876,7 +2010,12 @@ special_insn:
1876 break; 2010 break;
1877 case 0x6c: /* insb */ 2011 case 0x6c: /* insb */
1878 case 0x6d: /* insw/insd */ 2012 case 0x6d: /* insw/insd */
1879 if (kvm_emulate_pio_string(ctxt->vcpu, 2013 if (!emulator_io_permited(ctxt, ops, c->regs[VCPU_REGS_RDX],
2014 (c->d & ByteOp) ? 1 : c->op_bytes)) {
2015 kvm_inject_gp(ctxt->vcpu, 0);
2016 goto done;
2017 }
2018 if (kvm_emulate_pio_string(ctxt->vcpu,
1880 1, 2019 1,
1881 (c->d & ByteOp) ? 1 : c->op_bytes, 2020 (c->d & ByteOp) ? 1 : c->op_bytes,
1882 c->rep_prefix ? 2021 c->rep_prefix ?
@@ -1892,6 +2031,11 @@ special_insn:
1892 return 0; 2031 return 0;
1893 case 0x6e: /* outsb */ 2032 case 0x6e: /* outsb */
1894 case 0x6f: /* outsw/outsd */ 2033 case 0x6f: /* outsw/outsd */
2034 if (!emulator_io_permited(ctxt, ops, c->regs[VCPU_REGS_RDX],
2035 (c->d & ByteOp) ? 1 : c->op_bytes)) {
2036 kvm_inject_gp(ctxt->vcpu, 0);
2037 goto done;
2038 }
1895 if (kvm_emulate_pio_string(ctxt->vcpu, 2039 if (kvm_emulate_pio_string(ctxt->vcpu,
1896 0, 2040 0,
1897 (c->d & ByteOp) ? 1 : c->op_bytes, 2041 (c->d & ByteOp) ? 1 : c->op_bytes,
@@ -1978,25 +2122,19 @@ special_insn:
1978 break; 2122 break;
1979 case 0x8e: { /* mov seg, r/m16 */ 2123 case 0x8e: { /* mov seg, r/m16 */
1980 uint16_t sel; 2124 uint16_t sel;
1981 int type_bits;
1982 int err;
1983 2125
1984 sel = c->src.val; 2126 sel = c->src.val;
1985 if (c->modrm_reg == VCPU_SREG_SS)
1986 toggle_interruptibility(ctxt, X86_SHADOW_INT_MOV_SS);
1987 2127
1988 if (c->modrm_reg <= 5) { 2128 if (c->modrm_reg == VCPU_SREG_CS ||
1989 type_bits = (c->modrm_reg == 1) ? 9 : 1; 2129 c->modrm_reg > VCPU_SREG_GS) {
1990 err = kvm_load_segment_descriptor(ctxt->vcpu, sel, 2130 kvm_queue_exception(ctxt->vcpu, UD_VECTOR);
1991 type_bits, c->modrm_reg); 2131 goto done;
1992 } else {
1993 printk(KERN_INFO "Invalid segreg in modrm byte 0x%02x\n",
1994 c->modrm);
1995 goto cannot_emulate;
1996 } 2132 }
1997 2133
1998 if (err < 0) 2134 if (c->modrm_reg == VCPU_SREG_SS)
1999 goto cannot_emulate; 2135 toggle_interruptibility(ctxt, X86_SHADOW_INT_MOV_SS);
2136
2137 rc = kvm_load_segment_descriptor(ctxt->vcpu, sel, c->modrm_reg);
2000 2138
2001 c->dst.type = OP_NONE; /* Disable writeback. */ 2139 c->dst.type = OP_NONE; /* Disable writeback. */
2002 break; 2140 break;
@@ -2025,7 +2163,10 @@ special_insn:
2025 c->dst.type = OP_REG; 2163 c->dst.type = OP_REG;
2026 c->dst.ptr = (unsigned long *) &ctxt->eflags; 2164 c->dst.ptr = (unsigned long *) &ctxt->eflags;
2027 c->dst.bytes = c->op_bytes; 2165 c->dst.bytes = c->op_bytes;
2028 goto pop_instruction; 2166 rc = emulate_popf(ctxt, ops, &c->dst.val, c->op_bytes);
2167 if (rc != X86EMUL_CONTINUE)
2168 goto done;
2169 break;
2029 case 0xa0 ... 0xa1: /* mov */ 2170 case 0xa0 ... 0xa1: /* mov */
2030 c->dst.ptr = (unsigned long *)&c->regs[VCPU_REGS_RAX]; 2171 c->dst.ptr = (unsigned long *)&c->regs[VCPU_REGS_RAX];
2031 c->dst.val = c->src.val; 2172 c->dst.val = c->src.val;
@@ -2039,11 +2180,12 @@ special_insn:
2039 c->dst.ptr = (unsigned long *)register_address(c, 2180 c->dst.ptr = (unsigned long *)register_address(c,
2040 es_base(ctxt), 2181 es_base(ctxt),
2041 c->regs[VCPU_REGS_RDI]); 2182 c->regs[VCPU_REGS_RDI]);
2042 if ((rc = ops->read_emulated(register_address(c, 2183 rc = ops->read_emulated(register_address(c,
2043 seg_override_base(ctxt, c), 2184 seg_override_base(ctxt, c),
2044 c->regs[VCPU_REGS_RSI]), 2185 c->regs[VCPU_REGS_RSI]),
2045 &c->dst.val, 2186 &c->dst.val,
2046 c->dst.bytes, ctxt->vcpu)) != 0) 2187 c->dst.bytes, ctxt->vcpu);
2188 if (rc != X86EMUL_CONTINUE)
2047 goto done; 2189 goto done;
2048 register_address_increment(c, &c->regs[VCPU_REGS_RSI], 2190 register_address_increment(c, &c->regs[VCPU_REGS_RSI],
2049 (ctxt->eflags & EFLG_DF) ? -c->dst.bytes 2191 (ctxt->eflags & EFLG_DF) ? -c->dst.bytes
@@ -2058,10 +2200,11 @@ special_insn:
2058 c->src.ptr = (unsigned long *)register_address(c, 2200 c->src.ptr = (unsigned long *)register_address(c,
2059 seg_override_base(ctxt, c), 2201 seg_override_base(ctxt, c),
2060 c->regs[VCPU_REGS_RSI]); 2202 c->regs[VCPU_REGS_RSI]);
2061 if ((rc = ops->read_emulated((unsigned long)c->src.ptr, 2203 rc = ops->read_emulated((unsigned long)c->src.ptr,
2062 &c->src.val, 2204 &c->src.val,
2063 c->src.bytes, 2205 c->src.bytes,
2064 ctxt->vcpu)) != 0) 2206 ctxt->vcpu);
2207 if (rc != X86EMUL_CONTINUE)
2065 goto done; 2208 goto done;
2066 2209
2067 c->dst.type = OP_NONE; /* Disable writeback. */ 2210 c->dst.type = OP_NONE; /* Disable writeback. */
@@ -2069,10 +2212,11 @@ special_insn:
2069 c->dst.ptr = (unsigned long *)register_address(c, 2212 c->dst.ptr = (unsigned long *)register_address(c,
2070 es_base(ctxt), 2213 es_base(ctxt),
2071 c->regs[VCPU_REGS_RDI]); 2214 c->regs[VCPU_REGS_RDI]);
2072 if ((rc = ops->read_emulated((unsigned long)c->dst.ptr, 2215 rc = ops->read_emulated((unsigned long)c->dst.ptr,
2073 &c->dst.val, 2216 &c->dst.val,
2074 c->dst.bytes, 2217 c->dst.bytes,
2075 ctxt->vcpu)) != 0) 2218 ctxt->vcpu);
2219 if (rc != X86EMUL_CONTINUE)
2076 goto done; 2220 goto done;
2077 2221
2078 DPRINTF("cmps: mem1=0x%p mem2=0x%p\n", c->src.ptr, c->dst.ptr); 2222 DPRINTF("cmps: mem1=0x%p mem2=0x%p\n", c->src.ptr, c->dst.ptr);
@@ -2102,12 +2246,13 @@ special_insn:
2102 c->dst.type = OP_REG; 2246 c->dst.type = OP_REG;
2103 c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes; 2247 c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
2104 c->dst.ptr = (unsigned long *)&c->regs[VCPU_REGS_RAX]; 2248 c->dst.ptr = (unsigned long *)&c->regs[VCPU_REGS_RAX];
2105 if ((rc = ops->read_emulated(register_address(c, 2249 rc = ops->read_emulated(register_address(c,
2106 seg_override_base(ctxt, c), 2250 seg_override_base(ctxt, c),
2107 c->regs[VCPU_REGS_RSI]), 2251 c->regs[VCPU_REGS_RSI]),
2108 &c->dst.val, 2252 &c->dst.val,
2109 c->dst.bytes, 2253 c->dst.bytes,
2110 ctxt->vcpu)) != 0) 2254 ctxt->vcpu);
2255 if (rc != X86EMUL_CONTINUE)
2111 goto done; 2256 goto done;
2112 register_address_increment(c, &c->regs[VCPU_REGS_RSI], 2257 register_address_increment(c, &c->regs[VCPU_REGS_RSI],
2113 (ctxt->eflags & EFLG_DF) ? -c->dst.bytes 2258 (ctxt->eflags & EFLG_DF) ? -c->dst.bytes
@@ -2163,11 +2308,9 @@ special_insn:
2163 case 0xe9: /* jmp rel */ 2308 case 0xe9: /* jmp rel */
2164 goto jmp; 2309 goto jmp;
2165 case 0xea: /* jmp far */ 2310 case 0xea: /* jmp far */
2166 if (kvm_load_segment_descriptor(ctxt->vcpu, c->src2.val, 9, 2311 if (kvm_load_segment_descriptor(ctxt->vcpu, c->src2.val,
2167 VCPU_SREG_CS) < 0) { 2312 VCPU_SREG_CS))
2168 DPRINTF("jmp far: Failed to load CS descriptor\n"); 2313 goto done;
2169 goto cannot_emulate;
2170 }
2171 2314
2172 c->eip = c->src.val; 2315 c->eip = c->src.val;
2173 break; 2316 break;
@@ -2185,7 +2328,13 @@ special_insn:
2185 case 0xef: /* out (e/r)ax,dx */ 2328 case 0xef: /* out (e/r)ax,dx */
2186 port = c->regs[VCPU_REGS_RDX]; 2329 port = c->regs[VCPU_REGS_RDX];
2187 io_dir_in = 0; 2330 io_dir_in = 0;
2188 do_io: if (kvm_emulate_pio(ctxt->vcpu, io_dir_in, 2331 do_io:
2332 if (!emulator_io_permited(ctxt, ops, port,
2333 (c->d & ByteOp) ? 1 : c->op_bytes)) {
2334 kvm_inject_gp(ctxt->vcpu, 0);
2335 goto done;
2336 }
2337 if (kvm_emulate_pio(ctxt->vcpu, io_dir_in,
2189 (c->d & ByteOp) ? 1 : c->op_bytes, 2338 (c->d & ByteOp) ? 1 : c->op_bytes,
2190 port) != 0) { 2339 port) != 0) {
2191 c->eip = saved_eip; 2340 c->eip = saved_eip;
@@ -2210,13 +2359,21 @@ special_insn:
2210 c->dst.type = OP_NONE; /* Disable writeback. */ 2359 c->dst.type = OP_NONE; /* Disable writeback. */
2211 break; 2360 break;
2212 case 0xfa: /* cli */ 2361 case 0xfa: /* cli */
2213 ctxt->eflags &= ~X86_EFLAGS_IF; 2362 if (emulator_bad_iopl(ctxt))
2214 c->dst.type = OP_NONE; /* Disable writeback. */ 2363 kvm_inject_gp(ctxt->vcpu, 0);
2364 else {
2365 ctxt->eflags &= ~X86_EFLAGS_IF;
2366 c->dst.type = OP_NONE; /* Disable writeback. */
2367 }
2215 break; 2368 break;
2216 case 0xfb: /* sti */ 2369 case 0xfb: /* sti */
2217 toggle_interruptibility(ctxt, X86_SHADOW_INT_STI); 2370 if (emulator_bad_iopl(ctxt))
2218 ctxt->eflags |= X86_EFLAGS_IF; 2371 kvm_inject_gp(ctxt->vcpu, 0);
2219 c->dst.type = OP_NONE; /* Disable writeback. */ 2372 else {
2373 toggle_interruptibility(ctxt, X86_SHADOW_INT_STI);
2374 ctxt->eflags |= X86_EFLAGS_IF;
2375 c->dst.type = OP_NONE; /* Disable writeback. */
2376 }
2220 break; 2377 break;
2221 case 0xfc: /* cld */ 2378 case 0xfc: /* cld */
2222 ctxt->eflags &= ~EFLG_DF; 2379 ctxt->eflags &= ~EFLG_DF;
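emulator_bad_iopl() encodes the rule behind both branches: CLI/STI (like the port-I/O paths above) are unrestricted in real mode, always IOPL-restricted in vm86 as far as this helper is concerned (port I/O can still be granted by the TSS bitmap check), and allowed in protected or long mode only when CPL <= IOPL. Restated with the sense inverted (illustrative helper, not in the patch):

    /* Illustrative inverse of emulator_bad_iopl(); not part of the patch. */
    static bool iopl_sensitive_insn_allowed(int mode, int cpl, int iopl)
    {
            if (mode == X86EMUL_MODE_REAL)
                    return true;            /* no protection in real mode */
            if (mode == X86EMUL_MODE_VM86)
                    return false;           /* conservatively restricted  */
            return cpl <= iopl;             /* protected/long-mode rule   */
    }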
@@ -2319,8 +2476,9 @@ twobyte_insn:
2319 } 2476 }
2320 break; 2477 break;
2321 case 0x05: /* syscall */ 2478 case 0x05: /* syscall */
2322 if (emulate_syscall(ctxt) == -1) 2479 rc = emulate_syscall(ctxt);
2323 goto cannot_emulate; 2480 if (rc != X86EMUL_CONTINUE)
2481 goto done;
2324 else 2482 else
2325 goto writeback; 2483 goto writeback;
2326 break; 2484 break;
@@ -2391,14 +2549,16 @@ twobyte_insn:
2391 c->dst.type = OP_NONE; 2549 c->dst.type = OP_NONE;
2392 break; 2550 break;
2393 case 0x34: /* sysenter */ 2551 case 0x34: /* sysenter */
2394 if (emulate_sysenter(ctxt) == -1) 2552 rc = emulate_sysenter(ctxt);
2395 goto cannot_emulate; 2553 if (rc != X86EMUL_CONTINUE)
2554 goto done;
2396 else 2555 else
2397 goto writeback; 2556 goto writeback;
2398 break; 2557 break;
2399 case 0x35: /* sysexit */ 2558 case 0x35: /* sysexit */
2400 if (emulate_sysexit(ctxt) == -1) 2559 rc = emulate_sysexit(ctxt);
2401 goto cannot_emulate; 2560 if (rc != X86EMUL_CONTINUE)
2561 goto done;
2402 else 2562 else
2403 goto writeback; 2563 goto writeback;
2404 break; 2564 break;
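The syscall/sysenter/sysexit rewiring above completes this patch's move from ad-hoc -1 returns to the emulator's symbolic return codes. The convention the updated call sites assume:

    /* Return-code convention assumed by the updated call sites:
     *   X86EMUL_CONTINUE        - success, fall through to writeback
     *   X86EMUL_UNHANDLEABLE    - cannot emulate, bail out
     *   X86EMUL_PROPAGATE_FAULT - a guest fault (e.g. #GP) was queued
     */
    rc = emulate_sysenter(ctxt);
    if (rc != X86EMUL_CONTINUE)
            goto done;      /* fault already injected, or unhandleable */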
diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
index 15578f180e59..294698b6daff 100644
--- a/arch/x86/kvm/i8254.c
+++ b/arch/x86/kvm/i8254.c
@@ -242,11 +242,11 @@ static void kvm_pit_ack_irq(struct kvm_irq_ack_notifier *kian)
242{ 242{
243 struct kvm_kpit_state *ps = container_of(kian, struct kvm_kpit_state, 243 struct kvm_kpit_state *ps = container_of(kian, struct kvm_kpit_state,
244 irq_ack_notifier); 244 irq_ack_notifier);
245 spin_lock(&ps->inject_lock); 245 raw_spin_lock(&ps->inject_lock);
246 if (atomic_dec_return(&ps->pit_timer.pending) < 0) 246 if (atomic_dec_return(&ps->pit_timer.pending) < 0)
247 atomic_inc(&ps->pit_timer.pending); 247 atomic_inc(&ps->pit_timer.pending);
248 ps->irq_ack = 1; 248 ps->irq_ack = 1;
249 spin_unlock(&ps->inject_lock); 249 raw_spin_unlock(&ps->inject_lock);
250} 250}
251 251
252void __kvm_migrate_pit_timer(struct kvm_vcpu *vcpu) 252void __kvm_migrate_pit_timer(struct kvm_vcpu *vcpu)
@@ -605,7 +605,7 @@ static const struct kvm_io_device_ops speaker_dev_ops = {
605 .write = speaker_ioport_write, 605 .write = speaker_ioport_write,
606}; 606};
607 607
608/* Caller must have writers lock on slots_lock */ 608/* Caller must hold slots_lock */
609struct kvm_pit *kvm_create_pit(struct kvm *kvm, u32 flags) 609struct kvm_pit *kvm_create_pit(struct kvm *kvm, u32 flags)
610{ 610{
611 struct kvm_pit *pit; 611 struct kvm_pit *pit;
@@ -624,7 +624,7 @@ struct kvm_pit *kvm_create_pit(struct kvm *kvm, u32 flags)
624 624
625 mutex_init(&pit->pit_state.lock); 625 mutex_init(&pit->pit_state.lock);
626 mutex_lock(&pit->pit_state.lock); 626 mutex_lock(&pit->pit_state.lock);
627 spin_lock_init(&pit->pit_state.inject_lock); 627 raw_spin_lock_init(&pit->pit_state.inject_lock);
628 628
629 kvm->arch.vpit = pit; 629 kvm->arch.vpit = pit;
630 pit->kvm = kvm; 630 pit->kvm = kvm;
@@ -645,13 +645,13 @@ struct kvm_pit *kvm_create_pit(struct kvm *kvm, u32 flags)
645 kvm_register_irq_mask_notifier(kvm, 0, &pit->mask_notifier); 645 kvm_register_irq_mask_notifier(kvm, 0, &pit->mask_notifier);
646 646
647 kvm_iodevice_init(&pit->dev, &pit_dev_ops); 647 kvm_iodevice_init(&pit->dev, &pit_dev_ops);
648 ret = __kvm_io_bus_register_dev(&kvm->pio_bus, &pit->dev); 648 ret = kvm_io_bus_register_dev(kvm, KVM_PIO_BUS, &pit->dev);
649 if (ret < 0) 649 if (ret < 0)
650 goto fail; 650 goto fail;
651 651
652 if (flags & KVM_PIT_SPEAKER_DUMMY) { 652 if (flags & KVM_PIT_SPEAKER_DUMMY) {
653 kvm_iodevice_init(&pit->speaker_dev, &speaker_dev_ops); 653 kvm_iodevice_init(&pit->speaker_dev, &speaker_dev_ops);
654 ret = __kvm_io_bus_register_dev(&kvm->pio_bus, 654 ret = kvm_io_bus_register_dev(kvm, KVM_PIO_BUS,
655 &pit->speaker_dev); 655 &pit->speaker_dev);
656 if (ret < 0) 656 if (ret < 0)
657 goto fail_unregister; 657 goto fail_unregister;
@@ -660,11 +660,12 @@ struct kvm_pit *kvm_create_pit(struct kvm *kvm, u32 flags)
660 return pit; 660 return pit;
661 661
662fail_unregister: 662fail_unregister:
663 __kvm_io_bus_unregister_dev(&kvm->pio_bus, &pit->dev); 663 kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, &pit->dev);
664 664
665fail: 665fail:
666 if (pit->irq_source_id >= 0) 666 kvm_unregister_irq_mask_notifier(kvm, 0, &pit->mask_notifier);
667 kvm_free_irq_source_id(kvm, pit->irq_source_id); 667 kvm_unregister_irq_ack_notifier(kvm, &pit_state->irq_ack_notifier);
668 kvm_free_irq_source_id(kvm, pit->irq_source_id);
668 669
669 kfree(pit); 670 kfree(pit);
670 return NULL; 671 return NULL;
@@ -723,12 +724,12 @@ void kvm_inject_pit_timer_irqs(struct kvm_vcpu *vcpu)
723 /* Try to inject pending interrupts when 724 /* Try to inject pending interrupts when
724 * last one has been acked. 725 * last one has been acked.
725 */ 726 */
726 spin_lock(&ps->inject_lock); 727 raw_spin_lock(&ps->inject_lock);
727 if (atomic_read(&ps->pit_timer.pending) && ps->irq_ack) { 728 if (atomic_read(&ps->pit_timer.pending) && ps->irq_ack) {
728 ps->irq_ack = 0; 729 ps->irq_ack = 0;
729 inject = 1; 730 inject = 1;
730 } 731 }
731 spin_unlock(&ps->inject_lock); 732 raw_spin_unlock(&ps->inject_lock);
732 if (inject) 733 if (inject)
733 __inject_pit_timer_intr(kvm); 734 __inject_pit_timer_intr(kvm);
734 } 735 }
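The spinlock_t to raw_spinlock_t conversion here (mirrored in i8259.c below) follows the -rt convention: under PREEMPT_RT a plain spinlock_t may become a sleeping lock, which is unsafe on the interrupt-injection path, while a raw_spinlock_t always spins. The pattern, sketched against the ack path above:

    #include <linux/spinlock.h>

    /* Sketch: raw spinlocks never sleep, so keep the section short. */
    static DEFINE_RAW_SPINLOCK(inject_lock);

    static void ack_pending(atomic_t *pending, unsigned long *irq_ack)
    {
            raw_spin_lock(&inject_lock);
            if (atomic_dec_return(pending) < 0)     /* don't go below zero */
                    atomic_inc(pending);
            *irq_ack = 1;
            raw_spin_unlock(&inject_lock);
    }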
diff --git a/arch/x86/kvm/i8254.h b/arch/x86/kvm/i8254.h
index d4c1c7ffdc09..900d6b0ba7c2 100644
--- a/arch/x86/kvm/i8254.h
+++ b/arch/x86/kvm/i8254.h
@@ -27,7 +27,7 @@ struct kvm_kpit_state {
27 u32 speaker_data_on; 27 u32 speaker_data_on;
28 struct mutex lock; 28 struct mutex lock;
29 struct kvm_pit *pit; 29 struct kvm_pit *pit;
30 spinlock_t inject_lock; 30 raw_spinlock_t inject_lock;
31 unsigned long irq_ack; 31 unsigned long irq_ack;
32 struct kvm_irq_ack_notifier irq_ack_notifier; 32 struct kvm_irq_ack_notifier irq_ack_notifier;
33}; 33};
diff --git a/arch/x86/kvm/i8259.c b/arch/x86/kvm/i8259.c
index d057c0cbd245..07771da85de5 100644
--- a/arch/x86/kvm/i8259.c
+++ b/arch/x86/kvm/i8259.c
@@ -44,18 +44,19 @@ static void pic_clear_isr(struct kvm_kpic_state *s, int irq)
44 * Other interrupt may be delivered to PIC while lock is dropped but 44 * Other interrupt may be delivered to PIC while lock is dropped but
45 * it should be safe since PIC state is already updated at this stage. 45 * it should be safe since PIC state is already updated at this stage.
46 */ 46 */
47 spin_unlock(&s->pics_state->lock); 47 raw_spin_unlock(&s->pics_state->lock);
48 kvm_notify_acked_irq(s->pics_state->kvm, SELECT_PIC(irq), irq); 48 kvm_notify_acked_irq(s->pics_state->kvm, SELECT_PIC(irq), irq);
49 spin_lock(&s->pics_state->lock); 49 raw_spin_lock(&s->pics_state->lock);
50} 50}
51 51
52void kvm_pic_clear_isr_ack(struct kvm *kvm) 52void kvm_pic_clear_isr_ack(struct kvm *kvm)
53{ 53{
54 struct kvm_pic *s = pic_irqchip(kvm); 54 struct kvm_pic *s = pic_irqchip(kvm);
55 spin_lock(&s->lock); 55
56 raw_spin_lock(&s->lock);
56 s->pics[0].isr_ack = 0xff; 57 s->pics[0].isr_ack = 0xff;
57 s->pics[1].isr_ack = 0xff; 58 s->pics[1].isr_ack = 0xff;
58 spin_unlock(&s->lock); 59 raw_spin_unlock(&s->lock);
59} 60}
60 61
61/* 62/*
@@ -156,9 +157,9 @@ static void pic_update_irq(struct kvm_pic *s)
156 157
157void kvm_pic_update_irq(struct kvm_pic *s) 158void kvm_pic_update_irq(struct kvm_pic *s)
158{ 159{
159 spin_lock(&s->lock); 160 raw_spin_lock(&s->lock);
160 pic_update_irq(s); 161 pic_update_irq(s);
161 spin_unlock(&s->lock); 162 raw_spin_unlock(&s->lock);
162} 163}
163 164
164int kvm_pic_set_irq(void *opaque, int irq, int level) 165int kvm_pic_set_irq(void *opaque, int irq, int level)
@@ -166,14 +167,14 @@ int kvm_pic_set_irq(void *opaque, int irq, int level)
166 struct kvm_pic *s = opaque; 167 struct kvm_pic *s = opaque;
167 int ret = -1; 168 int ret = -1;
168 169
169 spin_lock(&s->lock); 170 raw_spin_lock(&s->lock);
170 if (irq >= 0 && irq < PIC_NUM_PINS) { 171 if (irq >= 0 && irq < PIC_NUM_PINS) {
171 ret = pic_set_irq1(&s->pics[irq >> 3], irq & 7, level); 172 ret = pic_set_irq1(&s->pics[irq >> 3], irq & 7, level);
172 pic_update_irq(s); 173 pic_update_irq(s);
173 trace_kvm_pic_set_irq(irq >> 3, irq & 7, s->pics[irq >> 3].elcr, 174 trace_kvm_pic_set_irq(irq >> 3, irq & 7, s->pics[irq >> 3].elcr,
174 s->pics[irq >> 3].imr, ret == 0); 175 s->pics[irq >> 3].imr, ret == 0);
175 } 176 }
176 spin_unlock(&s->lock); 177 raw_spin_unlock(&s->lock);
177 178
178 return ret; 179 return ret;
179} 180}
@@ -203,7 +204,7 @@ int kvm_pic_read_irq(struct kvm *kvm)
203 int irq, irq2, intno; 204 int irq, irq2, intno;
204 struct kvm_pic *s = pic_irqchip(kvm); 205 struct kvm_pic *s = pic_irqchip(kvm);
205 206
206 spin_lock(&s->lock); 207 raw_spin_lock(&s->lock);
207 irq = pic_get_irq(&s->pics[0]); 208 irq = pic_get_irq(&s->pics[0]);
208 if (irq >= 0) { 209 if (irq >= 0) {
209 pic_intack(&s->pics[0], irq); 210 pic_intack(&s->pics[0], irq);
@@ -228,7 +229,7 @@ int kvm_pic_read_irq(struct kvm *kvm)
228 intno = s->pics[0].irq_base + irq; 229 intno = s->pics[0].irq_base + irq;
229 } 230 }
230 pic_update_irq(s); 231 pic_update_irq(s);
231 spin_unlock(&s->lock); 232 raw_spin_unlock(&s->lock);
232 233
233 return intno; 234 return intno;
234} 235}
@@ -442,7 +443,7 @@ static int picdev_write(struct kvm_io_device *this,
442 printk(KERN_ERR "PIC: non byte write\n"); 443 printk(KERN_ERR "PIC: non byte write\n");
443 return 0; 444 return 0;
444 } 445 }
445 spin_lock(&s->lock); 446 raw_spin_lock(&s->lock);
446 switch (addr) { 447 switch (addr) {
447 case 0x20: 448 case 0x20:
448 case 0x21: 449 case 0x21:
@@ -455,7 +456,7 @@ static int picdev_write(struct kvm_io_device *this,
455 elcr_ioport_write(&s->pics[addr & 1], addr, data); 456 elcr_ioport_write(&s->pics[addr & 1], addr, data);
456 break; 457 break;
457 } 458 }
458 spin_unlock(&s->lock); 459 raw_spin_unlock(&s->lock);
459 return 0; 460 return 0;
460} 461}
461 462
@@ -472,7 +473,7 @@ static int picdev_read(struct kvm_io_device *this,
472 printk(KERN_ERR "PIC: non byte read\n"); 473 printk(KERN_ERR "PIC: non byte read\n");
473 return 0; 474 return 0;
474 } 475 }
475 spin_lock(&s->lock); 476 raw_spin_lock(&s->lock);
476 switch (addr) { 477 switch (addr) {
477 case 0x20: 478 case 0x20:
478 case 0x21: 479 case 0x21:
@@ -486,7 +487,7 @@ static int picdev_read(struct kvm_io_device *this,
486 break; 487 break;
487 } 488 }
488 *(unsigned char *)val = data; 489 *(unsigned char *)val = data;
489 spin_unlock(&s->lock); 490 raw_spin_unlock(&s->lock);
490 return 0; 491 return 0;
491} 492}
492 493
@@ -520,7 +521,7 @@ struct kvm_pic *kvm_create_pic(struct kvm *kvm)
520 s = kzalloc(sizeof(struct kvm_pic), GFP_KERNEL); 521 s = kzalloc(sizeof(struct kvm_pic), GFP_KERNEL);
521 if (!s) 522 if (!s)
522 return NULL; 523 return NULL;
523 spin_lock_init(&s->lock); 524 raw_spin_lock_init(&s->lock);
524 s->kvm = kvm; 525 s->kvm = kvm;
525 s->pics[0].elcr_mask = 0xf8; 526 s->pics[0].elcr_mask = 0xf8;
526 s->pics[1].elcr_mask = 0xde; 527 s->pics[1].elcr_mask = 0xde;
@@ -533,7 +534,9 @@ struct kvm_pic *kvm_create_pic(struct kvm *kvm)
533 * Initialize PIO device 534 * Initialize PIO device
534 */ 535 */
535 kvm_iodevice_init(&s->dev, &picdev_ops); 536 kvm_iodevice_init(&s->dev, &picdev_ops);
536 ret = kvm_io_bus_register_dev(kvm, &kvm->pio_bus, &s->dev); 537 mutex_lock(&kvm->slots_lock);
538 ret = kvm_io_bus_register_dev(kvm, KVM_PIO_BUS, &s->dev);
539 mutex_unlock(&kvm->slots_lock);
537 if (ret < 0) { 540 if (ret < 0) {
538 kfree(s); 541 kfree(s);
539 return NULL; 542 return NULL;
@@ -541,3 +544,14 @@ struct kvm_pic *kvm_create_pic(struct kvm *kvm)
541 544
542 return s; 545 return s;
543} 546}
547
548void kvm_destroy_pic(struct kvm *kvm)
549{
550 struct kvm_pic *vpic = kvm->arch.vpic;
551
552 if (vpic) {
553 kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, &vpic->dev);
554 kvm->arch.vpic = NULL;
555 kfree(vpic);
556 }
557}
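kvm_io_bus_register_dev() now takes the kvm pointer plus a bus index and is assumed to require slots_lock, hence the explicit lock/unlock around the PIC registration above (the PIT caller is assumed to hold it already). The new kvm_destroy_pic() keeps registration and teardown symmetric:

    /* Registration, under slots_lock as kvm_create_pic() does above: */
    mutex_lock(&kvm->slots_lock);
    ret = kvm_io_bus_register_dev(kvm, KVM_PIO_BUS, &s->dev);
    mutex_unlock(&kvm->slots_lock);

    /* Teardown mirrors it in kvm_destroy_pic(): */
    kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, &vpic->dev);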
diff --git a/arch/x86/kvm/irq.h b/arch/x86/kvm/irq.h
index be399e207d57..34b15915754d 100644
--- a/arch/x86/kvm/irq.h
+++ b/arch/x86/kvm/irq.h
@@ -62,7 +62,7 @@ struct kvm_kpic_state {
62}; 62};
63 63
64struct kvm_pic { 64struct kvm_pic {
65 spinlock_t lock; 65 raw_spinlock_t lock;
66 unsigned pending_acks; 66 unsigned pending_acks;
67 struct kvm *kvm; 67 struct kvm *kvm;
68 struct kvm_kpic_state pics[2]; /* 0 is master pic, 1 is slave pic */ 68 struct kvm_kpic_state pics[2]; /* 0 is master pic, 1 is slave pic */
@@ -75,6 +75,7 @@ struct kvm_pic {
75}; 75};
76 76
77struct kvm_pic *kvm_create_pic(struct kvm *kvm); 77struct kvm_pic *kvm_create_pic(struct kvm *kvm);
78void kvm_destroy_pic(struct kvm *kvm);
78int kvm_pic_read_irq(struct kvm *kvm); 79int kvm_pic_read_irq(struct kvm *kvm);
79void kvm_pic_update_irq(struct kvm_pic *s); 80void kvm_pic_update_irq(struct kvm_pic *s);
80void kvm_pic_clear_isr_ack(struct kvm *kvm); 81void kvm_pic_clear_isr_ack(struct kvm *kvm);
diff --git a/arch/x86/kvm/kvm_cache_regs.h b/arch/x86/kvm/kvm_cache_regs.h
index 7bcc5b6a4403..cff851cf5322 100644
--- a/arch/x86/kvm/kvm_cache_regs.h
+++ b/arch/x86/kvm/kvm_cache_regs.h
@@ -1,6 +1,11 @@
1#ifndef ASM_KVM_CACHE_REGS_H 1#ifndef ASM_KVM_CACHE_REGS_H
2#define ASM_KVM_CACHE_REGS_H 2#define ASM_KVM_CACHE_REGS_H
3 3
4#define KVM_POSSIBLE_CR0_GUEST_BITS X86_CR0_TS
5#define KVM_POSSIBLE_CR4_GUEST_BITS \
6 (X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR \
7 | X86_CR4_OSXMMEXCPT | X86_CR4_PGE)
8
4static inline unsigned long kvm_register_read(struct kvm_vcpu *vcpu, 9static inline unsigned long kvm_register_read(struct kvm_vcpu *vcpu,
5 enum kvm_reg reg) 10 enum kvm_reg reg)
6{ 11{
@@ -38,4 +43,30 @@ static inline u64 kvm_pdptr_read(struct kvm_vcpu *vcpu, int index)
38 return vcpu->arch.pdptrs[index]; 43 return vcpu->arch.pdptrs[index];
39} 44}
40 45
46static inline ulong kvm_read_cr0_bits(struct kvm_vcpu *vcpu, ulong mask)
47{
48 ulong tmask = mask & KVM_POSSIBLE_CR0_GUEST_BITS;
49 if (tmask & vcpu->arch.cr0_guest_owned_bits)
50 kvm_x86_ops->decache_cr0_guest_bits(vcpu);
51 return vcpu->arch.cr0 & mask;
52}
53
54static inline ulong kvm_read_cr0(struct kvm_vcpu *vcpu)
55{
56 return kvm_read_cr0_bits(vcpu, ~0UL);
57}
58
59static inline ulong kvm_read_cr4_bits(struct kvm_vcpu *vcpu, ulong mask)
60{
61 ulong tmask = mask & KVM_POSSIBLE_CR4_GUEST_BITS;
62 if (tmask & vcpu->arch.cr4_guest_owned_bits)
63 kvm_x86_ops->decache_cr4_guest_bits(vcpu);
64 return vcpu->arch.cr4 & mask;
65}
66
67static inline ulong kvm_read_cr4(struct kvm_vcpu *vcpu)
68{
69 return kvm_read_cr4_bits(vcpu, ~0UL);
70}
71
41#endif 72#endif
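The point of the masked readers: bits named in KVM_POSSIBLE_CR*_GUEST_BITS may be guest-owned (writable without a VM exit), so their cached values can be stale; the helpers decache from the VMCS/VMCB only when a requested bit is actually guest-owned. A usage example, drawn from the mmu.c hunk later in this patch:

    /* Reads only CR0.WP. WP is not in KVM_POSSIBLE_CR0_GUEST_BITS (only
     * TS is), so no decache is ever triggered and the read stays cheap. */
    static int is_write_protection(struct kvm_vcpu *vcpu)
    {
            return kvm_read_cr0_bits(vcpu, X86_CR0_WP);
    }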
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index ba8c045da782..4b224f90087b 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -1246,3 +1246,34 @@ int kvm_x2apic_msr_read(struct kvm_vcpu *vcpu, u32 msr, u64 *data)
1246 1246
1247 return 0; 1247 return 0;
1248} 1248}
1249
1250int kvm_hv_vapic_msr_write(struct kvm_vcpu *vcpu, u32 reg, u64 data)
1251{
1252 struct kvm_lapic *apic = vcpu->arch.apic;
1253
1254 if (!irqchip_in_kernel(vcpu->kvm))
1255 return 1;
1256
 1257	/* for an ICR write, store the high half (ICR2) before the command */
1258 if (reg == APIC_ICR)
1259 apic_reg_write(apic, APIC_ICR2, (u32)(data >> 32));
1260 return apic_reg_write(apic, reg, (u32)data);
1261}
1262
1263int kvm_hv_vapic_msr_read(struct kvm_vcpu *vcpu, u32 reg, u64 *data)
1264{
1265 struct kvm_lapic *apic = vcpu->arch.apic;
1266 u32 low, high = 0;
1267
1268 if (!irqchip_in_kernel(vcpu->kvm))
1269 return 1;
1270
1271 if (apic_reg_read(apic, reg, 4, &low))
1272 return 1;
1273 if (reg == APIC_ICR)
1274 apic_reg_read(apic, APIC_ICR2, 4, &high);
1275
1276 *data = (((u64)high) << 32) | low;
1277
1278 return 0;
1279}
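The ICR special-casing above reflects the xAPIC's split register layout: the Hyper-V MSR carries the full 64-bit ICR, but writing the low register (APIC_ICR) is what triggers delivery, so the destination half must already be in APIC_ICR2. In outline:

    /* How a 64-bit Hyper-V ICR value maps onto the xAPIC register pair. */
    u32 icr_low  = (u32)data;         /* vector, delivery mode; triggers IPI */
    u32 icr_high = (u32)(data >> 32); /* destination field, lands in ICR2    */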
diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h
index 40010b09c4aa..f5fe32c5edad 100644
--- a/arch/x86/kvm/lapic.h
+++ b/arch/x86/kvm/lapic.h
@@ -48,4 +48,12 @@ void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu);
48 48
49int kvm_x2apic_msr_write(struct kvm_vcpu *vcpu, u32 msr, u64 data); 49int kvm_x2apic_msr_write(struct kvm_vcpu *vcpu, u32 msr, u64 data);
50int kvm_x2apic_msr_read(struct kvm_vcpu *vcpu, u32 msr, u64 *data); 50int kvm_x2apic_msr_read(struct kvm_vcpu *vcpu, u32 msr, u64 *data);
51
52int kvm_hv_vapic_msr_write(struct kvm_vcpu *vcpu, u32 msr, u64 data);
53int kvm_hv_vapic_msr_read(struct kvm_vcpu *vcpu, u32 msr, u64 *data);
54
55static inline bool kvm_hv_vapic_assist_page_enabled(struct kvm_vcpu *vcpu)
56{
57 return vcpu->arch.hv_vapic & HV_X64_MSR_APIC_ASSIST_PAGE_ENABLE;
58}
51#endif 59#endif
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 89a49fb46a27..741373e8ca77 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -18,6 +18,7 @@
18 */ 18 */
19 19
20#include "mmu.h" 20#include "mmu.h"
21#include "x86.h"
21#include "kvm_cache_regs.h" 22#include "kvm_cache_regs.h"
22 23
23#include <linux/kvm_host.h> 24#include <linux/kvm_host.h>
@@ -29,6 +30,7 @@
29#include <linux/swap.h> 30#include <linux/swap.h>
30#include <linux/hugetlb.h> 31#include <linux/hugetlb.h>
31#include <linux/compiler.h> 32#include <linux/compiler.h>
33#include <linux/srcu.h>
32 34
33#include <asm/page.h> 35#include <asm/page.h>
34#include <asm/cmpxchg.h> 36#include <asm/cmpxchg.h>
@@ -136,16 +138,6 @@ module_param(oos_shadow, bool, 0644);
136#define PT64_PERM_MASK (PT_PRESENT_MASK | PT_WRITABLE_MASK | PT_USER_MASK \ 138#define PT64_PERM_MASK (PT_PRESENT_MASK | PT_WRITABLE_MASK | PT_USER_MASK \
137 | PT64_NX_MASK) 139 | PT64_NX_MASK)
138 140
139#define PFERR_PRESENT_MASK (1U << 0)
140#define PFERR_WRITE_MASK (1U << 1)
141#define PFERR_USER_MASK (1U << 2)
142#define PFERR_RSVD_MASK (1U << 3)
143#define PFERR_FETCH_MASK (1U << 4)
144
145#define PT_PDPE_LEVEL 3
146#define PT_DIRECTORY_LEVEL 2
147#define PT_PAGE_TABLE_LEVEL 1
148
149#define RMAP_EXT 4 141#define RMAP_EXT 4
150 142
151#define ACC_EXEC_MASK 1 143#define ACC_EXEC_MASK 1
@@ -153,6 +145,9 @@ module_param(oos_shadow, bool, 0644);
153#define ACC_USER_MASK PT_USER_MASK 145#define ACC_USER_MASK PT_USER_MASK
154#define ACC_ALL (ACC_EXEC_MASK | ACC_WRITE_MASK | ACC_USER_MASK) 146#define ACC_ALL (ACC_EXEC_MASK | ACC_WRITE_MASK | ACC_USER_MASK)
155 147
148#include <trace/events/kvm.h>
149
150#undef TRACE_INCLUDE_FILE
156#define CREATE_TRACE_POINTS 151#define CREATE_TRACE_POINTS
157#include "mmutrace.h" 152#include "mmutrace.h"
158 153
@@ -229,7 +224,7 @@ EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes);
229 224
230static int is_write_protection(struct kvm_vcpu *vcpu) 225static int is_write_protection(struct kvm_vcpu *vcpu)
231{ 226{
232 return vcpu->arch.cr0 & X86_CR0_WP; 227 return kvm_read_cr0_bits(vcpu, X86_CR0_WP);
233} 228}
234 229
235static int is_cpuid_PSE36(void) 230static int is_cpuid_PSE36(void)
@@ -239,7 +234,7 @@ static int is_cpuid_PSE36(void)
239 234
240static int is_nx(struct kvm_vcpu *vcpu) 235static int is_nx(struct kvm_vcpu *vcpu)
241{ 236{
242 return vcpu->arch.shadow_efer & EFER_NX; 237 return vcpu->arch.efer & EFER_NX;
243} 238}
244 239
245static int is_shadow_present_pte(u64 pte) 240static int is_shadow_present_pte(u64 pte)
@@ -253,7 +248,7 @@ static int is_large_pte(u64 pte)
253 return pte & PT_PAGE_SIZE_MASK; 248 return pte & PT_PAGE_SIZE_MASK;
254} 249}
255 250
256static int is_writeble_pte(unsigned long pte) 251static int is_writable_pte(unsigned long pte)
257{ 252{
258 return pte & PT_WRITABLE_MASK; 253 return pte & PT_WRITABLE_MASK;
259} 254}
@@ -470,24 +465,10 @@ static int has_wrprotected_page(struct kvm *kvm,
470 465
471static int host_mapping_level(struct kvm *kvm, gfn_t gfn) 466static int host_mapping_level(struct kvm *kvm, gfn_t gfn)
472{ 467{
473 unsigned long page_size = PAGE_SIZE; 468 unsigned long page_size;
474 struct vm_area_struct *vma;
475 unsigned long addr;
476 int i, ret = 0; 469 int i, ret = 0;
477 470
478 addr = gfn_to_hva(kvm, gfn); 471 page_size = kvm_host_page_size(kvm, gfn);
479 if (kvm_is_error_hva(addr))
480 return PT_PAGE_TABLE_LEVEL;
481
482 down_read(&current->mm->mmap_sem);
483 vma = find_vma(current->mm, addr);
484 if (!vma)
485 goto out;
486
487 page_size = vma_kernel_pagesize(vma);
488
489out:
490 up_read(&current->mm->mmap_sem);
491 472
492 for (i = PT_PAGE_TABLE_LEVEL; 473 for (i = PT_PAGE_TABLE_LEVEL;
493 i < (PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES); ++i) { 474 i < (PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES); ++i) {
@@ -503,8 +484,7 @@ out:
503static int mapping_level(struct kvm_vcpu *vcpu, gfn_t large_gfn) 484static int mapping_level(struct kvm_vcpu *vcpu, gfn_t large_gfn)
504{ 485{
505 struct kvm_memory_slot *slot; 486 struct kvm_memory_slot *slot;
506 int host_level; 487 int host_level, level, max_level;
507 int level = PT_PAGE_TABLE_LEVEL;
508 488
509 slot = gfn_to_memslot(vcpu->kvm, large_gfn); 489 slot = gfn_to_memslot(vcpu->kvm, large_gfn);
510 if (slot && slot->dirty_bitmap) 490 if (slot && slot->dirty_bitmap)
@@ -515,7 +495,10 @@ static int mapping_level(struct kvm_vcpu *vcpu, gfn_t large_gfn)
515 if (host_level == PT_PAGE_TABLE_LEVEL) 495 if (host_level == PT_PAGE_TABLE_LEVEL)
516 return host_level; 496 return host_level;
517 497
518 for (level = PT_DIRECTORY_LEVEL; level <= host_level; ++level) 498 max_level = kvm_x86_ops->get_lpage_level() < host_level ?
499 kvm_x86_ops->get_lpage_level() : host_level;
500
501 for (level = PT_DIRECTORY_LEVEL; level <= max_level; ++level)
519 if (has_wrprotected_page(vcpu->kvm, large_gfn, level)) 502 if (has_wrprotected_page(vcpu->kvm, large_gfn, level))
520 break; 503 break;
521 504
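The new max_level computation clamps the mapping level to what the vendor module reports via get_lpage_level(), since hardware paging formats (e.g. EPT) may support fewer large-page sizes than the host backing provides. The ?: expression above is equivalently:

    /* Effective level = min(vendor-supported level, host-backed level). */
    max_level = min(kvm_x86_ops->get_lpage_level(), host_level);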
@@ -633,7 +616,7 @@ static void rmap_remove(struct kvm *kvm, u64 *spte)
633 pfn = spte_to_pfn(*spte); 616 pfn = spte_to_pfn(*spte);
634 if (*spte & shadow_accessed_mask) 617 if (*spte & shadow_accessed_mask)
635 kvm_set_pfn_accessed(pfn); 618 kvm_set_pfn_accessed(pfn);
636 if (is_writeble_pte(*spte)) 619 if (is_writable_pte(*spte))
637 kvm_set_pfn_dirty(pfn); 620 kvm_set_pfn_dirty(pfn);
638 rmapp = gfn_to_rmap(kvm, sp->gfns[spte - sp->spt], sp->role.level); 621 rmapp = gfn_to_rmap(kvm, sp->gfns[spte - sp->spt], sp->role.level);
639 if (!*rmapp) { 622 if (!*rmapp) {
@@ -662,6 +645,7 @@ static void rmap_remove(struct kvm *kvm, u64 *spte)
662 prev_desc = desc; 645 prev_desc = desc;
663 desc = desc->more; 646 desc = desc->more;
664 } 647 }
648 pr_err("rmap_remove: %p %llx many->many\n", spte, *spte);
665 BUG(); 649 BUG();
666 } 650 }
667} 651}
@@ -708,7 +692,7 @@ static int rmap_write_protect(struct kvm *kvm, u64 gfn)
708 BUG_ON(!spte); 692 BUG_ON(!spte);
709 BUG_ON(!(*spte & PT_PRESENT_MASK)); 693 BUG_ON(!(*spte & PT_PRESENT_MASK));
710 rmap_printk("rmap_write_protect: spte %p %llx\n", spte, *spte); 694 rmap_printk("rmap_write_protect: spte %p %llx\n", spte, *spte);
711 if (is_writeble_pte(*spte)) { 695 if (is_writable_pte(*spte)) {
712 __set_spte(spte, *spte & ~PT_WRITABLE_MASK); 696 __set_spte(spte, *spte & ~PT_WRITABLE_MASK);
713 write_protected = 1; 697 write_protected = 1;
714 } 698 }
@@ -732,7 +716,7 @@ static int rmap_write_protect(struct kvm *kvm, u64 gfn)
732 BUG_ON(!(*spte & PT_PRESENT_MASK)); 716 BUG_ON(!(*spte & PT_PRESENT_MASK));
733 BUG_ON((*spte & (PT_PAGE_SIZE_MASK|PT_PRESENT_MASK)) != (PT_PAGE_SIZE_MASK|PT_PRESENT_MASK)); 717 BUG_ON((*spte & (PT_PAGE_SIZE_MASK|PT_PRESENT_MASK)) != (PT_PAGE_SIZE_MASK|PT_PRESENT_MASK));
734 pgprintk("rmap_write_protect(large): spte %p %llx %lld\n", spte, *spte, gfn); 718 pgprintk("rmap_write_protect(large): spte %p %llx %lld\n", spte, *spte, gfn);
735 if (is_writeble_pte(*spte)) { 719 if (is_writable_pte(*spte)) {
736 rmap_remove(kvm, spte); 720 rmap_remove(kvm, spte);
737 --kvm->stat.lpages; 721 --kvm->stat.lpages;
738 __set_spte(spte, shadow_trap_nonpresent_pte); 722 __set_spte(spte, shadow_trap_nonpresent_pte);
@@ -787,7 +771,7 @@ static int kvm_set_pte_rmapp(struct kvm *kvm, unsigned long *rmapp,
787 771
788 new_spte &= ~PT_WRITABLE_MASK; 772 new_spte &= ~PT_WRITABLE_MASK;
789 new_spte &= ~SPTE_HOST_WRITEABLE; 773 new_spte &= ~SPTE_HOST_WRITEABLE;
790 if (is_writeble_pte(*spte)) 774 if (is_writable_pte(*spte))
791 kvm_set_pfn_dirty(spte_to_pfn(*spte)); 775 kvm_set_pfn_dirty(spte_to_pfn(*spte));
792 __set_spte(spte, new_spte); 776 __set_spte(spte, new_spte);
793 spte = rmap_next(kvm, rmapp, spte); 777 spte = rmap_next(kvm, rmapp, spte);
@@ -805,35 +789,32 @@ static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
805 unsigned long data)) 789 unsigned long data))
806{ 790{
807 int i, j; 791 int i, j;
792 int ret;
808 int retval = 0; 793 int retval = 0;
794 struct kvm_memslots *slots;
809 795
810 /* 796 slots = rcu_dereference(kvm->memslots);
811 * If mmap_sem isn't taken, we can look the memslots with only 797
812 * the mmu_lock by skipping over the slots with userspace_addr == 0. 798 for (i = 0; i < slots->nmemslots; i++) {
813 */ 799 struct kvm_memory_slot *memslot = &slots->memslots[i];
814 for (i = 0; i < kvm->nmemslots; i++) {
815 struct kvm_memory_slot *memslot = &kvm->memslots[i];
816 unsigned long start = memslot->userspace_addr; 800 unsigned long start = memslot->userspace_addr;
817 unsigned long end; 801 unsigned long end;
818 802
819 /* mmu_lock protects userspace_addr */
820 if (!start)
821 continue;
822
823 end = start + (memslot->npages << PAGE_SHIFT); 803 end = start + (memslot->npages << PAGE_SHIFT);
824 if (hva >= start && hva < end) { 804 if (hva >= start && hva < end) {
825 gfn_t gfn_offset = (hva - start) >> PAGE_SHIFT; 805 gfn_t gfn_offset = (hva - start) >> PAGE_SHIFT;
826 806
827 retval |= handler(kvm, &memslot->rmap[gfn_offset], 807 ret = handler(kvm, &memslot->rmap[gfn_offset], data);
828 data);
829 808
830 for (j = 0; j < KVM_NR_PAGE_SIZES - 1; ++j) { 809 for (j = 0; j < KVM_NR_PAGE_SIZES - 1; ++j) {
831 int idx = gfn_offset; 810 int idx = gfn_offset;
832 idx /= KVM_PAGES_PER_HPAGE(PT_DIRECTORY_LEVEL + j); 811 idx /= KVM_PAGES_PER_HPAGE(PT_DIRECTORY_LEVEL + j);
833 retval |= handler(kvm, 812 ret |= handler(kvm,
834 &memslot->lpage_info[j][idx].rmap_pde, 813 &memslot->lpage_info[j][idx].rmap_pde,
835 data); 814 data);
836 } 815 }
816 trace_kvm_age_page(hva, memslot, ret);
817 retval |= ret;
837 } 818 }
838 } 819 }
839 820
@@ -856,9 +837,15 @@ static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
856 u64 *spte; 837 u64 *spte;
857 int young = 0; 838 int young = 0;
858 839
859 /* always return old for EPT */ 840 /*
841 * Emulate the accessed bit for EPT, by checking if this page has
842 * an EPT mapping, and clearing it if it does. On the next access,
843 * a new EPT mapping will be established.
844 * This has some overhead, but not as much as the cost of swapping
845 * out actively used pages or breaking up actively used hugepages.
846 */
860 if (!shadow_accessed_mask) 847 if (!shadow_accessed_mask)
861 return 0; 848 return kvm_unmap_rmapp(kvm, rmapp, data);
862 849
863 spte = rmap_next(kvm, rmapp, NULL); 850 spte = rmap_next(kvm, rmapp, NULL);
864 while (spte) { 851 while (spte) {
@@ -1615,7 +1602,7 @@ static void mmu_unshadow(struct kvm *kvm, gfn_t gfn)
1615 1602
1616static void page_header_update_slot(struct kvm *kvm, void *pte, gfn_t gfn) 1603static void page_header_update_slot(struct kvm *kvm, void *pte, gfn_t gfn)
1617{ 1604{
1618 int slot = memslot_id(kvm, gfn_to_memslot(kvm, gfn)); 1605 int slot = memslot_id(kvm, gfn);
1619 struct kvm_mmu_page *sp = page_header(__pa(pte)); 1606 struct kvm_mmu_page *sp = page_header(__pa(pte));
1620 1607
1621 __set_bit(slot, sp->slot_bitmap); 1608 __set_bit(slot, sp->slot_bitmap);
@@ -1639,7 +1626,7 @@ struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva)
1639{ 1626{
1640 struct page *page; 1627 struct page *page;
1641 1628
1642 gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gva); 1629 gpa_t gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, NULL);
1643 1630
1644 if (gpa == UNMAPPED_GVA) 1631 if (gpa == UNMAPPED_GVA)
1645 return NULL; 1632 return NULL;
@@ -1852,7 +1839,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
1852 * is responsibility of mmu_get_page / kvm_sync_page. 1839 * is responsibility of mmu_get_page / kvm_sync_page.
1853 * Same reasoning can be applied to dirty page accounting. 1840 * Same reasoning can be applied to dirty page accounting.
1854 */ 1841 */
1855 if (!can_unsync && is_writeble_pte(*sptep)) 1842 if (!can_unsync && is_writable_pte(*sptep))
1856 goto set_pte; 1843 goto set_pte;
1857 1844
1858 if (mmu_need_write_protect(vcpu, gfn, can_unsync)) { 1845 if (mmu_need_write_protect(vcpu, gfn, can_unsync)) {
@@ -1860,7 +1847,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
1860 __func__, gfn); 1847 __func__, gfn);
1861 ret = 1; 1848 ret = 1;
1862 pte_access &= ~ACC_WRITE_MASK; 1849 pte_access &= ~ACC_WRITE_MASK;
1863 if (is_writeble_pte(spte)) 1850 if (is_writable_pte(spte))
1864 spte &= ~PT_WRITABLE_MASK; 1851 spte &= ~PT_WRITABLE_MASK;
1865 } 1852 }
1866 } 1853 }
@@ -1881,7 +1868,7 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
1881 bool reset_host_protection) 1868 bool reset_host_protection)
1882{ 1869{
1883 int was_rmapped = 0; 1870 int was_rmapped = 0;
1884 int was_writeble = is_writeble_pte(*sptep); 1871 int was_writable = is_writable_pte(*sptep);
1885 int rmap_count; 1872 int rmap_count;
1886 1873
1887 pgprintk("%s: spte %llx access %x write_fault %d" 1874 pgprintk("%s: spte %llx access %x write_fault %d"
@@ -1932,7 +1919,7 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
1932 if (rmap_count > RMAP_RECYCLE_THRESHOLD) 1919 if (rmap_count > RMAP_RECYCLE_THRESHOLD)
1933 rmap_recycle(vcpu, sptep, gfn); 1920 rmap_recycle(vcpu, sptep, gfn);
1934 } else { 1921 } else {
1935 if (was_writeble) 1922 if (was_writable)
1936 kvm_release_pfn_dirty(pfn); 1923 kvm_release_pfn_dirty(pfn);
1937 else 1924 else
1938 kvm_release_pfn_clean(pfn); 1925 kvm_release_pfn_clean(pfn);
@@ -2162,8 +2149,11 @@ void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
2162 spin_unlock(&vcpu->kvm->mmu_lock); 2149 spin_unlock(&vcpu->kvm->mmu_lock);
2163} 2150}
2164 2151
2165static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr) 2152static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr,
2153 u32 access, u32 *error)
2166{ 2154{
2155 if (error)
2156 *error = 0;
2167 return vaddr; 2157 return vaddr;
2168} 2158}
2169 2159
@@ -2747,7 +2737,7 @@ int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
2747 if (tdp_enabled) 2737 if (tdp_enabled)
2748 return 0; 2738 return 0;
2749 2739
2750 gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gva); 2740 gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, NULL);
2751 2741
2752 spin_lock(&vcpu->kvm->mmu_lock); 2742 spin_lock(&vcpu->kvm->mmu_lock);
2753 r = kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT); 2743 r = kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
@@ -2847,16 +2837,13 @@ static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
2847 */ 2837 */
2848 page = alloc_page(GFP_KERNEL | __GFP_DMA32); 2838 page = alloc_page(GFP_KERNEL | __GFP_DMA32);
2849 if (!page) 2839 if (!page)
2850 goto error_1; 2840 return -ENOMEM;
2841
2851 vcpu->arch.mmu.pae_root = page_address(page); 2842 vcpu->arch.mmu.pae_root = page_address(page);
2852 for (i = 0; i < 4; ++i) 2843 for (i = 0; i < 4; ++i)
2853 vcpu->arch.mmu.pae_root[i] = INVALID_PAGE; 2844 vcpu->arch.mmu.pae_root[i] = INVALID_PAGE;
2854 2845
2855 return 0; 2846 return 0;
2856
2857error_1:
2858 free_mmu_pages(vcpu);
2859 return -ENOMEM;
2860} 2847}
2861 2848
2862int kvm_mmu_create(struct kvm_vcpu *vcpu) 2849int kvm_mmu_create(struct kvm_vcpu *vcpu)
@@ -2936,10 +2923,9 @@ static int mmu_shrink(int nr_to_scan, gfp_t gfp_mask)
2936 spin_lock(&kvm_lock); 2923 spin_lock(&kvm_lock);
2937 2924
2938 list_for_each_entry(kvm, &vm_list, vm_list) { 2925 list_for_each_entry(kvm, &vm_list, vm_list) {
2939 int npages; 2926 int npages, idx;
2940 2927
2941 if (!down_read_trylock(&kvm->slots_lock)) 2928 idx = srcu_read_lock(&kvm->srcu);
2942 continue;
2943 spin_lock(&kvm->mmu_lock); 2929 spin_lock(&kvm->mmu_lock);
2944 npages = kvm->arch.n_alloc_mmu_pages - 2930 npages = kvm->arch.n_alloc_mmu_pages -
2945 kvm->arch.n_free_mmu_pages; 2931 kvm->arch.n_free_mmu_pages;
@@ -2952,7 +2938,7 @@ static int mmu_shrink(int nr_to_scan, gfp_t gfp_mask)
2952 nr_to_scan--; 2938 nr_to_scan--;
2953 2939
2954 spin_unlock(&kvm->mmu_lock); 2940 spin_unlock(&kvm->mmu_lock);
2955 up_read(&kvm->slots_lock); 2941 srcu_read_unlock(&kvm->srcu, idx);
2956 } 2942 }
2957 if (kvm_freed) 2943 if (kvm_freed)
2958 list_move_tail(&kvm_freed->vm_list, &vm_list); 2944 list_move_tail(&kvm_freed->vm_list, &vm_list);
@@ -3019,9 +3005,11 @@ unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm)
3019 int i; 3005 int i;
3020 unsigned int nr_mmu_pages; 3006 unsigned int nr_mmu_pages;
3021 unsigned int nr_pages = 0; 3007 unsigned int nr_pages = 0;
3008 struct kvm_memslots *slots;
3022 3009
3023 for (i = 0; i < kvm->nmemslots; i++) 3010 slots = rcu_dereference(kvm->memslots);
3024 nr_pages += kvm->memslots[i].npages; 3011 for (i = 0; i < slots->nmemslots; i++)
3012 nr_pages += slots->memslots[i].npages;
3025 3013
3026 nr_mmu_pages = nr_pages * KVM_PERMILLE_MMU_PAGES / 1000; 3014 nr_mmu_pages = nr_pages * KVM_PERMILLE_MMU_PAGES / 1000;
3027 nr_mmu_pages = max(nr_mmu_pages, 3015 nr_mmu_pages = max(nr_mmu_pages,
@@ -3246,7 +3234,7 @@ static void audit_mappings_page(struct kvm_vcpu *vcpu, u64 page_pte,
3246 if (is_shadow_present_pte(ent) && !is_last_spte(ent, level)) 3234 if (is_shadow_present_pte(ent) && !is_last_spte(ent, level))
3247 audit_mappings_page(vcpu, ent, va, level - 1); 3235 audit_mappings_page(vcpu, ent, va, level - 1);
3248 else { 3236 else {
3249 gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, va); 3237 gpa_t gpa = kvm_mmu_gva_to_gpa_read(vcpu, va, NULL);
3250 gfn_t gfn = gpa >> PAGE_SHIFT; 3238 gfn_t gfn = gpa >> PAGE_SHIFT;
3251 pfn_t pfn = gfn_to_pfn(vcpu->kvm, gfn); 3239 pfn_t pfn = gfn_to_pfn(vcpu->kvm, gfn);
3252 hpa_t hpa = (hpa_t)pfn << PAGE_SHIFT; 3240 hpa_t hpa = (hpa_t)pfn << PAGE_SHIFT;
@@ -3291,10 +3279,12 @@ static void audit_mappings(struct kvm_vcpu *vcpu)
3291static int count_rmaps(struct kvm_vcpu *vcpu) 3279static int count_rmaps(struct kvm_vcpu *vcpu)
3292{ 3280{
3293 int nmaps = 0; 3281 int nmaps = 0;
3294 int i, j, k; 3282 int i, j, k, idx;
3295 3283
3284 idx = srcu_read_lock(&kvm->srcu);
3285 slots = rcu_dereference(kvm->memslots);
3296 for (i = 0; i < KVM_MEMORY_SLOTS; ++i) { 3286 for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
3297 struct kvm_memory_slot *m = &vcpu->kvm->memslots[i]; 3287 struct kvm_memory_slot *m = &slots->memslots[i];
3298 struct kvm_rmap_desc *d; 3288 struct kvm_rmap_desc *d;
3299 3289
3300 for (j = 0; j < m->npages; ++j) { 3290 for (j = 0; j < m->npages; ++j) {
@@ -3317,6 +3307,7 @@ static int count_rmaps(struct kvm_vcpu *vcpu)
3317 } 3307 }
3318 } 3308 }
3319 } 3309 }
3310 srcu_read_unlock(&kvm->srcu, idx);
3320 return nmaps; 3311 return nmaps;
3321} 3312}
3322 3313
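For reference, the memslot arithmetic in kvm_handle_hva() above reduces to a few shifts and divides. The following is a minimal user-space sketch, not kernel code; the simplified slot struct and the 512-pages-per-hugepage constant (2MB/4KB on x86) are assumptions for illustration only.

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGES_PER_HPAGE 512 /* assumed: 2MB hugepage / 4KB base page */

    struct slot { unsigned long userspace_addr, npages; };

    int main(void)
    {
        struct slot s = { 0x7f0000000000UL, 4096 };
        unsigned long hva = 0x7f0000123000UL;
        unsigned long end = s.userspace_addr + (s.npages << PAGE_SHIFT);

        if (hva >= s.userspace_addr && hva < end) {
            unsigned long gfn_offset = (hva - s.userspace_addr) >> PAGE_SHIFT;
            /* small-page rmap index, then the large-page rmap_pde index */
            printf("rmap[%lu], rmap_pde[%lu]\n",
                   gfn_offset, gfn_offset / PAGES_PER_HPAGE);
        }
        return 0;
    }

The handler is applied once per page size: to the 4KB rmap slot and to each larger-granularity rmap_pde covering the same hva.
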
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index 61a1b3884b49..be66759321a5 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -2,6 +2,7 @@
 #define __KVM_X86_MMU_H
 
 #include <linux/kvm_host.h>
+#include "kvm_cache_regs.h"
 
 #define PT64_PT_BITS 9
 #define PT64_ENT_PER_PAGE (1 << PT64_PT_BITS)
@@ -37,6 +38,16 @@
 #define PT32_ROOT_LEVEL 2
 #define PT32E_ROOT_LEVEL 3
 
+#define PT_PDPE_LEVEL 3
+#define PT_DIRECTORY_LEVEL 2
+#define PT_PAGE_TABLE_LEVEL 1
+
+#define PFERR_PRESENT_MASK (1U << 0)
+#define PFERR_WRITE_MASK (1U << 1)
+#define PFERR_USER_MASK (1U << 2)
+#define PFERR_RSVD_MASK (1U << 3)
+#define PFERR_FETCH_MASK (1U << 4)
+
 int kvm_mmu_get_spte_hierarchy(struct kvm_vcpu *vcpu, u64 addr, u64 sptes[4]);
 
 static inline void kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
@@ -53,30 +64,6 @@ static inline int kvm_mmu_reload(struct kvm_vcpu *vcpu)
 	return kvm_mmu_load(vcpu);
 }
 
-static inline int is_long_mode(struct kvm_vcpu *vcpu)
-{
-#ifdef CONFIG_X86_64
-	return vcpu->arch.shadow_efer & EFER_LMA;
-#else
-	return 0;
-#endif
-}
-
-static inline int is_pae(struct kvm_vcpu *vcpu)
-{
-	return vcpu->arch.cr4 & X86_CR4_PAE;
-}
-
-static inline int is_pse(struct kvm_vcpu *vcpu)
-{
-	return vcpu->arch.cr4 & X86_CR4_PSE;
-}
-
-static inline int is_paging(struct kvm_vcpu *vcpu)
-{
-	return vcpu->arch.cr0 & X86_CR0_PG;
-}
-
 static inline int is_present_gpte(unsigned long pte)
 {
 	return pte & PT_PRESENT_MASK;
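As a rough illustration of how the PFERR_* masks introduced above are consumed: callers pack the fault qualifiers into a single u32 access word, and the page walker unpacks them with !!(access & mask). A standalone sketch, with mask values copied from the #defines above:

    #include <stdint.h>

    #define PFERR_WRITE_MASK (1U << 1)
    #define PFERR_USER_MASK  (1U << 2)
    #define PFERR_FETCH_MASK (1U << 4)

    /* pack fault qualifiers the way the gva_to_gpa callers would */
    static uint32_t make_access(int write, int user, int fetch)
    {
        uint32_t access = 0;

        if (write)
            access |= PFERR_WRITE_MASK;
        if (user)
            access |= PFERR_USER_MASK;
        if (fetch)
            access |= PFERR_FETCH_MASK;
        return access;
    }

The same bit layout doubles as the x86 page-fault error code, which is why a caller can forward a fault's error code directly as the access argument.
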
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index ede2131a9225..81eab9a50e6a 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -162,7 +162,7 @@ walk:
 		if (rsvd_fault)
 			goto access_error;
 
-		if (write_fault && !is_writeble_pte(pte))
+		if (write_fault && !is_writable_pte(pte))
 			if (user_fault || is_write_protection(vcpu))
 				goto access_error;
 
@@ -490,18 +490,23 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
 	spin_unlock(&vcpu->kvm->mmu_lock);
 }
 
-static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr)
+static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr, u32 access,
+			       u32 *error)
 {
 	struct guest_walker walker;
 	gpa_t gpa = UNMAPPED_GVA;
 	int r;
 
-	r = FNAME(walk_addr)(&walker, vcpu, vaddr, 0, 0, 0);
+	r = FNAME(walk_addr)(&walker, vcpu, vaddr,
+			     !!(access & PFERR_WRITE_MASK),
+			     !!(access & PFERR_USER_MASK),
+			     !!(access & PFERR_FETCH_MASK));
 
 	if (r) {
 		gpa = gfn_to_gpa(walker.gfn);
 		gpa |= vaddr & ~PAGE_MASK;
-	}
+	} else if (error)
+		*error = walker.error_code;
 
 	return gpa;
 }
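The address composition at the end of FNAME(gva_to_gpa) above — frame number from the walk, page offset carried over from the virtual address — can be checked in isolation. A minimal sketch, assuming a 4KB page; the sample values are arbitrary:

    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SHIFT 12
    #define PAGE_MASK (~((1UL << PAGE_SHIFT) - 1))

    int main(void)
    {
        uint64_t gfn = 0x1234;            /* as produced by a successful walk */
        uint64_t vaddr = 0x7ffdeadbeef;   /* guest-virtual address */
        /* gfn_to_gpa() is a left shift; the low 12 bits come from vaddr */
        uint64_t gpa = (gfn << PAGE_SHIFT) | (vaddr & ~PAGE_MASK);

        printf("gpa = 0x%llx\n", (unsigned long long)gpa); /* 0x1234eef */
        return 0;
    }
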
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 1d9b33843c80..52f78dd03010 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -231,7 +231,7 @@ static void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
 		efer &= ~EFER_LME;
 
 	to_svm(vcpu)->vmcb->save.efer = efer | EFER_SVME;
-	vcpu->arch.shadow_efer = efer;
+	vcpu->arch.efer = efer;
 }
 
 static void svm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
@@ -540,6 +540,8 @@ static void init_vmcb(struct vcpu_svm *svm)
 	struct vmcb_control_area *control = &svm->vmcb->control;
 	struct vmcb_save_area *save = &svm->vmcb->save;
 
+	svm->vcpu.fpu_active = 1;
+
 	control->intercept_cr_read = INTERCEPT_CR0_MASK |
 					INTERCEPT_CR3_MASK |
 					INTERCEPT_CR4_MASK;
@@ -552,13 +554,19 @@ static void init_vmcb(struct vcpu_svm *svm)
 	control->intercept_dr_read = INTERCEPT_DR0_MASK |
 					INTERCEPT_DR1_MASK |
 					INTERCEPT_DR2_MASK |
-					INTERCEPT_DR3_MASK;
+					INTERCEPT_DR3_MASK |
+					INTERCEPT_DR4_MASK |
+					INTERCEPT_DR5_MASK |
+					INTERCEPT_DR6_MASK |
+					INTERCEPT_DR7_MASK;
 
 	control->intercept_dr_write = INTERCEPT_DR0_MASK |
 					INTERCEPT_DR1_MASK |
 					INTERCEPT_DR2_MASK |
 					INTERCEPT_DR3_MASK |
+					INTERCEPT_DR4_MASK |
 					INTERCEPT_DR5_MASK |
+					INTERCEPT_DR6_MASK |
 					INTERCEPT_DR7_MASK;
 
 	control->intercept_exceptions = (1 << PF_VECTOR) |
@@ -569,6 +577,7 @@ static void init_vmcb(struct vcpu_svm *svm)
 	control->intercept = (1ULL << INTERCEPT_INTR) |
 				(1ULL << INTERCEPT_NMI) |
 				(1ULL << INTERCEPT_SMI) |
+				(1ULL << INTERCEPT_SELECTIVE_CR0) |
 				(1ULL << INTERCEPT_CPUID) |
 				(1ULL << INTERCEPT_INVD) |
 				(1ULL << INTERCEPT_HLT) |
@@ -641,10 +650,8 @@ static void init_vmcb(struct vcpu_svm *svm)
 		control->intercept &= ~((1ULL << INTERCEPT_TASK_SWITCH) |
 					(1ULL << INTERCEPT_INVLPG));
 		control->intercept_exceptions &= ~(1 << PF_VECTOR);
-		control->intercept_cr_read &= ~(INTERCEPT_CR0_MASK|
-						INTERCEPT_CR3_MASK);
-		control->intercept_cr_write &= ~(INTERCEPT_CR0_MASK|
-						 INTERCEPT_CR3_MASK);
+		control->intercept_cr_read &= ~INTERCEPT_CR3_MASK;
+		control->intercept_cr_write &= ~INTERCEPT_CR3_MASK;
 		save->g_pat = 0x0007040600070406ULL;
 		save->cr3 = 0;
 		save->cr4 = 0;
@@ -730,7 +737,6 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
 	init_vmcb(svm);
 
 	fx_init(&svm->vcpu);
-	svm->vcpu.fpu_active = 1;
 	svm->vcpu.arch.apic_base = 0xfee00000 | MSR_IA32_APICBASE_ENABLE;
 	if (kvm_vcpu_is_bsp(&svm->vcpu))
 		svm->vcpu.arch.apic_base |= MSR_IA32_APICBASE_BSP;
@@ -765,14 +771,16 @@ static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 	if (unlikely(cpu != vcpu->cpu)) {
 		u64 delta;
 
-		/*
-		 * Make sure that the guest sees a monotonically
-		 * increasing TSC.
-		 */
-		delta = vcpu->arch.host_tsc - native_read_tsc();
-		svm->vmcb->control.tsc_offset += delta;
-		if (is_nested(svm))
-			svm->nested.hsave->control.tsc_offset += delta;
+		if (check_tsc_unstable()) {
+			/*
+			 * Make sure that the guest sees a monotonically
+			 * increasing TSC.
+			 */
+			delta = vcpu->arch.host_tsc - native_read_tsc();
+			svm->vmcb->control.tsc_offset += delta;
+			if (is_nested(svm))
+				svm->nested.hsave->control.tsc_offset += delta;
+		}
 		vcpu->cpu = cpu;
 		kvm_migrate_timers(vcpu);
 		svm->asid_generation = 0;
@@ -954,42 +962,59 @@ static void svm_set_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
 	svm->vmcb->save.gdtr.base = dt->base ;
 }
 
+static void svm_decache_cr0_guest_bits(struct kvm_vcpu *vcpu)
+{
+}
+
 static void svm_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
 {
 }
 
+static void update_cr0_intercept(struct vcpu_svm *svm)
+{
+	ulong gcr0 = svm->vcpu.arch.cr0;
+	u64 *hcr0 = &svm->vmcb->save.cr0;
+
+	if (!svm->vcpu.fpu_active)
+		*hcr0 |= SVM_CR0_SELECTIVE_MASK;
+	else
+		*hcr0 = (*hcr0 & ~SVM_CR0_SELECTIVE_MASK)
+			| (gcr0 & SVM_CR0_SELECTIVE_MASK);
+
+
+	if (gcr0 == *hcr0 && svm->vcpu.fpu_active) {
+		svm->vmcb->control.intercept_cr_read &= ~INTERCEPT_CR0_MASK;
+		svm->vmcb->control.intercept_cr_write &= ~INTERCEPT_CR0_MASK;
+	} else {
+		svm->vmcb->control.intercept_cr_read |= INTERCEPT_CR0_MASK;
+		svm->vmcb->control.intercept_cr_write |= INTERCEPT_CR0_MASK;
+	}
+}
+
 static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 
 #ifdef CONFIG_X86_64
-	if (vcpu->arch.shadow_efer & EFER_LME) {
+	if (vcpu->arch.efer & EFER_LME) {
 		if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
-			vcpu->arch.shadow_efer |= EFER_LMA;
+			vcpu->arch.efer |= EFER_LMA;
 			svm->vmcb->save.efer |= EFER_LMA | EFER_LME;
 		}
 
 		if (is_paging(vcpu) && !(cr0 & X86_CR0_PG)) {
-			vcpu->arch.shadow_efer &= ~EFER_LMA;
+			vcpu->arch.efer &= ~EFER_LMA;
 			svm->vmcb->save.efer &= ~(EFER_LMA | EFER_LME);
 		}
 	}
 #endif
-	if (npt_enabled)
-		goto set;
+	vcpu->arch.cr0 = cr0;
 
-	if ((vcpu->arch.cr0 & X86_CR0_TS) && !(cr0 & X86_CR0_TS)) {
-		svm->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR);
-		vcpu->fpu_active = 1;
-	}
+	if (!npt_enabled)
+		cr0 |= X86_CR0_PG | X86_CR0_WP;
 
-	vcpu->arch.cr0 = cr0;
-	cr0 |= X86_CR0_PG | X86_CR0_WP;
-	if (!vcpu->fpu_active) {
-		svm->vmcb->control.intercept_exceptions |= (1 << NM_VECTOR);
+	if (!vcpu->fpu_active)
 		cr0 |= X86_CR0_TS;
-	}
-set:
 	/*
 	 * re-enable caching here because the QEMU bios
 	 * does not do it - this results in some delay at
@@ -997,6 +1022,7 @@ set:
 	 */
 	cr0 &= ~(X86_CR0_CD | X86_CR0_NW);
 	svm->vmcb->save.cr0 = cr0;
+	update_cr0_intercept(svm);
 }
 
 static void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
@@ -1102,76 +1128,70 @@ static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *sd)
 	svm->vmcb->control.asid = sd->next_asid++;
 }
 
-static unsigned long svm_get_dr(struct kvm_vcpu *vcpu, int dr)
+static int svm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *dest)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
-	unsigned long val;
 
 	switch (dr) {
 	case 0 ... 3:
-		val = vcpu->arch.db[dr];
+		*dest = vcpu->arch.db[dr];
 		break;
+	case 4:
+		if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
+			return EMULATE_FAIL; /* will re-inject UD */
+		/* fall through */
 	case 6:
 		if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
-			val = vcpu->arch.dr6;
+			*dest = vcpu->arch.dr6;
 		else
-			val = svm->vmcb->save.dr6;
+			*dest = svm->vmcb->save.dr6;
 		break;
+	case 5:
+		if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
+			return EMULATE_FAIL; /* will re-inject UD */
+		/* fall through */
 	case 7:
 		if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
-			val = vcpu->arch.dr7;
+			*dest = vcpu->arch.dr7;
 		else
-			val = svm->vmcb->save.dr7;
+			*dest = svm->vmcb->save.dr7;
 		break;
-	default:
-		val = 0;
 	}
 
-	return val;
+	return EMULATE_DONE;
 }
 
-static void svm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long value,
-		       int *exception)
+static int svm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long value)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 
-	*exception = 0;
-
 	switch (dr) {
 	case 0 ... 3:
 		vcpu->arch.db[dr] = value;
 		if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
 			vcpu->arch.eff_db[dr] = value;
-		return;
-	case 4 ... 5:
-		if (vcpu->arch.cr4 & X86_CR4_DE)
-			*exception = UD_VECTOR;
-		return;
+		break;
+	case 4:
+		if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
+			return EMULATE_FAIL; /* will re-inject UD */
+		/* fall through */
 	case 6:
-		if (value & 0xffffffff00000000ULL) {
-			*exception = GP_VECTOR;
-			return;
-		}
 		vcpu->arch.dr6 = (value & DR6_VOLATILE) | DR6_FIXED_1;
-		return;
+		break;
+	case 5:
+		if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
+			return EMULATE_FAIL; /* will re-inject UD */
+		/* fall through */
 	case 7:
-		if (value & 0xffffffff00000000ULL) {
-			*exception = GP_VECTOR;
-			return;
-		}
 		vcpu->arch.dr7 = (value & DR7_VOLATILE) | DR7_FIXED_1;
 		if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) {
 			svm->vmcb->save.dr7 = vcpu->arch.dr7;
 			vcpu->arch.switch_db_regs = (value & DR7_BP_EN_MASK);
 		}
-		return;
-	default:
-		/* FIXME: Possible case? */
-		printk(KERN_DEBUG "%s: unexpected dr %u\n",
-		       __func__, dr);
-		*exception = UD_VECTOR;
-		return;
+		break;
 	}
+
+	return EMULATE_DONE;
 }
 
 static int pf_interception(struct vcpu_svm *svm)
@@ -1239,13 +1259,17 @@ static int ud_interception(struct vcpu_svm *svm)
 	return 1;
 }
 
-static int nm_interception(struct vcpu_svm *svm)
+static void svm_fpu_activate(struct kvm_vcpu *vcpu)
 {
+	struct vcpu_svm *svm = to_svm(vcpu);
 	svm->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR);
-	if (!(svm->vcpu.arch.cr0 & X86_CR0_TS))
-		svm->vmcb->save.cr0 &= ~X86_CR0_TS;
 	svm->vcpu.fpu_active = 1;
+	update_cr0_intercept(svm);
+}
 
+static int nm_interception(struct vcpu_svm *svm)
+{
+	svm_fpu_activate(&svm->vcpu);
 	return 1;
 }
 
@@ -1337,7 +1361,7 @@ static int vmmcall_interception(struct vcpu_svm *svm)
 
 static int nested_svm_check_permissions(struct vcpu_svm *svm)
 {
-	if (!(svm->vcpu.arch.shadow_efer & EFER_SVME)
+	if (!(svm->vcpu.arch.efer & EFER_SVME)
 	    || !is_paging(&svm->vcpu)) {
 		kvm_queue_exception(&svm->vcpu, UD_VECTOR);
 		return 1;
@@ -1740,8 +1764,8 @@ static bool nested_svm_vmrun(struct vcpu_svm *svm)
 	hsave->save.ds = vmcb->save.ds;
 	hsave->save.gdtr = vmcb->save.gdtr;
 	hsave->save.idtr = vmcb->save.idtr;
-	hsave->save.efer = svm->vcpu.arch.shadow_efer;
-	hsave->save.cr0 = svm->vcpu.arch.cr0;
+	hsave->save.efer = svm->vcpu.arch.efer;
+	hsave->save.cr0 = kvm_read_cr0(&svm->vcpu);
 	hsave->save.cr4 = svm->vcpu.arch.cr4;
 	hsave->save.rflags = vmcb->save.rflags;
 	hsave->save.rip = svm->next_rip;
@@ -2153,9 +2177,10 @@ static int rdmsr_interception(struct vcpu_svm *svm)
 	u32 ecx = svm->vcpu.arch.regs[VCPU_REGS_RCX];
 	u64 data;
 
-	if (svm_get_msr(&svm->vcpu, ecx, &data))
+	if (svm_get_msr(&svm->vcpu, ecx, &data)) {
+		trace_kvm_msr_read_ex(ecx);
 		kvm_inject_gp(&svm->vcpu, 0);
-	else {
+	} else {
 		trace_kvm_msr_read(ecx, data);
 
 		svm->vcpu.arch.regs[VCPU_REGS_RAX] = data & 0xffffffff;
@@ -2247,13 +2272,15 @@ static int wrmsr_interception(struct vcpu_svm *svm)
 	u64 data = (svm->vcpu.arch.regs[VCPU_REGS_RAX] & -1u)
 		| ((u64)(svm->vcpu.arch.regs[VCPU_REGS_RDX] & -1u) << 32);
 
-	trace_kvm_msr_write(ecx, data);
 
 	svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
-	if (svm_set_msr(&svm->vcpu, ecx, data))
+	if (svm_set_msr(&svm->vcpu, ecx, data)) {
+		trace_kvm_msr_write_ex(ecx, data);
 		kvm_inject_gp(&svm->vcpu, 0);
-	else
+	} else {
+		trace_kvm_msr_write(ecx, data);
 		skip_emulated_instruction(&svm->vcpu);
+	}
 	return 1;
 }
 
@@ -2297,7 +2324,7 @@ static int (*svm_exit_handlers[])(struct vcpu_svm *svm) = {
 	[SVM_EXIT_READ_CR3] = emulate_on_interception,
 	[SVM_EXIT_READ_CR4] = emulate_on_interception,
 	[SVM_EXIT_READ_CR8] = emulate_on_interception,
-	/* for now: */
+	[SVM_EXIT_CR0_SEL_WRITE] = emulate_on_interception,
 	[SVM_EXIT_WRITE_CR0] = emulate_on_interception,
 	[SVM_EXIT_WRITE_CR3] = emulate_on_interception,
 	[SVM_EXIT_WRITE_CR4] = emulate_on_interception,
@@ -2306,11 +2333,17 @@ static int (*svm_exit_handlers[])(struct vcpu_svm *svm) = {
 	[SVM_EXIT_READ_DR1] = emulate_on_interception,
 	[SVM_EXIT_READ_DR2] = emulate_on_interception,
 	[SVM_EXIT_READ_DR3] = emulate_on_interception,
+	[SVM_EXIT_READ_DR4] = emulate_on_interception,
+	[SVM_EXIT_READ_DR5] = emulate_on_interception,
+	[SVM_EXIT_READ_DR6] = emulate_on_interception,
+	[SVM_EXIT_READ_DR7] = emulate_on_interception,
 	[SVM_EXIT_WRITE_DR0] = emulate_on_interception,
 	[SVM_EXIT_WRITE_DR1] = emulate_on_interception,
 	[SVM_EXIT_WRITE_DR2] = emulate_on_interception,
 	[SVM_EXIT_WRITE_DR3] = emulate_on_interception,
+	[SVM_EXIT_WRITE_DR4] = emulate_on_interception,
 	[SVM_EXIT_WRITE_DR5] = emulate_on_interception,
+	[SVM_EXIT_WRITE_DR6] = emulate_on_interception,
 	[SVM_EXIT_WRITE_DR7] = emulate_on_interception,
 	[SVM_EXIT_EXCP_BASE + DB_VECTOR] = db_interception,
 	[SVM_EXIT_EXCP_BASE + BP_VECTOR] = bp_interception,
@@ -2383,20 +2416,10 @@ static int handle_exit(struct kvm_vcpu *vcpu)
 
 	svm_complete_interrupts(svm);
 
-	if (npt_enabled) {
-		int mmu_reload = 0;
-		if ((vcpu->arch.cr0 ^ svm->vmcb->save.cr0) & X86_CR0_PG) {
-			svm_set_cr0(vcpu, svm->vmcb->save.cr0);
-			mmu_reload = 1;
-		}
+	if (!(svm->vmcb->control.intercept_cr_write & INTERCEPT_CR0_MASK))
 		vcpu->arch.cr0 = svm->vmcb->save.cr0;
+	if (npt_enabled)
 		vcpu->arch.cr3 = svm->vmcb->save.cr3;
-		if (mmu_reload) {
-			kvm_mmu_reset_context(vcpu);
-			kvm_mmu_load(vcpu);
-		}
-	}
-
 
 	if (svm->vmcb->control.exit_code == SVM_EXIT_ERR) {
 		kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
@@ -2798,12 +2821,6 @@ static void svm_set_cr3(struct kvm_vcpu *vcpu, unsigned long root)
 
 	svm->vmcb->save.cr3 = root;
 	force_new_asid(vcpu);
-
-	if (vcpu->fpu_active) {
-		svm->vmcb->control.intercept_exceptions |= (1 << NM_VECTOR);
-		svm->vmcb->save.cr0 |= X86_CR0_TS;
-		vcpu->fpu_active = 0;
-	}
 }
 
 static int is_disabled(void)
@@ -2852,6 +2869,10 @@ static u64 svm_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
 	return 0;
 }
 
+static void svm_cpuid_update(struct kvm_vcpu *vcpu)
+{
+}
+
 static const struct trace_print_flags svm_exit_reasons_str[] = {
 	{ SVM_EXIT_READ_CR0, "read_cr0" },
 	{ SVM_EXIT_READ_CR3, "read_cr3" },
@@ -2905,9 +2926,22 @@ static const struct trace_print_flags svm_exit_reasons_str[] = {
 	{ -1, NULL }
 };
 
-static bool svm_gb_page_enable(void)
+static int svm_get_lpage_level(void)
 {
-	return true;
+	return PT_PDPE_LEVEL;
+}
+
+static bool svm_rdtscp_supported(void)
+{
+	return false;
+}
+
+static void svm_fpu_deactivate(struct kvm_vcpu *vcpu)
+{
+	struct vcpu_svm *svm = to_svm(vcpu);
+
+	update_cr0_intercept(svm);
+	svm->vmcb->control.intercept_exceptions |= 1 << NM_VECTOR;
 }
 
 static struct kvm_x86_ops svm_x86_ops = {
@@ -2936,6 +2970,7 @@ static struct kvm_x86_ops svm_x86_ops = {
 	.set_segment = svm_set_segment,
 	.get_cpl = svm_get_cpl,
 	.get_cs_db_l_bits = kvm_get_cs_db_l_bits,
+	.decache_cr0_guest_bits = svm_decache_cr0_guest_bits,
 	.decache_cr4_guest_bits = svm_decache_cr4_guest_bits,
 	.set_cr0 = svm_set_cr0,
 	.set_cr3 = svm_set_cr3,
@@ -2950,6 +2985,8 @@ static struct kvm_x86_ops svm_x86_ops = {
 	.cache_reg = svm_cache_reg,
 	.get_rflags = svm_get_rflags,
 	.set_rflags = svm_set_rflags,
+	.fpu_activate = svm_fpu_activate,
+	.fpu_deactivate = svm_fpu_deactivate,
 
 	.tlb_flush = svm_flush_tlb,
 
@@ -2975,7 +3012,11 @@ static struct kvm_x86_ops svm_x86_ops = {
 	.get_mt_mask = svm_get_mt_mask,
 
 	.exit_reasons_str = svm_exit_reasons_str,
-	.gb_page_enable = svm_gb_page_enable,
+	.get_lpage_level = svm_get_lpage_level,
+
+	.cpuid_update = svm_cpuid_update,
+
+	.rdtscp_supported = svm_rdtscp_supported,
};
 
 static int __init svm_init(void)
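The decision made by update_cr0_intercept() above can be summarized in one predicate: CR0 accesses only need intercepting while the guest-visible CR0 differs from the CR0 the hardware actually runs with, or while the FPU is deactivated and #NM trapping is armed. A standalone sketch — the SVM_CR0_SELECTIVE_MASK value here is an assumption for illustration; the real definition lives in the SVM headers:

    #include <stdbool.h>

    /* assumed for illustration: TS | MP, mirroring the selective CR0 bits */
    #define SVM_CR0_SELECTIVE_MASK ((1UL << 3) | (1UL << 1))

    /* true when CR0 reads/writes must still be intercepted */
    static bool need_cr0_intercept(unsigned long gcr0, unsigned long hcr0,
                                   bool fpu_active)
    {
        return !(gcr0 == hcr0 && fpu_active);
    }

When the predicate is false the intercept bits are cleared and the guest touches CR0 at hardware speed; the selective-CR0 intercept added to init_vmcb() still catches the writes that matter.
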
diff --git a/arch/x86/kvm/trace.h b/arch/x86/kvm/trace.h
index 816e0449db0b..6ad30a29f044 100644
--- a/arch/x86/kvm/trace.h
+++ b/arch/x86/kvm/trace.h
@@ -56,6 +56,38 @@ TRACE_EVENT(kvm_hypercall,
 );
 
 /*
+ * Tracepoint for hypercall.
+ */
+TRACE_EVENT(kvm_hv_hypercall,
+	TP_PROTO(__u16 code, bool fast, __u16 rep_cnt, __u16 rep_idx,
+		 __u64 ingpa, __u64 outgpa),
+	TP_ARGS(code, fast, rep_cnt, rep_idx, ingpa, outgpa),
+
+	TP_STRUCT__entry(
+		__field( __u16, code )
+		__field( bool, fast )
+		__field( __u16, rep_cnt )
+		__field( __u16, rep_idx )
+		__field( __u64, ingpa )
+		__field( __u64, outgpa )
+	),
+
+	TP_fast_assign(
+		__entry->code = code;
+		__entry->fast = fast;
+		__entry->rep_cnt = rep_cnt;
+		__entry->rep_idx = rep_idx;
+		__entry->ingpa = ingpa;
+		__entry->outgpa = outgpa;
+	),
+
+	TP_printk("code 0x%x %s cnt 0x%x idx 0x%x in 0x%llx out 0x%llx",
+		  __entry->code, __entry->fast ? "fast" : "slow",
+		  __entry->rep_cnt, __entry->rep_idx, __entry->ingpa,
+		  __entry->outgpa)
+);
+
+/*
  * Tracepoint for PIO.
  */
 TRACE_EVENT(kvm_pio,
@@ -214,28 +246,33 @@ TRACE_EVENT(kvm_page_fault,
  * Tracepoint for guest MSR access.
  */
 TRACE_EVENT(kvm_msr,
-	TP_PROTO(unsigned int rw, unsigned int ecx, unsigned long data),
-	TP_ARGS(rw, ecx, data),
+	TP_PROTO(unsigned write, u32 ecx, u64 data, bool exception),
+	TP_ARGS(write, ecx, data, exception),
 
 	TP_STRUCT__entry(
-		__field( unsigned int, rw )
-		__field( unsigned int, ecx )
-		__field( unsigned long, data )
+		__field( unsigned, write )
+		__field( u32, ecx )
+		__field( u64, data )
+		__field( u8, exception )
 	),
 
 	TP_fast_assign(
-		__entry->rw = rw;
+		__entry->write = write;
 		__entry->ecx = ecx;
 		__entry->data = data;
+		__entry->exception = exception;
 	),
 
-	TP_printk("msr_%s %x = 0x%lx",
-		  __entry->rw ? "write" : "read",
-		  __entry->ecx, __entry->data)
+	TP_printk("msr_%s %x = 0x%llx%s",
+		  __entry->write ? "write" : "read",
+		  __entry->ecx, __entry->data,
+		  __entry->exception ? " (#GP)" : "")
 );
 
-#define trace_kvm_msr_read(ecx, data) trace_kvm_msr(0, ecx, data)
-#define trace_kvm_msr_write(ecx, data) trace_kvm_msr(1, ecx, data)
+#define trace_kvm_msr_read(ecx, data) trace_kvm_msr(0, ecx, data, false)
+#define trace_kvm_msr_write(ecx, data) trace_kvm_msr(1, ecx, data, false)
+#define trace_kvm_msr_read_ex(ecx) trace_kvm_msr(0, ecx, 0, true)
+#define trace_kvm_msr_write_ex(ecx, data) trace_kvm_msr(1, ecx, data, true)
 
 /*
 * Tracepoint for guest CR access.
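The kvm_msr change above keeps one event but grows four front-end macros, so faulting and successful MSR accesses share a single trace format. The pattern is easy to demonstrate outside the tracing framework; this sketch substitutes a plain printf for the real tracepoint emission:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* stand-in for the generated tracepoint function */
    static void trace_msr(unsigned write, uint32_t ecx, uint64_t data, bool ex)
    {
        printf("msr_%s %x = 0x%llx%s\n", write ? "write" : "read",
               ecx, (unsigned long long)data, ex ? " (#GP)" : "");
    }

    #define trace_msr_read(ecx, data)      trace_msr(0, ecx, data, false)
    #define trace_msr_write(ecx, data)     trace_msr(1, ecx, data, false)
    #define trace_msr_read_ex(ecx)         trace_msr(0, ecx, 0, true)
    #define trace_msr_write_ex(ecx, data)  trace_msr(1, ecx, data, true)

    int main(void)
    {
        trace_msr_write(0xc0000080, 0xd01); /* e.g. an EFER write */
        trace_msr_read_ex(0x12345678);      /* a faulting rdmsr */
        return 0;
    }
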
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index d4918d6fc924..14873b9f8430 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -61,6 +61,21 @@ module_param_named(unrestricted_guest,
61static int __read_mostly emulate_invalid_guest_state = 0; 61static int __read_mostly emulate_invalid_guest_state = 0;
62module_param(emulate_invalid_guest_state, bool, S_IRUGO); 62module_param(emulate_invalid_guest_state, bool, S_IRUGO);
63 63
64#define KVM_GUEST_CR0_MASK_UNRESTRICTED_GUEST \
65 (X86_CR0_WP | X86_CR0_NE | X86_CR0_NW | X86_CR0_CD)
66#define KVM_GUEST_CR0_MASK \
67 (KVM_GUEST_CR0_MASK_UNRESTRICTED_GUEST | X86_CR0_PG | X86_CR0_PE)
68#define KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST \
69 (X86_CR0_WP | X86_CR0_NE)
70#define KVM_VM_CR0_ALWAYS_ON \
71 (KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST | X86_CR0_PG | X86_CR0_PE)
72#define KVM_CR4_GUEST_OWNED_BITS \
73 (X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR \
74 | X86_CR4_OSXMMEXCPT)
75
76#define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE)
77#define KVM_RMODE_VM_CR4_ALWAYS_ON (X86_CR4_VME | X86_CR4_PAE | X86_CR4_VMXE)
78
64/* 79/*
65 * These 2 parameters are used to config the controls for Pause-Loop Exiting: 80 * These 2 parameters are used to config the controls for Pause-Loop Exiting:
66 * ple_gap: upper bound on the amount of time between two successive 81 * ple_gap: upper bound on the amount of time between two successive
@@ -136,6 +151,8 @@ struct vcpu_vmx {
136 ktime_t entry_time; 151 ktime_t entry_time;
137 s64 vnmi_blocked_time; 152 s64 vnmi_blocked_time;
138 u32 exit_reason; 153 u32 exit_reason;
154
155 bool rdtscp_enabled;
139}; 156};
140 157
141static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu) 158static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
@@ -210,7 +227,7 @@ static const u32 vmx_msr_index[] = {
210#ifdef CONFIG_X86_64 227#ifdef CONFIG_X86_64
211 MSR_SYSCALL_MASK, MSR_LSTAR, MSR_CSTAR, 228 MSR_SYSCALL_MASK, MSR_LSTAR, MSR_CSTAR,
212#endif 229#endif
213 MSR_EFER, MSR_K6_STAR, 230 MSR_EFER, MSR_TSC_AUX, MSR_K6_STAR,
214}; 231};
215#define NR_VMX_MSR ARRAY_SIZE(vmx_msr_index) 232#define NR_VMX_MSR ARRAY_SIZE(vmx_msr_index)
216 233
@@ -301,6 +318,11 @@ static inline bool cpu_has_vmx_ept_2m_page(void)
301 return !!(vmx_capability.ept & VMX_EPT_2MB_PAGE_BIT); 318 return !!(vmx_capability.ept & VMX_EPT_2MB_PAGE_BIT);
302} 319}
303 320
321static inline bool cpu_has_vmx_ept_1g_page(void)
322{
323 return !!(vmx_capability.ept & VMX_EPT_1GB_PAGE_BIT);
324}
325
304static inline int cpu_has_vmx_invept_individual_addr(void) 326static inline int cpu_has_vmx_invept_individual_addr(void)
305{ 327{
306 return !!(vmx_capability.ept & VMX_EPT_EXTENT_INDIVIDUAL_BIT); 328 return !!(vmx_capability.ept & VMX_EPT_EXTENT_INDIVIDUAL_BIT);
@@ -336,9 +358,7 @@ static inline int cpu_has_vmx_ple(void)
336 358
337static inline int vm_need_virtualize_apic_accesses(struct kvm *kvm) 359static inline int vm_need_virtualize_apic_accesses(struct kvm *kvm)
338{ 360{
339 return flexpriority_enabled && 361 return flexpriority_enabled && irqchip_in_kernel(kvm);
340 (cpu_has_vmx_virtualize_apic_accesses()) &&
341 (irqchip_in_kernel(kvm));
342} 362}
343 363
344static inline int cpu_has_vmx_vpid(void) 364static inline int cpu_has_vmx_vpid(void)
@@ -347,6 +367,12 @@ static inline int cpu_has_vmx_vpid(void)
347 SECONDARY_EXEC_ENABLE_VPID; 367 SECONDARY_EXEC_ENABLE_VPID;
348} 368}
349 369
370static inline int cpu_has_vmx_rdtscp(void)
371{
372 return vmcs_config.cpu_based_2nd_exec_ctrl &
373 SECONDARY_EXEC_RDTSCP;
374}
375
350static inline int cpu_has_virtual_nmis(void) 376static inline int cpu_has_virtual_nmis(void)
351{ 377{
352 return vmcs_config.pin_based_exec_ctrl & PIN_BASED_VIRTUAL_NMIS; 378 return vmcs_config.pin_based_exec_ctrl & PIN_BASED_VIRTUAL_NMIS;
@@ -551,22 +577,18 @@ static void update_exception_bitmap(struct kvm_vcpu *vcpu)
551{ 577{
552 u32 eb; 578 u32 eb;
553 579
554 eb = (1u << PF_VECTOR) | (1u << UD_VECTOR) | (1u << MC_VECTOR); 580 eb = (1u << PF_VECTOR) | (1u << UD_VECTOR) | (1u << MC_VECTOR) |
555 if (!vcpu->fpu_active) 581 (1u << NM_VECTOR) | (1u << DB_VECTOR);
556 eb |= 1u << NM_VECTOR; 582 if ((vcpu->guest_debug &
557 /* 583 (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP)) ==
558 * Unconditionally intercept #DB so we can maintain dr6 without 584 (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP))
559 * reading it every exit. 585 eb |= 1u << BP_VECTOR;
560 */
561 eb |= 1u << DB_VECTOR;
562 if (vcpu->guest_debug & KVM_GUESTDBG_ENABLE) {
563 if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
564 eb |= 1u << BP_VECTOR;
565 }
566 if (to_vmx(vcpu)->rmode.vm86_active) 586 if (to_vmx(vcpu)->rmode.vm86_active)
567 eb = ~0; 587 eb = ~0;
568 if (enable_ept) 588 if (enable_ept)
569 eb &= ~(1u << PF_VECTOR); /* bypass_guest_pf = 0 */ 589 eb &= ~(1u << PF_VECTOR); /* bypass_guest_pf = 0 */
590 if (vcpu->fpu_active)
591 eb &= ~(1u << NM_VECTOR);
570 vmcs_write32(EXCEPTION_BITMAP, eb); 592 vmcs_write32(EXCEPTION_BITMAP, eb);
571} 593}
572 594
@@ -589,7 +611,7 @@ static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset)
589 u64 guest_efer; 611 u64 guest_efer;
590 u64 ignore_bits; 612 u64 ignore_bits;
591 613
592 guest_efer = vmx->vcpu.arch.shadow_efer; 614 guest_efer = vmx->vcpu.arch.efer;
593 615
594 /* 616 /*
595 * NX is emulated; LMA and LME handled by hardware; SCE meaninless 617 * NX is emulated; LMA and LME handled by hardware; SCE meaninless
@@ -767,22 +789,30 @@ static void vmx_vcpu_put(struct kvm_vcpu *vcpu)
767 789
768static void vmx_fpu_activate(struct kvm_vcpu *vcpu) 790static void vmx_fpu_activate(struct kvm_vcpu *vcpu)
769{ 791{
792 ulong cr0;
793
770 if (vcpu->fpu_active) 794 if (vcpu->fpu_active)
771 return; 795 return;
772 vcpu->fpu_active = 1; 796 vcpu->fpu_active = 1;
773 vmcs_clear_bits(GUEST_CR0, X86_CR0_TS); 797 cr0 = vmcs_readl(GUEST_CR0);
774 if (vcpu->arch.cr0 & X86_CR0_TS) 798 cr0 &= ~(X86_CR0_TS | X86_CR0_MP);
775 vmcs_set_bits(GUEST_CR0, X86_CR0_TS); 799 cr0 |= kvm_read_cr0_bits(vcpu, X86_CR0_TS | X86_CR0_MP);
800 vmcs_writel(GUEST_CR0, cr0);
776 update_exception_bitmap(vcpu); 801 update_exception_bitmap(vcpu);
802 vcpu->arch.cr0_guest_owned_bits = X86_CR0_TS;
803 vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits);
777} 804}
778 805
806static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu);
807
779static void vmx_fpu_deactivate(struct kvm_vcpu *vcpu) 808static void vmx_fpu_deactivate(struct kvm_vcpu *vcpu)
780{ 809{
781 if (!vcpu->fpu_active) 810 vmx_decache_cr0_guest_bits(vcpu);
782 return; 811 vmcs_set_bits(GUEST_CR0, X86_CR0_TS | X86_CR0_MP);
783 vcpu->fpu_active = 0;
784 vmcs_set_bits(GUEST_CR0, X86_CR0_TS);
785 update_exception_bitmap(vcpu); 812 update_exception_bitmap(vcpu);
813 vcpu->arch.cr0_guest_owned_bits = 0;
814 vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits);
815 vmcs_writel(CR0_READ_SHADOW, vcpu->arch.cr0);
786} 816}
787 817
788static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu) 818static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu)
@@ -878,6 +908,11 @@ static void vmx_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
878 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr_info); 908 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr_info);
879} 909}
880 910
911static bool vmx_rdtscp_supported(void)
912{
913 return cpu_has_vmx_rdtscp();
914}
915
881/* 916/*
882 * Swap MSR entry in host/guest MSR entry array. 917 * Swap MSR entry in host/guest MSR entry array.
883 */ 918 */
@@ -913,12 +948,15 @@ static void setup_msrs(struct vcpu_vmx *vmx)
913 index = __find_msr_index(vmx, MSR_CSTAR); 948 index = __find_msr_index(vmx, MSR_CSTAR);
914 if (index >= 0) 949 if (index >= 0)
915 move_msr_up(vmx, index, save_nmsrs++); 950 move_msr_up(vmx, index, save_nmsrs++);
951 index = __find_msr_index(vmx, MSR_TSC_AUX);
952 if (index >= 0 && vmx->rdtscp_enabled)
953 move_msr_up(vmx, index, save_nmsrs++);
916 /* 954 /*
917 * MSR_K6_STAR is only needed on long mode guests, and only 955 * MSR_K6_STAR is only needed on long mode guests, and only
918 * if efer.sce is enabled. 956 * if efer.sce is enabled.
919 */ 957 */
920 index = __find_msr_index(vmx, MSR_K6_STAR); 958 index = __find_msr_index(vmx, MSR_K6_STAR);
921 if ((index >= 0) && (vmx->vcpu.arch.shadow_efer & EFER_SCE)) 959 if ((index >= 0) && (vmx->vcpu.arch.efer & EFER_SCE))
922 move_msr_up(vmx, index, save_nmsrs++); 960 move_msr_up(vmx, index, save_nmsrs++);
923 } 961 }
924#endif 962#endif
@@ -1002,6 +1040,10 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
1002 case MSR_IA32_SYSENTER_ESP: 1040 case MSR_IA32_SYSENTER_ESP:
1003 data = vmcs_readl(GUEST_SYSENTER_ESP); 1041 data = vmcs_readl(GUEST_SYSENTER_ESP);
1004 break; 1042 break;
1043 case MSR_TSC_AUX:
1044 if (!to_vmx(vcpu)->rdtscp_enabled)
1045 return 1;
1046 /* Otherwise falls through */
1005 default: 1047 default:
1006 vmx_load_host_state(to_vmx(vcpu)); 1048 vmx_load_host_state(to_vmx(vcpu));
1007 msr = find_msr_entry(to_vmx(vcpu), msr_index); 1049 msr = find_msr_entry(to_vmx(vcpu), msr_index);
@@ -1065,7 +1107,15 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
1065 vcpu->arch.pat = data; 1107 vcpu->arch.pat = data;
1066 break; 1108 break;
1067 } 1109 }
1068 /* Otherwise falls through to kvm_set_msr_common */ 1110 ret = kvm_set_msr_common(vcpu, msr_index, data);
1111 break;
1112 case MSR_TSC_AUX:
1113 if (!vmx->rdtscp_enabled)
1114 return 1;
1115 /* Check reserved bit, higher 32 bits should be zero */
1116 if ((data >> 32) != 0)
1117 return 1;
1118 /* Otherwise falls through */
1069 default: 1119 default:
1070 msr = find_msr_entry(vmx, msr_index); 1120 msr = find_msr_entry(vmx, msr_index);
1071 if (msr) { 1121 if (msr) {
@@ -1224,6 +1274,8 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
1224 CPU_BASED_USE_IO_BITMAPS | 1274 CPU_BASED_USE_IO_BITMAPS |
1225 CPU_BASED_MOV_DR_EXITING | 1275 CPU_BASED_MOV_DR_EXITING |
1226 CPU_BASED_USE_TSC_OFFSETING | 1276 CPU_BASED_USE_TSC_OFFSETING |
1277 CPU_BASED_MWAIT_EXITING |
1278 CPU_BASED_MONITOR_EXITING |
1227 CPU_BASED_INVLPG_EXITING; 1279 CPU_BASED_INVLPG_EXITING;
1228 opt = CPU_BASED_TPR_SHADOW | 1280 opt = CPU_BASED_TPR_SHADOW |
1229 CPU_BASED_USE_MSR_BITMAPS | 1281 CPU_BASED_USE_MSR_BITMAPS |
@@ -1243,7 +1295,8 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
1243 SECONDARY_EXEC_ENABLE_VPID | 1295 SECONDARY_EXEC_ENABLE_VPID |
1244 SECONDARY_EXEC_ENABLE_EPT | 1296 SECONDARY_EXEC_ENABLE_EPT |
1245 SECONDARY_EXEC_UNRESTRICTED_GUEST | 1297 SECONDARY_EXEC_UNRESTRICTED_GUEST |
1246 SECONDARY_EXEC_PAUSE_LOOP_EXITING; 1298 SECONDARY_EXEC_PAUSE_LOOP_EXITING |
1299 SECONDARY_EXEC_RDTSCP;
1247 if (adjust_vmx_controls(min2, opt2, 1300 if (adjust_vmx_controls(min2, opt2,
1248 MSR_IA32_VMX_PROCBASED_CTLS2, 1301 MSR_IA32_VMX_PROCBASED_CTLS2,
1249 &_cpu_based_2nd_exec_control) < 0) 1302 &_cpu_based_2nd_exec_control) < 0)
@@ -1457,8 +1510,12 @@ static void enter_pmode(struct kvm_vcpu *vcpu)
1457static gva_t rmode_tss_base(struct kvm *kvm) 1510static gva_t rmode_tss_base(struct kvm *kvm)
1458{ 1511{
1459 if (!kvm->arch.tss_addr) { 1512 if (!kvm->arch.tss_addr) {
1460 gfn_t base_gfn = kvm->memslots[0].base_gfn + 1513 struct kvm_memslots *slots;
1461 kvm->memslots[0].npages - 3; 1514 gfn_t base_gfn;
1515
1516 slots = rcu_dereference(kvm->memslots);
1517 base_gfn = kvm->memslots->memslots[0].base_gfn +
1518 kvm->memslots->memslots[0].npages - 3;
1462 return base_gfn << PAGE_SHIFT; 1519 return base_gfn << PAGE_SHIFT;
1463 } 1520 }
1464 return kvm->arch.tss_addr; 1521 return kvm->arch.tss_addr;
@@ -1544,9 +1601,7 @@ static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
1544 * of this msr depends on is_long_mode(). 1601 * of this msr depends on is_long_mode().
1545 */ 1602 */
1546 vmx_load_host_state(to_vmx(vcpu)); 1603 vmx_load_host_state(to_vmx(vcpu));
1547 vcpu->arch.shadow_efer = efer; 1604 vcpu->arch.efer = efer;
1548 if (!msr)
1549 return;
1550 if (efer & EFER_LMA) { 1605 if (efer & EFER_LMA) {
1551 vmcs_write32(VM_ENTRY_CONTROLS, 1606 vmcs_write32(VM_ENTRY_CONTROLS,
1552 vmcs_read32(VM_ENTRY_CONTROLS) | 1607 vmcs_read32(VM_ENTRY_CONTROLS) |
@@ -1576,13 +1631,13 @@ static void enter_lmode(struct kvm_vcpu *vcpu)
1576 (guest_tr_ar & ~AR_TYPE_MASK) 1631 (guest_tr_ar & ~AR_TYPE_MASK)
1577 | AR_TYPE_BUSY_64_TSS); 1632 | AR_TYPE_BUSY_64_TSS);
1578 } 1633 }
1579 vcpu->arch.shadow_efer |= EFER_LMA; 1634 vcpu->arch.efer |= EFER_LMA;
1580 vmx_set_efer(vcpu, vcpu->arch.shadow_efer); 1635 vmx_set_efer(vcpu, vcpu->arch.efer);
1581} 1636}
1582 1637
1583static void exit_lmode(struct kvm_vcpu *vcpu) 1638static void exit_lmode(struct kvm_vcpu *vcpu)
1584{ 1639{
1585 vcpu->arch.shadow_efer &= ~EFER_LMA; 1640 vcpu->arch.efer &= ~EFER_LMA;
1586 1641
1587 vmcs_write32(VM_ENTRY_CONTROLS, 1642 vmcs_write32(VM_ENTRY_CONTROLS,
1588 vmcs_read32(VM_ENTRY_CONTROLS) 1643 vmcs_read32(VM_ENTRY_CONTROLS)
@@ -1598,10 +1653,20 @@ static void vmx_flush_tlb(struct kvm_vcpu *vcpu)
1598 ept_sync_context(construct_eptp(vcpu->arch.mmu.root_hpa)); 1653 ept_sync_context(construct_eptp(vcpu->arch.mmu.root_hpa));
1599} 1654}
1600 1655
1656static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu)
1657{
1658 ulong cr0_guest_owned_bits = vcpu->arch.cr0_guest_owned_bits;
1659
1660 vcpu->arch.cr0 &= ~cr0_guest_owned_bits;
1661 vcpu->arch.cr0 |= vmcs_readl(GUEST_CR0) & cr0_guest_owned_bits;
1662}
1663
1601static void vmx_decache_cr4_guest_bits(struct kvm_vcpu *vcpu) 1664static void vmx_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
1602{ 1665{
1603 vcpu->arch.cr4 &= KVM_GUEST_CR4_MASK; 1666 ulong cr4_guest_owned_bits = vcpu->arch.cr4_guest_owned_bits;
1604 vcpu->arch.cr4 |= vmcs_readl(GUEST_CR4) & ~KVM_GUEST_CR4_MASK; 1667
1668 vcpu->arch.cr4 &= ~cr4_guest_owned_bits;
1669 vcpu->arch.cr4 |= vmcs_readl(GUEST_CR4) & cr4_guest_owned_bits;
1605} 1670}
1606 1671
1607static void ept_load_pdptrs(struct kvm_vcpu *vcpu) 1672static void ept_load_pdptrs(struct kvm_vcpu *vcpu)
@@ -1646,7 +1711,7 @@ static void ept_update_paging_mode_cr0(unsigned long *hw_cr0,
 			   (CPU_BASED_CR3_LOAD_EXITING |
 			    CPU_BASED_CR3_STORE_EXITING));
 		vcpu->arch.cr0 = cr0;
-		vmx_set_cr4(vcpu, vcpu->arch.cr4);
+		vmx_set_cr4(vcpu, kvm_read_cr4(vcpu));
 	} else if (!is_paging(vcpu)) {
 		/* From nonpaging to paging */
 		vmcs_write32(CPU_BASED_VM_EXEC_CONTROL,
@@ -1654,23 +1719,13 @@ static void ept_update_paging_mode_cr0(unsigned long *hw_cr0,
 			     ~(CPU_BASED_CR3_LOAD_EXITING |
 			       CPU_BASED_CR3_STORE_EXITING));
 		vcpu->arch.cr0 = cr0;
-		vmx_set_cr4(vcpu, vcpu->arch.cr4);
+		vmx_set_cr4(vcpu, kvm_read_cr4(vcpu));
 	}
 
 	if (!(cr0 & X86_CR0_WP))
 		*hw_cr0 &= ~X86_CR0_WP;
 }
 
-static void ept_update_paging_mode_cr4(unsigned long *hw_cr4,
-					struct kvm_vcpu *vcpu)
-{
-	if (!is_paging(vcpu)) {
-		*hw_cr4 &= ~X86_CR4_PAE;
-		*hw_cr4 |= X86_CR4_PSE;
-	} else if (!(vcpu->arch.cr4 & X86_CR4_PAE))
-		*hw_cr4 &= ~X86_CR4_PAE;
-}
-
 static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
@@ -1682,8 +1737,6 @@ static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 	else
 		hw_cr0 = (cr0 & ~KVM_GUEST_CR0_MASK) | KVM_VM_CR0_ALWAYS_ON;
 
-	vmx_fpu_deactivate(vcpu);
-
 	if (vmx->rmode.vm86_active && (cr0 & X86_CR0_PE))
 		enter_pmode(vcpu);
 
@@ -1691,7 +1744,7 @@ static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 		enter_rmode(vcpu);
 
 #ifdef CONFIG_X86_64
-	if (vcpu->arch.shadow_efer & EFER_LME) {
+	if (vcpu->arch.efer & EFER_LME) {
 		if (!is_paging(vcpu) && (cr0 & X86_CR0_PG))
 			enter_lmode(vcpu);
 		if (is_paging(vcpu) && !(cr0 & X86_CR0_PG))
@@ -1702,12 +1755,12 @@ static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 	if (enable_ept)
 		ept_update_paging_mode_cr0(&hw_cr0, cr0, vcpu);
 
+	if (!vcpu->fpu_active)
+		hw_cr0 |= X86_CR0_TS | X86_CR0_MP;
+
 	vmcs_writel(CR0_READ_SHADOW, cr0);
 	vmcs_writel(GUEST_CR0, hw_cr0);
 	vcpu->arch.cr0 = cr0;
-
-	if (!(cr0 & X86_CR0_TS) || !(cr0 & X86_CR0_PE))
-		vmx_fpu_activate(vcpu);
 }
 
 static u64 construct_eptp(unsigned long root_hpa)
@@ -1738,8 +1791,6 @@ static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
 
 	vmx_flush_tlb(vcpu);
 	vmcs_writel(GUEST_CR3, guest_cr3);
-	if (vcpu->arch.cr0 & X86_CR0_PE)
-		vmx_fpu_deactivate(vcpu);
 }
 
 static void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
@@ -1748,8 +1799,14 @@ static void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 		    KVM_RMODE_VM_CR4_ALWAYS_ON : KVM_PMODE_VM_CR4_ALWAYS_ON);
 
 	vcpu->arch.cr4 = cr4;
-	if (enable_ept)
-		ept_update_paging_mode_cr4(&hw_cr4, vcpu);
+	if (enable_ept) {
+		if (!is_paging(vcpu)) {
+			hw_cr4 &= ~X86_CR4_PAE;
+			hw_cr4 |= X86_CR4_PSE;
+		} else if (!(cr4 & X86_CR4_PAE)) {
+			hw_cr4 &= ~X86_CR4_PAE;
+		}
+	}
 
 	vmcs_writel(CR4_READ_SHADOW, cr4);
 	vmcs_writel(GUEST_CR4, hw_cr4);
@@ -1787,7 +1844,7 @@ static void vmx_get_segment(struct kvm_vcpu *vcpu,
 
 static int vmx_get_cpl(struct kvm_vcpu *vcpu)
 {
-	if (!(vcpu->arch.cr0 & X86_CR0_PE)) /* if real mode */
+	if (!is_protmode(vcpu))
 		return 0;
 
 	if (vmx_get_rflags(vcpu) & X86_EFLAGS_VM) /* if virtual 8086 */
@@ -2042,7 +2099,7 @@ static bool cs_ss_rpl_check(struct kvm_vcpu *vcpu)
 static bool guest_state_valid(struct kvm_vcpu *vcpu)
 {
 	/* real mode guest state checks */
-	if (!(vcpu->arch.cr0 & X86_CR0_PE)) {
+	if (!is_protmode(vcpu)) {
 		if (!rmode_segment_valid(vcpu, VCPU_SREG_CS))
 			return false;
 		if (!rmode_segment_valid(vcpu, VCPU_SREG_SS))
@@ -2175,7 +2232,7 @@ static int alloc_apic_access_page(struct kvm *kvm)
 	struct kvm_userspace_memory_region kvm_userspace_mem;
 	int r = 0;
 
-	down_write(&kvm->slots_lock);
+	mutex_lock(&kvm->slots_lock);
 	if (kvm->arch.apic_access_page)
 		goto out;
 	kvm_userspace_mem.slot = APIC_ACCESS_PAGE_PRIVATE_MEMSLOT;
@@ -2188,7 +2245,7 @@ static int alloc_apic_access_page(struct kvm *kvm)
 
 	kvm->arch.apic_access_page = gfn_to_page(kvm, 0xfee00);
 out:
-	up_write(&kvm->slots_lock);
+	mutex_unlock(&kvm->slots_lock);
 	return r;
 }
 
@@ -2197,7 +2254,7 @@ static int alloc_identity_pagetable(struct kvm *kvm)
 	struct kvm_userspace_memory_region kvm_userspace_mem;
 	int r = 0;
 
-	down_write(&kvm->slots_lock);
+	mutex_lock(&kvm->slots_lock);
 	if (kvm->arch.ept_identity_pagetable)
 		goto out;
 	kvm_userspace_mem.slot = IDENTITY_PAGETABLE_PRIVATE_MEMSLOT;
@@ -2212,7 +2269,7 @@ static int alloc_identity_pagetable(struct kvm *kvm)
 	kvm->arch.ept_identity_pagetable = gfn_to_page(kvm,
 			kvm->arch.ept_identity_map_addr >> PAGE_SHIFT);
 out:
-	up_write(&kvm->slots_lock);
+	mutex_unlock(&kvm->slots_lock);
 	return r;
 }
 
@@ -2384,14 +2441,12 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
 	for (i = 0; i < NR_VMX_MSR; ++i) {
 		u32 index = vmx_msr_index[i];
 		u32 data_low, data_high;
-		u64 data;
 		int j = vmx->nmsrs;
 
 		if (rdmsr_safe(index, &data_low, &data_high) < 0)
 			continue;
 		if (wrmsr_safe(index, data_low, data_high) < 0)
 			continue;
-		data = data_low | ((u64)data_high << 32);
 		vmx->guest_msrs[j].index = i;
 		vmx->guest_msrs[j].data = 0;
 		vmx->guest_msrs[j].mask = -1ull;
@@ -2404,7 +2459,10 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
 	vmcs_write32(VM_ENTRY_CONTROLS, vmcs_config.vmentry_ctrl);
 
 	vmcs_writel(CR0_GUEST_HOST_MASK, ~0UL);
-	vmcs_writel(CR4_GUEST_HOST_MASK, KVM_GUEST_CR4_MASK);
+	vmx->vcpu.arch.cr4_guest_owned_bits = KVM_CR4_GUEST_OWNED_BITS;
+	if (enable_ept)
+		vmx->vcpu.arch.cr4_guest_owned_bits |= X86_CR4_PGE;
+	vmcs_writel(CR4_GUEST_HOST_MASK, ~vmx->vcpu.arch.cr4_guest_owned_bits);
 
 	tsc_base = vmx->vcpu.kvm->arch.vm_init_tsc;
 	rdtscll(tsc_this);
@@ -2429,10 +2487,10 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	u64 msr;
-	int ret;
+	int ret, idx;
 
 	vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP));
-	down_read(&vcpu->kvm->slots_lock);
+	idx = srcu_read_lock(&vcpu->kvm->srcu);
 	if (!init_rmode(vmx->vcpu.kvm)) {
 		ret = -ENOMEM;
 		goto out;
@@ -2526,7 +2584,7 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)
 	vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid);
 
 	vmx->vcpu.arch.cr0 = X86_CR0_NW | X86_CR0_CD | X86_CR0_ET;
-	vmx_set_cr0(&vmx->vcpu, vmx->vcpu.arch.cr0); /* enter rmode */
+	vmx_set_cr0(&vmx->vcpu, kvm_read_cr0(vcpu)); /* enter rmode */
 	vmx_set_cr4(&vmx->vcpu, 0);
 	vmx_set_efer(&vmx->vcpu, 0);
 	vmx_fpu_activate(&vmx->vcpu);
@@ -2540,7 +2598,7 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)
 	vmx->emulation_required = 0;
 
 out:
-	up_read(&vcpu->kvm->slots_lock);
+	srcu_read_unlock(&vcpu->kvm->srcu, idx);
 	return ret;
 }
 
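[Editor's note: the reset path above swaps the slots_lock rwsem for SRCU — readers bracket memslot access with srcu_read_lock()/srcu_read_unlock(), writers publish a new copy and wait for readers to drain. A toy userspace analogue of that publish-and-wait shape, purely illustrative: real SRCU uses per-CPU counters and two epochs, and the spin below would stall behind a long-lived reader:]

```c
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct memslots {
	int nslots;
};

static _Atomic(struct memslots *) slots_ptr;
static atomic_int readers;

static struct memslots *read_lock(void)
{
	atomic_fetch_add(&readers, 1);
	return atomic_load(&slots_ptr);		/* snapshot current array */
}

static void read_unlock(void)
{
	atomic_fetch_sub(&readers, 1);
}

static void publish(struct memslots *new_slots)
{
	struct memslots *old = atomic_exchange(&slots_ptr, new_slots);

	while (atomic_load(&readers) > 0)
		;				/* "synchronize" step */
	free(old);				/* no reader can still see it */
}

int main(void)
{
	struct memslots *first = calloc(1, sizeof(*first));
	struct memslots *second = calloc(1, sizeof(*second));
	struct memslots *snap;

	atomic_store(&slots_ptr, first);

	snap = read_lock();
	printf("reader sees %d slots\n", snap->nslots);
	read_unlock();

	second->nslots = 4;
	publish(second);			/* frees 'first' safely */
	free(atomic_load(&slots_ptr));
	return 0;
}
```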
@@ -2717,6 +2775,12 @@ static int handle_rmode_exception(struct kvm_vcpu *vcpu,
 		kvm_queue_exception(vcpu, vec);
 		return 1;
 	case BP_VECTOR:
+		/*
+		 * Update instruction length as we may reinject the exception
+		 * from user space while in guest debugging mode.
+		 */
+		to_vmx(vcpu)->vcpu.arch.event_exit_inst_len =
+			vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
 		if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
 			return 0;
 		/* fall through */
@@ -2839,6 +2903,13 @@ static int handle_exception(struct kvm_vcpu *vcpu)
 		kvm_run->debug.arch.dr7 = vmcs_readl(GUEST_DR7);
 		/* fall through */
 	case BP_VECTOR:
+		/*
+		 * Update instruction length as we may reinject #BP from
+		 * user space while in guest debugging mode. Reading it for
+		 * #DB as well causes no harm, it is not used in that case.
+		 */
+		vmx->vcpu.arch.event_exit_inst_len =
+			vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
 		kvm_run->exit_reason = KVM_EXIT_DEBUG;
 		kvm_run->debug.arch.pc = vmcs_readl(GUEST_CS_BASE) + rip;
 		kvm_run->debug.arch.exception = ex_no;
@@ -2940,11 +3011,10 @@ static int handle_cr(struct kvm_vcpu *vcpu)
 		};
 		break;
 	case 2: /* clts */
-		vmx_fpu_deactivate(vcpu);
-		vcpu->arch.cr0 &= ~X86_CR0_TS;
-		vmcs_writel(CR0_READ_SHADOW, vcpu->arch.cr0);
-		vmx_fpu_activate(vcpu);
+		vmx_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~X86_CR0_TS));
+		trace_kvm_cr_write(0, kvm_read_cr0(vcpu));
 		skip_emulated_instruction(vcpu);
+		vmx_fpu_activate(vcpu);
 		return 1;
 	case 1: /*mov from cr*/
 		switch (cr) {
@@ -2962,7 +3032,9 @@ static int handle_cr(struct kvm_vcpu *vcpu)
 		}
 		break;
 	case 3: /* lmsw */
-		kvm_lmsw(vcpu, (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f);
+		val = (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f;
+		trace_kvm_cr_write(0, (kvm_read_cr0(vcpu) & ~0xful) | val);
+		kvm_lmsw(vcpu, val);
 
 		skip_emulated_instruction(vcpu);
 		return 1;
@@ -2975,12 +3047,22 @@ static int handle_cr(struct kvm_vcpu *vcpu)
 	return 0;
 }
 
+static int check_dr_alias(struct kvm_vcpu *vcpu)
+{
+	if (kvm_read_cr4_bits(vcpu, X86_CR4_DE)) {
+		kvm_queue_exception(vcpu, UD_VECTOR);
+		return -1;
+	}
+	return 0;
+}
+
 static int handle_dr(struct kvm_vcpu *vcpu)
 {
 	unsigned long exit_qualification;
 	unsigned long val;
 	int dr, reg;
 
+	/* Do not handle if CPL > 0; a #GP will be raised on re-entry */
 	if (!kvm_require_cpl(vcpu, 0))
 		return 1;
 	dr = vmcs_readl(GUEST_DR7);
@@ -3016,14 +3098,20 @@ static int handle_dr(struct kvm_vcpu *vcpu)
 		case 0 ... 3:
 			val = vcpu->arch.db[dr];
 			break;
+		case 4:
+			if (check_dr_alias(vcpu) < 0)
+				return 1;
+			/* fall through */
 		case 6:
 			val = vcpu->arch.dr6;
 			break;
-		case 7:
+		case 5:
+			if (check_dr_alias(vcpu) < 0)
+				return 1;
+			/* fall through */
+		default: /* 7 */
 			val = vcpu->arch.dr7;
 			break;
-		default:
-			val = 0;
 		}
 		kvm_register_write(vcpu, reg, val);
 	} else {
@@ -3034,21 +3122,25 @@ static int handle_dr(struct kvm_vcpu *vcpu)
 		if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
 			vcpu->arch.eff_db[dr] = val;
 		break;
-	case 4 ... 5:
-		if (vcpu->arch.cr4 & X86_CR4_DE)
-			kvm_queue_exception(vcpu, UD_VECTOR);
-		break;
+	case 4:
+		if (check_dr_alias(vcpu) < 0)
+			return 1;
+		/* fall through */
 	case 6:
 		if (val & 0xffffffff00000000ULL) {
-			kvm_queue_exception(vcpu, GP_VECTOR);
-			break;
+			kvm_inject_gp(vcpu, 0);
+			return 1;
 		}
 		vcpu->arch.dr6 = (val & DR6_VOLATILE) | DR6_FIXED_1;
 		break;
-	case 7:
+	case 5:
+		if (check_dr_alias(vcpu) < 0)
+			return 1;
+		/* fall through */
+	default: /* 7 */
 		if (val & 0xffffffff00000000ULL) {
-			kvm_queue_exception(vcpu, GP_VECTOR);
-			break;
+			kvm_inject_gp(vcpu, 0);
+			return 1;
 		}
 		vcpu->arch.dr7 = (val & DR7_VOLATILE) | DR7_FIXED_1;
 		if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) {
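[Editor's note: the hunk above folds the architectural DR4/DR5 aliasing into check_dr_alias(): with CR4.DE clear they alias DR6/DR7, with CR4.DE set any access must raise #UD. A compilable toy model of that decision table, using the same GCC-style "case 0 ... 3" range and fall-through layout as the handler; the enum outcomes are invented for illustration:]

```c
#include <stdio.h>
#include <stdbool.h>

enum outcome { USE_DR0_3, USE_DR6, USE_DR7, RAISE_UD };

/* Map a debug-register number to its effective target, honouring
 * the DR4/DR5 aliasing rules controlled by CR4.DE. */
static enum outcome classify_dr(int dr, bool cr4_de)
{
	switch (dr) {
	case 0 ... 3:
		return USE_DR0_3;
	case 4:
		if (cr4_de)
			return RAISE_UD;	/* aliasing disabled -> #UD */
		/* fall through */
	case 6:
		return USE_DR6;
	case 5:
		if (cr4_de)
			return RAISE_UD;
		/* fall through */
	default:	/* 7 */
		return USE_DR7;
	}
}

int main(void)
{
	printf("dr4, CR4.DE=0 -> %d (USE_DR6)\n", classify_dr(4, false));
	printf("dr5, CR4.DE=1 -> %d (RAISE_UD)\n", classify_dr(5, true));
	printf("dr7, any      -> %d (USE_DR7)\n", classify_dr(7, false));
	return 0;
}
```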
@@ -3075,6 +3167,7 @@ static int handle_rdmsr(struct kvm_vcpu *vcpu)
 	u64 data;
 
 	if (vmx_get_msr(vcpu, ecx, &data)) {
+		trace_kvm_msr_read_ex(ecx);
 		kvm_inject_gp(vcpu, 0);
 		return 1;
 	}
@@ -3094,13 +3187,13 @@ static int handle_wrmsr(struct kvm_vcpu *vcpu)
 	u64 data = (vcpu->arch.regs[VCPU_REGS_RAX] & -1u)
 		| ((u64)(vcpu->arch.regs[VCPU_REGS_RDX] & -1u) << 32);
 
-	trace_kvm_msr_write(ecx, data);
-
 	if (vmx_set_msr(vcpu, ecx, data) != 0) {
+		trace_kvm_msr_write_ex(ecx, data);
 		kvm_inject_gp(vcpu, 0);
 		return 1;
 	}
 
+	trace_kvm_msr_write(ecx, data);
 	skip_emulated_instruction(vcpu);
 	return 1;
 }
@@ -3385,7 +3478,6 @@ static int handle_invalid_guest_state(struct kvm_vcpu *vcpu)
 	}
 
 	if (err != EMULATE_DONE) {
-		kvm_report_emulation_failure(vcpu, "emulation failure");
 		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
 		vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
 		vcpu->run->internal.ndata = 0;
@@ -3416,6 +3508,12 @@ static int handle_pause(struct kvm_vcpu *vcpu)
 	return 1;
 }
 
+static int handle_invalid_op(struct kvm_vcpu *vcpu)
+{
+	kvm_queue_exception(vcpu, UD_VECTOR);
+	return 1;
+}
+
 /*
  * The exit handlers return 1 if the exit was handled fully and guest execution
  * may resume. Otherwise they set the kvm_run parameter to indicate what needs
@@ -3453,6 +3551,8 @@ static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = {
 	[EXIT_REASON_EPT_VIOLATION]	      = handle_ept_violation,
 	[EXIT_REASON_EPT_MISCONFIG]           = handle_ept_misconfig,
 	[EXIT_REASON_PAUSE_INSTRUCTION]       = handle_pause,
+	[EXIT_REASON_MWAIT_INSTRUCTION]	      = handle_invalid_op,
+	[EXIT_REASON_MONITOR_INSTRUCTION]     = handle_invalid_op,
 };
 
 static const int kvm_vmx_max_exit_handlers =
@@ -3686,9 +3786,6 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
 	 */
 	vmcs_writel(HOST_CR0, read_cr0());
 
-	if (vcpu->arch.switch_db_regs)
-		set_debugreg(vcpu->arch.dr6, 6);
-
 	asm(
 		/* Store host registers */
 		"push %%"R"dx; push %%"R"bp;"
@@ -3789,9 +3886,6 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
 				  | (1 << VCPU_EXREG_PDPTR));
 	vcpu->arch.regs_dirty = 0;
 
-	if (vcpu->arch.switch_db_regs)
-		get_debugreg(vcpu->arch.dr6, 6);
-
 	vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
 	if (vmx->rmode.irq.pending)
 		fixup_rmode_irq(vmx);
@@ -3920,7 +4014,7 @@ static u64 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
 	 *    b. VT-d with snooping control feature: snooping control feature of
 	 *       VT-d engine can guarantee the cache correctness. Just set it
 	 *       to WB to keep consistent with host. So the same as item 3.
-	 * 3. EPT without VT-d: always map as WB and set IGMT=1 to keep
+	 * 3. EPT without VT-d: always map as WB and set IPAT=1 to keep
 	 *    consistent with host MTRR
 	 */
 	if (is_mmio)
@@ -3931,37 +4025,88 @@ static u64 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
 			VMX_EPT_MT_EPTE_SHIFT;
 	else
 		ret = (MTRR_TYPE_WRBACK << VMX_EPT_MT_EPTE_SHIFT)
-			| VMX_EPT_IGMT_BIT;
+			| VMX_EPT_IPAT_BIT;
 
 	return ret;
 }
 
+#define _ER(x) { EXIT_REASON_##x, #x }
+
 static const struct trace_print_flags vmx_exit_reasons_str[] = {
-	{ EXIT_REASON_EXCEPTION_NMI,           "exception" },
-	{ EXIT_REASON_EXTERNAL_INTERRUPT,      "ext_irq" },
-	{ EXIT_REASON_TRIPLE_FAULT,            "triple_fault" },
-	{ EXIT_REASON_NMI_WINDOW,              "nmi_window" },
-	{ EXIT_REASON_IO_INSTRUCTION,          "io_instruction" },
-	{ EXIT_REASON_CR_ACCESS,               "cr_access" },
-	{ EXIT_REASON_DR_ACCESS,               "dr_access" },
-	{ EXIT_REASON_CPUID,                   "cpuid" },
-	{ EXIT_REASON_MSR_READ,                "rdmsr" },
-	{ EXIT_REASON_MSR_WRITE,               "wrmsr" },
-	{ EXIT_REASON_PENDING_INTERRUPT,       "interrupt_window" },
-	{ EXIT_REASON_HLT,                     "halt" },
-	{ EXIT_REASON_INVLPG,                  "invlpg" },
-	{ EXIT_REASON_VMCALL,                  "hypercall" },
-	{ EXIT_REASON_TPR_BELOW_THRESHOLD,     "tpr_below_thres" },
-	{ EXIT_REASON_APIC_ACCESS,             "apic_access" },
-	{ EXIT_REASON_WBINVD,                  "wbinvd" },
-	{ EXIT_REASON_TASK_SWITCH,             "task_switch" },
-	{ EXIT_REASON_EPT_VIOLATION,           "ept_violation" },
+	_ER(EXCEPTION_NMI),
+	_ER(EXTERNAL_INTERRUPT),
+	_ER(TRIPLE_FAULT),
+	_ER(PENDING_INTERRUPT),
+	_ER(NMI_WINDOW),
+	_ER(TASK_SWITCH),
+	_ER(CPUID),
+	_ER(HLT),
+	_ER(INVLPG),
+	_ER(RDPMC),
+	_ER(RDTSC),
+	_ER(VMCALL),
+	_ER(VMCLEAR),
+	_ER(VMLAUNCH),
+	_ER(VMPTRLD),
+	_ER(VMPTRST),
+	_ER(VMREAD),
+	_ER(VMRESUME),
+	_ER(VMWRITE),
+	_ER(VMOFF),
+	_ER(VMON),
+	_ER(CR_ACCESS),
+	_ER(DR_ACCESS),
+	_ER(IO_INSTRUCTION),
+	_ER(MSR_READ),
+	_ER(MSR_WRITE),
+	_ER(MWAIT_INSTRUCTION),
+	_ER(MONITOR_INSTRUCTION),
+	_ER(PAUSE_INSTRUCTION),
+	_ER(MCE_DURING_VMENTRY),
+	_ER(TPR_BELOW_THRESHOLD),
+	_ER(APIC_ACCESS),
+	_ER(EPT_VIOLATION),
+	_ER(EPT_MISCONFIG),
+	_ER(WBINVD),
 	{ -1, NULL }
 };
 
-static bool vmx_gb_page_enable(void)
-{
-	return false;
-}
+#undef _ER
+
+static int vmx_get_lpage_level(void)
+{
+	if (enable_ept && !cpu_has_vmx_ept_1g_page())
+		return PT_DIRECTORY_LEVEL;
+	else
+		/* For shadow and EPT supported 1GB page */
+		return PT_PDPE_LEVEL;
+}
+
+static inline u32 bit(int bitno)
+{
+	return 1 << (bitno & 31);
+}
+
+static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
+{
+	struct kvm_cpuid_entry2 *best;
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
+	u32 exec_control;
+
+	vmx->rdtscp_enabled = false;
+	if (vmx_rdtscp_supported()) {
+		exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
+		if (exec_control & SECONDARY_EXEC_RDTSCP) {
+			best = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
+			if (best && (best->edx & bit(X86_FEATURE_RDTSCP)))
+				vmx->rdtscp_enabled = true;
+			else {
+				exec_control &= ~SECONDARY_EXEC_RDTSCP;
+				vmcs_write32(SECONDARY_VM_EXEC_CONTROL,
+						exec_control);
+			}
+		}
+	}
+}
 
 static struct kvm_x86_ops vmx_x86_ops = {
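[Editor's note: _ER() above leans on two preprocessor operators — '##' pastes the argument onto the EXIT_REASON_ prefix to form the enumerator, '#' stringizes it for the trace label, so the value and its printed name can never drift apart. A self-contained demonstration; the EXIT_REASON_* values and the struct below are local stand-ins, not the real VMX definitions:]

```c
#include <stdio.h>

enum { EXIT_REASON_CPUID = 10, EXIT_REASON_HLT = 12 };

struct trace_print_flags {
	long mask;
	const char *name;
};

/* '##' builds the enumerator name, '#' builds the string from the
 * same token, exactly as in the vmx_exit_reasons_str[] table. */
#define _ER(x) { EXIT_REASON_##x, #x }

static const struct trace_print_flags reasons[] = {
	_ER(CPUID),
	_ER(HLT),
	{ -1, NULL }
};

#undef _ER

int main(void)
{
	for (int i = 0; reasons[i].name; i++)
		printf("%ld -> %s\n", reasons[i].mask, reasons[i].name);
	return 0;
}
```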
@@ -3990,6 +4135,7 @@ static struct kvm_x86_ops vmx_x86_ops = {
 	.set_segment = vmx_set_segment,
 	.get_cpl = vmx_get_cpl,
 	.get_cs_db_l_bits = vmx_get_cs_db_l_bits,
+	.decache_cr0_guest_bits = vmx_decache_cr0_guest_bits,
 	.decache_cr4_guest_bits = vmx_decache_cr4_guest_bits,
 	.set_cr0 = vmx_set_cr0,
 	.set_cr3 = vmx_set_cr3,
@@ -4002,6 +4148,8 @@ static struct kvm_x86_ops vmx_x86_ops = {
 	.cache_reg = vmx_cache_reg,
 	.get_rflags = vmx_get_rflags,
 	.set_rflags = vmx_set_rflags,
+	.fpu_activate = vmx_fpu_activate,
+	.fpu_deactivate = vmx_fpu_deactivate,
 
 	.tlb_flush = vmx_flush_tlb,
 
@@ -4027,7 +4175,11 @@ static struct kvm_x86_ops vmx_x86_ops = {
 	.get_mt_mask = vmx_get_mt_mask,
 
 	.exit_reasons_str = vmx_exit_reasons_str,
-	.gb_page_enable = vmx_gb_page_enable,
+	.get_lpage_level = vmx_get_lpage_level,
+
+	.cpuid_update = vmx_cpuid_update,
+
+	.rdtscp_supported = vmx_rdtscp_supported,
 };
 
 static int __init vmx_init(void)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index a1e1bc9d412d..e46282a56565 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -38,6 +38,7 @@
 #include <linux/intel-iommu.h>
 #include <linux/cpufreq.h>
 #include <linux/user-return-notifier.h>
+#include <linux/srcu.h>
 #include <trace/events/kvm.h>
 #undef TRACE_INCLUDE_FILE
 #define CREATE_TRACE_POINTS
@@ -93,16 +94,16 @@ module_param_named(ignore_msrs, ignore_msrs, bool, S_IRUGO | S_IWUSR);
 
 struct kvm_shared_msrs_global {
 	int nr;
-	struct kvm_shared_msr {
-		u32 msr;
-		u64 value;
-	} msrs[KVM_NR_SHARED_MSRS];
+	u32 msrs[KVM_NR_SHARED_MSRS];
 };
 
 struct kvm_shared_msrs {
 	struct user_return_notifier urn;
 	bool registered;
-	u64 current_value[KVM_NR_SHARED_MSRS];
+	struct kvm_shared_msr_values {
+		u64 host;
+		u64 curr;
+	} values[KVM_NR_SHARED_MSRS];
 };
 
 static struct kvm_shared_msrs_global __read_mostly shared_msrs_global;
@@ -147,53 +148,64 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
 static void kvm_on_user_return(struct user_return_notifier *urn)
 {
 	unsigned slot;
-	struct kvm_shared_msr *global;
 	struct kvm_shared_msrs *locals
 		= container_of(urn, struct kvm_shared_msrs, urn);
+	struct kvm_shared_msr_values *values;
 
 	for (slot = 0; slot < shared_msrs_global.nr; ++slot) {
-		global = &shared_msrs_global.msrs[slot];
-		if (global->value != locals->current_value[slot]) {
-			wrmsrl(global->msr, global->value);
-			locals->current_value[slot] = global->value;
+		values = &locals->values[slot];
+		if (values->host != values->curr) {
+			wrmsrl(shared_msrs_global.msrs[slot], values->host);
+			values->curr = values->host;
 		}
 	}
 	locals->registered = false;
 	user_return_notifier_unregister(urn);
 }
 
-void kvm_define_shared_msr(unsigned slot, u32 msr)
+static void shared_msr_update(unsigned slot, u32 msr)
 {
-	int cpu;
+	struct kvm_shared_msrs *smsr;
 	u64 value;
 
+	smsr = &__get_cpu_var(shared_msrs);
+	/* read-only here; nobody should be modifying it at this
+	 * point, so no lock is needed */
+	if (slot >= shared_msrs_global.nr) {
+		printk(KERN_ERR "kvm: invalid MSR slot!");
+		return;
+	}
+	rdmsrl_safe(msr, &value);
+	smsr->values[slot].host = value;
+	smsr->values[slot].curr = value;
+}
+
+void kvm_define_shared_msr(unsigned slot, u32 msr)
+{
 	if (slot >= shared_msrs_global.nr)
 		shared_msrs_global.nr = slot + 1;
-	shared_msrs_global.msrs[slot].msr = msr;
-	rdmsrl_safe(msr, &value);
-	shared_msrs_global.msrs[slot].value = value;
-	for_each_online_cpu(cpu)
-		per_cpu(shared_msrs, cpu).current_value[slot] = value;
+	shared_msrs_global.msrs[slot] = msr;
+	/* make sure shared_msrs_global has been updated before it is used */
+	smp_wmb();
 }
 EXPORT_SYMBOL_GPL(kvm_define_shared_msr);
 
 static void kvm_shared_msr_cpu_online(void)
 {
 	unsigned i;
-	struct kvm_shared_msrs *locals = &__get_cpu_var(shared_msrs);
 
 	for (i = 0; i < shared_msrs_global.nr; ++i)
-		locals->current_value[i] = shared_msrs_global.msrs[i].value;
+		shared_msr_update(i, shared_msrs_global.msrs[i]);
 }
 
 void kvm_set_shared_msr(unsigned slot, u64 value, u64 mask)
 {
 	struct kvm_shared_msrs *smsr = &__get_cpu_var(shared_msrs);
 
-	if (((value ^ smsr->current_value[slot]) & mask) == 0)
+	if (((value ^ smsr->values[slot].curr) & mask) == 0)
 		return;
-	smsr->current_value[slot] = value;
-	wrmsrl(shared_msrs_global.msrs[slot].msr, value);
+	smsr->values[slot].curr = value;
+	wrmsrl(shared_msrs_global.msrs[slot], value);
 	if (!smsr->registered) {
 		smsr->urn.on_user_return = kvm_on_user_return;
 		user_return_notifier_register(&smsr->urn);
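[Editor's note: kvm_set_shared_msr() above only issues the expensive MSR write when a bit under the caller's mask actually changed. A standalone rerun of that masked compare; wrmsrl() is faked with a printf and the MSR index is just a placeholder:]

```c
#include <stdio.h>
#include <stdint.h>

struct msr_values {
	uint64_t host;
	uint64_t curr;
};

/* Fake wrmsrl(): in the kernel this is the slow hardware write. */
static void wrmsrl(uint32_t msr, uint64_t value)
{
	printf("wrmsr %#x <- %#llx\n", msr, (unsigned long long)value);
}

static void set_shared_msr(struct msr_values *v, uint32_t msr,
			   uint64_t value, uint64_t mask)
{
	if (((value ^ v->curr) & mask) == 0)
		return;				/* nothing changed under mask */
	v->curr = value;
	wrmsrl(msr, value);
}

int main(void)
{
	struct msr_values star = { .host = 0x10, .curr = 0x10 };

	set_shared_msr(&star, 0xc0000081, 0x10, ~0ULL);	/* skipped  */
	set_shared_msr(&star, 0xc0000081, 0x20, ~0ULL);	/* written  */
	return 0;
}
```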
@@ -257,12 +269,68 @@ void kvm_set_apic_base(struct kvm_vcpu *vcpu, u64 data)
 }
 EXPORT_SYMBOL_GPL(kvm_set_apic_base);
 
+#define EXCPT_BENIGN		0
+#define EXCPT_CONTRIBUTORY	1
+#define EXCPT_PF		2
+
+static int exception_class(int vector)
+{
+	switch (vector) {
+	case PF_VECTOR:
+		return EXCPT_PF;
+	case DE_VECTOR:
+	case TS_VECTOR:
+	case NP_VECTOR:
+	case SS_VECTOR:
+	case GP_VECTOR:
+		return EXCPT_CONTRIBUTORY;
+	default:
+		break;
+	}
+	return EXCPT_BENIGN;
+}
+
+static void kvm_multiple_exception(struct kvm_vcpu *vcpu,
+		unsigned nr, bool has_error, u32 error_code)
+{
+	u32 prev_nr;
+	int class1, class2;
+
+	if (!vcpu->arch.exception.pending) {
+	queue:
+		vcpu->arch.exception.pending = true;
+		vcpu->arch.exception.has_error_code = has_error;
+		vcpu->arch.exception.nr = nr;
+		vcpu->arch.exception.error_code = error_code;
+		return;
+	}
+
+	/* an exception is already pending; work out how the two combine */
+	prev_nr = vcpu->arch.exception.nr;
+	if (prev_nr == DF_VECTOR) {
+		/* triple fault -> shutdown */
+		set_bit(KVM_REQ_TRIPLE_FAULT, &vcpu->requests);
+		return;
+	}
+	class1 = exception_class(prev_nr);
+	class2 = exception_class(nr);
+	if ((class1 == EXCPT_CONTRIBUTORY && class2 == EXCPT_CONTRIBUTORY)
+		|| (class1 == EXCPT_PF && class2 != EXCPT_BENIGN)) {
+		/* generate double fault per SDM Table 5-5 */
+		vcpu->arch.exception.pending = true;
+		vcpu->arch.exception.has_error_code = true;
+		vcpu->arch.exception.nr = DF_VECTOR;
+		vcpu->arch.exception.error_code = 0;
+	} else
+		/* replace the previous exception with the new one in the
+		   hope that instruction re-execution will regenerate the
+		   lost exception */
+		goto queue;
+}
+
 void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr)
 {
-	WARN_ON(vcpu->arch.exception.pending);
-	vcpu->arch.exception.pending = true;
-	vcpu->arch.exception.has_error_code = false;
-	vcpu->arch.exception.nr = nr;
+	kvm_multiple_exception(vcpu, nr, false, 0);
 }
 EXPORT_SYMBOL_GPL(kvm_queue_exception);
 
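[Editor's note: kvm_multiple_exception() above encodes SDM Table 5-5 — two contributory exceptions, or a page fault followed by anything non-benign, escalate to #DF; every other pair is handled serially. A compilable sketch of just that decision logic, with hard-coded x86 vector numbers (#PF=14, #GP=13, #DB=1):]

```c
#include <stdio.h>

#define EXCPT_BENIGN		0
#define EXCPT_CONTRIBUTORY	1
#define EXCPT_PF		2

static int exception_class(int vector)
{
	switch (vector) {
	case 14:				/* #PF */
		return EXCPT_PF;
	case 0: case 10: case 11: case 12: case 13:
		return EXCPT_CONTRIBUTORY;	/* #DE #TS #NP #SS #GP */
	default:
		return EXCPT_BENIGN;
	}
}

/* 1 if raising 'second' while 'first' is pending escalates to #DF */
static int becomes_double_fault(int first, int second)
{
	int c1 = exception_class(first);
	int c2 = exception_class(second);

	return (c1 == EXCPT_CONTRIBUTORY && c2 == EXCPT_CONTRIBUTORY) ||
	       (c1 == EXCPT_PF && c2 != EXCPT_BENIGN);
}

int main(void)
{
	printf("#PF then #GP -> #DF? %d\n", becomes_double_fault(14, 13)); /* 1 */
	printf("#GP then #GP -> #DF? %d\n", becomes_double_fault(13, 13)); /* 1 */
	printf("#GP then #DB -> #DF? %d\n", becomes_double_fault(13, 1));  /* 0 */
	return 0;
}
```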
@@ -270,25 +338,6 @@ void kvm_inject_page_fault(struct kvm_vcpu *vcpu, unsigned long addr,
 			   u32 error_code)
 {
 	++vcpu->stat.pf_guest;
-
-	if (vcpu->arch.exception.pending) {
-		switch(vcpu->arch.exception.nr) {
-		case DF_VECTOR:
-			/* triple fault -> shutdown */
-			set_bit(KVM_REQ_TRIPLE_FAULT, &vcpu->requests);
-			return;
-		case PF_VECTOR:
-			vcpu->arch.exception.nr = DF_VECTOR;
-			vcpu->arch.exception.error_code = 0;
-			return;
-		default:
-			/* replace previous exception with a new one in a hope
-			   that instruction re-execution will regenerate lost
-			   exception */
-			vcpu->arch.exception.pending = false;
-			break;
-		}
-	}
 	vcpu->arch.cr2 = addr;
 	kvm_queue_exception_e(vcpu, PF_VECTOR, error_code);
 }
@@ -301,11 +350,7 @@ EXPORT_SYMBOL_GPL(kvm_inject_nmi);
 
 void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
 {
-	WARN_ON(vcpu->arch.exception.pending);
-	vcpu->arch.exception.pending = true;
-	vcpu->arch.exception.has_error_code = true;
-	vcpu->arch.exception.nr = nr;
-	vcpu->arch.exception.error_code = error_code;
+	kvm_multiple_exception(vcpu, nr, true, error_code);
 }
 EXPORT_SYMBOL_GPL(kvm_queue_exception_e);
 
@@ -383,12 +428,18 @@ out:
 
 void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 {
-	if (cr0 & CR0_RESERVED_BITS) {
+	cr0 |= X86_CR0_ET;
+
+#ifdef CONFIG_X86_64
+	if (cr0 & 0xffffffff00000000UL) {
 		printk(KERN_DEBUG "set_cr0: 0x%lx #GP, reserved bits 0x%lx\n",
-		       cr0, vcpu->arch.cr0);
+		       cr0, kvm_read_cr0(vcpu));
 		kvm_inject_gp(vcpu, 0);
 		return;
 	}
+#endif
+
+	cr0 &= ~CR0_RESERVED_BITS;
 
 	if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD)) {
 		printk(KERN_DEBUG "set_cr0: #GP, CD == 0 && NW == 1\n");
@@ -405,7 +456,7 @@ void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 
 	if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
 #ifdef CONFIG_X86_64
-		if ((vcpu->arch.shadow_efer & EFER_LME)) {
+		if ((vcpu->arch.efer & EFER_LME)) {
 			int cs_db, cs_l;
 
 			if (!is_pae(vcpu)) {
@@ -443,13 +494,13 @@ EXPORT_SYMBOL_GPL(kvm_set_cr0);
 
 void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
 {
-	kvm_set_cr0(vcpu, (vcpu->arch.cr0 & ~0x0ful) | (msw & 0x0f));
+	kvm_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~0x0ful) | (msw & 0x0f));
 }
 EXPORT_SYMBOL_GPL(kvm_lmsw);
 
 void kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 {
-	unsigned long old_cr4 = vcpu->arch.cr4;
+	unsigned long old_cr4 = kvm_read_cr4(vcpu);
 	unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE;
 
 	if (cr4 & CR4_RESERVED_BITS) {
@@ -575,9 +626,11 @@ static inline u32 bit(int bitno)
  * kvm-specific. Those are put in the beginning of the list.
  */
 
-#define KVM_SAVE_MSRS_BEGIN	2
+#define KVM_SAVE_MSRS_BEGIN	5
 static u32 msrs_to_save[] = {
 	MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK,
+	HV_X64_MSR_GUEST_OS_ID, HV_X64_MSR_HYPERCALL,
+	HV_X64_MSR_APIC_ASSIST_PAGE,
 	MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
 	MSR_K6_STAR,
 #ifdef CONFIG_X86_64
@@ -602,7 +655,7 @@ static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
 	}
 
 	if (is_paging(vcpu)
-	    && (vcpu->arch.shadow_efer & EFER_LME) != (efer & EFER_LME)) {
+	    && (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME)) {
 		printk(KERN_DEBUG "set_efer: #GP, change LME while paging\n");
 		kvm_inject_gp(vcpu, 0);
 		return;
@@ -633,9 +686,9 @@ static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
 	kvm_x86_ops->set_efer(vcpu, efer);
 
 	efer &= ~EFER_LMA;
-	efer |= vcpu->arch.shadow_efer & EFER_LMA;
+	efer |= vcpu->arch.efer & EFER_LMA;
 
-	vcpu->arch.shadow_efer = efer;
+	vcpu->arch.efer = efer;
 
 	vcpu->arch.mmu.base_role.nxe = (efer & EFER_NX) && !tdp_enabled;
 	kvm_mmu_reset_context(vcpu);
@@ -957,6 +1010,100 @@ out:
 	return r;
 }
 
+static bool kvm_hv_hypercall_enabled(struct kvm *kvm)
+{
+	return kvm->arch.hv_hypercall & HV_X64_MSR_HYPERCALL_ENABLE;
+}
+
+static bool kvm_hv_msr_partition_wide(u32 msr)
+{
+	bool r = false;
+	switch (msr) {
+	case HV_X64_MSR_GUEST_OS_ID:
+	case HV_X64_MSR_HYPERCALL:
+		r = true;
+		break;
+	}
+
+	return r;
+}
+
+static int set_msr_hyperv_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data)
+{
+	struct kvm *kvm = vcpu->kvm;
+
+	switch (msr) {
+	case HV_X64_MSR_GUEST_OS_ID:
+		kvm->arch.hv_guest_os_id = data;
+		/* setting guest os id to zero disables hypercall page */
+		if (!kvm->arch.hv_guest_os_id)
+			kvm->arch.hv_hypercall &= ~HV_X64_MSR_HYPERCALL_ENABLE;
+		break;
+	case HV_X64_MSR_HYPERCALL: {
+		u64 gfn;
+		unsigned long addr;
+		u8 instructions[4];
+
+		/* if guest os id is not set hypercall should remain disabled */
+		if (!kvm->arch.hv_guest_os_id)
+			break;
+		if (!(data & HV_X64_MSR_HYPERCALL_ENABLE)) {
+			kvm->arch.hv_hypercall = data;
+			break;
+		}
+		gfn = data >> HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT;
+		addr = gfn_to_hva(kvm, gfn);
+		if (kvm_is_error_hva(addr))
+			return 1;
+		kvm_x86_ops->patch_hypercall(vcpu, instructions);
+		((unsigned char *)instructions)[3] = 0xc3; /* ret */
+		if (copy_to_user((void __user *)addr, instructions, 4))
+			return 1;
+		kvm->arch.hv_hypercall = data;
+		break;
+	}
+	default:
+		pr_unimpl(vcpu, "HYPER-V unimplemented wrmsr: 0x%x "
+			  "data 0x%llx\n", msr, data);
+		return 1;
+	}
+	return 0;
+}
+
+static int set_msr_hyperv(struct kvm_vcpu *vcpu, u32 msr, u64 data)
+{
+	switch (msr) {
+	case HV_X64_MSR_APIC_ASSIST_PAGE: {
+		unsigned long addr;
+
+		if (!(data & HV_X64_MSR_APIC_ASSIST_PAGE_ENABLE)) {
+			vcpu->arch.hv_vapic = data;
+			break;
+		}
+		addr = gfn_to_hva(vcpu->kvm, data >>
+				  HV_X64_MSR_APIC_ASSIST_PAGE_ADDRESS_SHIFT);
+		if (kvm_is_error_hva(addr))
+			return 1;
+		if (clear_user((void __user *)addr, PAGE_SIZE))
+			return 1;
+		vcpu->arch.hv_vapic = data;
+		break;
+	}
+	case HV_X64_MSR_EOI:
+		return kvm_hv_vapic_msr_write(vcpu, APIC_EOI, data);
+	case HV_X64_MSR_ICR:
+		return kvm_hv_vapic_msr_write(vcpu, APIC_ICR, data);
+	case HV_X64_MSR_TPR:
+		return kvm_hv_vapic_msr_write(vcpu, APIC_TASKPRI, data);
+	default:
+		pr_unimpl(vcpu, "HYPER-V unimplemented wrmsr: 0x%x "
+			  "data 0x%llx\n", msr, data);
+		return 1;
+	}
+
+	return 0;
+}
+
 int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
 {
 	switch (msr) {
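[Editor's note: the HV_X64_MSR_HYPERCALL path above builds the guest-visible hypercall page — patch_hypercall() emits the vendor's 3-byte VMCALL/VMMCALL and the code appends a RET, so a guest can simply CALL into the page. A sketch of the resulting 4 bytes; Intel's VMCALL encoding is used as the example, on AMD the patched bytes would be VMMCALL (0f 01 d9):]

```c
#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned char page[4096] = { 0 };
	const unsigned char vmcall[3] = { 0x0f, 0x01, 0xc1 };	/* vmcall */

	memcpy(page, vmcall, sizeof(vmcall));	/* patch_hypercall() step */
	page[3] = 0xc3;				/* appended ret */

	for (int i = 0; i < 4; i++)
		printf("%02x ", page[i]);	/* 0f 01 c1 c3 */
	printf("\n");
	return 0;
}
```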
@@ -1071,6 +1218,16 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
 			pr_unimpl(vcpu, "unimplemented perfctr wrmsr: "
 				"0x%x data 0x%llx\n", msr, data);
 		break;
+	case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15:
+		if (kvm_hv_msr_partition_wide(msr)) {
+			int r;
+			mutex_lock(&vcpu->kvm->lock);
+			r = set_msr_hyperv_pw(vcpu, msr, data);
+			mutex_unlock(&vcpu->kvm->lock);
+			return r;
+		} else
+			return set_msr_hyperv(vcpu, msr, data);
+		break;
 	default:
 		if (msr && (msr == vcpu->kvm->arch.xen_hvm_config.msr))
 			return xen_hvm_config(vcpu, data);
@@ -1170,6 +1327,54 @@ static int get_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
 	return 0;
 }
 
+static int get_msr_hyperv_pw(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
+{
+	u64 data = 0;
+	struct kvm *kvm = vcpu->kvm;
+
+	switch (msr) {
+	case HV_X64_MSR_GUEST_OS_ID:
+		data = kvm->arch.hv_guest_os_id;
+		break;
+	case HV_X64_MSR_HYPERCALL:
+		data = kvm->arch.hv_hypercall;
+		break;
+	default:
+		pr_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
+		return 1;
+	}
+
+	*pdata = data;
+	return 0;
+}
+
+static int get_msr_hyperv(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
+{
+	u64 data = 0;
+
+	switch (msr) {
+	case HV_X64_MSR_VP_INDEX: {
+		int r;
+		struct kvm_vcpu *v;
+		kvm_for_each_vcpu(r, v, vcpu->kvm)
+			if (v == vcpu)
+				data = r;
+		break;
+	}
+	case HV_X64_MSR_EOI:
+		return kvm_hv_vapic_msr_read(vcpu, APIC_EOI, pdata);
+	case HV_X64_MSR_ICR:
+		return kvm_hv_vapic_msr_read(vcpu, APIC_ICR, pdata);
+	case HV_X64_MSR_TPR:
+		return kvm_hv_vapic_msr_read(vcpu, APIC_TASKPRI, pdata);
+	default:
+		pr_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
+		return 1;
+	}
+	*pdata = data;
+	return 0;
+}
+
 int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
 {
 	u64 data;
@@ -1221,7 +1426,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
 		data |= (((uint64_t)4ULL) << 40);
 		break;
 	case MSR_EFER:
-		data = vcpu->arch.shadow_efer;
+		data = vcpu->arch.efer;
 		break;
 	case MSR_KVM_WALL_CLOCK:
 		data = vcpu->kvm->arch.wall_clock;
@@ -1236,6 +1441,16 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
 	case MSR_IA32_MCG_STATUS:
 	case MSR_IA32_MC0_CTL ... MSR_IA32_MC0_CTL + 4 * KVM_MAX_MCE_BANKS - 1:
 		return get_msr_mce(vcpu, msr, pdata);
+	case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15:
+		if (kvm_hv_msr_partition_wide(msr)) {
+			int r;
+			mutex_lock(&vcpu->kvm->lock);
+			r = get_msr_hyperv_pw(vcpu, msr, pdata);
+			mutex_unlock(&vcpu->kvm->lock);
+			return r;
+		} else
+			return get_msr_hyperv(vcpu, msr, pdata);
+		break;
 	default:
 		if (!ignore_msrs) {
 			pr_unimpl(vcpu, "unhandled rdmsr: 0x%x\n", msr);
@@ -1261,15 +1476,15 @@ static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs,
 		    int (*do_msr)(struct kvm_vcpu *vcpu,
 				  unsigned index, u64 *data))
 {
-	int i;
+	int i, idx;
 
 	vcpu_load(vcpu);
 
-	down_read(&vcpu->kvm->slots_lock);
+	idx = srcu_read_lock(&vcpu->kvm->srcu);
 	for (i = 0; i < msrs->nmsrs; ++i)
 		if (do_msr(vcpu, entries[i].index, &entries[i].data))
 			break;
-	up_read(&vcpu->kvm->slots_lock);
+	srcu_read_unlock(&vcpu->kvm->srcu, idx);
 
 	vcpu_put(vcpu);
 
@@ -1351,6 +1566,11 @@ int kvm_dev_ioctl_check_extension(long ext)
 	case KVM_CAP_XEN_HVM:
 	case KVM_CAP_ADJUST_CLOCK:
 	case KVM_CAP_VCPU_EVENTS:
+	case KVM_CAP_HYPERV:
+	case KVM_CAP_HYPERV_VAPIC:
+	case KVM_CAP_HYPERV_SPIN:
+	case KVM_CAP_PCI_SEGMENT:
+	case KVM_CAP_X86_ROBUST_SINGLESTEP:
 		r = 1;
 		break;
 	case KVM_CAP_COALESCED_MMIO:
@@ -1464,8 +1684,8 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 
 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 {
-	kvm_x86_ops->vcpu_put(vcpu);
 	kvm_put_guest_fpu(vcpu);
+	kvm_x86_ops->vcpu_put(vcpu);
 }
 
 static int is_efer_nx(void)
@@ -1530,6 +1750,7 @@ static int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
 	cpuid_fix_nx_cap(vcpu);
 	r = 0;
 	kvm_apic_set_version(vcpu);
+	kvm_x86_ops->cpuid_update(vcpu);
 
 out_free:
 	vfree(cpuid_entries);
@@ -1552,6 +1773,7 @@ static int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
 		goto out;
 	vcpu->arch.cpuid_nent = cpuid->nent;
 	kvm_apic_set_version(vcpu);
+	kvm_x86_ops->cpuid_update(vcpu);
 	return 0;
 
 out:
@@ -1594,12 +1816,15 @@ static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
 			 u32 index, int *nent, int maxnent)
 {
 	unsigned f_nx = is_efer_nx() ? F(NX) : 0;
-	unsigned f_gbpages = kvm_x86_ops->gb_page_enable() ? F(GBPAGES) : 0;
 #ifdef CONFIG_X86_64
+	unsigned f_gbpages = (kvm_x86_ops->get_lpage_level() == PT_PDPE_LEVEL)
+				? F(GBPAGES) : 0;
 	unsigned f_lm = F(LM);
 #else
+	unsigned f_gbpages = 0;
 	unsigned f_lm = 0;
 #endif
+	unsigned f_rdtscp = kvm_x86_ops->rdtscp_supported() ? F(RDTSCP) : 0;
 
 	/* cpuid 1.edx */
 	const u32 kvm_supported_word0_x86_features =
@@ -1619,7 +1844,7 @@ static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
 		F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
 		F(PAT) | F(PSE36) | 0 /* Reserved */ |
 		f_nx | 0 /* Reserved */ | F(MMXEXT) | F(MMX) |
-		F(FXSR) | F(FXSR_OPT) | f_gbpages | 0 /* RDTSCP */ |
+		F(FXSR) | F(FXSR_OPT) | f_gbpages | f_rdtscp |
 		0 /* Reserved */ | f_lm | F(3DNOWEXT) | F(3DNOW);
 	/* cpuid 1.ecx */
 	const u32 kvm_supported_word4_x86_features =
@@ -1866,7 +2091,7 @@ static int kvm_vcpu_ioctl_x86_set_mce(struct kvm_vcpu *vcpu,
 		return 0;
 	if (mce->status & MCI_STATUS_UC) {
 		if ((vcpu->arch.mcg_status & MCG_STATUS_MCIP) ||
-		    !(vcpu->arch.cr4 & X86_CR4_MCE)) {
+		    !kvm_read_cr4_bits(vcpu, X86_CR4_MCE)) {
 			printk(KERN_DEBUG "kvm: set_mce: "
 			       "injects mce exception while "
 			       "previous one is in progress!\n");
@@ -2160,14 +2385,14 @@ static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
 	if (kvm_nr_mmu_pages < KVM_MIN_ALLOC_MMU_PAGES)
 		return -EINVAL;
 
-	down_write(&kvm->slots_lock);
+	mutex_lock(&kvm->slots_lock);
 	spin_lock(&kvm->mmu_lock);
 
 	kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages);
 	kvm->arch.n_requested_mmu_pages = kvm_nr_mmu_pages;
 
 	spin_unlock(&kvm->mmu_lock);
-	up_write(&kvm->slots_lock);
+	mutex_unlock(&kvm->slots_lock);
 	return 0;
 }
 
@@ -2176,13 +2401,35 @@ static int kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm)
 	return kvm->arch.n_alloc_mmu_pages;
 }
 
+gfn_t unalias_gfn_instantiation(struct kvm *kvm, gfn_t gfn)
+{
+	int i;
+	struct kvm_mem_alias *alias;
+	struct kvm_mem_aliases *aliases;
+
+	aliases = rcu_dereference(kvm->arch.aliases);
+
+	for (i = 0; i < aliases->naliases; ++i) {
+		alias = &aliases->aliases[i];
+		if (alias->flags & KVM_ALIAS_INVALID)
+			continue;
+		if (gfn >= alias->base_gfn
+		    && gfn < alias->base_gfn + alias->npages)
+			return alias->target_gfn + gfn - alias->base_gfn;
+	}
+	return gfn;
+}
+
 gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
 {
 	int i;
 	struct kvm_mem_alias *alias;
+	struct kvm_mem_aliases *aliases;
 
-	for (i = 0; i < kvm->arch.naliases; ++i) {
-		alias = &kvm->arch.aliases[i];
+	aliases = rcu_dereference(kvm->arch.aliases);
+
+	for (i = 0; i < aliases->naliases; ++i) {
+		alias = &aliases->aliases[i];
 		if (gfn >= alias->base_gfn
 		    && gfn < alias->base_gfn + alias->npages)
 			return alias->target_gfn + gfn - alias->base_gfn;
@@ -2200,6 +2447,7 @@ static int kvm_vm_ioctl_set_memory_alias(struct kvm *kvm,
 {
 	int r, n;
 	struct kvm_mem_alias *p;
+	struct kvm_mem_aliases *aliases, *old_aliases;
 
 	r = -EINVAL;
 	/* General sanity checks */
@@ -2216,26 +2464,48 @@ static int kvm_vm_ioctl_set_memory_alias(struct kvm *kvm,
 	    < alias->target_phys_addr)
 		goto out;
 
-	down_write(&kvm->slots_lock);
-	spin_lock(&kvm->mmu_lock);
+	r = -ENOMEM;
+	aliases = kzalloc(sizeof(struct kvm_mem_aliases), GFP_KERNEL);
+	if (!aliases)
+		goto out;
+
+	mutex_lock(&kvm->slots_lock);
 
-	p = &kvm->arch.aliases[alias->slot];
+	/* invalidate any gfn reference in case of deletion/shrinking */
+	memcpy(aliases, kvm->arch.aliases, sizeof(struct kvm_mem_aliases));
+	aliases->aliases[alias->slot].flags |= KVM_ALIAS_INVALID;
+	old_aliases = kvm->arch.aliases;
+	rcu_assign_pointer(kvm->arch.aliases, aliases);
+	synchronize_srcu_expedited(&kvm->srcu);
+	kvm_mmu_zap_all(kvm);
+	kfree(old_aliases);
+
+	r = -ENOMEM;
+	aliases = kzalloc(sizeof(struct kvm_mem_aliases), GFP_KERNEL);
+	if (!aliases)
+		goto out_unlock;
+
+	memcpy(aliases, kvm->arch.aliases, sizeof(struct kvm_mem_aliases));
+
+	p = &aliases->aliases[alias->slot];
 	p->base_gfn = alias->guest_phys_addr >> PAGE_SHIFT;
 	p->npages = alias->memory_size >> PAGE_SHIFT;
 	p->target_gfn = alias->target_phys_addr >> PAGE_SHIFT;
+	p->flags &= ~(KVM_ALIAS_INVALID);
 
 	for (n = KVM_ALIAS_SLOTS; n > 0; --n)
-		if (kvm->arch.aliases[n - 1].npages)
+		if (aliases->aliases[n - 1].npages)
 			break;
-	kvm->arch.naliases = n;
+	aliases->naliases = n;
 
-	spin_unlock(&kvm->mmu_lock);
-	kvm_mmu_zap_all(kvm);
-
-	up_write(&kvm->slots_lock);
-
-	return 0;
+	old_aliases = kvm->arch.aliases;
+	rcu_assign_pointer(kvm->arch.aliases, aliases);
+	synchronize_srcu_expedited(&kvm->srcu);
+	kfree(old_aliases);
+	r = 0;
 
+out_unlock:
+	mutex_unlock(&kvm->slots_lock);
 out:
 	return r;
 }
@@ -2273,18 +2543,18 @@ static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
 	r = 0;
 	switch (chip->chip_id) {
 	case KVM_IRQCHIP_PIC_MASTER:
-		spin_lock(&pic_irqchip(kvm)->lock);
+		raw_spin_lock(&pic_irqchip(kvm)->lock);
 		memcpy(&pic_irqchip(kvm)->pics[0],
 		       &chip->chip.pic,
 		       sizeof(struct kvm_pic_state));
-		spin_unlock(&pic_irqchip(kvm)->lock);
+		raw_spin_unlock(&pic_irqchip(kvm)->lock);
 		break;
 	case KVM_IRQCHIP_PIC_SLAVE:
-		spin_lock(&pic_irqchip(kvm)->lock);
+		raw_spin_lock(&pic_irqchip(kvm)->lock);
 		memcpy(&pic_irqchip(kvm)->pics[1],
 		       &chip->chip.pic,
 		       sizeof(struct kvm_pic_state));
-		spin_unlock(&pic_irqchip(kvm)->lock);
+		raw_spin_unlock(&pic_irqchip(kvm)->lock);
 		break;
 	case KVM_IRQCHIP_IOAPIC:
 		r = kvm_set_ioapic(kvm, &chip->chip.ioapic);
@@ -2364,29 +2634,62 @@ static int kvm_vm_ioctl_reinject(struct kvm *kvm,
 int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
 				      struct kvm_dirty_log *log)
 {
-	int r;
-	int n;
+	int r, n, i;
 	struct kvm_memory_slot *memslot;
-	int is_dirty = 0;
+	unsigned long is_dirty = 0;
+	unsigned long *dirty_bitmap = NULL;
 
-	down_write(&kvm->slots_lock);
+	mutex_lock(&kvm->slots_lock);
 
-	r = kvm_get_dirty_log(kvm, log, &is_dirty);
-	if (r)
+	r = -EINVAL;
+	if (log->slot >= KVM_MEMORY_SLOTS)
+		goto out;
+
+	memslot = &kvm->memslots->memslots[log->slot];
+	r = -ENOENT;
+	if (!memslot->dirty_bitmap)
+		goto out;
+
+	n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
+
+	r = -ENOMEM;
+	dirty_bitmap = vmalloc(n);
+	if (!dirty_bitmap)
 		goto out;
+	memset(dirty_bitmap, 0, n);
+
+	for (i = 0; !is_dirty && i < n/sizeof(long); i++)
+		is_dirty = memslot->dirty_bitmap[i];
 
 	/* If nothing is dirty, don't bother messing with page tables. */
 	if (is_dirty) {
+		struct kvm_memslots *slots, *old_slots;
+
 		spin_lock(&kvm->mmu_lock);
 		kvm_mmu_slot_remove_write_access(kvm, log->slot);
 		spin_unlock(&kvm->mmu_lock);
-		memslot = &kvm->memslots[log->slot];
-		n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
-		memset(memslot->dirty_bitmap, 0, n);
+
+		slots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
+		if (!slots)
+			goto out_free;
+
+		memcpy(slots, kvm->memslots, sizeof(struct kvm_memslots));
+		slots->memslots[log->slot].dirty_bitmap = dirty_bitmap;
+
+		old_slots = kvm->memslots;
+		rcu_assign_pointer(kvm->memslots, slots);
+		synchronize_srcu_expedited(&kvm->srcu);
+		dirty_bitmap = old_slots->memslots[log->slot].dirty_bitmap;
+		kfree(old_slots);
 	}
+
 	r = 0;
+	if (copy_to_user(log->dirty_bitmap, dirty_bitmap, n))
+		r = -EFAULT;
+out_free:
+	vfree(dirty_bitmap);
 out:
-	up_write(&kvm->slots_lock);
+	mutex_unlock(&kvm->slots_lock);
 	return r;
 }
 
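[Editor's note: the rewritten dirty-log path above sizes the bitmap as ALIGN(npages, BITS_PER_LONG)/8 bytes and scans it one long at a time, stopping at the first nonzero word. A userspace rerun of that sizing and scan, with the slot size and bitmap contents invented for the example:]

```c
#include <stdio.h>

#define BITS_PER_LONG	(8 * sizeof(long))
#define ALIGN(x, a)	(((x) + (a) - 1) / (a) * (a))

int main(void)
{
	unsigned long npages = 100;	/* example slot: 100 pages        */
	unsigned long n = ALIGN(npages, BITS_PER_LONG) / 8;	/* bytes  */
	unsigned long bitmap[2] = { 0, 1UL << 3 };	/* page 67 dirty  */
	unsigned long is_dirty = 0;

	/* same early-exit scan as the hunk: stop at first dirty word */
	for (unsigned long i = 0; !is_dirty && i < n / sizeof(long); i++)
		is_dirty = bitmap[i];

	/* 100 pages round up to 128 bits -> 16 bytes -> two longs */
	printf("bitmap bytes = %lu, dirty = %s\n", n,
	       is_dirty ? "yes" : "no");
	return 0;
}
```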
@@ -2469,6 +2772,8 @@ long kvm_arch_vm_ioctl(struct file *filp,
2469 if (vpic) { 2772 if (vpic) {
2470 r = kvm_ioapic_init(kvm); 2773 r = kvm_ioapic_init(kvm);
2471 if (r) { 2774 if (r) {
2775 kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS,
2776 &vpic->dev);
2472 kfree(vpic); 2777 kfree(vpic);
2473 goto create_irqchip_unlock; 2778 goto create_irqchip_unlock;
2474 } 2779 }
@@ -2480,10 +2785,8 @@ long kvm_arch_vm_ioctl(struct file *filp,
2480 r = kvm_setup_default_irq_routing(kvm); 2785 r = kvm_setup_default_irq_routing(kvm);
2481 if (r) { 2786 if (r) {
2482 mutex_lock(&kvm->irq_lock); 2787 mutex_lock(&kvm->irq_lock);
2483 kfree(kvm->arch.vpic); 2788 kvm_ioapic_destroy(kvm);
2484 kfree(kvm->arch.vioapic); 2789 kvm_destroy_pic(kvm);
2485 kvm->arch.vpic = NULL;
2486 kvm->arch.vioapic = NULL;
2487 mutex_unlock(&kvm->irq_lock); 2790 mutex_unlock(&kvm->irq_lock);
2488 } 2791 }
2489 create_irqchip_unlock: 2792 create_irqchip_unlock:
@@ -2499,7 +2802,7 @@ long kvm_arch_vm_ioctl(struct file *filp,
2499 sizeof(struct kvm_pit_config))) 2802 sizeof(struct kvm_pit_config)))
2500 goto out; 2803 goto out;
2501 create_pit: 2804 create_pit:
2502 down_write(&kvm->slots_lock); 2805 mutex_lock(&kvm->slots_lock);
2503 r = -EEXIST; 2806 r = -EEXIST;
2504 if (kvm->arch.vpit) 2807 if (kvm->arch.vpit)
2505 goto create_pit_unlock; 2808 goto create_pit_unlock;
@@ -2508,7 +2811,7 @@ long kvm_arch_vm_ioctl(struct file *filp,
2508 if (kvm->arch.vpit) 2811 if (kvm->arch.vpit)
2509 r = 0; 2812 r = 0;
2510 create_pit_unlock: 2813 create_pit_unlock:
2511 up_write(&kvm->slots_lock); 2814 mutex_unlock(&kvm->slots_lock);
2512 break; 2815 break;
2513 case KVM_IRQ_LINE_STATUS: 2816 case KVM_IRQ_LINE_STATUS:
2514 case KVM_IRQ_LINE: { 2817 case KVM_IRQ_LINE: {
@@ -2725,7 +3028,7 @@ static int vcpu_mmio_write(struct kvm_vcpu *vcpu, gpa_t addr, int len,
2725 !kvm_iodevice_write(&vcpu->arch.apic->dev, addr, len, v)) 3028 !kvm_iodevice_write(&vcpu->arch.apic->dev, addr, len, v))
2726 return 0; 3029 return 0;
2727 3030
2728 return kvm_io_bus_write(&vcpu->kvm->mmio_bus, addr, len, v); 3031 return kvm_io_bus_write(vcpu->kvm, KVM_MMIO_BUS, addr, len, v);
2729} 3032}
2730 3033
2731static int vcpu_mmio_read(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *v) 3034static int vcpu_mmio_read(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *v)
@@ -2734,17 +3037,44 @@ static int vcpu_mmio_read(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *v)
2734 !kvm_iodevice_read(&vcpu->arch.apic->dev, addr, len, v)) 3037 !kvm_iodevice_read(&vcpu->arch.apic->dev, addr, len, v))
2735 return 0; 3038 return 0;
2736 3039
2737 return kvm_io_bus_read(&vcpu->kvm->mmio_bus, addr, len, v); 3040 return kvm_io_bus_read(vcpu->kvm, KVM_MMIO_BUS, addr, len, v);
2738} 3041}
2739 3042
2740static int kvm_read_guest_virt(gva_t addr, void *val, unsigned int bytes, 3043gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva, u32 *error)
2741 struct kvm_vcpu *vcpu) 3044{
3045 u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
3046 return vcpu->arch.mmu.gva_to_gpa(vcpu, gva, access, error);
3047}
3048
3049gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva, u32 *error)
3050{
3051 u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
3052 access |= PFERR_FETCH_MASK;
3053 return vcpu->arch.mmu.gva_to_gpa(vcpu, gva, access, error);
3054}
3055
3056gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva, u32 *error)
3057{
3058 u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
3059 access |= PFERR_WRITE_MASK;
3060 return vcpu->arch.mmu.gva_to_gpa(vcpu, gva, access, error);
3061}
3062
3063/* used to access any guest's mapped memory without checking CPL */
3064gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva, u32 *error)
3065{
3066 return vcpu->arch.mmu.gva_to_gpa(vcpu, gva, 0, error);
3067}
3068
3069static int kvm_read_guest_virt_helper(gva_t addr, void *val, unsigned int bytes,
3070 struct kvm_vcpu *vcpu, u32 access,
3071 u32 *error)
2742{ 3072{
2743 void *data = val; 3073 void *data = val;
2744 int r = X86EMUL_CONTINUE; 3074 int r = X86EMUL_CONTINUE;
2745 3075
2746 while (bytes) { 3076 while (bytes) {
2747 gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr); 3077 gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr, access, error);
2748 unsigned offset = addr & (PAGE_SIZE-1); 3078 unsigned offset = addr & (PAGE_SIZE-1);
2749 unsigned toread = min(bytes, (unsigned)PAGE_SIZE - offset); 3079 unsigned toread = min(bytes, (unsigned)PAGE_SIZE - offset);
2750 int ret; 3080 int ret;
@@ -2767,14 +3097,37 @@ out:
2767 return r; 3097 return r;
2768} 3098}
2769 3099
3100/* used for instruction fetching */
3101static int kvm_fetch_guest_virt(gva_t addr, void *val, unsigned int bytes,
3102 struct kvm_vcpu *vcpu, u32 *error)
3103{
3104 u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
3105 return kvm_read_guest_virt_helper(addr, val, bytes, vcpu,
3106 access | PFERR_FETCH_MASK, error);
3107}
3108
3109static int kvm_read_guest_virt(gva_t addr, void *val, unsigned int bytes,
3110 struct kvm_vcpu *vcpu, u32 *error)
3111{
3112 u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
3113 return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access,
3114 error);
3115}
3116
3117static int kvm_read_guest_virt_system(gva_t addr, void *val, unsigned int bytes,
3118 struct kvm_vcpu *vcpu, u32 *error)
3119{
3120 return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, 0, error);
3121}
3122
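
The wrappers above differ only in which page-fault error-code bits they fold into the access mask before calling mmu.gva_to_gpa(). A compact sketch of that composition; the PFERR_* values are the architectural x86 page-fault error-code bits, restated here because this hunk does not show their definitions:

#define PFERR_WRITE_MASK  (1U << 1)	/* fault was a write              */
#define PFERR_USER_MASK   (1U << 2)	/* fault came from CPL 3          */
#define PFERR_FETCH_MASK  (1U << 4)	/* fault was an instruction fetch */

/* Compose the access mask exactly as the wrappers above do. */
static unsigned int access_mask(int cpl, int write, int fetch)
{
	unsigned int access = (cpl == 3) ? PFERR_USER_MASK : 0;

	if (write)
		access |= PFERR_WRITE_MASK;
	if (fetch)
		access |= PFERR_FETCH_MASK;
	return access;	/* handed to vcpu->arch.mmu.gva_to_gpa() */
}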
2770static int kvm_write_guest_virt(gva_t addr, void *val, unsigned int bytes, 3123static int kvm_write_guest_virt(gva_t addr, void *val, unsigned int bytes,
2771 struct kvm_vcpu *vcpu) 3124 struct kvm_vcpu *vcpu, u32 *error)
2772{ 3125{
2773 void *data = val; 3126 void *data = val;
2774 int r = X86EMUL_CONTINUE; 3127 int r = X86EMUL_CONTINUE;
2775 3128
2776 while (bytes) { 3129 while (bytes) {
2777 gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr); 3130 gpa_t gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, error);
2778 unsigned offset = addr & (PAGE_SIZE-1); 3131 unsigned offset = addr & (PAGE_SIZE-1);
2779 unsigned towrite = min(bytes, (unsigned)PAGE_SIZE - offset); 3132 unsigned towrite = min(bytes, (unsigned)PAGE_SIZE - offset);
2780 int ret; 3133 int ret;
@@ -2804,6 +3157,7 @@ static int emulator_read_emulated(unsigned long addr,
2804 struct kvm_vcpu *vcpu) 3157 struct kvm_vcpu *vcpu)
2805{ 3158{
2806 gpa_t gpa; 3159 gpa_t gpa;
3160 u32 error_code;
2807 3161
2808 if (vcpu->mmio_read_completed) { 3162 if (vcpu->mmio_read_completed) {
2809 memcpy(val, vcpu->mmio_data, bytes); 3163 memcpy(val, vcpu->mmio_data, bytes);
@@ -2813,17 +3167,20 @@ static int emulator_read_emulated(unsigned long addr,
2813 return X86EMUL_CONTINUE; 3167 return X86EMUL_CONTINUE;
2814 } 3168 }
2815 3169
2816 gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr); 3170 gpa = kvm_mmu_gva_to_gpa_read(vcpu, addr, &error_code);
3171
3172 if (gpa == UNMAPPED_GVA) {
3173 kvm_inject_page_fault(vcpu, addr, error_code);
3174 return X86EMUL_PROPAGATE_FAULT;
3175 }
2817 3176
2818 /* For APIC access vmexit */ 3177 /* For APIC access vmexit */
2819 if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE) 3178 if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
2820 goto mmio; 3179 goto mmio;
2821 3180
2822 if (kvm_read_guest_virt(addr, val, bytes, vcpu) 3181 if (kvm_read_guest_virt(addr, val, bytes, vcpu, NULL)
2823 == X86EMUL_CONTINUE) 3182 == X86EMUL_CONTINUE)
2824 return X86EMUL_CONTINUE; 3183 return X86EMUL_CONTINUE;
2825 if (gpa == UNMAPPED_GVA)
2826 return X86EMUL_PROPAGATE_FAULT;
2827 3184
2828mmio: 3185mmio:
2829 /* 3186 /*
@@ -2862,11 +3219,12 @@ static int emulator_write_emulated_onepage(unsigned long addr,
2862 struct kvm_vcpu *vcpu) 3219 struct kvm_vcpu *vcpu)
2863{ 3220{
2864 gpa_t gpa; 3221 gpa_t gpa;
3222 u32 error_code;
2865 3223
2866 gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr); 3224 gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, &error_code);
2867 3225
2868 if (gpa == UNMAPPED_GVA) { 3226 if (gpa == UNMAPPED_GVA) {
2869 kvm_inject_page_fault(vcpu, addr, 2); 3227 kvm_inject_page_fault(vcpu, addr, error_code);
2870 return X86EMUL_PROPAGATE_FAULT; 3228 return X86EMUL_PROPAGATE_FAULT;
2871 } 3229 }
2872 3230
@@ -2930,7 +3288,7 @@ static int emulator_cmpxchg_emulated(unsigned long addr,
2930 char *kaddr; 3288 char *kaddr;
2931 u64 val; 3289 u64 val;
2932 3290
2933 gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr); 3291 gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, NULL);
2934 3292
2935 if (gpa == UNMAPPED_GVA || 3293 if (gpa == UNMAPPED_GVA ||
2936 (gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE) 3294 (gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
@@ -2967,35 +3325,21 @@ int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address)
2967 3325
2968int emulate_clts(struct kvm_vcpu *vcpu) 3326int emulate_clts(struct kvm_vcpu *vcpu)
2969{ 3327{
2970 kvm_x86_ops->set_cr0(vcpu, vcpu->arch.cr0 & ~X86_CR0_TS); 3328 kvm_x86_ops->set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~X86_CR0_TS));
3329 kvm_x86_ops->fpu_activate(vcpu);
2971 return X86EMUL_CONTINUE; 3330 return X86EMUL_CONTINUE;
2972} 3331}
2973 3332
2974int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long *dest) 3333int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long *dest)
2975{ 3334{
2976 struct kvm_vcpu *vcpu = ctxt->vcpu; 3335 return kvm_x86_ops->get_dr(ctxt->vcpu, dr, dest);
2977
2978 switch (dr) {
2979 case 0 ... 3:
2980 *dest = kvm_x86_ops->get_dr(vcpu, dr);
2981 return X86EMUL_CONTINUE;
2982 default:
2983 pr_unimpl(vcpu, "%s: unexpected dr %u\n", __func__, dr);
2984 return X86EMUL_UNHANDLEABLE;
2985 }
2986} 3336}
2987 3337
2988int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long value) 3338int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long value)
2989{ 3339{
2990 unsigned long mask = (ctxt->mode == X86EMUL_MODE_PROT64) ? ~0ULL : ~0U; 3340 unsigned long mask = (ctxt->mode == X86EMUL_MODE_PROT64) ? ~0ULL : ~0U;
2991 int exception;
2992 3341
2993 kvm_x86_ops->set_dr(ctxt->vcpu, dr, value & mask, &exception); 3342 return kvm_x86_ops->set_dr(ctxt->vcpu, dr, value & mask);
2994 if (exception) {
2995 /* FIXME: better handling */
2996 return X86EMUL_UNHANDLEABLE;
2997 }
2998 return X86EMUL_CONTINUE;
2999} 3343}
3000 3344
3001void kvm_report_emulation_failure(struct kvm_vcpu *vcpu, const char *context) 3345void kvm_report_emulation_failure(struct kvm_vcpu *vcpu, const char *context)
@@ -3009,7 +3353,7 @@ void kvm_report_emulation_failure(struct kvm_vcpu *vcpu, const char *context)
3009 3353
3010 rip_linear = rip + get_segment_base(vcpu, VCPU_SREG_CS); 3354 rip_linear = rip + get_segment_base(vcpu, VCPU_SREG_CS);
3011 3355
3012 kvm_read_guest_virt(rip_linear, (void *)opcodes, 4, vcpu); 3356 kvm_read_guest_virt(rip_linear, (void *)opcodes, 4, vcpu, NULL);
3013 3357
3014 printk(KERN_ERR "emulation failed (%s) rip %lx %02x %02x %02x %02x\n", 3358 printk(KERN_ERR "emulation failed (%s) rip %lx %02x %02x %02x %02x\n",
3015 context, rip, opcodes[0], opcodes[1], opcodes[2], opcodes[3]); 3359 context, rip, opcodes[0], opcodes[1], opcodes[2], opcodes[3]);
@@ -3017,7 +3361,8 @@ void kvm_report_emulation_failure(struct kvm_vcpu *vcpu, const char *context)
3017EXPORT_SYMBOL_GPL(kvm_report_emulation_failure); 3361EXPORT_SYMBOL_GPL(kvm_report_emulation_failure);
3018 3362
3019static struct x86_emulate_ops emulate_ops = { 3363static struct x86_emulate_ops emulate_ops = {
3020 .read_std = kvm_read_guest_virt, 3364 .read_std = kvm_read_guest_virt_system,
3365 .fetch = kvm_fetch_guest_virt,
3021 .read_emulated = emulator_read_emulated, 3366 .read_emulated = emulator_read_emulated,
3022 .write_emulated = emulator_write_emulated, 3367 .write_emulated = emulator_write_emulated,
3023 .cmpxchg_emulated = emulator_cmpxchg_emulated, 3368 .cmpxchg_emulated = emulator_cmpxchg_emulated,
@@ -3060,8 +3405,9 @@ int emulate_instruction(struct kvm_vcpu *vcpu,
3060 vcpu->arch.emulate_ctxt.vcpu = vcpu; 3405 vcpu->arch.emulate_ctxt.vcpu = vcpu;
3061 vcpu->arch.emulate_ctxt.eflags = kvm_get_rflags(vcpu); 3406 vcpu->arch.emulate_ctxt.eflags = kvm_get_rflags(vcpu);
3062 vcpu->arch.emulate_ctxt.mode = 3407 vcpu->arch.emulate_ctxt.mode =
3408 (!is_protmode(vcpu)) ? X86EMUL_MODE_REAL :
3063 (vcpu->arch.emulate_ctxt.eflags & X86_EFLAGS_VM) 3409 (vcpu->arch.emulate_ctxt.eflags & X86_EFLAGS_VM)
3064 ? X86EMUL_MODE_REAL : cs_l 3410 ? X86EMUL_MODE_VM86 : cs_l
3065 ? X86EMUL_MODE_PROT64 : cs_db 3411 ? X86EMUL_MODE_PROT64 : cs_db
3066 ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16; 3412 ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
3067 3413
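
The nested ternary above gains a VM86 case, so real mode and virtual-8086 mode are now distinguished. The same selection as an if-ladder, for readability (enum values are stand-ins; only the order of the checks is taken from the hunk):

enum x86emul_mode {
	X86EMUL_MODE_REAL, X86EMUL_MODE_VM86,
	X86EMUL_MODE_PROT16, X86EMUL_MODE_PROT32, X86EMUL_MODE_PROT64,
};

static enum x86emul_mode pick_mode(int protmode, int vm86, int cs_l, int cs_db)
{
	if (!protmode)
		return X86EMUL_MODE_REAL;	/* CR0.PE clear            */
	if (vm86)
		return X86EMUL_MODE_VM86;	/* EFLAGS.VM set, new case */
	if (cs_l)
		return X86EMUL_MODE_PROT64;	/* 64-bit code segment     */
	return cs_db ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
}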
@@ -3153,12 +3499,17 @@ static int pio_copy_data(struct kvm_vcpu *vcpu)
3153 gva_t q = vcpu->arch.pio.guest_gva; 3499 gva_t q = vcpu->arch.pio.guest_gva;
3154 unsigned bytes; 3500 unsigned bytes;
3155 int ret; 3501 int ret;
3502 u32 error_code;
3156 3503
3157 bytes = vcpu->arch.pio.size * vcpu->arch.pio.cur_count; 3504 bytes = vcpu->arch.pio.size * vcpu->arch.pio.cur_count;
3158 if (vcpu->arch.pio.in) 3505 if (vcpu->arch.pio.in)
3159 ret = kvm_write_guest_virt(q, p, bytes, vcpu); 3506 ret = kvm_write_guest_virt(q, p, bytes, vcpu, &error_code);
3160 else 3507 else
3161 ret = kvm_read_guest_virt(q, p, bytes, vcpu); 3508 ret = kvm_read_guest_virt(q, p, bytes, vcpu, &error_code);
3509
3510 if (ret == X86EMUL_PROPAGATE_FAULT)
3511 kvm_inject_page_fault(vcpu, q, error_code);
3512
3162 return ret; 3513 return ret;
3163} 3514}
3164 3515
@@ -3179,7 +3530,7 @@ int complete_pio(struct kvm_vcpu *vcpu)
3179 if (io->in) { 3530 if (io->in) {
3180 r = pio_copy_data(vcpu); 3531 r = pio_copy_data(vcpu);
3181 if (r) 3532 if (r)
3182 return r; 3533 goto out;
3183 } 3534 }
3184 3535
3185 delta = 1; 3536 delta = 1;
@@ -3206,7 +3557,7 @@ int complete_pio(struct kvm_vcpu *vcpu)
3206 kvm_register_write(vcpu, VCPU_REGS_RSI, val); 3557 kvm_register_write(vcpu, VCPU_REGS_RSI, val);
3207 } 3558 }
3208 } 3559 }
3209 3560out:
3210 io->count -= io->cur_count; 3561 io->count -= io->cur_count;
3211 io->cur_count = 0; 3562 io->cur_count = 0;
3212 3563
@@ -3219,11 +3570,12 @@ static int kernel_pio(struct kvm_vcpu *vcpu, void *pd)
3219 int r; 3570 int r;
3220 3571
3221 if (vcpu->arch.pio.in) 3572 if (vcpu->arch.pio.in)
3222 r = kvm_io_bus_read(&vcpu->kvm->pio_bus, vcpu->arch.pio.port, 3573 r = kvm_io_bus_read(vcpu->kvm, KVM_PIO_BUS, vcpu->arch.pio.port,
3223 vcpu->arch.pio.size, pd); 3574 vcpu->arch.pio.size, pd);
3224 else 3575 else
3225 r = kvm_io_bus_write(&vcpu->kvm->pio_bus, vcpu->arch.pio.port, 3576 r = kvm_io_bus_write(vcpu->kvm, KVM_PIO_BUS,
3226 vcpu->arch.pio.size, pd); 3577 vcpu->arch.pio.port, vcpu->arch.pio.size,
3578 pd);
3227 return r; 3579 return r;
3228} 3580}
3229 3581
@@ -3234,7 +3586,7 @@ static int pio_string_write(struct kvm_vcpu *vcpu)
3234 int i, r = 0; 3586 int i, r = 0;
3235 3587
3236 for (i = 0; i < io->cur_count; i++) { 3588 for (i = 0; i < io->cur_count; i++) {
3237 if (kvm_io_bus_write(&vcpu->kvm->pio_bus, 3589 if (kvm_io_bus_write(vcpu->kvm, KVM_PIO_BUS,
3238 io->port, io->size, pd)) { 3590 io->port, io->size, pd)) {
3239 r = -EOPNOTSUPP; 3591 r = -EOPNOTSUPP;
3240 break; 3592 break;
@@ -3248,6 +3600,8 @@ int kvm_emulate_pio(struct kvm_vcpu *vcpu, int in, int size, unsigned port)
3248{ 3600{
3249 unsigned long val; 3601 unsigned long val;
3250 3602
3603 trace_kvm_pio(!in, port, size, 1);
3604
3251 vcpu->run->exit_reason = KVM_EXIT_IO; 3605 vcpu->run->exit_reason = KVM_EXIT_IO;
3252 vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT; 3606 vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
3253 vcpu->run->io.size = vcpu->arch.pio.size = size; 3607 vcpu->run->io.size = vcpu->arch.pio.size = size;
@@ -3259,11 +3613,10 @@ int kvm_emulate_pio(struct kvm_vcpu *vcpu, int in, int size, unsigned port)
3259 vcpu->arch.pio.down = 0; 3613 vcpu->arch.pio.down = 0;
3260 vcpu->arch.pio.rep = 0; 3614 vcpu->arch.pio.rep = 0;
3261 3615
3262 trace_kvm_pio(vcpu->run->io.direction == KVM_EXIT_IO_OUT, port, 3616 if (!vcpu->arch.pio.in) {
3263 size, 1); 3617 val = kvm_register_read(vcpu, VCPU_REGS_RAX);
3264 3618 memcpy(vcpu->arch.pio_data, &val, 4);
3265 val = kvm_register_read(vcpu, VCPU_REGS_RAX); 3619 }
3266 memcpy(vcpu->arch.pio_data, &val, 4);
3267 3620
3268 if (!kernel_pio(vcpu, vcpu->arch.pio_data)) { 3621 if (!kernel_pio(vcpu, vcpu->arch.pio_data)) {
3269 complete_pio(vcpu); 3622 complete_pio(vcpu);
@@ -3280,6 +3633,8 @@ int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, int in,
3280 unsigned now, in_page; 3633 unsigned now, in_page;
3281 int ret = 0; 3634 int ret = 0;
3282 3635
3636 trace_kvm_pio(!in, port, size, count);
3637
3283 vcpu->run->exit_reason = KVM_EXIT_IO; 3638 vcpu->run->exit_reason = KVM_EXIT_IO;
3284 vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT; 3639 vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
3285 vcpu->run->io.size = vcpu->arch.pio.size = size; 3640 vcpu->run->io.size = vcpu->arch.pio.size = size;
@@ -3291,9 +3646,6 @@ int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, int in,
3291 vcpu->arch.pio.down = down; 3646 vcpu->arch.pio.down = down;
3292 vcpu->arch.pio.rep = rep; 3647 vcpu->arch.pio.rep = rep;
3293 3648
3294 trace_kvm_pio(vcpu->run->io.direction == KVM_EXIT_IO_OUT, port,
3295 size, count);
3296
3297 if (!count) { 3649 if (!count) {
3298 kvm_x86_ops->skip_emulated_instruction(vcpu); 3650 kvm_x86_ops->skip_emulated_instruction(vcpu);
3299 return 1; 3651 return 1;
@@ -3325,10 +3677,8 @@ int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, int in,
3325 if (!vcpu->arch.pio.in) { 3677 if (!vcpu->arch.pio.in) {
3326 /* string PIO write */ 3678 /* string PIO write */
3327 ret = pio_copy_data(vcpu); 3679 ret = pio_copy_data(vcpu);
3328 if (ret == X86EMUL_PROPAGATE_FAULT) { 3680 if (ret == X86EMUL_PROPAGATE_FAULT)
3329 kvm_inject_gp(vcpu, 0);
3330 return 1; 3681 return 1;
3331 }
3332 if (ret == 0 && !pio_string_write(vcpu)) { 3682 if (ret == 0 && !pio_string_write(vcpu)) {
3333 complete_pio(vcpu); 3683 complete_pio(vcpu);
3334 if (vcpu->arch.pio.count == 0) 3684 if (vcpu->arch.pio.count == 0)
@@ -3487,11 +3837,76 @@ static inline gpa_t hc_gpa(struct kvm_vcpu *vcpu, unsigned long a0,
3487 return a0 | ((gpa_t)a1 << 32); 3837 return a0 | ((gpa_t)a1 << 32);
3488} 3838}
3489 3839
3840int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
3841{
3842 u64 param, ingpa, outgpa, ret;
3843 uint16_t code, rep_idx, rep_cnt, res = HV_STATUS_SUCCESS, rep_done = 0;
3844 bool fast, longmode;
3845 int cs_db, cs_l;
3846
3847 /*
3848 * a hypercall generates #UD from non-zero CPL or real mode,
3849 * per the Hyper-V spec
3850 */
3851 if (kvm_x86_ops->get_cpl(vcpu) != 0 || !is_protmode(vcpu)) {
3852 kvm_queue_exception(vcpu, UD_VECTOR);
3853 return 0;
3854 }
3855
3856 kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
3857 longmode = is_long_mode(vcpu) && cs_l == 1;
3858
3859 if (!longmode) {
3860 param = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDX) << 32) |
3861 (kvm_register_read(vcpu, VCPU_REGS_RAX) & 0xffffffff);
3862 ingpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RBX) << 32) |
3863 (kvm_register_read(vcpu, VCPU_REGS_RCX) & 0xffffffff);
3864 outgpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDI) << 32) |
3865 (kvm_register_read(vcpu, VCPU_REGS_RSI) & 0xffffffff);
3866 }
3867#ifdef CONFIG_X86_64
3868 else {
3869 param = kvm_register_read(vcpu, VCPU_REGS_RCX);
3870 ingpa = kvm_register_read(vcpu, VCPU_REGS_RDX);
3871 outgpa = kvm_register_read(vcpu, VCPU_REGS_R8);
3872 }
3873#endif
3874
3875 code = param & 0xffff;
3876 fast = (param >> 16) & 0x1;
3877 rep_cnt = (param >> 32) & 0xfff;
3878 rep_idx = (param >> 48) & 0xfff;
3879
3880 trace_kvm_hv_hypercall(code, fast, rep_cnt, rep_idx, ingpa, outgpa);
3881
3882 switch (code) {
3883 case HV_X64_HV_NOTIFY_LONG_SPIN_WAIT:
3884 kvm_vcpu_on_spin(vcpu);
3885 break;
3886 default:
3887 res = HV_STATUS_INVALID_HYPERCALL_CODE;
3888 break;
3889 }
3890
3891 ret = res | (((u64)rep_done & 0xfff) << 32);
3892 if (longmode) {
3893 kvm_register_write(vcpu, VCPU_REGS_RAX, ret);
3894 } else {
3895 kvm_register_write(vcpu, VCPU_REGS_RDX, ret >> 32);
3896 kvm_register_write(vcpu, VCPU_REGS_RAX, ret & 0xffffffff);
3897 }
3898
3899 return 1;
3900}
3901
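
The input value decoded above packs the call code, the fast-call flag, and the rep fields into a single 64-bit value, split across register pairs outside long mode. The same mask arithmetic as a standalone decoder (a sketch; the struct is illustrative, not a kernel type):

#include <stdint.h>

struct hv_call {
	uint16_t code;		/* bits 15:0           */
	int      fast;		/* bit 16              */
	uint16_t rep_cnt;	/* bits 43:32, 12 bits */
	uint16_t rep_idx;	/* bits 59:48, 12 bits */
};

static struct hv_call decode_hv_param(uint64_t param)
{
	struct hv_call c = {
		.code    = param & 0xffff,
		.fast    = (param >> 16) & 0x1,
		.rep_cnt = (param >> 32) & 0xfff,
		.rep_idx = (param >> 48) & 0xfff,
	};
	return c;
}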
3490int kvm_emulate_hypercall(struct kvm_vcpu *vcpu) 3902int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
3491{ 3903{
3492 unsigned long nr, a0, a1, a2, a3, ret; 3904 unsigned long nr, a0, a1, a2, a3, ret;
3493 int r = 1; 3905 int r = 1;
3494 3906
3907 if (kvm_hv_hypercall_enabled(vcpu->kvm))
3908 return kvm_hv_hypercall(vcpu);
3909
3495 nr = kvm_register_read(vcpu, VCPU_REGS_RAX); 3910 nr = kvm_register_read(vcpu, VCPU_REGS_RAX);
3496 a0 = kvm_register_read(vcpu, VCPU_REGS_RBX); 3911 a0 = kvm_register_read(vcpu, VCPU_REGS_RBX);
3497 a1 = kvm_register_read(vcpu, VCPU_REGS_RCX); 3912 a1 = kvm_register_read(vcpu, VCPU_REGS_RCX);
@@ -3534,10 +3949,8 @@ EXPORT_SYMBOL_GPL(kvm_emulate_hypercall);
3534int kvm_fix_hypercall(struct kvm_vcpu *vcpu) 3949int kvm_fix_hypercall(struct kvm_vcpu *vcpu)
3535{ 3950{
3536 char instruction[3]; 3951 char instruction[3];
3537 int ret = 0;
3538 unsigned long rip = kvm_rip_read(vcpu); 3952 unsigned long rip = kvm_rip_read(vcpu);
3539 3953
3540
3541 /* 3954 /*
3542 * Blow out the MMU to ensure that no other VCPU has an active mapping 3955 * Blow out the MMU to ensure that no other VCPU has an active mapping
3543 * to ensure that the updated hypercall appears atomically across all 3956 * to ensure that the updated hypercall appears atomically across all
@@ -3546,11 +3959,8 @@ int kvm_fix_hypercall(struct kvm_vcpu *vcpu)
3546 kvm_mmu_zap_all(vcpu->kvm); 3959 kvm_mmu_zap_all(vcpu->kvm);
3547 3960
3548 kvm_x86_ops->patch_hypercall(vcpu, instruction); 3961 kvm_x86_ops->patch_hypercall(vcpu, instruction);
3549 if (emulator_write_emulated(rip, instruction, 3, vcpu)
3550 != X86EMUL_CONTINUE)
3551 ret = -EFAULT;
3552 3962
3553 return ret; 3963 return emulator_write_emulated(rip, instruction, 3, vcpu);
3554} 3964}
3555 3965
3556static u64 mk_cr_64(u64 curr_cr, u32 new_val) 3966static u64 mk_cr_64(u64 curr_cr, u32 new_val)
@@ -3583,10 +3993,9 @@ unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr)
3583{ 3993{
3584 unsigned long value; 3994 unsigned long value;
3585 3995
3586 kvm_x86_ops->decache_cr4_guest_bits(vcpu);
3587 switch (cr) { 3996 switch (cr) {
3588 case 0: 3997 case 0:
3589 value = vcpu->arch.cr0; 3998 value = kvm_read_cr0(vcpu);
3590 break; 3999 break;
3591 case 2: 4000 case 2:
3592 value = vcpu->arch.cr2; 4001 value = vcpu->arch.cr2;
@@ -3595,7 +4004,7 @@ unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr)
3595 value = vcpu->arch.cr3; 4004 value = vcpu->arch.cr3;
3596 break; 4005 break;
3597 case 4: 4006 case 4:
3598 value = vcpu->arch.cr4; 4007 value = kvm_read_cr4(vcpu);
3599 break; 4008 break;
3600 case 8: 4009 case 8:
3601 value = kvm_get_cr8(vcpu); 4010 value = kvm_get_cr8(vcpu);
@@ -3613,7 +4022,7 @@ void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long val,
3613{ 4022{
3614 switch (cr) { 4023 switch (cr) {
3615 case 0: 4024 case 0:
3616 kvm_set_cr0(vcpu, mk_cr_64(vcpu->arch.cr0, val)); 4025 kvm_set_cr0(vcpu, mk_cr_64(kvm_read_cr0(vcpu), val));
3617 *rflags = kvm_get_rflags(vcpu); 4026 *rflags = kvm_get_rflags(vcpu);
3618 break; 4027 break;
3619 case 2: 4028 case 2:
@@ -3623,7 +4032,7 @@ void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long val,
3623 kvm_set_cr3(vcpu, val); 4032 kvm_set_cr3(vcpu, val);
3624 break; 4033 break;
3625 case 4: 4034 case 4:
3626 kvm_set_cr4(vcpu, mk_cr_64(vcpu->arch.cr4, val)); 4035 kvm_set_cr4(vcpu, mk_cr_64(kvm_read_cr4(vcpu), val));
3627 break; 4036 break;
3628 case 8: 4037 case 8:
3629 kvm_set_cr8(vcpu, val & 0xfUL); 4038 kvm_set_cr8(vcpu, val & 0xfUL);
@@ -3690,6 +4099,7 @@ struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
3690 } 4099 }
3691 return best; 4100 return best;
3692} 4101}
4102EXPORT_SYMBOL_GPL(kvm_find_cpuid_entry);
3693 4103
3694int cpuid_maxphyaddr(struct kvm_vcpu *vcpu) 4104int cpuid_maxphyaddr(struct kvm_vcpu *vcpu)
3695{ 4105{
@@ -3773,14 +4183,15 @@ static void vapic_enter(struct kvm_vcpu *vcpu)
3773static void vapic_exit(struct kvm_vcpu *vcpu) 4183static void vapic_exit(struct kvm_vcpu *vcpu)
3774{ 4184{
3775 struct kvm_lapic *apic = vcpu->arch.apic; 4185 struct kvm_lapic *apic = vcpu->arch.apic;
4186 int idx;
3776 4187
3777 if (!apic || !apic->vapic_addr) 4188 if (!apic || !apic->vapic_addr)
3778 return; 4189 return;
3779 4190
3780 down_read(&vcpu->kvm->slots_lock); 4191 idx = srcu_read_lock(&vcpu->kvm->srcu);
3781 kvm_release_page_dirty(apic->vapic_page); 4192 kvm_release_page_dirty(apic->vapic_page);
3782 mark_page_dirty(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT); 4193 mark_page_dirty(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
3783 up_read(&vcpu->kvm->slots_lock); 4194 srcu_read_unlock(&vcpu->kvm->srcu, idx);
3784} 4195}
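
This hunk, and the many like it below, replace the slots_lock rwsem read side with an SRCU read-side critical section: the index returned by srcu_read_lock() must be passed back to the matching srcu_read_unlock(). The shape of the pattern in isolation (kernel-style sketch; touch_memslots() is a hypothetical stand-in for any reader of kvm->memslots):

#include <linux/kvm_host.h>
#include <linux/srcu.h>

static void touch_memslots(struct kvm *kvm) { }	/* hypothetical reader */

static void slots_read_section(struct kvm *kvm)
{
	int idx;

	idx = srcu_read_lock(&kvm->srcu);	/* was: down_read(&kvm->slots_lock) */
	touch_memslots(kvm);			/* sees one stable memslots array   */
	srcu_read_unlock(&kvm->srcu, idx);	/* was: up_read(); idx must match   */
}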
3785 4196
3786static void update_cr8_intercept(struct kvm_vcpu *vcpu) 4197static void update_cr8_intercept(struct kvm_vcpu *vcpu)
@@ -3876,12 +4287,17 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
3876 r = 0; 4287 r = 0;
3877 goto out; 4288 goto out;
3878 } 4289 }
4290 if (test_and_clear_bit(KVM_REQ_DEACTIVATE_FPU, &vcpu->requests)) {
4291 vcpu->fpu_active = 0;
4292 kvm_x86_ops->fpu_deactivate(vcpu);
4293 }
3879 } 4294 }
3880 4295
3881 preempt_disable(); 4296 preempt_disable();
3882 4297
3883 kvm_x86_ops->prepare_guest_switch(vcpu); 4298 kvm_x86_ops->prepare_guest_switch(vcpu);
3884 kvm_load_guest_fpu(vcpu); 4299 if (vcpu->fpu_active)
4300 kvm_load_guest_fpu(vcpu);
3885 4301
3886 local_irq_disable(); 4302 local_irq_disable();
3887 4303
@@ -3909,7 +4325,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
3909 kvm_lapic_sync_to_vapic(vcpu); 4325 kvm_lapic_sync_to_vapic(vcpu);
3910 } 4326 }
3911 4327
3912 up_read(&vcpu->kvm->slots_lock); 4328 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
3913 4329
3914 kvm_guest_enter(); 4330 kvm_guest_enter();
3915 4331
@@ -3951,7 +4367,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
3951 4367
3952 preempt_enable(); 4368 preempt_enable();
3953 4369
3954 down_read(&vcpu->kvm->slots_lock); 4370 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
3955 4371
3956 /* 4372 /*
3957 * Profile KVM exit RIPs: 4373 * Profile KVM exit RIPs:
@@ -3973,6 +4389,7 @@ out:
3973static int __vcpu_run(struct kvm_vcpu *vcpu) 4389static int __vcpu_run(struct kvm_vcpu *vcpu)
3974{ 4390{
3975 int r; 4391 int r;
4392 struct kvm *kvm = vcpu->kvm;
3976 4393
3977 if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED)) { 4394 if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED)) {
3978 pr_debug("vcpu %d received sipi with vector # %x\n", 4395 pr_debug("vcpu %d received sipi with vector # %x\n",
@@ -3984,7 +4401,7 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
3984 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; 4401 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
3985 } 4402 }
3986 4403
3987 down_read(&vcpu->kvm->slots_lock); 4404 vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
3988 vapic_enter(vcpu); 4405 vapic_enter(vcpu);
3989 4406
3990 r = 1; 4407 r = 1;
@@ -3992,9 +4409,9 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
3992 if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE) 4409 if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE)
3993 r = vcpu_enter_guest(vcpu); 4410 r = vcpu_enter_guest(vcpu);
3994 else { 4411 else {
3995 up_read(&vcpu->kvm->slots_lock); 4412 srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
3996 kvm_vcpu_block(vcpu); 4413 kvm_vcpu_block(vcpu);
3997 down_read(&vcpu->kvm->slots_lock); 4414 vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
3998 if (test_and_clear_bit(KVM_REQ_UNHALT, &vcpu->requests)) 4415 if (test_and_clear_bit(KVM_REQ_UNHALT, &vcpu->requests))
3999 { 4416 {
4000 switch(vcpu->arch.mp_state) { 4417 switch(vcpu->arch.mp_state) {
@@ -4029,13 +4446,13 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
4029 ++vcpu->stat.signal_exits; 4446 ++vcpu->stat.signal_exits;
4030 } 4447 }
4031 if (need_resched()) { 4448 if (need_resched()) {
4032 up_read(&vcpu->kvm->slots_lock); 4449 srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
4033 kvm_resched(vcpu); 4450 kvm_resched(vcpu);
4034 down_read(&vcpu->kvm->slots_lock); 4451 vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
4035 } 4452 }
4036 } 4453 }
4037 4454
4038 up_read(&vcpu->kvm->slots_lock); 4455 srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
4039 post_kvm_run_save(vcpu); 4456 post_kvm_run_save(vcpu);
4040 4457
4041 vapic_exit(vcpu); 4458 vapic_exit(vcpu);
@@ -4074,10 +4491,10 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
4074 vcpu->mmio_read_completed = 1; 4491 vcpu->mmio_read_completed = 1;
4075 vcpu->mmio_needed = 0; 4492 vcpu->mmio_needed = 0;
4076 4493
4077 down_read(&vcpu->kvm->slots_lock); 4494 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
4078 r = emulate_instruction(vcpu, vcpu->arch.mmio_fault_cr2, 0, 4495 r = emulate_instruction(vcpu, vcpu->arch.mmio_fault_cr2, 0,
4079 EMULTYPE_NO_DECODE); 4496 EMULTYPE_NO_DECODE);
4080 up_read(&vcpu->kvm->slots_lock); 4497 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
4081 if (r == EMULATE_DO_MMIO) { 4498 if (r == EMULATE_DO_MMIO) {
4082 /* 4499 /*
4083 * Read-modify-write. Back to userspace. 4500 * Read-modify-write. Back to userspace.
@@ -4204,13 +4621,12 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
4204 sregs->gdt.limit = dt.limit; 4621 sregs->gdt.limit = dt.limit;
4205 sregs->gdt.base = dt.base; 4622 sregs->gdt.base = dt.base;
4206 4623
4207 kvm_x86_ops->decache_cr4_guest_bits(vcpu); 4624 sregs->cr0 = kvm_read_cr0(vcpu);
4208 sregs->cr0 = vcpu->arch.cr0;
4209 sregs->cr2 = vcpu->arch.cr2; 4625 sregs->cr2 = vcpu->arch.cr2;
4210 sregs->cr3 = vcpu->arch.cr3; 4626 sregs->cr3 = vcpu->arch.cr3;
4211 sregs->cr4 = vcpu->arch.cr4; 4627 sregs->cr4 = kvm_read_cr4(vcpu);
4212 sregs->cr8 = kvm_get_cr8(vcpu); 4628 sregs->cr8 = kvm_get_cr8(vcpu);
4213 sregs->efer = vcpu->arch.shadow_efer; 4629 sregs->efer = vcpu->arch.efer;
4214 sregs->apic_base = kvm_get_apic_base(vcpu); 4630 sregs->apic_base = kvm_get_apic_base(vcpu);
4215 4631
4216 memset(sregs->interrupt_bitmap, 0, sizeof sregs->interrupt_bitmap); 4632 memset(sregs->interrupt_bitmap, 0, sizeof sregs->interrupt_bitmap);
@@ -4298,14 +4714,23 @@ static int load_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
4298{ 4714{
4299 struct descriptor_table dtable; 4715 struct descriptor_table dtable;
4300 u16 index = selector >> 3; 4716 u16 index = selector >> 3;
4717 int ret;
4718 u32 err;
4719 gva_t addr;
4301 4720
4302 get_segment_descriptor_dtable(vcpu, selector, &dtable); 4721 get_segment_descriptor_dtable(vcpu, selector, &dtable);
4303 4722
4304 if (dtable.limit < index * 8 + 7) { 4723 if (dtable.limit < index * 8 + 7) {
4305 kvm_queue_exception_e(vcpu, GP_VECTOR, selector & 0xfffc); 4724 kvm_queue_exception_e(vcpu, GP_VECTOR, selector & 0xfffc);
4306 return 1; 4725 return X86EMUL_PROPAGATE_FAULT;
4307 } 4726 }
4308 return kvm_read_guest_virt(dtable.base + index*8, seg_desc, sizeof(*seg_desc), vcpu); 4727 addr = dtable.base + index * 8;
4728 ret = kvm_read_guest_virt_system(addr, seg_desc, sizeof(*seg_desc),
4729 vcpu, &err);
4730 if (ret == X86EMUL_PROPAGATE_FAULT)
4731 kvm_inject_page_fault(vcpu, addr, err);
4732
4733 return ret;
4309} 4734}
4310 4735
4311/* allowed just for 8 bytes segments */ 4736/* allowed just for 8 bytes segments */
@@ -4319,15 +4744,23 @@ static int save_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
4319 4744
4320 if (dtable.limit < index * 8 + 7) 4745 if (dtable.limit < index * 8 + 7)
4321 return 1; 4746 return 1;
4322 return kvm_write_guest_virt(dtable.base + index*8, seg_desc, sizeof(*seg_desc), vcpu); 4747 return kvm_write_guest_virt(dtable.base + index*8, seg_desc, sizeof(*seg_desc), vcpu, NULL);
4748}
4749
4750static gpa_t get_tss_base_addr_write(struct kvm_vcpu *vcpu,
4751 struct desc_struct *seg_desc)
4752{
4753 u32 base_addr = get_desc_base(seg_desc);
4754
4755 return kvm_mmu_gva_to_gpa_write(vcpu, base_addr, NULL);
4323} 4756}
4324 4757
4325static gpa_t get_tss_base_addr(struct kvm_vcpu *vcpu, 4758static gpa_t get_tss_base_addr_read(struct kvm_vcpu *vcpu,
4326 struct desc_struct *seg_desc) 4759 struct desc_struct *seg_desc)
4327{ 4760{
4328 u32 base_addr = get_desc_base(seg_desc); 4761 u32 base_addr = get_desc_base(seg_desc);
4329 4762
4330 return vcpu->arch.mmu.gva_to_gpa(vcpu, base_addr); 4763 return kvm_mmu_gva_to_gpa_read(vcpu, base_addr, NULL);
4331} 4764}
4332 4765
4333static u16 get_segment_selector(struct kvm_vcpu *vcpu, int seg) 4766static u16 get_segment_selector(struct kvm_vcpu *vcpu, int seg)
@@ -4338,18 +4771,6 @@ static u16 get_segment_selector(struct kvm_vcpu *vcpu, int seg)
4338 return kvm_seg.selector; 4771 return kvm_seg.selector;
4339} 4772}
4340 4773
4341static int load_segment_descriptor_to_kvm_desct(struct kvm_vcpu *vcpu,
4342 u16 selector,
4343 struct kvm_segment *kvm_seg)
4344{
4345 struct desc_struct seg_desc;
4346
4347 if (load_guest_segment_descriptor(vcpu, selector, &seg_desc))
4348 return 1;
4349 seg_desct_to_kvm_desct(&seg_desc, selector, kvm_seg);
4350 return 0;
4351}
4352
4353static int kvm_load_realmode_segment(struct kvm_vcpu *vcpu, u16 selector, int seg) 4774static int kvm_load_realmode_segment(struct kvm_vcpu *vcpu, u16 selector, int seg)
4354{ 4775{
4355 struct kvm_segment segvar = { 4776 struct kvm_segment segvar = {
@@ -4367,7 +4788,7 @@ static int kvm_load_realmode_segment(struct kvm_vcpu *vcpu, u16 selector, int se
4367 .unusable = 0, 4788 .unusable = 0,
4368 }; 4789 };
4369 kvm_x86_ops->set_segment(vcpu, &segvar, seg); 4790 kvm_x86_ops->set_segment(vcpu, &segvar, seg);
4370 return 0; 4791 return X86EMUL_CONTINUE;
4371} 4792}
4372 4793
4373static int is_vm86_segment(struct kvm_vcpu *vcpu, int seg) 4794static int is_vm86_segment(struct kvm_vcpu *vcpu, int seg)
@@ -4377,24 +4798,112 @@ static int is_vm86_segment(struct kvm_vcpu *vcpu, int seg)
4377 (kvm_get_rflags(vcpu) & X86_EFLAGS_VM); 4798 (kvm_get_rflags(vcpu) & X86_EFLAGS_VM);
4378} 4799}
4379 4800
4380int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, 4801int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, int seg)
4381 int type_bits, int seg)
4382{ 4802{
4383 struct kvm_segment kvm_seg; 4803 struct kvm_segment kvm_seg;
4804 struct desc_struct seg_desc;
4805 u8 dpl, rpl, cpl;
4806 unsigned err_vec = GP_VECTOR;
4807 u32 err_code = 0;
4808 bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
4809 int ret;
4384 4810
4385 if (is_vm86_segment(vcpu, seg) || !(vcpu->arch.cr0 & X86_CR0_PE)) 4811 if (is_vm86_segment(vcpu, seg) || !is_protmode(vcpu))
4386 return kvm_load_realmode_segment(vcpu, selector, seg); 4812 return kvm_load_realmode_segment(vcpu, selector, seg);
4387 if (load_segment_descriptor_to_kvm_desct(vcpu, selector, &kvm_seg))
4388 return 1;
4389 kvm_seg.type |= type_bits;
4390 4813
4391 if (seg != VCPU_SREG_SS && seg != VCPU_SREG_CS && 4814 /* NULL selector is not valid for TR, CS and SS */
4392 seg != VCPU_SREG_LDTR) 4815 if ((seg == VCPU_SREG_CS || seg == VCPU_SREG_SS || seg == VCPU_SREG_TR)
4393 if (!kvm_seg.s) 4816 && null_selector)
4394 kvm_seg.unusable = 1; 4817 goto exception;
4818
4819 /* TR should be in GDT only */
4820 if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
4821 goto exception;
4822
4823 ret = load_guest_segment_descriptor(vcpu, selector, &seg_desc);
4824 if (ret)
4825 return ret;
4826
4827 seg_desct_to_kvm_desct(&seg_desc, selector, &kvm_seg);
4828
4829 if (null_selector) { /* for NULL selector skip all following checks */
4830 kvm_seg.unusable = 1;
4831 goto load;
4832 }
4833
4834 err_code = selector & 0xfffc;
4835 err_vec = GP_VECTOR;
4395 4836
4837 /* can't load a system descriptor into a segment selector */
4838 if (seg <= VCPU_SREG_GS && !kvm_seg.s)
4839 goto exception;
4840
4841 if (!kvm_seg.present) {
4842 err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
4843 goto exception;
4844 }
4845
4846 rpl = selector & 3;
4847 dpl = kvm_seg.dpl;
4848 cpl = kvm_x86_ops->get_cpl(vcpu);
4849
4850 switch (seg) {
4851 case VCPU_SREG_SS:
4852 /*
4853 * segment is not a writable data segment, or segment
4854 * selector's RPL != CPL, or segment descriptor's DPL != CPL
4855 */
4856 if (rpl != cpl || (kvm_seg.type & 0xa) != 0x2 || dpl != cpl)
4857 goto exception;
4858 break;
4859 case VCPU_SREG_CS:
4860 if (!(kvm_seg.type & 8))
4861 goto exception;
4862
4863 if (kvm_seg.type & 4) {
4864 /* conforming */
4865 if (dpl > cpl)
4866 goto exception;
4867 } else {
4868 /* nonconforming */
4869 if (rpl > cpl || dpl != cpl)
4870 goto exception;
4871 }
4872 /* CS(RPL) <- CPL */
4873 selector = (selector & 0xfffc) | cpl;
4874 break;
4875 case VCPU_SREG_TR:
4876 if (kvm_seg.s || (kvm_seg.type != 1 && kvm_seg.type != 9))
4877 goto exception;
4878 break;
4879 case VCPU_SREG_LDTR:
4880 if (kvm_seg.s || kvm_seg.type != 2)
4881 goto exception;
4882 break;
4883 default: /* DS, ES, FS, or GS */
4884 /*
4885 * segment is not a data or readable code segment or
4886 * ((segment is a data or nonconforming code segment)
4887 * and (both RPL and CPL > DPL))
4888 */
4889 if ((kvm_seg.type & 0xa) == 0x8 ||
4890 (((kvm_seg.type & 0xc) != 0xc) && (rpl > dpl && cpl > dpl)))
4891 goto exception;
4892 break;
4893 }
4894
4895 if (!kvm_seg.unusable && kvm_seg.s) {
4896 /* mark segment as accessed */
4897 kvm_seg.type |= 1;
4898 seg_desc.type |= 1;
4899 save_guest_segment_descriptor(vcpu, selector, &seg_desc);
4900 }
4901load:
4396 kvm_set_segment(vcpu, &kvm_seg, seg); 4902 kvm_set_segment(vcpu, &kvm_seg, seg);
4397 return 0; 4903 return X86EMUL_CONTINUE;
4904exception:
4905 kvm_queue_exception_e(vcpu, err_vec, err_code);
4906 return X86EMUL_PROPAGATE_FAULT;
4398} 4907}
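
The checks above implement the SDM's per-register segment-load rules. The SS case isolated as a pure predicate makes the bit test easier to read (a sketch; field widths follow struct kvm_segment):

#include <stdbool.h>

/*
 * Stack segment rules: a writable data segment with DPL == RPL == CPL.
 * (type & 0xa) == 0x2 requires bit 1 (writable) set and bit 3 (code) clear.
 */
static bool ss_selector_ok(unsigned type, unsigned dpl,
			   unsigned rpl, unsigned cpl)
{
	return rpl == cpl && dpl == cpl && (type & 0xa) == 0x2;
}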
4399 4908
4400static void save_state_to_tss32(struct kvm_vcpu *vcpu, 4909static void save_state_to_tss32(struct kvm_vcpu *vcpu,
@@ -4420,6 +4929,14 @@ static void save_state_to_tss32(struct kvm_vcpu *vcpu,
4420 tss->ldt_selector = get_segment_selector(vcpu, VCPU_SREG_LDTR); 4929 tss->ldt_selector = get_segment_selector(vcpu, VCPU_SREG_LDTR);
4421} 4930}
4422 4931
4932static void kvm_load_segment_selector(struct kvm_vcpu *vcpu, u16 sel, int seg)
4933{
4934 struct kvm_segment kvm_seg;
4935 kvm_get_segment(vcpu, &kvm_seg, seg);
4936 kvm_seg.selector = sel;
4937 kvm_set_segment(vcpu, &kvm_seg, seg);
4938}
4939
4423static int load_state_from_tss32(struct kvm_vcpu *vcpu, 4940static int load_state_from_tss32(struct kvm_vcpu *vcpu,
4424 struct tss_segment_32 *tss) 4941 struct tss_segment_32 *tss)
4425{ 4942{
@@ -4437,25 +4954,41 @@ static int load_state_from_tss32(struct kvm_vcpu *vcpu,
4437 kvm_register_write(vcpu, VCPU_REGS_RSI, tss->esi); 4954 kvm_register_write(vcpu, VCPU_REGS_RSI, tss->esi);
4438 kvm_register_write(vcpu, VCPU_REGS_RDI, tss->edi); 4955 kvm_register_write(vcpu, VCPU_REGS_RDI, tss->edi);
4439 4956
4440 if (kvm_load_segment_descriptor(vcpu, tss->ldt_selector, 0, VCPU_SREG_LDTR)) 4957 /*
4958 * SDM says that segment selectors are loaded before segment
4959 * descriptors
4960 */
4961 kvm_load_segment_selector(vcpu, tss->ldt_selector, VCPU_SREG_LDTR);
4962 kvm_load_segment_selector(vcpu, tss->es, VCPU_SREG_ES);
4963 kvm_load_segment_selector(vcpu, tss->cs, VCPU_SREG_CS);
4964 kvm_load_segment_selector(vcpu, tss->ss, VCPU_SREG_SS);
4965 kvm_load_segment_selector(vcpu, tss->ds, VCPU_SREG_DS);
4966 kvm_load_segment_selector(vcpu, tss->fs, VCPU_SREG_FS);
4967 kvm_load_segment_selector(vcpu, tss->gs, VCPU_SREG_GS);
4968
4969 /*
4970 * Now load segment descriptors. If a fault happens at this stage,
4971 * it is handled in the context of the new task
4972 */
4973 if (kvm_load_segment_descriptor(vcpu, tss->ldt_selector, VCPU_SREG_LDTR))
4441 return 1; 4974 return 1;
4442 4975
4443 if (kvm_load_segment_descriptor(vcpu, tss->es, 1, VCPU_SREG_ES)) 4976 if (kvm_load_segment_descriptor(vcpu, tss->es, VCPU_SREG_ES))
4444 return 1; 4977 return 1;
4445 4978
4446 if (kvm_load_segment_descriptor(vcpu, tss->cs, 9, VCPU_SREG_CS)) 4979 if (kvm_load_segment_descriptor(vcpu, tss->cs, VCPU_SREG_CS))
4447 return 1; 4980 return 1;
4448 4981
4449 if (kvm_load_segment_descriptor(vcpu, tss->ss, 1, VCPU_SREG_SS)) 4982 if (kvm_load_segment_descriptor(vcpu, tss->ss, VCPU_SREG_SS))
4450 return 1; 4983 return 1;
4451 4984
4452 if (kvm_load_segment_descriptor(vcpu, tss->ds, 1, VCPU_SREG_DS)) 4985 if (kvm_load_segment_descriptor(vcpu, tss->ds, VCPU_SREG_DS))
4453 return 1; 4986 return 1;
4454 4987
4455 if (kvm_load_segment_descriptor(vcpu, tss->fs, 1, VCPU_SREG_FS)) 4988 if (kvm_load_segment_descriptor(vcpu, tss->fs, VCPU_SREG_FS))
4456 return 1; 4989 return 1;
4457 4990
4458 if (kvm_load_segment_descriptor(vcpu, tss->gs, 1, VCPU_SREG_GS)) 4991 if (kvm_load_segment_descriptor(vcpu, tss->gs, VCPU_SREG_GS))
4459 return 1; 4992 return 1;
4460 return 0; 4993 return 0;
4461} 4994}
@@ -4495,19 +5028,33 @@ static int load_state_from_tss16(struct kvm_vcpu *vcpu,
4495 kvm_register_write(vcpu, VCPU_REGS_RSI, tss->si); 5028 kvm_register_write(vcpu, VCPU_REGS_RSI, tss->si);
4496 kvm_register_write(vcpu, VCPU_REGS_RDI, tss->di); 5029 kvm_register_write(vcpu, VCPU_REGS_RDI, tss->di);
4497 5030
4498 if (kvm_load_segment_descriptor(vcpu, tss->ldt, 0, VCPU_SREG_LDTR)) 5031 /*
5032 * SDM says that segment selectors are loaded before segment
5033 * descriptors
5034 */
5035 kvm_load_segment_selector(vcpu, tss->ldt, VCPU_SREG_LDTR);
5036 kvm_load_segment_selector(vcpu, tss->es, VCPU_SREG_ES);
5037 kvm_load_segment_selector(vcpu, tss->cs, VCPU_SREG_CS);
5038 kvm_load_segment_selector(vcpu, tss->ss, VCPU_SREG_SS);
5039 kvm_load_segment_selector(vcpu, tss->ds, VCPU_SREG_DS);
5040
5041 /*
5042 * Now load segment descriptors. If a fault happens at this stage,
5043 * it is handled in the context of the new task
5044 */
5045 if (kvm_load_segment_descriptor(vcpu, tss->ldt, VCPU_SREG_LDTR))
4499 return 1; 5046 return 1;
4500 5047
4501 if (kvm_load_segment_descriptor(vcpu, tss->es, 1, VCPU_SREG_ES)) 5048 if (kvm_load_segment_descriptor(vcpu, tss->es, VCPU_SREG_ES))
4502 return 1; 5049 return 1;
4503 5050
4504 if (kvm_load_segment_descriptor(vcpu, tss->cs, 9, VCPU_SREG_CS)) 5051 if (kvm_load_segment_descriptor(vcpu, tss->cs, VCPU_SREG_CS))
4505 return 1; 5052 return 1;
4506 5053
4507 if (kvm_load_segment_descriptor(vcpu, tss->ss, 1, VCPU_SREG_SS)) 5054 if (kvm_load_segment_descriptor(vcpu, tss->ss, VCPU_SREG_SS))
4508 return 1; 5055 return 1;
4509 5056
4510 if (kvm_load_segment_descriptor(vcpu, tss->ds, 1, VCPU_SREG_DS)) 5057 if (kvm_load_segment_descriptor(vcpu, tss->ds, VCPU_SREG_DS))
4511 return 1; 5058 return 1;
4512 return 0; 5059 return 0;
4513} 5060}
@@ -4529,7 +5076,7 @@ static int kvm_task_switch_16(struct kvm_vcpu *vcpu, u16 tss_selector,
4529 sizeof tss_segment_16)) 5076 sizeof tss_segment_16))
4530 goto out; 5077 goto out;
4531 5078
4532 if (kvm_read_guest(vcpu->kvm, get_tss_base_addr(vcpu, nseg_desc), 5079 if (kvm_read_guest(vcpu->kvm, get_tss_base_addr_read(vcpu, nseg_desc),
4533 &tss_segment_16, sizeof tss_segment_16)) 5080 &tss_segment_16, sizeof tss_segment_16))
4534 goto out; 5081 goto out;
4535 5082
@@ -4537,7 +5084,7 @@ static int kvm_task_switch_16(struct kvm_vcpu *vcpu, u16 tss_selector,
4537 tss_segment_16.prev_task_link = old_tss_sel; 5084 tss_segment_16.prev_task_link = old_tss_sel;
4538 5085
4539 if (kvm_write_guest(vcpu->kvm, 5086 if (kvm_write_guest(vcpu->kvm,
4540 get_tss_base_addr(vcpu, nseg_desc), 5087 get_tss_base_addr_write(vcpu, nseg_desc),
4541 &tss_segment_16.prev_task_link, 5088 &tss_segment_16.prev_task_link,
4542 sizeof tss_segment_16.prev_task_link)) 5089 sizeof tss_segment_16.prev_task_link))
4543 goto out; 5090 goto out;
@@ -4568,7 +5115,7 @@ static int kvm_task_switch_32(struct kvm_vcpu *vcpu, u16 tss_selector,
4568 sizeof tss_segment_32)) 5115 sizeof tss_segment_32))
4569 goto out; 5116 goto out;
4570 5117
4571 if (kvm_read_guest(vcpu->kvm, get_tss_base_addr(vcpu, nseg_desc), 5118 if (kvm_read_guest(vcpu->kvm, get_tss_base_addr_read(vcpu, nseg_desc),
4572 &tss_segment_32, sizeof tss_segment_32)) 5119 &tss_segment_32, sizeof tss_segment_32))
4573 goto out; 5120 goto out;
4574 5121
@@ -4576,7 +5123,7 @@ static int kvm_task_switch_32(struct kvm_vcpu *vcpu, u16 tss_selector,
4576 tss_segment_32.prev_task_link = old_tss_sel; 5123 tss_segment_32.prev_task_link = old_tss_sel;
4577 5124
4578 if (kvm_write_guest(vcpu->kvm, 5125 if (kvm_write_guest(vcpu->kvm,
4579 get_tss_base_addr(vcpu, nseg_desc), 5126 get_tss_base_addr_write(vcpu, nseg_desc),
4580 &tss_segment_32.prev_task_link, 5127 &tss_segment_32.prev_task_link,
4581 sizeof tss_segment_32.prev_task_link)) 5128 sizeof tss_segment_32.prev_task_link))
4582 goto out; 5129 goto out;
@@ -4599,7 +5146,7 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason)
4599 u32 old_tss_base = get_segment_base(vcpu, VCPU_SREG_TR); 5146 u32 old_tss_base = get_segment_base(vcpu, VCPU_SREG_TR);
4600 u16 old_tss_sel = get_segment_selector(vcpu, VCPU_SREG_TR); 5147 u16 old_tss_sel = get_segment_selector(vcpu, VCPU_SREG_TR);
4601 5148
4602 old_tss_base = vcpu->arch.mmu.gva_to_gpa(vcpu, old_tss_base); 5149 old_tss_base = kvm_mmu_gva_to_gpa_write(vcpu, old_tss_base, NULL);
4603 5150
4604 /* FIXME: Handle errors. Failure to read either TSS or their 5151 /* FIXME: Handle errors. Failure to read either TSS or their
4605 * descriptors should generate a pagefault. 5152 * descriptors should generate a pagefault.
@@ -4658,7 +5205,7 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason)
4658 &nseg_desc); 5205 &nseg_desc);
4659 } 5206 }
4660 5207
4661 kvm_x86_ops->set_cr0(vcpu, vcpu->arch.cr0 | X86_CR0_TS); 5208 kvm_x86_ops->set_cr0(vcpu, kvm_read_cr0(vcpu) | X86_CR0_TS);
4662 seg_desct_to_kvm_desct(&nseg_desc, tss_selector, &tr_seg); 5209 seg_desct_to_kvm_desct(&nseg_desc, tss_selector, &tr_seg);
4663 tr_seg.type = 11; 5210 tr_seg.type = 11;
4664 kvm_set_segment(vcpu, &tr_seg, VCPU_SREG_TR); 5211 kvm_set_segment(vcpu, &tr_seg, VCPU_SREG_TR);
@@ -4689,17 +5236,15 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
4689 5236
4690 kvm_set_cr8(vcpu, sregs->cr8); 5237 kvm_set_cr8(vcpu, sregs->cr8);
4691 5238
4692 mmu_reset_needed |= vcpu->arch.shadow_efer != sregs->efer; 5239 mmu_reset_needed |= vcpu->arch.efer != sregs->efer;
4693 kvm_x86_ops->set_efer(vcpu, sregs->efer); 5240 kvm_x86_ops->set_efer(vcpu, sregs->efer);
4694 kvm_set_apic_base(vcpu, sregs->apic_base); 5241 kvm_set_apic_base(vcpu, sregs->apic_base);
4695 5242
4696 kvm_x86_ops->decache_cr4_guest_bits(vcpu); 5243 mmu_reset_needed |= kvm_read_cr0(vcpu) != sregs->cr0;
4697
4698 mmu_reset_needed |= vcpu->arch.cr0 != sregs->cr0;
4699 kvm_x86_ops->set_cr0(vcpu, sregs->cr0); 5244 kvm_x86_ops->set_cr0(vcpu, sregs->cr0);
4700 vcpu->arch.cr0 = sregs->cr0; 5245 vcpu->arch.cr0 = sregs->cr0;
4701 5246
4702 mmu_reset_needed |= vcpu->arch.cr4 != sregs->cr4; 5247 mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4;
4703 kvm_x86_ops->set_cr4(vcpu, sregs->cr4); 5248 kvm_x86_ops->set_cr4(vcpu, sregs->cr4);
4704 if (!is_long_mode(vcpu) && is_pae(vcpu)) { 5249 if (!is_long_mode(vcpu) && is_pae(vcpu)) {
4705 load_pdptrs(vcpu, vcpu->arch.cr3); 5250 load_pdptrs(vcpu, vcpu->arch.cr3);
@@ -4734,7 +5279,7 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
4734 /* Older userspace won't unhalt the vcpu on reset. */ 5279 /* Older userspace won't unhalt the vcpu on reset. */
4735 if (kvm_vcpu_is_bsp(vcpu) && kvm_rip_read(vcpu) == 0xfff0 && 5280 if (kvm_vcpu_is_bsp(vcpu) && kvm_rip_read(vcpu) == 0xfff0 &&
4736 sregs->cs.selector == 0xf000 && sregs->cs.base == 0xffff0000 && 5281 sregs->cs.selector == 0xf000 && sregs->cs.base == 0xffff0000 &&
4737 !(vcpu->arch.cr0 & X86_CR0_PE)) 5282 !is_protmode(vcpu))
4738 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; 5283 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
4739 5284
4740 vcpu_put(vcpu); 5285 vcpu_put(vcpu);
@@ -4832,11 +5377,12 @@ int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
4832{ 5377{
4833 unsigned long vaddr = tr->linear_address; 5378 unsigned long vaddr = tr->linear_address;
4834 gpa_t gpa; 5379 gpa_t gpa;
5380 int idx;
4835 5381
4836 vcpu_load(vcpu); 5382 vcpu_load(vcpu);
4837 down_read(&vcpu->kvm->slots_lock); 5383 idx = srcu_read_lock(&vcpu->kvm->srcu);
4838 gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, vaddr); 5384 gpa = kvm_mmu_gva_to_gpa_system(vcpu, vaddr, NULL);
4839 up_read(&vcpu->kvm->slots_lock); 5385 srcu_read_unlock(&vcpu->kvm->srcu, idx);
4840 tr->physical_address = gpa; 5386 tr->physical_address = gpa;
4841 tr->valid = gpa != UNMAPPED_GVA; 5387 tr->valid = gpa != UNMAPPED_GVA;
4842 tr->writeable = 1; 5388 tr->writeable = 1;
@@ -4917,14 +5463,14 @@ EXPORT_SYMBOL_GPL(fx_init);
4917 5463
4918void kvm_load_guest_fpu(struct kvm_vcpu *vcpu) 5464void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
4919{ 5465{
4920 if (!vcpu->fpu_active || vcpu->guest_fpu_loaded) 5466 if (vcpu->guest_fpu_loaded)
4921 return; 5467 return;
4922 5468
4923 vcpu->guest_fpu_loaded = 1; 5469 vcpu->guest_fpu_loaded = 1;
4924 kvm_fx_save(&vcpu->arch.host_fx_image); 5470 kvm_fx_save(&vcpu->arch.host_fx_image);
4925 kvm_fx_restore(&vcpu->arch.guest_fx_image); 5471 kvm_fx_restore(&vcpu->arch.guest_fx_image);
5472 trace_kvm_fpu(1);
4926} 5473}
4927EXPORT_SYMBOL_GPL(kvm_load_guest_fpu);
4928 5474
4929void kvm_put_guest_fpu(struct kvm_vcpu *vcpu) 5475void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
4930{ 5476{
@@ -4935,8 +5481,9 @@ void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
4935 kvm_fx_save(&vcpu->arch.guest_fx_image); 5481 kvm_fx_save(&vcpu->arch.guest_fx_image);
4936 kvm_fx_restore(&vcpu->arch.host_fx_image); 5482 kvm_fx_restore(&vcpu->arch.host_fx_image);
4937 ++vcpu->stat.fpu_reload; 5483 ++vcpu->stat.fpu_reload;
5484 set_bit(KVM_REQ_DEACTIVATE_FPU, &vcpu->requests);
5485 trace_kvm_fpu(0);
4938} 5486}
4939EXPORT_SYMBOL_GPL(kvm_put_guest_fpu);
4940 5487
4941void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu) 5488void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
4942{ 5489{
@@ -5088,11 +5635,13 @@ fail:
5088 5635
5089void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) 5636void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
5090{ 5637{
5638 int idx;
5639
5091 kfree(vcpu->arch.mce_banks); 5640 kfree(vcpu->arch.mce_banks);
5092 kvm_free_lapic(vcpu); 5641 kvm_free_lapic(vcpu);
5093 down_read(&vcpu->kvm->slots_lock); 5642 idx = srcu_read_lock(&vcpu->kvm->srcu);
5094 kvm_mmu_destroy(vcpu); 5643 kvm_mmu_destroy(vcpu);
5095 up_read(&vcpu->kvm->slots_lock); 5644 srcu_read_unlock(&vcpu->kvm->srcu, idx);
5096 free_page((unsigned long)vcpu->arch.pio_data); 5645 free_page((unsigned long)vcpu->arch.pio_data);
5097} 5646}
5098 5647
@@ -5103,6 +5652,12 @@ struct kvm *kvm_arch_create_vm(void)
5103 if (!kvm) 5652 if (!kvm)
5104 return ERR_PTR(-ENOMEM); 5653 return ERR_PTR(-ENOMEM);
5105 5654
5655 kvm->arch.aliases = kzalloc(sizeof(struct kvm_mem_aliases), GFP_KERNEL);
5656 if (!kvm->arch.aliases) {
5657 kfree(kvm);
5658 return ERR_PTR(-ENOMEM);
5659 }
5660
5106 INIT_LIST_HEAD(&kvm->arch.active_mmu_pages); 5661 INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
5107 INIT_LIST_HEAD(&kvm->arch.assigned_dev_head); 5662 INIT_LIST_HEAD(&kvm->arch.assigned_dev_head);
5108 5663
@@ -5159,16 +5714,18 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
5159 put_page(kvm->arch.apic_access_page); 5714 put_page(kvm->arch.apic_access_page);
5160 if (kvm->arch.ept_identity_pagetable) 5715 if (kvm->arch.ept_identity_pagetable)
5161 put_page(kvm->arch.ept_identity_pagetable); 5716 put_page(kvm->arch.ept_identity_pagetable);
5717 cleanup_srcu_struct(&kvm->srcu);
5718 kfree(kvm->arch.aliases);
5162 kfree(kvm); 5719 kfree(kvm);
5163} 5720}
5164 5721
5165int kvm_arch_set_memory_region(struct kvm *kvm, 5722int kvm_arch_prepare_memory_region(struct kvm *kvm,
5166 struct kvm_userspace_memory_region *mem, 5723 struct kvm_memory_slot *memslot,
5167 struct kvm_memory_slot old, 5724 struct kvm_memory_slot old,
5725 struct kvm_userspace_memory_region *mem,
5168 int user_alloc) 5726 int user_alloc)
5169{ 5727{
5170 int npages = mem->memory_size >> PAGE_SHIFT; 5728 int npages = memslot->npages;
5171 struct kvm_memory_slot *memslot = &kvm->memslots[mem->slot];
5172 5729
5173 /* To keep backward compatibility with older userspace, 5730
5174 * x86 needs to handle the !user_alloc case. 5731
@@ -5188,26 +5745,35 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
5188 if (IS_ERR((void *)userspace_addr)) 5745 if (IS_ERR((void *)userspace_addr))
5189 return PTR_ERR((void *)userspace_addr); 5746 return PTR_ERR((void *)userspace_addr);
5190 5747
5191 /* set userspace_addr atomically for kvm_hva_to_rmapp */
5192 spin_lock(&kvm->mmu_lock);
5193 memslot->userspace_addr = userspace_addr; 5748 memslot->userspace_addr = userspace_addr;
5194 spin_unlock(&kvm->mmu_lock);
5195 } else {
5196 if (!old.user_alloc && old.rmap) {
5197 int ret;
5198
5199 down_write(&current->mm->mmap_sem);
5200 ret = do_munmap(current->mm, old.userspace_addr,
5201 old.npages * PAGE_SIZE);
5202 up_write(&current->mm->mmap_sem);
5203 if (ret < 0)
5204 printk(KERN_WARNING
5205 "kvm_vm_ioctl_set_memory_region: "
5206 "failed to munmap memory\n");
5207 }
5208 } 5749 }
5209 } 5750 }
5210 5751
5752
5753 return 0;
5754}
5755
5756void kvm_arch_commit_memory_region(struct kvm *kvm,
5757 struct kvm_userspace_memory_region *mem,
5758 struct kvm_memory_slot old,
5759 int user_alloc)
5760{
5761
5762 int npages = mem->memory_size >> PAGE_SHIFT;
5763
5764 if (!user_alloc && !old.user_alloc && old.rmap && !npages) {
5765 int ret;
5766
5767 down_write(&current->mm->mmap_sem);
5768 ret = do_munmap(current->mm, old.userspace_addr,
5769 old.npages * PAGE_SIZE);
5770 up_write(&current->mm->mmap_sem);
5771 if (ret < 0)
5772 printk(KERN_WARNING
5773 "kvm_vm_ioctl_set_memory_region: "
5774 "failed to munmap memory\n");
5775 }
5776
5211 spin_lock(&kvm->mmu_lock); 5777 spin_lock(&kvm->mmu_lock);
5212 if (!kvm->arch.n_requested_mmu_pages) { 5778 if (!kvm->arch.n_requested_mmu_pages) {
5213 unsigned int nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm); 5779 unsigned int nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm);
@@ -5216,8 +5782,6 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
5216 5782
5217 kvm_mmu_slot_remove_write_access(kvm, mem->slot); 5783 kvm_mmu_slot_remove_write_access(kvm, mem->slot);
5218 spin_unlock(&kvm->mmu_lock); 5784 spin_unlock(&kvm->mmu_lock);
5219
5220 return 0;
5221} 5785}
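
kvm_arch_set_memory_region() is split here into a prepare half, which may fail and runs before the new slots array is visible, and a commit half, which must not fail and runs after. A sketch of the ordering the generic code is expected to follow around the RCU publish (function signatures from this hunk; the surrounding generic-code details are assumed):

#include <linux/kvm_host.h>

static int set_region_sketch(struct kvm *kvm, struct kvm_memory_slot *slot,
			     struct kvm_memory_slot old,
			     struct kvm_userspace_memory_region *mem,
			     int user_alloc)
{
	int r;

	r = kvm_arch_prepare_memory_region(kvm, slot, old, mem, user_alloc);
	if (r)
		return r;	/* nothing published yet, easy unwind */

	/* ... generic code RCU-publishes the new memslots array here ... */

	kvm_arch_commit_memory_region(kvm, mem, old, user_alloc);
	return 0;
}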
5222 5786
5223void kvm_arch_flush_shadow(struct kvm *kvm) 5787void kvm_arch_flush_shadow(struct kvm *kvm)
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index 5eadea585d2a..2d101639bd8d 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -2,6 +2,7 @@
2#define ARCH_X86_KVM_X86_H 2#define ARCH_X86_KVM_X86_H
3 3
4#include <linux/kvm_host.h> 4#include <linux/kvm_host.h>
5#include "kvm_cache_regs.h"
5 6
6static inline void kvm_clear_exception_queue(struct kvm_vcpu *vcpu) 7static inline void kvm_clear_exception_queue(struct kvm_vcpu *vcpu)
7{ 8{
@@ -35,4 +36,33 @@ static inline bool kvm_exception_is_soft(unsigned int nr)
35struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu, 36struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
36 u32 function, u32 index); 37 u32 function, u32 index);
37 38
39static inline bool is_protmode(struct kvm_vcpu *vcpu)
40{
41 return kvm_read_cr0_bits(vcpu, X86_CR0_PE);
42}
43
44static inline int is_long_mode(struct kvm_vcpu *vcpu)
45{
46#ifdef CONFIG_X86_64
47 return vcpu->arch.efer & EFER_LMA;
48#else
49 return 0;
50#endif
51}
52
53static inline int is_pae(struct kvm_vcpu *vcpu)
54{
55 return kvm_read_cr4_bits(vcpu, X86_CR4_PAE);
56}
57
58static inline int is_pse(struct kvm_vcpu *vcpu)
59{
60 return kvm_read_cr4_bits(vcpu, X86_CR4_PSE);
61}
62
63static inline int is_paging(struct kvm_vcpu *vcpu)
64{
65 return kvm_read_cr0_bits(vcpu, X86_CR0_PG);
66}
67
38#endif 68#endif
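
The predicates gathered here read CR bits through the register cache, which is why call sites elsewhere in the patch drop their decache_cr4_guest_bits() calls. A before/after sketch at one hypothetical call site (enter_realmode_path() is a stand-in):

#include <linux/kvm_host.h>
#include "x86.h"

static void enter_realmode_path(void) { }	/* hypothetical stand-in */

static void segment_load_entry(struct kvm_vcpu *vcpu)
{
	/* before: kvm_x86_ops->decache_cr4_guest_bits(vcpu); then
	 *         if (!(vcpu->arch.cr0 & X86_CR0_PE)) ... */

	/* after: the cached read hides behind the predicate */
	if (!is_protmode(vcpu))
		enter_realmode_path();
}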
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index 2226f2c70ea3..5cb3f0f54f47 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -750,6 +750,7 @@ static void __init zone_sizes_init(void)
750 free_area_init_nodes(max_zone_pfns); 750 free_area_init_nodes(max_zone_pfns);
751} 751}
752 752
753#ifndef CONFIG_NO_BOOTMEM
753static unsigned long __init setup_node_bootmem(int nodeid, 754static unsigned long __init setup_node_bootmem(int nodeid,
754 unsigned long start_pfn, 755 unsigned long start_pfn,
755 unsigned long end_pfn, 756 unsigned long end_pfn,
@@ -766,13 +767,14 @@ static unsigned long __init setup_node_bootmem(int nodeid,
766 printk(KERN_INFO " node %d bootmap %08lx - %08lx\n", 767 printk(KERN_INFO " node %d bootmap %08lx - %08lx\n",
767 nodeid, bootmap, bootmap + bootmap_size); 768 nodeid, bootmap, bootmap + bootmap_size);
768 free_bootmem_with_active_regions(nodeid, end_pfn); 769 free_bootmem_with_active_regions(nodeid, end_pfn);
769 early_res_to_bootmem(start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT);
770 770
771 return bootmap + bootmap_size; 771 return bootmap + bootmap_size;
772} 772}
773#endif
773 774
774void __init setup_bootmem_allocator(void) 775void __init setup_bootmem_allocator(void)
775{ 776{
777#ifndef CONFIG_NO_BOOTMEM
776 int nodeid; 778 int nodeid;
777 unsigned long bootmap_size, bootmap; 779 unsigned long bootmap_size, bootmap;
778 /* 780 /*
@@ -784,11 +786,13 @@ void __init setup_bootmem_allocator(void)
 	if (bootmap == -1L)
 		panic("Cannot find bootmem map of size %ld\n", bootmap_size);
 	reserve_early(bootmap, bootmap + bootmap_size, "BOOTMAP");
+#endif
 
 	printk(KERN_INFO " mapped low ram: 0 - %08lx\n",
 		max_pfn_mapped<<PAGE_SHIFT);
 	printk(KERN_INFO " low ram: 0 - %08lx\n", max_low_pfn<<PAGE_SHIFT);
 
+#ifndef CONFIG_NO_BOOTMEM
 	for_each_online_node(nodeid) {
 		unsigned long start_pfn, end_pfn;
 
@@ -806,6 +810,7 @@ void __init setup_bootmem_allocator(void)
 		bootmap = setup_node_bootmem(nodeid, start_pfn, end_pfn,
 						 bootmap);
 	}
+#endif
 
 	after_bootmem = 1;
 }
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 69ddfbd91135..e9b040e1cde5 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -572,6 +572,7 @@ kernel_physical_mapping_init(unsigned long start,
 void __init initmem_init(unsigned long start_pfn, unsigned long end_pfn,
 				int acpi, int k8)
 {
+#ifndef CONFIG_NO_BOOTMEM
 	unsigned long bootmap_size, bootmap;
 
 	bootmap_size = bootmem_bootmap_pages(end_pfn)<<PAGE_SHIFT;
@@ -579,13 +580,15 @@ void __init initmem_init(unsigned long start_pfn, unsigned long end_pfn,
 				 PAGE_SIZE);
 	if (bootmap == -1L)
 		panic("Cannot find bootmem map of size %ld\n", bootmap_size);
+	reserve_early(bootmap, bootmap + bootmap_size, "BOOTMAP");
 	/* don't touch min_low_pfn */
 	bootmap_size = init_bootmem_node(NODE_DATA(0), bootmap >> PAGE_SHIFT,
 					 0, end_pfn);
 	e820_register_active_regions(0, start_pfn, end_pfn);
 	free_bootmem_with_active_regions(0, end_pfn);
-	early_res_to_bootmem(0, end_pfn<<PAGE_SHIFT);
-	reserve_bootmem(bootmap, bootmap_size, BOOTMEM_DEFAULT);
+#else
+	e820_register_active_regions(0, start_pfn, end_pfn);
+#endif
 }
 #endif
 
@@ -974,7 +977,7 @@ vmemmap_populate(struct page *start_page, unsigned long size, int node)
 		if (pmd_none(*pmd)) {
 			pte_t entry;
 
-			p = vmemmap_alloc_block(PMD_SIZE, node);
+			p = vmemmap_alloc_block_buf(PMD_SIZE, node);
 			if (!p)
 				return -ENOMEM;
 
diff --git a/arch/x86/mm/numa_32.c b/arch/x86/mm/numa_32.c
index b20760ca7244..809baaaf48b1 100644
--- a/arch/x86/mm/numa_32.c
+++ b/arch/x86/mm/numa_32.c
@@ -418,7 +418,10 @@ void __init initmem_init(unsigned long start_pfn, unsigned long end_pfn,
 
 	for_each_online_node(nid) {
 		memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));
+		NODE_DATA(nid)->node_id = nid;
+#ifndef CONFIG_NO_BOOTMEM
 		NODE_DATA(nid)->bdata = &bootmem_node_data[nid];
+#endif
 	}
 
 	setup_bootmem_allocator();
diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c
index 3307ea8bd43a..8948f47fde05 100644
--- a/arch/x86/mm/numa_64.c
+++ b/arch/x86/mm/numa_64.c
@@ -163,30 +163,48 @@ static void * __init early_node_mem(int nodeid, unsigned long start,
 			       unsigned long end, unsigned long size,
 			       unsigned long align)
 {
-	unsigned long mem = find_e820_area(start, end, size, align);
-	void *ptr;
+	unsigned long mem;
 
+	/*
+	 * put it on high as possible
+	 * something will go with NODE_DATA
+	 */
+	if (start < (MAX_DMA_PFN<<PAGE_SHIFT))
+		start = MAX_DMA_PFN<<PAGE_SHIFT;
+	if (start < (MAX_DMA32_PFN<<PAGE_SHIFT) &&
+	    end > (MAX_DMA32_PFN<<PAGE_SHIFT))
+		start = MAX_DMA32_PFN<<PAGE_SHIFT;
+	mem = find_e820_area(start, end, size, align);
+	if (mem != -1L)
+		return __va(mem);
+
+	/* extend the search scope */
+	end = max_pfn_mapped << PAGE_SHIFT;
+	if (end > (MAX_DMA32_PFN<<PAGE_SHIFT))
+		start = MAX_DMA32_PFN<<PAGE_SHIFT;
+	else
+		start = MAX_DMA_PFN<<PAGE_SHIFT;
+	mem = find_e820_area(start, end, size, align);
 	if (mem != -1L)
 		return __va(mem);
 
-	ptr = __alloc_bootmem_nopanic(size, align, __pa(MAX_DMA_ADDRESS));
-	if (ptr == NULL) {
-		printk(KERN_ERR "Cannot find %lu bytes in node %d\n",
+	printk(KERN_ERR "Cannot find %lu bytes in node %d\n",
 	       size, nodeid);
-		return NULL;
-	}
-	return ptr;
+
+	return NULL;
 }
 
 /* Initialize bootmem allocator for a node */
 void __init
 setup_node_bootmem(int nodeid, unsigned long start, unsigned long end)
 {
-	unsigned long start_pfn, last_pfn, bootmap_pages, bootmap_size;
+	unsigned long start_pfn, last_pfn, nodedata_phys;
 	const int pgdat_size = roundup(sizeof(pg_data_t), PAGE_SIZE);
-	unsigned long bootmap_start, nodedata_phys;
-	void *bootmap;
 	int nid;
+#ifndef CONFIG_NO_BOOTMEM
+	unsigned long bootmap_start, bootmap_pages, bootmap_size;
+	void *bootmap;
+#endif
 
 	if (!end)
 		return;
@@ -200,7 +218,7 @@ setup_node_bootmem(int nodeid, unsigned long start, unsigned long end)
 
 	start = roundup(start, ZONE_ALIGN);
 
-	printk(KERN_INFO "Bootmem setup node %d %016lx-%016lx\n", nodeid,
+	printk(KERN_INFO "Initmem setup node %d %016lx-%016lx\n", nodeid,
 	       start, end);
 
 	start_pfn = start >> PAGE_SHIFT;
@@ -211,14 +229,21 @@ setup_node_bootmem(int nodeid, unsigned long start, unsigned long end)
 	if (node_data[nodeid] == NULL)
 		return;
 	nodedata_phys = __pa(node_data[nodeid]);
+	reserve_early(nodedata_phys, nodedata_phys + pgdat_size, "NODE_DATA");
 	printk(KERN_INFO " NODE_DATA [%016lx - %016lx]\n", nodedata_phys,
 		nodedata_phys + pgdat_size - 1);
+	nid = phys_to_nid(nodedata_phys);
+	if (nid != nodeid)
+		printk(KERN_INFO " NODE_DATA(%d) on node %d\n", nodeid, nid);
 
 	memset(NODE_DATA(nodeid), 0, sizeof(pg_data_t));
-	NODE_DATA(nodeid)->bdata = &bootmem_node_data[nodeid];
+	NODE_DATA(nodeid)->node_id = nodeid;
 	NODE_DATA(nodeid)->node_start_pfn = start_pfn;
 	NODE_DATA(nodeid)->node_spanned_pages = last_pfn - start_pfn;
 
+#ifndef CONFIG_NO_BOOTMEM
+	NODE_DATA(nodeid)->bdata = &bootmem_node_data[nodeid];
+
 	/*
 	 * Find a place for the bootmem map
 	 * nodedata_phys could be on other nodes by alloc_bootmem,
@@ -227,11 +252,7 @@ setup_node_bootmem(int nodeid, unsigned long start, unsigned long end)
 	 * of alloc_bootmem, that could clash with reserved range
 	 */
 	bootmap_pages = bootmem_bootmap_pages(last_pfn - start_pfn);
-	nid = phys_to_nid(nodedata_phys);
-	if (nid == nodeid)
-		bootmap_start = roundup(nodedata_phys + pgdat_size, PAGE_SIZE);
-	else
-		bootmap_start = roundup(start, PAGE_SIZE);
+	bootmap_start = roundup(nodedata_phys + pgdat_size, PAGE_SIZE);
 	/*
 	 * SMP_CACHE_BYTES could be enough, but init_bootmem_node like
 	 * to use that to align to PAGE_SIZE
@@ -239,18 +260,13 @@ setup_node_bootmem(int nodeid, unsigned long start, unsigned long end)
 	bootmap = early_node_mem(nodeid, bootmap_start, end,
 				 bootmap_pages<<PAGE_SHIFT, PAGE_SIZE);
 	if (bootmap == NULL) {
-		if (nodedata_phys < start || nodedata_phys >= end) {
-			/*
-			 * only need to free it if it is from other node
-			 * bootmem
-			 */
-			if (nid != nodeid)
-				free_bootmem(nodedata_phys, pgdat_size);
-		}
+		free_early(nodedata_phys, nodedata_phys + pgdat_size);
 		node_data[nodeid] = NULL;
 		return;
 	}
 	bootmap_start = __pa(bootmap);
+	reserve_early(bootmap_start, bootmap_start+(bootmap_pages<<PAGE_SHIFT),
+			"BOOTMAP");
 
 	bootmap_size = init_bootmem_node(NODE_DATA(nodeid),
 					 bootmap_start >> PAGE_SHIFT,
@@ -259,31 +275,12 @@ setup_node_bootmem(int nodeid, unsigned long start, unsigned long end)
 	printk(KERN_INFO " bootmap [%016lx - %016lx] pages %lx\n",
 		 bootmap_start, bootmap_start + bootmap_size - 1,
 		 bootmap_pages);
-
-	free_bootmem_with_active_regions(nodeid, end);
-
-	/*
-	 * convert early reserve to bootmem reserve earlier
-	 * otherwise early_node_mem could use early reserved mem
-	 * on previous node
-	 */
-	early_res_to_bootmem(start, end);
-
-	/*
-	 * in some case early_node_mem could use alloc_bootmem
-	 * to get range on other node, don't reserve that again
-	 */
-	if (nid != nodeid)
-		printk(KERN_INFO " NODE_DATA(%d) on node %d\n", nodeid, nid);
-	else
-		reserve_bootmem_node(NODE_DATA(nodeid), nodedata_phys,
-					pgdat_size, BOOTMEM_DEFAULT);
 	nid = phys_to_nid(bootmap_start);
 	if (nid != nodeid)
 		printk(KERN_INFO " bootmap(%d) on node %d\n", nodeid, nid);
-	else
-		reserve_bootmem_node(NODE_DATA(nodeid), bootmap_start,
-				 bootmap_pages<<PAGE_SHIFT, BOOTMEM_DEFAULT);
+
+	free_bootmem_with_active_regions(nodeid, end);
+#endif
 
 	node_set_online(nodeid);
 }
@@ -709,6 +706,10 @@ unsigned long __init numa_free_all_bootmem(void)
 	for_each_online_node(i)
 		pages += free_all_bootmem_node(NODE_DATA(i));
 
+#ifdef CONFIG_NO_BOOTMEM
+	pages += free_all_memory_core_early(MAX_NUMNODES);
+#endif
+
 	return pages;
 }
 
diff --git a/arch/x86/pci/Makefile b/arch/x86/pci/Makefile
index 39fba37f702f..0b7d3e9593e1 100644
--- a/arch/x86/pci/Makefile
+++ b/arch/x86/pci/Makefile
@@ -14,8 +14,7 @@ obj-$(CONFIG_X86_VISWS) += visws.o
 obj-$(CONFIG_X86_NUMAQ)	+= numaq_32.o
 
 obj-y				+= common.o early.o
-obj-y				+= amd_bus.o
-obj-$(CONFIG_X86_64)		+= bus_numa.o
+obj-y				+= amd_bus.o bus_numa.o
 
 ifeq ($(CONFIG_PCI_DEBUG),y)
 EXTRA_CFLAGS += -DDEBUG
diff --git a/arch/x86/pci/amd_bus.c b/arch/x86/pci/amd_bus.c
index 95ecbd495955..fc1e8fe07e5c 100644
--- a/arch/x86/pci/amd_bus.c
+++ b/arch/x86/pci/amd_bus.c
@@ -2,11 +2,11 @@
 #include <linux/pci.h>
 #include <linux/topology.h>
 #include <linux/cpu.h>
+#include <linux/range.h>
+
 #include <asm/pci_x86.h>
 
-#ifdef CONFIG_X86_64
 #include <asm/pci-direct.h>
-#endif
 
 #include "bus_numa.h"
 
@@ -15,60 +15,6 @@
  * also get peer root bus resource for io,mmio
  */
 
-#ifdef CONFIG_X86_64
-
-#define RANGE_NUM 16
-
-struct res_range {
-	size_t start;
-	size_t end;
-};
-
-static void __init update_range(struct res_range *range, size_t start,
-				size_t end)
-{
-	int i;
-	int j;
-
-	for (j = 0; j < RANGE_NUM; j++) {
-		if (!range[j].end)
-			continue;
-
-		if (start <= range[j].start && end >= range[j].end) {
-			range[j].start = 0;
-			range[j].end = 0;
-			continue;
-		}
-
-		if (start <= range[j].start && end < range[j].end && range[j].start < end + 1) {
-			range[j].start = end + 1;
-			continue;
-		}
-
-
-		if (start > range[j].start && end >= range[j].end && range[j].end > start - 1) {
-			range[j].end = start - 1;
-			continue;
-		}
-
-		if (start > range[j].start && end < range[j].end) {
-			/* find the new spare */
-			for (i = 0; i < RANGE_NUM; i++) {
-				if (range[i].end == 0)
-					break;
-			}
-			if (i < RANGE_NUM) {
-				range[i].end = range[j].end;
-				range[i].start = end + 1;
-			} else {
-				printk(KERN_ERR "run of slot in ranges\n");
-			}
-			range[j].end = start - 1;
-			continue;
-		}
-	}
-}
-
 struct pci_hostbridge_probe {
 	u32 bus;
 	u32 slot;
@@ -111,6 +57,8 @@ static void __init get_pci_mmcfg_amd_fam10h_range(void)
 	fam10h_mmconf_end = base + (1ULL<<(segn_busn_bits + 20)) - 1;
 }
 
+#define RANGE_NUM 16
+
 /**
  * early_fill_mp_bus_to_node()
  * called before pcibios_scan_root and pci_scan_bus
@@ -130,16 +78,17 @@ static int __init early_fill_mp_bus_info(void)
 	struct pci_root_info *info;
 	u32 reg;
 	struct resource *res;
-	size_t start;
-	size_t end;
-	struct res_range range[RANGE_NUM];
+	u64 start;
+	u64 end;
+	struct range range[RANGE_NUM];
 	u64 val;
 	u32 address;
+	bool found;
 
 	if (!early_pci_allowed())
 		return -1;
 
-	found_all_numa_early = 0;
+	found = false;
 	for (i = 0; i < ARRAY_SIZE(pci_probes); i++) {
 		u32 id;
 		u16 device;
@@ -153,12 +102,12 @@ static int __init early_fill_mp_bus_info(void)
 		device = (id>>16) & 0xffff;
 		if (pci_probes[i].vendor == vendor &&
 		    pci_probes[i].device == device) {
-			found_all_numa_early = 1;
+			found = true;
 			break;
 		}
 	}
 
-	if (!found_all_numa_early)
+	if (!found)
 		return 0;
 
 	pci_root_num = 0;
@@ -196,7 +145,7 @@ static int __init early_fill_mp_bus_info(void)
 	def_link = (reg >> 8) & 0x03;
 
 	memset(range, 0, sizeof(range));
-	range[0].end = 0xffff;
+	add_range(range, RANGE_NUM, 0, 0, 0xffff + 1);
 	/* io port resource */
 	for (i = 0; i < 4; i++) {
 		reg = read_pci_config(bus, slot, 1, 0xc0 + (i << 3));
@@ -220,13 +169,13 @@ static int __init early_fill_mp_bus_info(void)
 
 		info = &pci_root_info[j];
 		printk(KERN_DEBUG "node %d link %d: io port [%llx, %llx]\n",
-		       node, link, (u64)start, (u64)end);
+		       node, link, start, end);
 
 		/* kernel only handle 16 bit only */
 		if (end > 0xffff)
 			end = 0xffff;
 		update_res(info, start, end, IORESOURCE_IO, 1);
-		update_range(range, start, end);
+		subtract_range(range, RANGE_NUM, start, end + 1);
 	}
 	/* add left over io port range to def node/link, [0, 0xffff] */
 	/* find the position */
@@ -241,29 +190,32 @@ static int __init early_fill_mp_bus_info(void)
 			if (!range[i].end)
 				continue;
 
-			update_res(info, range[i].start, range[i].end,
+			update_res(info, range[i].start, range[i].end - 1,
 				   IORESOURCE_IO, 1);
 		}
 	}
 
 	memset(range, 0, sizeof(range));
 	/* 0xfd00000000-0xffffffffff for HT */
-	range[0].end = (0xfdULL<<32) - 1;
+	end = cap_resource((0xfdULL<<32) - 1);
+	end++;
+	add_range(range, RANGE_NUM, 0, 0, end);
 
 	/* need to take out [0, TOM) for RAM*/
 	address = MSR_K8_TOP_MEM1;
 	rdmsrl(address, val);
 	end = (val & 0xffffff800000ULL);
-	printk(KERN_INFO "TOM: %016lx aka %ldM\n", end, end>>20);
+	printk(KERN_INFO "TOM: %016llx aka %lldM\n", end, end>>20);
 	if (end < (1ULL<<32))
-		update_range(range, 0, end - 1);
+		subtract_range(range, RANGE_NUM, 0, end);
 
 	/* get mmconfig */
 	get_pci_mmcfg_amd_fam10h_range();
 	/* need to take out mmconf range */
 	if (fam10h_mmconf_end) {
 		printk(KERN_DEBUG "Fam 10h mmconf [%llx, %llx]\n", fam10h_mmconf_start, fam10h_mmconf_end);
-		update_range(range, fam10h_mmconf_start, fam10h_mmconf_end);
+		subtract_range(range, RANGE_NUM, fam10h_mmconf_start,
+				fam10h_mmconf_end + 1);
 	}
 
 	/* mmio resource */
@@ -293,7 +245,7 @@ static int __init early_fill_mp_bus_info(void)
 		info = &pci_root_info[j];
 
 		printk(KERN_DEBUG "node %d link %d: mmio [%llx, %llx]",
-			node, link, (u64)start, (u64)end);
+			node, link, start, end);
 		/*
 		 * some sick allocation would have range overlap with fam10h
 		 * mmconf range, so need to update start and end.
@@ -318,14 +270,15 @@ static int __init early_fill_mp_bus_info(void)
 				/* we got a hole */
 				endx = fam10h_mmconf_start - 1;
 				update_res(info, start, endx, IORESOURCE_MEM, 0);
-				update_range(range, start, endx);
-				printk(KERN_CONT " ==> [%llx, %llx]", (u64)start, endx);
+				subtract_range(range, RANGE_NUM, start,
+						endx + 1);
+				printk(KERN_CONT " ==> [%llx, %llx]", start, endx);
 				start = fam10h_mmconf_end + 1;
 				changed = 1;
 			}
 			if (changed) {
 				if (start <= end) {
-					printk(KERN_CONT " %s [%llx, %llx]", endx?"and":"==>", (u64)start, (u64)end);
+					printk(KERN_CONT " %s [%llx, %llx]", endx ? "and" : "==>", start, end);
 				} else {
 					printk(KERN_CONT "%s\n", endx?"":" ==> none");
 					continue;
@@ -333,8 +286,9 @@ static int __init early_fill_mp_bus_info(void)
 			}
 		}
 
-		update_res(info, start, end, IORESOURCE_MEM, 1);
-		update_range(range, start, end);
+		update_res(info, cap_resource(start), cap_resource(end),
+			   IORESOURCE_MEM, 1);
+		subtract_range(range, RANGE_NUM, start, end + 1);
 		printk(KERN_CONT "\n");
 	}
 
@@ -348,8 +302,8 @@ static int __init early_fill_mp_bus_info(void)
 		address = MSR_K8_TOP_MEM2;
 		rdmsrl(address, val);
 		end = (val & 0xffffff800000ULL);
-		printk(KERN_INFO "TOM2: %016lx aka %ldM\n", end, end>>20);
-		update_range(range, 1ULL<<32, end - 1);
+		printk(KERN_INFO "TOM2: %016llx aka %lldM\n", end, end>>20);
+		subtract_range(range, RANGE_NUM, 1ULL<<32, end);
 	}
 
 	/*
@@ -368,7 +322,8 @@ static int __init early_fill_mp_bus_info(void)
 		if (!range[i].end)
 			continue;
 
-		update_res(info, range[i].start, range[i].end,
+		update_res(info, cap_resource(range[i].start),
+			   cap_resource(range[i].end - 1),
 			   IORESOURCE_MEM, 1);
 		}
 	}
@@ -384,24 +339,14 @@ static int __init early_fill_mp_bus_info(void)
 		info->bus_min, info->bus_max, info->node, info->link);
 		for (j = 0; j < res_num; j++) {
 			res = &info->res[j];
-			printk(KERN_DEBUG "bus: %02x index %x %s: [%llx, %llx]\n",
-				busnum, j,
-				(res->flags & IORESOURCE_IO)?"io port":"mmio",
-				res->start, res->end);
+			printk(KERN_DEBUG "bus: %02x index %x %pR\n",
+				busnum, j, res);
 		}
 	}
 
 	return 0;
 }
 
-#else /* !CONFIG_X86_64 */
-
-static int __init early_fill_mp_bus_info(void) { return 0; }
-
-#endif /* !CONFIG_X86_64 */
-
-/* common 32/64 bit code */
-
 #define ENABLE_CF8_EXT_CFG (1ULL << 46)
 
 static void enable_pci_io_ecs(void *unused)
diff --git a/arch/x86/pci/bus_numa.c b/arch/x86/pci/bus_numa.c
index 12d54ff3654d..64a122883896 100644
--- a/arch/x86/pci/bus_numa.c
+++ b/arch/x86/pci/bus_numa.c
@@ -1,11 +1,11 @@
 #include <linux/init.h>
 #include <linux/pci.h>
+#include <linux/range.h>
 
 #include "bus_numa.h"
 
 int pci_root_num;
 struct pci_root_info pci_root_info[PCI_ROOT_NR];
-int found_all_numa_early;
 
 void x86_pci_root_bus_res_quirks(struct pci_bus *b)
 {
@@ -21,10 +21,6 @@ void x86_pci_root_bus_res_quirks(struct pci_bus *b)
 	if (!pci_root_num)
 		return;
 
-	/* for amd, if only one root bus, don't need to do anything */
-	if (pci_root_num < 2 && found_all_numa_early)
-		return;
-
 	for (i = 0; i < pci_root_num; i++) {
 		if (pci_root_info[i].bus_min == b->number)
 			break;
@@ -52,8 +48,8 @@ void x86_pci_root_bus_res_quirks(struct pci_bus *b)
 	}
 }
 
-void __devinit update_res(struct pci_root_info *info, size_t start,
-		       size_t end, unsigned long flags, int merge)
+void __devinit update_res(struct pci_root_info *info, resource_size_t start,
+			  resource_size_t end, unsigned long flags, int merge)
 {
 	int i;
 	struct resource *res;
@@ -61,25 +57,28 @@ void __devinit update_res(struct pci_root_info *info, size_t start,
 	if (start > end)
 		return;
 
+	if (start == MAX_RESOURCE)
+		return;
+
 	if (!merge)
 		goto addit;
 
 	/* try to merge it with old one */
 	for (i = 0; i < info->res_num; i++) {
-		size_t final_start, final_end;
-		size_t common_start, common_end;
+		resource_size_t final_start, final_end;
+		resource_size_t common_start, common_end;
 
 		res = &info->res[i];
 		if (res->flags != flags)
 			continue;
 
-		common_start = max((size_t)res->start, start);
-		common_end = min((size_t)res->end, end);
+		common_start = max(res->start, start);
+		common_end = min(res->end, end);
 		if (common_start > common_end + 1)
 			continue;
 
-		final_start = min((size_t)res->start, start);
-		final_end = max((size_t)res->end, end);
+		final_start = min(res->start, start);
+		final_end = max(res->end, end);
 
 		res->start = final_start;
 		res->end = final_end;
diff --git a/arch/x86/pci/bus_numa.h b/arch/x86/pci/bus_numa.h
index 731b64ee8d84..804a4b40c31a 100644
--- a/arch/x86/pci/bus_numa.h
+++ b/arch/x86/pci/bus_numa.h
@@ -1,5 +1,5 @@
-#ifdef CONFIG_X86_64
-
+#ifndef __BUS_NUMA_H
+#define __BUS_NUMA_H
 /*
  * sub bus (transparent) will use entres from 3 to store extra from
  * root, so need to make sure we have enough slot there.
@@ -19,8 +19,7 @@ struct pci_root_info {
 #define PCI_ROOT_NR 4
 extern int pci_root_num;
 extern struct pci_root_info pci_root_info[PCI_ROOT_NR];
-extern int found_all_numa_early;
 
-extern void update_res(struct pci_root_info *info, size_t start,
-			size_t end, unsigned long flags, int merge);
+extern void update_res(struct pci_root_info *info, resource_size_t start,
+			resource_size_t end, unsigned long flags, int merge);
 #endif
diff --git a/arch/x86/pci/i386.c b/arch/x86/pci/i386.c
index 5a8fbf8d4cac..dece3eb9c906 100644
--- a/arch/x86/pci/i386.c
+++ b/arch/x86/pci/i386.c
@@ -255,10 +255,6 @@ void __init pcibios_resource_survey(void)
  */
 fs_initcall(pcibios_assign_resources);
 
-void __weak x86_pci_root_bus_res_quirks(struct pci_bus *b)
-{
-}
-
 /*
  * If we set up a device for bus mastering, we need to check the latency
  * timer as certain crappy BIOSes forget to set it properly.
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 36daccb68642..b607239c1ba8 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -50,6 +50,7 @@
 #include <asm/traps.h>
 #include <asm/setup.h>
 #include <asm/desc.h>
+#include <asm/pgalloc.h>
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
 #include <asm/reboot.h>
@@ -1094,6 +1095,12 @@ asmlinkage void __init xen_start_kernel(void)
 
 	__supported_pte_mask |= _PAGE_IOMAP;
 
+	/*
+	 * Prevent page tables from being allocated in highmem, even
+	 * if CONFIG_HIGHPTE is enabled.
+	 */
+	__userpte_alloc_gfp &= ~__GFP_HIGHMEM;
+
 	/* Work out if we support NX */
 	x86_configure_nx();
 
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index bf4cd6bfe959..f9eb7de74f42 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -1427,23 +1427,6 @@ static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd)
 #endif
 }
 
-#ifdef CONFIG_HIGHPTE
-static void *xen_kmap_atomic_pte(struct page *page, enum km_type type)
-{
-	pgprot_t prot = PAGE_KERNEL;
-
-	if (PagePinned(page))
-		prot = PAGE_KERNEL_RO;
-
-	if (0 && PageHighMem(page))
-		printk("mapping highpte %lx type %d prot %s\n",
-		       page_to_pfn(page), type,
-		       (unsigned long)pgprot_val(prot) & _PAGE_RW ? "WRITE" : "READ");
-
-	return kmap_atomic_prot(page, type, prot);
-}
-#endif
-
 #ifdef CONFIG_X86_32
 static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte)
 {
@@ -1902,10 +1885,6 @@ static const struct pv_mmu_ops xen_mmu_ops __initdata = {
 	.alloc_pmd_clone = paravirt_nop,
 	.release_pmd = xen_release_pmd_init,
 
-#ifdef CONFIG_HIGHPTE
-	.kmap_atomic_pte = xen_kmap_atomic_pte,
-#endif
-
 #ifdef CONFIG_X86_64
 	.set_pte = xen_set_pte,
 #else
diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
index 88e15deb8b82..22a2093b5862 100644
--- a/arch/x86/xen/xen-asm_32.S
+++ b/arch/x86/xen/xen-asm_32.S
@@ -90,9 +90,9 @@ ENTRY(xen_iret)
 	GET_THREAD_INFO(%eax)
 	movl TI_cpu(%eax), %eax
 	movl __per_cpu_offset(,%eax,4), %eax
-	mov per_cpu__xen_vcpu(%eax), %eax
+	mov xen_vcpu(%eax), %eax
 #else
-	movl per_cpu__xen_vcpu, %eax
+	movl xen_vcpu, %eax
 #endif
 
 	/* check IF state we're restoring */
diff --git a/crypto/ahash.c b/crypto/ahash.c
index 33a4ff45f842..b8c59b889c6e 100644
--- a/crypto/ahash.c
+++ b/crypto/ahash.c
@@ -78,7 +78,6 @@ int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
 	walk->data -= walk->offset;
 
 	if (nbytes && walk->offset & alignmask && !err) {
-		walk->offset += alignmask - 1;
 		walk->offset = ALIGN(walk->offset, alignmask + 1);
 		walk->data += walk->offset;
 
diff --git a/crypto/authenc.c b/crypto/authenc.c
index 18870906ea06..2bb7348d8d55 100644
--- a/crypto/authenc.c
+++ b/crypto/authenc.c
@@ -386,11 +386,13 @@ static int crypto_authenc_encrypt(struct aead_request *req)
 {
 	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
 	struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
-	struct ablkcipher_request *abreq = aead_request_ctx(req);
+	struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
 	struct crypto_ablkcipher *enc = ctx->enc;
 	struct scatterlist *dst = req->dst;
 	unsigned int cryptlen = req->cryptlen;
-	u8 *iv = (u8 *)(abreq + 1) + crypto_ablkcipher_reqsize(enc);
+	struct ablkcipher_request *abreq = (void *)(areq_ctx->tail
+						    + ctx->reqoff);
+	u8 *iv = (u8 *)abreq - crypto_ablkcipher_ivsize(enc);
 	int err;
 
 	ablkcipher_request_set_tfm(abreq, enc);
@@ -454,7 +456,7 @@ static int crypto_authenc_verify(struct aead_request *req,
 	unsigned int authsize;
 
 	areq_ctx->complete = authenc_verify_ahash_done;
-	areq_ctx->complete = authenc_verify_ahash_update_done;
+	areq_ctx->update_complete = authenc_verify_ahash_update_done;
 
 	ohash = authenc_ahash_fn(req, CRYPTO_TFM_REQ_MAY_SLEEP);
 	if (IS_ERR(ohash))
@@ -546,10 +548,6 @@ static int crypto_authenc_init_tfm(struct crypto_tfm *tfm)
 	if (IS_ERR(auth))
 		return PTR_ERR(auth);
 
-	ctx->reqoff = ALIGN(2 * crypto_ahash_digestsize(auth) +
-			    crypto_ahash_alignmask(auth),
-			    crypto_ahash_alignmask(auth) + 1);
-
 	enc = crypto_spawn_skcipher(&ictx->enc);
 	err = PTR_ERR(enc);
 	if (IS_ERR(enc))
@@ -558,13 +556,18 @@ static int crypto_authenc_init_tfm(struct crypto_tfm *tfm)
 	ctx->auth = auth;
 	ctx->enc = enc;
 
-	tfm->crt_aead.reqsize = max_t(unsigned int,
-				crypto_ahash_reqsize(auth) + ctx->reqoff +
-				sizeof(struct authenc_request_ctx) +
+	ctx->reqoff = ALIGN(2 * crypto_ahash_digestsize(auth) +
+			    crypto_ahash_alignmask(auth),
+			    crypto_ahash_alignmask(auth) + 1) +
+			    crypto_ablkcipher_ivsize(enc);
+
+	tfm->crt_aead.reqsize = sizeof(struct authenc_request_ctx) +
+				ctx->reqoff +
+				max_t(unsigned int,
+				crypto_ahash_reqsize(auth) +
 				sizeof(struct ahash_request),
 				sizeof(struct skcipher_givcrypt_request) +
-				crypto_ablkcipher_reqsize(enc) +
-				crypto_ablkcipher_ivsize(enc));
+				crypto_ablkcipher_reqsize(enc));
 
 	return 0;
 
diff --git a/crypto/cryptd.c b/crypto/cryptd.c
index 704c14115323..ef71318976c7 100644
--- a/crypto/cryptd.c
+++ b/crypto/cryptd.c
@@ -31,7 +31,7 @@ struct cryptd_cpu_queue {
 };
 
 struct cryptd_queue {
-	struct cryptd_cpu_queue *cpu_queue;
+	struct cryptd_cpu_queue __percpu *cpu_queue;
 };
 
 struct cryptd_instance_ctx {
diff --git a/crypto/md5.c b/crypto/md5.c
index 9fda213a592e..30efc7dad891 100644
--- a/crypto/md5.c
+++ b/crypto/md5.c
@@ -234,6 +234,7 @@ static struct shash_alg alg = {
 	.export = md5_export,
 	.import = md5_import,
 	.descsize = sizeof(struct md5_state),
+	.statesize = sizeof(struct md5_state),
 	.base = {
 		.cra_name = "md5",
 		.cra_flags = CRYPTO_ALG_TYPE_SHASH,
diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c
index 7ad48dfc12db..b8725461d887 100644
--- a/drivers/acpi/numa.c
+++ b/drivers/acpi/numa.c
@@ -279,9 +279,9 @@ int __init acpi_numa_init(void)
 	/* SRAT: Static Resource Affinity Table */
 	if (!acpi_table_parse(ACPI_SIG_SRAT, acpi_parse_srat)) {
 		acpi_table_parse_srat(ACPI_SRAT_TYPE_X2APIC_CPU_AFFINITY,
-				      acpi_parse_x2apic_affinity, NR_CPUS);
+				      acpi_parse_x2apic_affinity, nr_cpu_ids);
 		acpi_table_parse_srat(ACPI_SRAT_TYPE_CPU_AFFINITY,
-				      acpi_parse_processor_affinity, NR_CPUS);
+				      acpi_parse_processor_affinity, nr_cpu_ids);
 		ret = acpi_table_parse_srat(ACPI_SRAT_TYPE_MEMORY_AFFINITY,
 					    acpi_parse_memory_affinity,
 					    NR_NODE_MEMBLKS);
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c
index a959f6a07508..d648a9860b88 100644
--- a/drivers/acpi/processor_perflib.c
+++ b/drivers/acpi/processor_perflib.c
@@ -561,7 +561,7 @@ end:
 }
 
 int acpi_processor_preregister_performance(
-		struct acpi_processor_performance *performance)
+		struct acpi_processor_performance __percpu *performance)
 {
 	int count, count_target;
 	int retval = 0;
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index 8a713f1e9653..919a28558d36 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -11,6 +11,9 @@
 #include <asm/smp.h>
 #include "agp.h"
 
+int intel_agp_enabled;
+EXPORT_SYMBOL(intel_agp_enabled);
+
 /*
  * If we have Intel graphics, we're not going to have anything other than
  * an Intel IOMMU. So make the correct use of the PCI DMA API contingent
@@ -65,6 +68,10 @@
 #define PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB 0x0062
 #define PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB 0x006a
 #define PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG 0x0046
+#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB 0x0100
+#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_IG 0x0102
+#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB 0x0104
+#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_IG 0x0106
 
 /* cover 915 and 945 variants */
 #define IS_I915 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_E7221_HB || \
@@ -99,7 +106,9 @@
 		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB || \
 		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB || \
 		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB || \
-		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB)
+		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB || \
+		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB || \
+		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB)
 
 extern int agp_memory_reserved;
 
@@ -148,6 +157,25 @@ extern int agp_memory_reserved;
 #define INTEL_I7505_AGPCTRL 0x70
 #define INTEL_I7505_MCHCFG 0x50
 
+#define SNB_GMCH_CTRL	0x50
+#define SNB_GMCH_GMS_STOLEN_MASK	0xF8
+#define SNB_GMCH_GMS_STOLEN_32M	(1 << 3)
+#define SNB_GMCH_GMS_STOLEN_64M	(2 << 3)
+#define SNB_GMCH_GMS_STOLEN_96M	(3 << 3)
+#define SNB_GMCH_GMS_STOLEN_128M	(4 << 3)
+#define SNB_GMCH_GMS_STOLEN_160M	(5 << 3)
+#define SNB_GMCH_GMS_STOLEN_192M	(6 << 3)
+#define SNB_GMCH_GMS_STOLEN_224M	(7 << 3)
+#define SNB_GMCH_GMS_STOLEN_256M	(8 << 3)
+#define SNB_GMCH_GMS_STOLEN_288M	(9 << 3)
+#define SNB_GMCH_GMS_STOLEN_320M	(0xa << 3)
+#define SNB_GMCH_GMS_STOLEN_352M	(0xb << 3)
+#define SNB_GMCH_GMS_STOLEN_384M	(0xc << 3)
+#define SNB_GMCH_GMS_STOLEN_416M	(0xd << 3)
+#define SNB_GMCH_GMS_STOLEN_448M	(0xe << 3)
+#define SNB_GMCH_GMS_STOLEN_480M	(0xf << 3)
+#define SNB_GMCH_GMS_STOLEN_512M	(0x10 << 3)
+
 static const struct aper_size_info_fixed intel_i810_sizes[] =
 {
 	{64, 16384, 4},
@@ -294,6 +322,13 @@ static void intel_agp_insert_sg_entries(struct agp_memory *mem,
 					off_t pg_start, int mask_type)
 {
 	int i, j;
+	u32 cache_bits = 0;
+
+	if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB ||
+	    agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB)
+	{
+		cache_bits = I830_PTE_SYSTEM_CACHED;
+	}
 
 	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
 		writel(agp_bridge->driver->mask_memory(agp_bridge,
@@ -614,7 +649,7 @@ static struct aper_size_info_fixed intel_i830_sizes[] =
 static void intel_i830_init_gtt_entries(void)
 {
 	u16 gmch_ctrl;
-	int gtt_entries;
+	int gtt_entries = 0;
 	u8 rdct;
 	int local = 0;
 	static const int ddt[4] = { 0, 16, 32, 64 };
@@ -706,6 +741,63 @@ static void intel_i830_init_gtt_entries(void)
 			gtt_entries = 0;
 			break;
 		}
+	} else if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB ||
+		   agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB) {
+		/*
+		 * SandyBridge has new memory control reg at 0x50.w
+		 */
+		u16 snb_gmch_ctl;
+		pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
+		switch (snb_gmch_ctl & SNB_GMCH_GMS_STOLEN_MASK) {
+		case SNB_GMCH_GMS_STOLEN_32M:
+			gtt_entries = MB(32) - KB(size);
+			break;
+		case SNB_GMCH_GMS_STOLEN_64M:
+			gtt_entries = MB(64) - KB(size);
+			break;
+		case SNB_GMCH_GMS_STOLEN_96M:
+			gtt_entries = MB(96) - KB(size);
+			break;
+		case SNB_GMCH_GMS_STOLEN_128M:
+			gtt_entries = MB(128) - KB(size);
+			break;
+		case SNB_GMCH_GMS_STOLEN_160M:
+			gtt_entries = MB(160) - KB(size);
+			break;
+		case SNB_GMCH_GMS_STOLEN_192M:
+			gtt_entries = MB(192) - KB(size);
+			break;
+		case SNB_GMCH_GMS_STOLEN_224M:
+			gtt_entries = MB(224) - KB(size);
+			break;
+		case SNB_GMCH_GMS_STOLEN_256M:
+			gtt_entries = MB(256) - KB(size);
+			break;
+		case SNB_GMCH_GMS_STOLEN_288M:
+			gtt_entries = MB(288) - KB(size);
+			break;
+		case SNB_GMCH_GMS_STOLEN_320M:
+			gtt_entries = MB(320) - KB(size);
+			break;
+		case SNB_GMCH_GMS_STOLEN_352M:
+			gtt_entries = MB(352) - KB(size);
+			break;
+		case SNB_GMCH_GMS_STOLEN_384M:
+			gtt_entries = MB(384) - KB(size);
+			break;
+		case SNB_GMCH_GMS_STOLEN_416M:
+			gtt_entries = MB(416) - KB(size);
+			break;
+		case SNB_GMCH_GMS_STOLEN_448M:
+			gtt_entries = MB(448) - KB(size);
+			break;
+		case SNB_GMCH_GMS_STOLEN_480M:
+			gtt_entries = MB(480) - KB(size);
+			break;
+		case SNB_GMCH_GMS_STOLEN_512M:
+			gtt_entries = MB(512) - KB(size);
+			break;
+		}
 	} else {
 		switch (gmch_ctrl & I855_GMCH_GMS_MASK) {
 		case I855_GMCH_GMS_STOLEN_1M:
@@ -1357,6 +1449,8 @@ static void intel_i965_get_gtt_range(int *gtt_offset, int *gtt_size)
 	case PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB:
 	case PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB:
 	case PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB:
+	case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB:
+	case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB:
 		*gtt_offset = *gtt_size = MB(2);
 		break;
 	default:
@@ -2338,9 +2432,9 @@ static const struct intel_driver_description {
 		NULL, &intel_g33_driver },
 	{ PCI_DEVICE_ID_INTEL_Q33_HB, PCI_DEVICE_ID_INTEL_Q33_IG, 0, "Q33",
 		NULL, &intel_g33_driver },
-	{ PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB, PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG, 0, "Pineview",
+	{ PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB, PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG, 0, "GMA3150",
 		NULL, &intel_g33_driver },
-	{ PCI_DEVICE_ID_INTEL_PINEVIEW_HB, PCI_DEVICE_ID_INTEL_PINEVIEW_IG, 0, "Pineview",
+	{ PCI_DEVICE_ID_INTEL_PINEVIEW_HB, PCI_DEVICE_ID_INTEL_PINEVIEW_IG, 0, "GMA3150",
 		NULL, &intel_g33_driver },
 	{ PCI_DEVICE_ID_INTEL_GM45_HB, PCI_DEVICE_ID_INTEL_GM45_IG, 0,
 		"GM45", NULL, &intel_i965_driver },
@@ -2355,13 +2449,17 @@ static const struct intel_driver_description {
 	{ PCI_DEVICE_ID_INTEL_G41_HB, PCI_DEVICE_ID_INTEL_G41_IG, 0,
 		"G41", NULL, &intel_i965_driver },
 	{ PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG, 0,
-		"Ironlake/D", NULL, &intel_i965_driver },
+		"HD Graphics", NULL, &intel_i965_driver },
 	{ PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG, 0,
-		"Ironlake/M", NULL, &intel_i965_driver },
+		"HD Graphics", NULL, &intel_i965_driver },
 	{ PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG, 0,
-		"Ironlake/MA", NULL, &intel_i965_driver },
+		"HD Graphics", NULL, &intel_i965_driver },
 	{ PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG, 0,
-		"Ironlake/MC2", NULL, &intel_i965_driver },
+		"HD Graphics", NULL, &intel_i965_driver },
+	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_IG, 0,
+	    "Sandybridge", NULL, &intel_i965_driver },
+	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_IG, 0,
+	    "Sandybridge", NULL, &intel_i965_driver },
 	{ 0, 0, 0, NULL, NULL, NULL }
 };
 
@@ -2371,7 +2469,7 @@ static int __devinit agp_intel_probe(struct pci_dev *pdev,
 	struct agp_bridge_data *bridge;
 	u8 cap_ptr = 0;
 	struct resource *r;
-	int i;
+	int i, err;
 
 	cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
 
@@ -2463,7 +2561,10 @@ static int __devinit agp_intel_probe(struct pci_dev *pdev,
 	}
 
 	pci_set_drvdata(pdev, bridge);
-	return agp_add_bridge(bridge);
+	err = agp_add_bridge(bridge);
+	if (!err)
+		intel_agp_enabled = 1;
+	return err;
 }
 
 static void __devexit agp_intel_remove(struct pci_dev *pdev)
@@ -2568,6 +2669,8 @@ static struct pci_device_id agp_intel_pci_table[] = {
 	ID(PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB),
 	ID(PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB),
 	ID(PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB),
+	ID(PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB),
+	ID(PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB),
 	{ }
 };
 
diff --git a/drivers/char/cyclades.c b/drivers/char/cyclades.c
index 4254457d3911..b861c08263a4 100644
--- a/drivers/char/cyclades.c
+++ b/drivers/char/cyclades.c
@@ -158,13 +158,11 @@ static unsigned int cy_isa_addresses[] = {
 
 #define NR_ISA_ADDRS ARRAY_SIZE(cy_isa_addresses)
 
-#ifdef MODULE
 static long maddr[NR_CARDS];
 static int irq[NR_CARDS];
 
 module_param_array(maddr, long, NULL, 0);
 module_param_array(irq, int, NULL, 0);
-#endif
 
 #endif /* CONFIG_ISA */
 
@@ -598,12 +596,6 @@ static void cyy_chip_tx(struct cyclades_card *cinfo, unsigned int chip,
 	save_car = readb(base_addr + (CyCAR << index));
 	cy_writeb(base_addr + (CyCAR << index), save_xir);
 
-	/* validate the port# (as configured and open) */
-	if (channel + chip * 4 >= cinfo->nports) {
-		cy_writeb(base_addr + (CySRER << index),
-			  readb(base_addr + (CySRER << index)) & ~CyTxRdy);
-		goto end;
-	}
 	info = &cinfo->ports[channel + chip * 4];
 	tty = tty_port_tty_get(&info->port);
 	if (tty == NULL) {
@@ -3316,13 +3308,10 @@ static int __init cy_detect_isa(void)
 	unsigned short cy_isa_irq, nboard;
 	void __iomem *cy_isa_address;
 	unsigned short i, j, cy_isa_nchan;
-#ifdef MODULE
 	int isparam = 0;
-#endif
 
 	nboard = 0;
 
-#ifdef MODULE
 	/* Check for module parameters */
 	for (i = 0; i < NR_CARDS; i++) {
 		if (maddr[i] || i) {
@@ -3332,7 +3321,6 @@ static int __init cy_detect_isa(void)
 		if (!maddr[i])
 			break;
 	}
-#endif
 
 	/* scan the address table probing for Cyclom-Y/ISA boards */
 	for (i = 0; i < NR_ISA_ADDRS; i++) {
@@ -3353,11 +3341,10 @@ static int __init cy_detect_isa(void)
 			iounmap(cy_isa_address);
 			continue;
 		}
-#ifdef MODULE
+
 		if (isparam && i < NR_CARDS && irq[i])
 			cy_isa_irq = irq[i];
 		else
-#endif
 		/* find out the board's irq by probing */
 			cy_isa_irq = detect_isa_irq(cy_isa_address);
 		if (cy_isa_irq == 0) {
@@ -4208,3 +4195,4 @@ module_exit(cy_cleanup_module);
 MODULE_LICENSE("GPL");
 MODULE_VERSION(CY_VERSION);
 MODULE_ALIAS_CHARDEV_MAJOR(CYCLADES_MAJOR);
+MODULE_FIRMWARE("cyzfirm.bin");
diff --git a/drivers/char/hvc_console.c b/drivers/char/hvc_console.c
index 4c3b59be286a..465185fc0f52 100644
--- a/drivers/char/hvc_console.c
+++ b/drivers/char/hvc_console.c
@@ -146,7 +146,7 @@ static void hvc_console_print(struct console *co, const char *b,
 		return;
 
 	/* This console adapter was removed so it is not usable. */
-	if (vtermnos[index] < 0)
+	if (vtermnos[index] == -1)
 		return;
 
 	while (count > 0 || i > 0) {
diff --git a/drivers/char/ip2/ip2main.c b/drivers/char/ip2/ip2main.c
index 517271c762e6..911e1da6def2 100644
--- a/drivers/char/ip2/ip2main.c
+++ b/drivers/char/ip2/ip2main.c
@@ -208,6 +208,7 @@ static int DumpFifoBuffer( char __user *, int);
 
 static void ip2_init_board(int, const struct firmware *);
 static unsigned short find_eisa_board(int);
+static int ip2_setup(char *str);
 
 /***************/
 /* Static Data */
@@ -263,7 +264,7 @@ static int tracewrap;
 /* Macros */
 /**********/
 
-#if defined(MODULE) && defined(IP2DEBUG_OPEN)
+#ifdef IP2DEBUG_OPEN
 #define DBG_CNT(s) printk(KERN_DEBUG "(%s): [%x] ttyc=%d, modc=%x -> %s\n", \
 			tty->name,(pCh->flags), \
 			tty->count,/*GET_USE_COUNT(module)*/0,s)
@@ -285,7 +286,10 @@ MODULE_AUTHOR("Doug McNash");
 MODULE_DESCRIPTION("Computone IntelliPort Plus Driver");
 MODULE_LICENSE("GPL");
 
+#define MAX_CMD_STR 50
+
 static int poll_only;
+static char cmd[MAX_CMD_STR];
 
 static int Eisa_irq;
 static int Eisa_slot;
@@ -309,6 +313,8 @@ module_param_array(io, int, NULL, 0);
 MODULE_PARM_DESC(io, "I/O ports for IntelliPort Cards");
 module_param(poll_only, bool, 0);
 MODULE_PARM_DESC(poll_only, "Do not use card interrupts");
+module_param_string(ip2, cmd, MAX_CMD_STR, 0);
+MODULE_PARM_DESC(ip2, "Contains module parameter passed with 'ip2='");
 
 /* for sysfs class support */
 static struct class *ip2_class;
@@ -487,7 +493,6 @@ static const struct firmware *ip2_request_firmware(void)
 	return fw;
 }
 
-#ifndef MODULE
 /******************************************************************************
  * ip2_setup:
  *	str: kernel command line string
@@ -531,7 +536,6 @@ static int __init ip2_setup(char *str)
 	return 1;
 }
 __setup("ip2=", ip2_setup);
-#endif /* !MODULE */
 
 static int __init ip2_loadmain(void)
 {
@@ -539,14 +543,20 @@ static int __init ip2_loadmain(void)
 	int err = 0;
 	i2eBordStrPtr pB = NULL;
 	int rc = -1;
-	struct pci_dev *pdev = NULL;
 	const struct firmware *fw = NULL;
+	char *str;
+
+	str = cmd;
 
 	if (poll_only) {
 		/* Hard lock the interrupts to zero */
 		irq[0] = irq[1] = irq[2] = irq[3] = poll_only = 0;
 	}
 
+	/* Check module parameter with 'ip2=' has been passed or not */
+	if (!poll_only && (!strncmp(str, "ip2=", 4)))
+		ip2_setup(str);
+
 	ip2trace(ITRC_NO_PORT, ITRC_INIT, ITRC_ENTER, 0);
 
 	/* process command line arguments to modprobe or
@@ -612,6 +622,7 @@ static int __init ip2_loadmain(void)
 	case PCI:
 #ifdef CONFIG_PCI
 	{
+		struct pci_dev *pdev = NULL;
 		u32 addr;
 		int status;
 
@@ -626,7 +637,7 @@ static int __init ip2_loadmain(void)
 
 		if (pci_enable_device(pdev)) {
 			dev_err(&pdev->dev, "can't enable device\n");
-			break;
+			goto out;
 		}
 		ip2config.type[i] = PCI;
 		ip2config.pci_dev[i] = pci_dev_get(pdev);
@@ -638,6 +649,8 @@ static int __init ip2_loadmain(void)
 			dev_err(&pdev->dev, "I/O address error\n");
 
 		ip2config.irq[i] = pdev->irq;
+out:
+		pci_dev_put(pdev);
 	}
642#else 655#else
643 printk(KERN_ERR "IP2: PCI card specified but PCI " 656 printk(KERN_ERR "IP2: PCI card specified but PCI "
@@ -656,7 +669,6 @@ static int __init ip2_loadmain(void)
656 break; 669 break;
657 } /* switch */ 670 } /* switch */
658 } /* for */ 671 } /* for */
659 pci_dev_put(pdev);
660 672
661 for (i = 0; i < IP2_MAX_BOARDS; ++i) { 673 for (i = 0; i < IP2_MAX_BOARDS; ++i) {
662 if (ip2config.addr[i]) { 674 if (ip2config.addr[i]) {
@@ -3197,3 +3209,5 @@ static struct pci_device_id ip2main_pci_tbl[] __devinitdata = {
3197}; 3209};
3198 3210
3199MODULE_DEVICE_TABLE(pci, ip2main_pci_tbl); 3211MODULE_DEVICE_TABLE(pci, ip2main_pci_tbl);
3212
3213MODULE_FIRMWARE("intelliport2.bin");
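The ip2 change above drops the #ifndef MODULE guard around ip2_setup() and instead captures the option string with module_param_string(), so one parser serves both the kernel command line and modprobe. A minimal sketch of that pattern follows; the ex_ names, the buffer size, and the parser body are illustrative assumptions, not the driver's actual code:

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>

#define EX_CMD_LEN 64			/* assumed buffer size */
static char ex_cmd[EX_CMD_LEN];		/* filled by "modprobe ex ex=..." */
module_param_string(ex, ex_cmd, EX_CMD_LEN, 0);
MODULE_PARM_DESC(ex, "option string, same syntax as the ex= boot option");

static int __init ex_setup(char *str)
{
	pr_info("ex: options '%s'\n", str);	/* real parsing goes here */
	return 1;
}
__setup("ex=", ex_setup);		/* built-in: kernel command line */

static int __init ex_init(void)
{
	if (ex_cmd[0])			/* module: reuse the same parser */
		ex_setup(ex_cmd);
	return 0;
}
module_init(ex_init);
MODULE_LICENSE("GPL");

Since __setup() is a no-op in module builds, both entry points can coexist without guards, which is what lets the patch remove the #ifndef MODULE/#endif pair.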
diff --git a/drivers/char/isicom.c b/drivers/char/isicom.c
index 300d5bd6cd06..be2e8f9a27c3 100644
--- a/drivers/char/isicom.c
+++ b/drivers/char/isicom.c
@@ -113,6 +113,8 @@
113 * 64-bit verification 113 * 64-bit verification
114 */ 114 */
115 115
116#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
117
116#include <linux/module.h> 118#include <linux/module.h>
117#include <linux/firmware.h> 119#include <linux/firmware.h>
118#include <linux/kernel.h> 120#include <linux/kernel.h>
@@ -140,7 +142,6 @@
140#define InterruptTheCard(base) outw(0, (base) + 0xc) 142#define InterruptTheCard(base) outw(0, (base) + 0xc)
141#define ClearInterrupt(base) inw((base) + 0x0a) 143#define ClearInterrupt(base) inw((base) + 0x0a)
142 144
143#define pr_dbg(str...) pr_debug("ISICOM: " str)
144#ifdef DEBUG 145#ifdef DEBUG
145#define isicom_paranoia_check(a, b, c) __isicom_paranoia_check((a), (b), (c)) 146#define isicom_paranoia_check(a, b, c) __isicom_paranoia_check((a), (b), (c))
146#else 147#else
@@ -249,8 +250,7 @@ static int lock_card(struct isi_board *card)
249 spin_unlock_irqrestore(&card->card_lock, card->flags); 250 spin_unlock_irqrestore(&card->card_lock, card->flags);
250 msleep(10); 251 msleep(10);
251 } 252 }
252 printk(KERN_WARNING "ISICOM: Failed to lock Card (0x%lx)\n", 253 pr_warning("Failed to lock Card (0x%lx)\n", card->base);
253 card->base);
254 254
255 return 0; /* Failed to acquire the card! */ 255 return 0; /* Failed to acquire the card! */
256} 256}
@@ -379,13 +379,13 @@ static inline int __isicom_paranoia_check(struct isi_port const *port,
379 char *name, const char *routine) 379 char *name, const char *routine)
380{ 380{
381 if (!port) { 381 if (!port) {
382 printk(KERN_WARNING "ISICOM: Warning: bad isicom magic for " 382 pr_warning("Warning: bad isicom magic for dev %s in %s.\n",
383 "dev %s in %s.\n", name, routine); 383 name, routine);
384 return 1; 384 return 1;
385 } 385 }
386 if (port->magic != ISICOM_MAGIC) { 386 if (port->magic != ISICOM_MAGIC) {
387 printk(KERN_WARNING "ISICOM: Warning: NULL isicom port for " 387 pr_warning("Warning: NULL isicom port for dev %s in %s.\n",
388 "dev %s in %s.\n", name, routine); 388 name, routine);
389 return 1; 389 return 1;
390 } 390 }
391 391
@@ -450,8 +450,8 @@ static void isicom_tx(unsigned long _data)
450 if (!(inw(base + 0x02) & (1 << port->channel))) 450 if (!(inw(base + 0x02) & (1 << port->channel)))
451 continue; 451 continue;
452 452
453 pr_dbg("txing %d bytes, port%d.\n", txcount, 453 pr_debug("txing %d bytes, port%d.\n",
454 port->channel + 1); 454 txcount, port->channel + 1);
455 outw((port->channel << isi_card[card].shift_count) | txcount, 455 outw((port->channel << isi_card[card].shift_count) | txcount,
456 base); 456 base);
457 residue = NO; 457 residue = NO;
@@ -547,8 +547,8 @@ static irqreturn_t isicom_interrupt(int irq, void *dev_id)
547 byte_count = header & 0xff; 547 byte_count = header & 0xff;
548 548
549 if (channel + 1 > card->port_count) { 549 if (channel + 1 > card->port_count) {
550 printk(KERN_WARNING "ISICOM: isicom_interrupt(0x%lx): " 550 pr_warning("%s(0x%lx): %d(channel) > port_count.\n",
551 "%d(channel) > port_count.\n", base, channel+1); 551 __func__, base, channel+1);
552 outw(0x0000, base+0x04); /* enable interrupts */ 552 outw(0x0000, base+0x04); /* enable interrupts */
553 spin_unlock(&card->card_lock); 553 spin_unlock(&card->card_lock);
554 return IRQ_HANDLED; 554 return IRQ_HANDLED;
@@ -582,14 +582,15 @@ static irqreturn_t isicom_interrupt(int irq, void *dev_id)
582 if (port->status & ISI_DCD) { 582 if (port->status & ISI_DCD) {
583 if (!(header & ISI_DCD)) { 583 if (!(header & ISI_DCD)) {
584 /* Carrier has been lost */ 584 /* Carrier has been lost */
585 pr_dbg("interrupt: DCD->low.\n" 585 pr_debug("%s: DCD->low.\n",
586 ); 586 __func__);
587 port->status &= ~ISI_DCD; 587 port->status &= ~ISI_DCD;
588 tty_hangup(tty); 588 tty_hangup(tty);
589 } 589 }
590 } else if (header & ISI_DCD) { 590 } else if (header & ISI_DCD) {
591 /* Carrier has been detected */ 591 /* Carrier has been detected */
592 pr_dbg("interrupt: DCD->high.\n"); 592 pr_debug("%s: DCD->high.\n",
593 __func__);
593 port->status |= ISI_DCD; 594 port->status |= ISI_DCD;
594 wake_up_interruptible(&port->port.open_wait); 595 wake_up_interruptible(&port->port.open_wait);
595 } 596 }
@@ -641,17 +642,19 @@ static irqreturn_t isicom_interrupt(int irq, void *dev_id)
641 break; 642 break;
642 643
643 case 2: /* Statistics */ 644 case 2: /* Statistics */
644 pr_dbg("isicom_interrupt: stats!!!.\n"); 645 pr_debug("%s: stats!!!\n", __func__);
645 break; 646 break;
646 647
647 default: 648 default:
648 pr_dbg("Intr: Unknown code in status packet.\n"); 649 pr_debug("%s: Unknown code in status packet.\n",
650 __func__);
649 break; 651 break;
650 } 652 }
651 } else { /* Data Packet */ 653 } else { /* Data Packet */
652 654
653 count = tty_prepare_flip_string(tty, &rp, byte_count & ~1); 655 count = tty_prepare_flip_string(tty, &rp, byte_count & ~1);
654 pr_dbg("Intr: Can rx %d of %d bytes.\n", count, byte_count); 656 pr_debug("%s: Can rx %d of %d bytes.\n",
657 __func__, count, byte_count);
655 word_count = count >> 1; 658 word_count = count >> 1;
656 insw(base, rp, word_count); 659 insw(base, rp, word_count);
657 byte_count -= (word_count << 1); 660 byte_count -= (word_count << 1);
@@ -661,8 +664,8 @@ static irqreturn_t isicom_interrupt(int irq, void *dev_id)
661 byte_count -= 2; 664 byte_count -= 2;
662 } 665 }
663 if (byte_count > 0) { 666 if (byte_count > 0) {
664 pr_dbg("Intr(0x%lx:%d): Flip buffer overflow! dropping " 667 pr_debug("%s(0x%lx:%d): Flip buffer overflow! dropping bytes...\n",
665 "bytes...\n", base, channel + 1); 668 __func__, base, channel + 1);
666 /* drain out unread xtra data */ 669 /* drain out unread xtra data */
667 while (byte_count > 0) { 670 while (byte_count > 0) {
668 inw(base); 671 inw(base);
@@ -888,8 +891,8 @@ static void isicom_shutdown_port(struct isi_port *port)
888 struct isi_board *card = port->card; 891 struct isi_board *card = port->card;
889 892
890 if (--card->count < 0) { 893 if (--card->count < 0) {
891 pr_dbg("isicom_shutdown_port: bad board(0x%lx) count %d.\n", 894 pr_debug("%s: bad board(0x%lx) count %d.\n",
892 card->base, card->count); 895 __func__, card->base, card->count);
893 card->count = 0; 896 card->count = 0;
894 } 897 }
895 /* last port was closed, shutdown that board too */ 898 /* last port was closed, shutdown that board too */
@@ -1681,13 +1684,13 @@ static int __init isicom_init(void)
1681 1684
1682 retval = tty_register_driver(isicom_normal); 1685 retval = tty_register_driver(isicom_normal);
1683 if (retval) { 1686 if (retval) {
1684 pr_dbg("Couldn't register the dialin driver\n"); 1687 pr_debug("Couldn't register the dialin driver\n");
1685 goto err_puttty; 1688 goto err_puttty;
1686 } 1689 }
1687 1690
1688 retval = pci_register_driver(&isicom_driver); 1691 retval = pci_register_driver(&isicom_driver);
1689 if (retval < 0) { 1692 if (retval < 0) {
1690 printk(KERN_ERR "ISICOM: Unable to register pci driver.\n"); 1693 pr_err("Unable to register pci driver.\n");
1691 goto err_unrtty; 1694 goto err_unrtty;
1692 } 1695 }
1693 1696
@@ -1717,3 +1720,8 @@ module_exit(isicom_exit);
1717MODULE_AUTHOR("MultiTech"); 1720MODULE_AUTHOR("MultiTech");
1718MODULE_DESCRIPTION("Driver for the ISI series of cards by MultiTech"); 1721MODULE_DESCRIPTION("Driver for the ISI series of cards by MultiTech");
1719MODULE_LICENSE("GPL"); 1722MODULE_LICENSE("GPL");
1723MODULE_FIRMWARE("isi608.bin");
1724MODULE_FIRMWARE("isi608em.bin");
1725MODULE_FIRMWARE("isi616em.bin");
1726MODULE_FIRMWARE("isi4608.bin");
1727MODULE_FIRMWARE("isi4616.bin");
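The isicom cleanup above relies on pr_fmt(): the pr_debug()/pr_warning() macros paste pr_fmt(fmt) around the format string at each call site, so defining it before the first include prefixes every message with the module name and makes the hand-rolled "ISICOM: " prefixes redundant. A minimal sketch:

/* must precede any include that pulls in printk.h */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>

static int __init fmt_demo_init(void)
{
	pr_warning("failed to lock card (0x%lx)\n", 0xc8000UL);
	/* emitted as: "<modname>: failed to lock card (0xc8000)" */
	return 0;
}
module_init(fmt_demo_init);
MODULE_LICENSE("GPL");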
diff --git a/drivers/char/moxa.c b/drivers/char/moxa.c
index 63ee3bbc1ce4..166495d6a1d7 100644
--- a/drivers/char/moxa.c
+++ b/drivers/char/moxa.c
@@ -164,24 +164,25 @@ static unsigned int moxaFuncTout = HZ / 2;
164static unsigned int moxaLowWaterChk; 164static unsigned int moxaLowWaterChk;
165static DEFINE_MUTEX(moxa_openlock); 165static DEFINE_MUTEX(moxa_openlock);
166static DEFINE_SPINLOCK(moxa_lock); 166static DEFINE_SPINLOCK(moxa_lock);
167/* Variables for insmod */ 167
168#ifdef MODULE
169static unsigned long baseaddr[MAX_BOARDS]; 168static unsigned long baseaddr[MAX_BOARDS];
170static unsigned int type[MAX_BOARDS]; 169static unsigned int type[MAX_BOARDS];
171static unsigned int numports[MAX_BOARDS]; 170static unsigned int numports[MAX_BOARDS];
172#endif
173 171
174MODULE_AUTHOR("William Chen"); 172MODULE_AUTHOR("William Chen");
175MODULE_DESCRIPTION("MOXA Intellio Family Multiport Board Device Driver"); 173MODULE_DESCRIPTION("MOXA Intellio Family Multiport Board Device Driver");
176MODULE_LICENSE("GPL"); 174MODULE_LICENSE("GPL");
177#ifdef MODULE 175MODULE_FIRMWARE("c218tunx.cod");
176MODULE_FIRMWARE("cp204unx.cod");
177MODULE_FIRMWARE("c320tunx.cod");
178
178module_param_array(type, uint, NULL, 0); 179module_param_array(type, uint, NULL, 0);
179MODULE_PARM_DESC(type, "card type: C218=2, C320=4"); 180MODULE_PARM_DESC(type, "card type: C218=2, C320=4");
180module_param_array(baseaddr, ulong, NULL, 0); 181module_param_array(baseaddr, ulong, NULL, 0);
181MODULE_PARM_DESC(baseaddr, "base address"); 182MODULE_PARM_DESC(baseaddr, "base address");
182module_param_array(numports, uint, NULL, 0); 183module_param_array(numports, uint, NULL, 0);
183MODULE_PARM_DESC(numports, "numports (ignored for C218)"); 184MODULE_PARM_DESC(numports, "numports (ignored for C218)");
184#endif 185
185module_param(ttymajor, int, 0); 186module_param(ttymajor, int, 0);
186 187
187/* 188/*
@@ -1024,6 +1025,8 @@ static int __init moxa_init(void)
1024{ 1025{
1025 unsigned int isabrds = 0; 1026 unsigned int isabrds = 0;
1026 int retval = 0; 1027 int retval = 0;
1028 struct moxa_board_conf *brd = moxa_boards;
1029 unsigned int i;
1027 1030
1028 printk(KERN_INFO "MOXA Intellio family driver version %s\n", 1031 printk(KERN_INFO "MOXA Intellio family driver version %s\n",
1029 MOXA_VERSION); 1032 MOXA_VERSION);
@@ -1051,10 +1054,7 @@ static int __init moxa_init(void)
1051 } 1054 }
1052 1055
1053 /* Find the boards defined from module args. */ 1056 /* Find the boards defined from module args. */
1054#ifdef MODULE 1057
1055 {
1056 struct moxa_board_conf *brd = moxa_boards;
1057 unsigned int i;
1058 for (i = 0; i < MAX_BOARDS; i++) { 1058 for (i = 0; i < MAX_BOARDS; i++) {
1059 if (!baseaddr[i]) 1059 if (!baseaddr[i])
1060 break; 1060 break;
@@ -1087,8 +1087,6 @@ static int __init moxa_init(void)
1087 isabrds++; 1087 isabrds++;
1088 } 1088 }
1089 } 1089 }
1090 }
1091#endif
1092 1090
1093#ifdef CONFIG_PCI 1091#ifdef CONFIG_PCI
1094 retval = pci_register_driver(&moxa_pci_driver); 1092 retval = pci_register_driver(&moxa_pci_driver);
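Removing the #ifdef MODULE guards in moxa works because module_param_array() parameters are also parsed when the driver is built in, where they become moxa.baseaddr= style boot options. A hedged sketch of the mechanism, with illustrative names and limits:

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>

#define EX_MAX_BOARDS 4				/* assumed board limit */
static unsigned long ex_base[EX_MAX_BOARDS];	/* 0 terminates the scan */
module_param_array(ex_base, ulong, NULL, 0);
MODULE_PARM_DESC(ex_base, "base address of each board");

static int __init ex_boards_init(void)
{
	int i;

	/* as module:   modprobe ex ex_base=0xc8000,0xcc000
	 * as built-in: boot with ex.ex_base=0xc8000,0xcc000 */
	for (i = 0; i < EX_MAX_BOARDS && ex_base[i]; i++)
		pr_info("board %d at 0x%lx\n", i, ex_base[i]);
	return 0;
}
module_init(ex_boards_init);
MODULE_LICENSE("GPL");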
diff --git a/drivers/char/mxser.c b/drivers/char/mxser.c
index 3d923065d9a2..e0c5d2a69046 100644
--- a/drivers/char/mxser.c
+++ b/drivers/char/mxser.c
@@ -895,8 +895,7 @@ static int mxser_activate(struct tty_port *port, struct tty_struct *tty)
895 if (inb(info->ioaddr + UART_LSR) == 0xff) { 895 if (inb(info->ioaddr + UART_LSR) == 0xff) {
896 spin_unlock_irqrestore(&info->slock, flags); 896 spin_unlock_irqrestore(&info->slock, flags);
897 if (capable(CAP_SYS_ADMIN)) { 897 if (capable(CAP_SYS_ADMIN)) {
898 if (tty) 898 set_bit(TTY_IO_ERROR, &tty->flags);
899 set_bit(TTY_IO_ERROR, &tty->flags);
900 return 0; 899 return 0;
901 } else 900 } else
902 return -ENODEV; 901 return -ENODEV;
diff --git a/drivers/char/nozomi.c b/drivers/char/nozomi.c
index 2ad7d37afbd0..a3f32a15fde4 100644
--- a/drivers/char/nozomi.c
+++ b/drivers/char/nozomi.c
@@ -136,10 +136,6 @@ static int debug;
136#define RECEIVE_BUF_MAX 4 136#define RECEIVE_BUF_MAX 4
137 137
138 138
139/* Define all types of vendors and devices to support */
140#define VENDOR1 0x1931 /* Vendor Option */
141#define DEVICE1 0x000c /* HSDPA card */
142
143#define R_IIR 0x0000 /* Interrupt Identity Register */ 139#define R_IIR 0x0000 /* Interrupt Identity Register */
144#define R_FCR 0x0000 /* Flow Control Register */ 140#define R_FCR 0x0000 /* Flow Control Register */
145#define R_IER 0x0004 /* Interrupt Enable Register */ 141#define R_IER 0x0004 /* Interrupt Enable Register */
@@ -371,6 +367,8 @@ struct port {
371 struct mutex tty_sem; 367 struct mutex tty_sem;
372 wait_queue_head_t tty_wait; 368 wait_queue_head_t tty_wait;
373 struct async_icount tty_icount; 369 struct async_icount tty_icount;
370
371 struct nozomi *dc;
374}; 372};
375 373
376/* Private data one for each card in the system */ 374/* Private data one for each card in the system */
@@ -405,7 +403,7 @@ struct buffer {
405 403
406/* Global variables */ 404/* Global variables */
407static const struct pci_device_id nozomi_pci_tbl[] __devinitconst = { 405static const struct pci_device_id nozomi_pci_tbl[] __devinitconst = {
408 {PCI_DEVICE(VENDOR1, DEVICE1)}, 406 {PCI_DEVICE(0x1931, 0x000c)}, /* Nozomi HSDPA */
409 {}, 407 {},
410}; 408};
411 409
@@ -414,6 +412,8 @@ MODULE_DEVICE_TABLE(pci, nozomi_pci_tbl);
414static struct nozomi *ndevs[NOZOMI_MAX_CARDS]; 412static struct nozomi *ndevs[NOZOMI_MAX_CARDS];
415static struct tty_driver *ntty_driver; 413static struct tty_driver *ntty_driver;
416 414
415static const struct tty_port_operations noz_tty_port_ops;
416
417/* 417/*
418 * find card by tty_index 418 * find card by tty_index
419 */ 419 */
@@ -853,8 +853,6 @@ static int receive_data(enum port_type index, struct nozomi *dc)
853 goto put; 853 goto put;
854 } 854 }
855 855
856 tty_buffer_request_room(tty, size);
857
858 while (size > 0) { 856 while (size > 0) {
859 read_mem32((u32 *) buf, addr + offset, RECEIVE_BUF_MAX); 857 read_mem32((u32 *) buf, addr + offset, RECEIVE_BUF_MAX);
860 858
@@ -1473,9 +1471,11 @@ static int __devinit nozomi_card_init(struct pci_dev *pdev,
1473 1471
1474 for (i = 0; i < MAX_PORT; i++) { 1472 for (i = 0; i < MAX_PORT; i++) {
1475 struct device *tty_dev; 1473 struct device *tty_dev;
1476 1474 struct port *port = &dc->port[i];
1477 mutex_init(&dc->port[i].tty_sem); 1475 port->dc = dc;
1478 tty_port_init(&dc->port[i].port); 1476 mutex_init(&port->tty_sem);
1477 tty_port_init(&port->port);
1478 port->port.ops = &noz_tty_port_ops;
1479 tty_dev = tty_register_device(ntty_driver, dc->index_start + i, 1479 tty_dev = tty_register_device(ntty_driver, dc->index_start + i,
1480 &pdev->dev); 1480 &pdev->dev);
1481 1481
@@ -1600,67 +1600,74 @@ static void set_dtr(const struct tty_struct *tty, int dtr)
1600 * ---------------------------------------------------------------------------- 1600 * ----------------------------------------------------------------------------
1601 */ 1601 */
1602 1602
1603/* Called when the userspace process opens the tty, /dev/noz*. */ 1603static int ntty_install(struct tty_driver *driver, struct tty_struct *tty)
1604static int ntty_open(struct tty_struct *tty, struct file *file)
1605{ 1604{
1606 struct port *port = get_port_by_tty(tty); 1605 struct port *port = get_port_by_tty(tty);
1607 struct nozomi *dc = get_dc_by_tty(tty); 1606 struct nozomi *dc = get_dc_by_tty(tty);
1608 unsigned long flags; 1607 int ret;
1609
1610 if (!port || !dc || dc->state != NOZOMI_STATE_READY) 1608 if (!port || !dc || dc->state != NOZOMI_STATE_READY)
1611 return -ENODEV; 1609 return -ENODEV;
1612 1610 ret = tty_init_termios(tty);
1613 if (mutex_lock_interruptible(&port->tty_sem)) 1611 if (ret == 0) {
1614 return -ERESTARTSYS; 1612 tty_driver_kref_get(driver);
1615 1613 driver->ttys[tty->index] = tty;
1616 port->port.count++;
1617 dc->open_ttys++;
1618
1619 /* Enable interrupt downlink for channel */
1620 if (port->port.count == 1) {
1621 tty->driver_data = port;
1622 tty_port_tty_set(&port->port, tty);
1623 DBG1("open: %d", port->token_dl);
1624 spin_lock_irqsave(&dc->spin_mutex, flags);
1625 dc->last_ier = dc->last_ier | port->token_dl;
1626 writew(dc->last_ier, dc->reg_ier);
1627 spin_unlock_irqrestore(&dc->spin_mutex, flags);
1628 } 1614 }
1629 mutex_unlock(&port->tty_sem); 1615 return ret;
1630 return 0;
1631} 1616}
1632 1617
1633/* Called when the userspace process closes the tty, /dev/noz*. Also 1618static void ntty_cleanup(struct tty_struct *tty)
1634 called immediately if ntty_open fails, in which case tty->driver_data 1619{
1635 will be NULL and we exit by the first return */ 1620 tty->driver_data = NULL;
1621}
1636 1622
1637static void ntty_close(struct tty_struct *tty, struct file *file) 1623static int ntty_activate(struct tty_port *tport, struct tty_struct *tty)
1638{ 1624{
1639 struct nozomi *dc = get_dc_by_tty(tty); 1625 struct port *port = container_of(tport, struct port, port);
1640 struct port *nport = tty->driver_data; 1626 struct nozomi *dc = port->dc;
1641 struct tty_port *port = &nport->port;
1642 unsigned long flags; 1627 unsigned long flags;
1643 1628
1644 if (!dc || !nport) 1629 DBG1("open: %d", port->token_dl);
1645 return; 1630 spin_lock_irqsave(&dc->spin_mutex, flags);
1631 dc->last_ier = dc->last_ier | port->token_dl;
1632 writew(dc->last_ier, dc->reg_ier);
1633 dc->open_ttys++;
1634 spin_unlock_irqrestore(&dc->spin_mutex, flags);
1635 printk("noz: activated %d: %p\n", tty->index, tport);
1636 return 0;
1637}
1646 1638
1647 /* Users cannot interrupt a close */ 1639static int ntty_open(struct tty_struct *tty, struct file *filp)
1648 mutex_lock(&nport->tty_sem); 1640{
1641 struct port *port = get_port_by_tty(tty);
1642 return tty_port_open(&port->port, tty, filp);
1643}
1649 1644
1650 WARN_ON(!port->count); 1645static void ntty_shutdown(struct tty_port *tport)
1646{
1647 struct port *port = container_of(tport, struct port, port);
1648 struct nozomi *dc = port->dc;
1649 unsigned long flags;
1651 1650
1651 DBG1("close: %d", port->token_dl);
1652 spin_lock_irqsave(&dc->spin_mutex, flags);
1653 dc->last_ier &= ~(port->token_dl);
1654 writew(dc->last_ier, dc->reg_ier);
1652 dc->open_ttys--; 1655 dc->open_ttys--;
1653 port->count--; 1656 spin_unlock_irqrestore(&dc->spin_mutex, flags);
1657 printk("noz: shutdown %p\n", tport);
1658}
1654 1659
1655 if (port->count == 0) { 1660static void ntty_close(struct tty_struct *tty, struct file *filp)
1656 DBG1("close: %d", nport->token_dl); 1661{
1657 tty_port_tty_set(port, NULL); 1662 struct port *port = tty->driver_data;
1658 spin_lock_irqsave(&dc->spin_mutex, flags); 1663 if (port)
1659 dc->last_ier &= ~(nport->token_dl); 1664 tty_port_close(&port->port, tty, filp);
1660 writew(dc->last_ier, dc->reg_ier); 1665}
1661 spin_unlock_irqrestore(&dc->spin_mutex, flags); 1666
1662 } 1667static void ntty_hangup(struct tty_struct *tty)
1663 mutex_unlock(&nport->tty_sem); 1668{
1669 struct port *port = tty->driver_data;
1670 tty_port_hangup(&port->port);
1664} 1671}
1665 1672
1666/* 1673/*
@@ -1680,15 +1687,7 @@ static int ntty_write(struct tty_struct *tty, const unsigned char *buffer,
1680 if (!dc || !port) 1687 if (!dc || !port)
1681 return -ENODEV; 1688 return -ENODEV;
1682 1689
1683 if (unlikely(!mutex_trylock(&port->tty_sem))) { 1690 mutex_lock(&port->tty_sem);
1684 /*
1685 * must test lock as tty layer wraps calls
1686 * to this function with BKL
1687 */
1688 dev_err(&dc->pdev->dev, "Would have deadlocked - "
1689 "return EAGAIN\n");
1690 return -EAGAIN;
1691 }
1692 1691
1693 if (unlikely(!port->port.count)) { 1692 if (unlikely(!port->port.count)) {
1694 DBG1(" "); 1693 DBG1(" ");
@@ -1728,25 +1727,23 @@ exit:
1728 * This method is called by the upper tty layer. 1727 * This method is called by the upper tty layer.
1729 * according to the N_TTY.c sources it expects a value >= 0 and 1728 * according to the N_TTY.c sources it expects a value >= 0 and
1730 * does not check for negative values. 1729 * does not check for negative values.
1730 *
1731 * If the port is unplugged report lots of room and let the bits
1732 * dribble away so we don't block anything.
1731 */ 1733 */
1732static int ntty_write_room(struct tty_struct *tty) 1734static int ntty_write_room(struct tty_struct *tty)
1733{ 1735{
1734 struct port *port = tty->driver_data; 1736 struct port *port = tty->driver_data;
1735 int room = 0; 1737 int room = 4096;
1736 const struct nozomi *dc = get_dc_by_tty(tty); 1738 const struct nozomi *dc = get_dc_by_tty(tty);
1737 1739
1738 if (!dc || !port) 1740 if (dc) {
1739 return 0; 1741 mutex_lock(&port->tty_sem);
1740 if (!mutex_trylock(&port->tty_sem)) 1742 if (port->port.count)
1741 return 0; 1743 room = port->fifo_ul.size -
1742 1744 kfifo_len(&port->fifo_ul);
1743 if (!port->port.count) 1745 mutex_unlock(&port->tty_sem);
1744 goto exit; 1746 }
1745
1746 room = port->fifo_ul.size - kfifo_len(&port->fifo_ul);
1747
1748exit:
1749 mutex_unlock(&port->tty_sem);
1750 return room; 1747 return room;
1751} 1748}
1752 1749
@@ -1906,10 +1903,16 @@ exit_in_buffer:
1906 return rval; 1903 return rval;
1907} 1904}
1908 1905
1906static const struct tty_port_operations noz_tty_port_ops = {
1907 .activate = ntty_activate,
1908 .shutdown = ntty_shutdown,
1909};
1910
1909static const struct tty_operations tty_ops = { 1911static const struct tty_operations tty_ops = {
1910 .ioctl = ntty_ioctl, 1912 .ioctl = ntty_ioctl,
1911 .open = ntty_open, 1913 .open = ntty_open,
1912 .close = ntty_close, 1914 .close = ntty_close,
1915 .hangup = ntty_hangup,
1913 .write = ntty_write, 1916 .write = ntty_write,
1914 .write_room = ntty_write_room, 1917 .write_room = ntty_write_room,
1915 .unthrottle = ntty_unthrottle, 1918 .unthrottle = ntty_unthrottle,
@@ -1917,6 +1920,8 @@ static const struct tty_operations tty_ops = {
1917 .chars_in_buffer = ntty_chars_in_buffer, 1920 .chars_in_buffer = ntty_chars_in_buffer,
1918 .tiocmget = ntty_tiocmget, 1921 .tiocmget = ntty_tiocmget,
1919 .tiocmset = ntty_tiocmset, 1922 .tiocmset = ntty_tiocmset,
1923 .install = ntty_install,
1924 .cleanup = ntty_cleanup,
1920}; 1925};
1921 1926
1922/* Module initialization */ 1927/* Module initialization */
diff --git a/drivers/char/serial167.c b/drivers/char/serial167.c
index 452370af95de..986aa606a6b6 100644
--- a/drivers/char/serial167.c
+++ b/drivers/char/serial167.c
@@ -658,8 +658,7 @@ static irqreturn_t cd2401_rx_interrupt(int irq, void *dev_id)
658 info->mon.char_max = char_count; 658 info->mon.char_max = char_count;
659 info->mon.char_last = char_count; 659 info->mon.char_last = char_count;
660#endif 660#endif
661 len = tty_buffer_request_room(tty, char_count); 661 while (char_count--) {
662 while (len--) {
663 data = base_addr[CyRDR]; 662 data = base_addr[CyRDR];
664 tty_insert_flip_char(tty, data, TTY_NORMAL); 663 tty_insert_flip_char(tty, data, TTY_NORMAL);
665#ifdef CYCLOM_16Y_HACK 664#ifdef CYCLOM_16Y_HACK
diff --git a/drivers/char/specialix.c b/drivers/char/specialix.c
index 268e17f9ec3f..07ac14d949ce 100644
--- a/drivers/char/specialix.c
+++ b/drivers/char/specialix.c
@@ -646,8 +646,6 @@ static void sx_receive(struct specialix_board *bp)
646 dprintk(SX_DEBUG_RX, "port: %p: count: %d\n", port, count); 646 dprintk(SX_DEBUG_RX, "port: %p: count: %d\n", port, count);
647 port->hits[count > 8 ? 9 : count]++; 647 port->hits[count > 8 ? 9 : count]++;
648 648
649 tty_buffer_request_room(tty, count);
650
651 while (count--) 649 while (count--)
652 tty_insert_flip_char(tty, sx_in(bp, CD186x_RDR), TTY_NORMAL); 650 tty_insert_flip_char(tty, sx_in(bp, CD186x_RDR), TTY_NORMAL);
653 tty_flip_buffer_push(tty); 651 tty_flip_buffer_push(tty);
diff --git a/drivers/char/synclink.c b/drivers/char/synclink.c
index 4846b73ef28d..0658fc548222 100644
--- a/drivers/char/synclink.c
+++ b/drivers/char/synclink.c
@@ -2031,7 +2031,7 @@ static int mgsl_put_char(struct tty_struct *tty, unsigned char ch)
2031 if (mgsl_paranoia_check(info, tty->name, "mgsl_put_char")) 2031 if (mgsl_paranoia_check(info, tty->name, "mgsl_put_char"))
2032 return 0; 2032 return 0;
2033 2033
2034 if (!tty || !info->xmit_buf) 2034 if (!info->xmit_buf)
2035 return 0; 2035 return 0;
2036 2036
2037 spin_lock_irqsave(&info->irq_spinlock, flags); 2037 spin_lock_irqsave(&info->irq_spinlock, flags);
@@ -2121,7 +2121,7 @@ static int mgsl_write(struct tty_struct * tty,
2121 if (mgsl_paranoia_check(info, tty->name, "mgsl_write")) 2121 if (mgsl_paranoia_check(info, tty->name, "mgsl_write"))
2122 goto cleanup; 2122 goto cleanup;
2123 2123
2124 if (!tty || !info->xmit_buf) 2124 if (!info->xmit_buf)
2125 goto cleanup; 2125 goto cleanup;
2126 2126
2127 if ( info->params.mode == MGSL_MODE_HDLC || 2127 if ( info->params.mode == MGSL_MODE_HDLC ||
diff --git a/drivers/char/synclink_gt.c b/drivers/char/synclink_gt.c
index 8678f0c8699d..4561ce2fba6d 100644
--- a/drivers/char/synclink_gt.c
+++ b/drivers/char/synclink_gt.c
@@ -468,7 +468,7 @@ static unsigned int free_tbuf_count(struct slgt_info *info);
468static unsigned int tbuf_bytes(struct slgt_info *info); 468static unsigned int tbuf_bytes(struct slgt_info *info);
469static void reset_tbufs(struct slgt_info *info); 469static void reset_tbufs(struct slgt_info *info);
470static void tdma_reset(struct slgt_info *info); 470static void tdma_reset(struct slgt_info *info);
471static void tx_load(struct slgt_info *info, const char *buf, unsigned int count); 471static bool tx_load(struct slgt_info *info, const char *buf, unsigned int count);
472 472
473static void get_signals(struct slgt_info *info); 473static void get_signals(struct slgt_info *info);
474static void set_signals(struct slgt_info *info); 474static void set_signals(struct slgt_info *info);
@@ -813,59 +813,32 @@ static int write(struct tty_struct *tty,
813 int ret = 0; 813 int ret = 0;
814 struct slgt_info *info = tty->driver_data; 814 struct slgt_info *info = tty->driver_data;
815 unsigned long flags; 815 unsigned long flags;
816 unsigned int bufs_needed;
817 816
818 if (sanity_check(info, tty->name, "write")) 817 if (sanity_check(info, tty->name, "write"))
819 goto cleanup; 818 return -EIO;
819
820 DBGINFO(("%s write count=%d\n", info->device_name, count)); 820 DBGINFO(("%s write count=%d\n", info->device_name, count));
821 821
822 if (!info->tx_buf) 822 if (!info->tx_buf || (count > info->max_frame_size))
823 goto cleanup; 823 return -EIO;
824 824
825 if (count > info->max_frame_size) { 825 if (!count || tty->stopped || tty->hw_stopped)
826 ret = -EIO; 826 return 0;
827 goto cleanup;
828 }
829 827
830 if (!count) 828 spin_lock_irqsave(&info->lock, flags);
831 goto cleanup;
832 829
833 if (!info->tx_active && info->tx_count) { 830 if (info->tx_count) {
834 /* send accumulated data from send_char() */ 831 /* send accumulated data from send_char() */
835 tx_load(info, info->tx_buf, info->tx_count); 832 if (!tx_load(info, info->tx_buf, info->tx_count))
836 goto start; 833 goto cleanup;
834 info->tx_count = 0;
837 } 835 }
838 bufs_needed = (count/DMABUFSIZE);
839 if (count % DMABUFSIZE)
840 ++bufs_needed;
841 if (bufs_needed > free_tbuf_count(info))
842 goto cleanup;
843 836
844 ret = info->tx_count = count; 837 if (tx_load(info, buf, count))
845 tx_load(info, buf, count); 838 ret = count;
846 goto start;
847
848start:
849 if (info->tx_count && !tty->stopped && !tty->hw_stopped) {
850 spin_lock_irqsave(&info->lock,flags);
851 if (!info->tx_active)
852 tx_start(info);
853 else if (!(rd_reg32(info, TDCSR) & BIT0)) {
854 /* transmit still active but transmit DMA stopped */
855 unsigned int i = info->tbuf_current;
856 if (!i)
857 i = info->tbuf_count;
858 i--;
859 /* if DMA buf unsent must try later after tx idle */
860 if (desc_count(info->tbufs[i]))
861 ret = 0;
862 }
863 if (ret > 0)
864 update_tx_timer(info);
865 spin_unlock_irqrestore(&info->lock,flags);
866 }
867 839
868cleanup: 840cleanup:
841 spin_unlock_irqrestore(&info->lock, flags);
869 DBGINFO(("%s write rc=%d\n", info->device_name, ret)); 842 DBGINFO(("%s write rc=%d\n", info->device_name, ret));
870 return ret; 843 return ret;
871} 844}
@@ -882,7 +855,7 @@ static int put_char(struct tty_struct *tty, unsigned char ch)
882 if (!info->tx_buf) 855 if (!info->tx_buf)
883 return 0; 856 return 0;
884 spin_lock_irqsave(&info->lock,flags); 857 spin_lock_irqsave(&info->lock,flags);
885 if (!info->tx_active && (info->tx_count < info->max_frame_size)) { 858 if (info->tx_count < info->max_frame_size) {
886 info->tx_buf[info->tx_count++] = ch; 859 info->tx_buf[info->tx_count++] = ch;
887 ret = 1; 860 ret = 1;
888 } 861 }
@@ -981,10 +954,8 @@ static void flush_chars(struct tty_struct *tty)
981 DBGINFO(("%s flush_chars start transmit\n", info->device_name)); 954 DBGINFO(("%s flush_chars start transmit\n", info->device_name));
982 955
983 spin_lock_irqsave(&info->lock,flags); 956 spin_lock_irqsave(&info->lock,flags);
984 if (!info->tx_active && info->tx_count) { 957 if (info->tx_count && tx_load(info, info->tx_buf, info->tx_count))
985 tx_load(info, info->tx_buf,info->tx_count); 958 info->tx_count = 0;
986 tx_start(info);
987 }
988 spin_unlock_irqrestore(&info->lock,flags); 959 spin_unlock_irqrestore(&info->lock,flags);
989} 960}
990 961
@@ -997,10 +968,9 @@ static void flush_buffer(struct tty_struct *tty)
997 return; 968 return;
998 DBGINFO(("%s flush_buffer\n", info->device_name)); 969 DBGINFO(("%s flush_buffer\n", info->device_name));
999 970
1000 spin_lock_irqsave(&info->lock,flags); 971 spin_lock_irqsave(&info->lock, flags);
1001 if (!info->tx_active) 972 info->tx_count = 0;
1002 info->tx_count = 0; 973 spin_unlock_irqrestore(&info->lock, flags);
1003 spin_unlock_irqrestore(&info->lock,flags);
1004 974
1005 tty_wakeup(tty); 975 tty_wakeup(tty);
1006} 976}
@@ -1033,12 +1003,10 @@ static void tx_release(struct tty_struct *tty)
1033 if (sanity_check(info, tty->name, "tx_release")) 1003 if (sanity_check(info, tty->name, "tx_release"))
1034 return; 1004 return;
1035 DBGINFO(("%s tx_release\n", info->device_name)); 1005 DBGINFO(("%s tx_release\n", info->device_name));
1036 spin_lock_irqsave(&info->lock,flags); 1006 spin_lock_irqsave(&info->lock, flags);
1037 if (!info->tx_active && info->tx_count) { 1007 if (info->tx_count && tx_load(info, info->tx_buf, info->tx_count))
1038 tx_load(info, info->tx_buf, info->tx_count); 1008 info->tx_count = 0;
1039 tx_start(info); 1009 spin_unlock_irqrestore(&info->lock, flags);
1040 }
1041 spin_unlock_irqrestore(&info->lock,flags);
1042} 1010}
1043 1011
1044/* 1012/*
@@ -1506,27 +1474,25 @@ static netdev_tx_t hdlcdev_xmit(struct sk_buff *skb,
1506 1474
1507 DBGINFO(("%s hdlc_xmit\n", dev->name)); 1475 DBGINFO(("%s hdlc_xmit\n", dev->name));
1508 1476
1477 if (!skb->len)
1478 return NETDEV_TX_OK;
1479
1509 /* stop sending until this frame completes */ 1480 /* stop sending until this frame completes */
1510 netif_stop_queue(dev); 1481 netif_stop_queue(dev);
1511 1482
1512 /* copy data to device buffers */
1513 info->tx_count = skb->len;
1514 tx_load(info, skb->data, skb->len);
1515
1516 /* update network statistics */ 1483 /* update network statistics */
1517 dev->stats.tx_packets++; 1484 dev->stats.tx_packets++;
1518 dev->stats.tx_bytes += skb->len; 1485 dev->stats.tx_bytes += skb->len;
1519 1486
1520 /* done with socket buffer, so free it */
1521 dev_kfree_skb(skb);
1522
1523 /* save start time for transmit timeout detection */ 1487 /* save start time for transmit timeout detection */
1524 dev->trans_start = jiffies; 1488 dev->trans_start = jiffies;
1525 1489
1526 spin_lock_irqsave(&info->lock,flags); 1490 spin_lock_irqsave(&info->lock, flags);
1527 tx_start(info); 1491 tx_load(info, skb->data, skb->len);
1528 update_tx_timer(info); 1492 spin_unlock_irqrestore(&info->lock, flags);
1529 spin_unlock_irqrestore(&info->lock,flags); 1493
1494 /* done with socket buffer, so free it */
1495 dev_kfree_skb(skb);
1530 1496
1531 return NETDEV_TX_OK; 1497 return NETDEV_TX_OK;
1532} 1498}
@@ -2180,7 +2146,7 @@ static void isr_serial(struct slgt_info *info)
2180 2146
2181 if (info->params.mode == MGSL_MODE_ASYNC) { 2147 if (info->params.mode == MGSL_MODE_ASYNC) {
2182 if (status & IRQ_TXIDLE) { 2148 if (status & IRQ_TXIDLE) {
2183 if (info->tx_count) 2149 if (info->tx_active)
2184 isr_txeom(info, status); 2150 isr_txeom(info, status);
2185 } 2151 }
2186 if (info->rx_pio && (status & IRQ_RXDATA)) 2152 if (info->rx_pio && (status & IRQ_RXDATA))
@@ -2276,13 +2242,42 @@ static void isr_tdma(struct slgt_info *info)
2276 } 2242 }
2277} 2243}
2278 2244
2245/*
2246 * return true if there are unsent tx DMA buffers, otherwise false
2247 *
2248 * if there are unsent buffers then info->tbuf_start
2249 * is set to the index of the first unsent buffer
2250 */
2251static bool unsent_tbufs(struct slgt_info *info)
2252{
2253 unsigned int i = info->tbuf_current;
2254 bool rc = false;
2255
2256 /*
2257 * search backwards from last loaded buffer (precedes tbuf_current)
2258 * for first unsent buffer (desc_count > 0)
2259 */
2260
2261 do {
2262 if (i)
2263 i--;
2264 else
2265 i = info->tbuf_count - 1;
2266 if (!desc_count(info->tbufs[i]))
2267 break;
2268 info->tbuf_start = i;
2269 rc = true;
2270 } while (i != info->tbuf_current);
2271
2272 return rc;
2273}
2274
2279static void isr_txeom(struct slgt_info *info, unsigned short status) 2275static void isr_txeom(struct slgt_info *info, unsigned short status)
2280{ 2276{
2281 DBGISR(("%s txeom status=%04x\n", info->device_name, status)); 2277 DBGISR(("%s txeom status=%04x\n", info->device_name, status));
2282 2278
2283 slgt_irq_off(info, IRQ_TXDATA + IRQ_TXIDLE + IRQ_TXUNDER); 2279 slgt_irq_off(info, IRQ_TXDATA + IRQ_TXIDLE + IRQ_TXUNDER);
2284 tdma_reset(info); 2280 tdma_reset(info);
2285 reset_tbufs(info);
2286 if (status & IRQ_TXUNDER) { 2281 if (status & IRQ_TXUNDER) {
2287 unsigned short val = rd_reg16(info, TCR); 2282 unsigned short val = rd_reg16(info, TCR);
2288 wr_reg16(info, TCR, (unsigned short)(val | BIT2)); /* set reset bit */ 2283 wr_reg16(info, TCR, (unsigned short)(val | BIT2)); /* set reset bit */
@@ -2297,8 +2292,12 @@ static void isr_txeom(struct slgt_info *info, unsigned short status)
2297 info->icount.txok++; 2292 info->icount.txok++;
2298 } 2293 }
2299 2294
2295 if (unsent_tbufs(info)) {
2296 tx_start(info);
2297 update_tx_timer(info);
2298 return;
2299 }
2300 info->tx_active = false; 2300 info->tx_active = false;
2301 info->tx_count = 0;
2302 2301
2303 del_timer(&info->tx_timer); 2302 del_timer(&info->tx_timer);
2304 2303
@@ -3949,7 +3948,7 @@ static void tx_start(struct slgt_info *info)
3949 info->tx_enabled = true; 3948 info->tx_enabled = true;
3950 } 3949 }
3951 3950
3952 if (info->tx_count) { 3951 if (desc_count(info->tbufs[info->tbuf_start])) {
3953 info->drop_rts_on_tx_done = false; 3952 info->drop_rts_on_tx_done = false;
3954 3953
3955 if (info->params.mode != MGSL_MODE_ASYNC) { 3954 if (info->params.mode != MGSL_MODE_ASYNC) {
@@ -4772,25 +4771,36 @@ static unsigned int tbuf_bytes(struct slgt_info *info)
4772} 4771}
4773 4772
4774/* 4773/*
4775 * load transmit DMA buffer(s) with data 4774 * load data into transmit DMA buffer ring and start transmitter if needed
4775 * return true if data accepted, otherwise false (buffers full)
4776 */ 4776 */
4777static void tx_load(struct slgt_info *info, const char *buf, unsigned int size) 4777static bool tx_load(struct slgt_info *info, const char *buf, unsigned int size)
4778{ 4778{
4779 unsigned short count; 4779 unsigned short count;
4780 unsigned int i; 4780 unsigned int i;
4781 struct slgt_desc *d; 4781 struct slgt_desc *d;
4782 4782
4783 if (size == 0) 4783 /* check required buffer space */
4784 return; 4784 if (DIV_ROUND_UP(size, DMABUFSIZE) > free_tbuf_count(info))
4785 return false;
4785 4786
4786 DBGDATA(info, buf, size, "tx"); 4787 DBGDATA(info, buf, size, "tx");
4787 4788
4789 /*
4790 * copy data to one or more DMA buffers in circular ring
4791 * tbuf_start = first buffer for this data
4792 * tbuf_current = next free buffer
4793 *
4794 * Copy all data before making data visible to DMA controller by
4795 * setting descriptor count of the first buffer.
4796 * This prevents an active DMA controller from reading the first DMA
4797 * buffers of a frame and stopping before the final buffers are filled.
4798 */
4799
4788 info->tbuf_start = i = info->tbuf_current; 4800 info->tbuf_start = i = info->tbuf_current;
4789 4801
4790 while (size) { 4802 while (size) {
4791 d = &info->tbufs[i]; 4803 d = &info->tbufs[i];
4792 if (++i == info->tbuf_count)
4793 i = 0;
4794 4804
4795 count = (unsigned short)((size > DMABUFSIZE) ? DMABUFSIZE : size); 4805 count = (unsigned short)((size > DMABUFSIZE) ? DMABUFSIZE : size);
4796 memcpy(d->buf, buf, count); 4806 memcpy(d->buf, buf, count);
@@ -4808,11 +4818,27 @@ static void tx_load(struct slgt_info *info, const char *buf, unsigned int size)
4808 else 4818 else
4809 set_desc_eof(*d, 0); 4819 set_desc_eof(*d, 0);
4810 4820
4811 set_desc_count(*d, count); 4821 /* set descriptor count for all but first buffer */
4822 if (i != info->tbuf_start)
4823 set_desc_count(*d, count);
4812 d->buf_count = count; 4824 d->buf_count = count;
4825
4826 if (++i == info->tbuf_count)
4827 i = 0;
4813 } 4828 }
4814 4829
4815 info->tbuf_current = i; 4830 info->tbuf_current = i;
4831
4832 /* set first buffer count to make new data visible to DMA controller */
4833 d = &info->tbufs[info->tbuf_start];
4834 set_desc_count(*d, d->buf_count);
4835
4836 /* start transmitter if needed and update transmit timeout */
4837 if (!info->tx_active)
4838 tx_start(info);
4839 update_tx_timer(info);
4840
4841 return true;
4816} 4842}
4817 4843
4818static int register_test(struct slgt_info *info) 4844static int register_test(struct slgt_info *info)
@@ -4934,9 +4960,7 @@ static int loopback_test(struct slgt_info *info)
4934 spin_lock_irqsave(&info->lock,flags); 4960 spin_lock_irqsave(&info->lock,flags);
4935 async_mode(info); 4961 async_mode(info);
4936 rx_start(info); 4962 rx_start(info);
4937 info->tx_count = count;
4938 tx_load(info, buf, count); 4963 tx_load(info, buf, count);
4939 tx_start(info);
4940 spin_unlock_irqrestore(&info->lock, flags); 4964 spin_unlock_irqrestore(&info->lock, flags);
4941 4965
4942 /* wait for receive complete */ 4966 /* wait for receive complete */
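The heart of the synclink_gt rework is the publish order in tx_load(): every DMA buffer of a frame is filled before the descriptor count of the first buffer is set, so a DMA controller already walking the ring can never pick up a half-filled frame. Stripped to the idea, as a sketch; the ring layout, the 4 KiB buffer size, and the explicit wmb() are assumptions standing in for the driver's real descriptor accessors:

#include <linux/kernel.h>
#include <linux/string.h>

/* sketch: a nonzero count field is what hands a buffer to the DMA
 * engine (stand-in for set_desc_count()) */
struct ex_desc {
	unsigned char buf[4096];	/* DMABUFSIZE analogue */
	unsigned short count;		/* 0 = not ready for DMA */
};

static void ex_tx_load(struct ex_desc *ring, unsigned int nbufs,
		       unsigned int start, const char *data, size_t size)
{
	unsigned int i = start;
	unsigned short first_count = 0;

	if (!size)
		return;
	while (size) {
		unsigned short n = min_t(size_t, size, sizeof(ring[i].buf));

		memcpy(ring[i].buf, data, n);
		if (i == start)
			first_count = n;	/* defer publishing buffer 0 */
		else
			ring[i].count = n;
		data += n;
		size -= n;
		i = (i + 1) % nbufs;
	}
	wmb();					/* order data before count */
	ring[start].count = first_count;	/* whole frame now visible */
}

The same ordering is why isr_txeom() can restart safely from unsent_tbufs(): any buffer with a nonzero descriptor count is guaranteed to belong to a completely loaded frame.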
diff --git a/drivers/char/tty_buffer.c b/drivers/char/tty_buffer.c
index 66fa4e10d76b..af8d97715728 100644
--- a/drivers/char/tty_buffer.c
+++ b/drivers/char/tty_buffer.c
@@ -231,9 +231,10 @@ int tty_buffer_request_room(struct tty_struct *tty, size_t size)
231EXPORT_SYMBOL_GPL(tty_buffer_request_room); 231EXPORT_SYMBOL_GPL(tty_buffer_request_room);
232 232
233/** 233/**
234 * tty_insert_flip_string - Add characters to the tty buffer 234 * tty_insert_flip_string_fixed_flag - Add characters to the tty buffer
235 * @tty: tty structure 235 * @tty: tty structure
236 * @chars: characters 236 * @chars: characters
237 * @flag: flag value for each character
237 * @size: size 238 * @size: size
238 * 239 *
239 * Queue a series of bytes to the tty buffering. All the characters 240 * Queue a series of bytes to the tty buffering. All the characters
@@ -242,18 +243,19 @@ EXPORT_SYMBOL_GPL(tty_buffer_request_room);
242 * Locking: Called functions may take tty->buf.lock 243 * Locking: Called functions may take tty->buf.lock
243 */ 244 */
244 245
245int tty_insert_flip_string(struct tty_struct *tty, const unsigned char *chars, 246int tty_insert_flip_string_fixed_flag(struct tty_struct *tty,
246 size_t size) 247 const unsigned char *chars, char flag, size_t size)
247{ 248{
248 int copied = 0; 249 int copied = 0;
249 do { 250 do {
250 int space = tty_buffer_request_room(tty, size - copied); 251 int goal = min(size - copied, TTY_BUFFER_PAGE);
252 int space = tty_buffer_request_room(tty, goal);
251 struct tty_buffer *tb = tty->buf.tail; 253 struct tty_buffer *tb = tty->buf.tail;
252 /* If there is no space then tb may be NULL */ 254 /* If there is no space then tb may be NULL */
253 if (unlikely(space == 0)) 255 if (unlikely(space == 0))
254 break; 256 break;
255 memcpy(tb->char_buf_ptr + tb->used, chars, space); 257 memcpy(tb->char_buf_ptr + tb->used, chars, space);
256 memset(tb->flag_buf_ptr + tb->used, TTY_NORMAL, space); 258 memset(tb->flag_buf_ptr + tb->used, flag, space);
257 tb->used += space; 259 tb->used += space;
258 copied += space; 260 copied += space;
259 chars += space; 261 chars += space;
@@ -262,7 +264,7 @@ int tty_insert_flip_string(struct tty_struct *tty, const unsigned char *chars,
262 } while (unlikely(size > copied)); 264 } while (unlikely(size > copied));
263 return copied; 265 return copied;
264} 266}
265EXPORT_SYMBOL(tty_insert_flip_string); 267EXPORT_SYMBOL(tty_insert_flip_string_fixed_flag);
266 268
267/** 269/**
268 * tty_insert_flip_string_flags - Add characters to the tty buffer 270 * tty_insert_flip_string_flags - Add characters to the tty buffer
@@ -283,7 +285,8 @@ int tty_insert_flip_string_flags(struct tty_struct *tty,
283{ 285{
284 int copied = 0; 286 int copied = 0;
285 do { 287 do {
286 int space = tty_buffer_request_room(tty, size - copied); 288 int goal = min(size - copied, TTY_BUFFER_PAGE);
289 int space = tty_buffer_request_room(tty, goal);
287 struct tty_buffer *tb = tty->buf.tail; 290 struct tty_buffer *tb = tty->buf.tail;
288 /* If there is no space then tb may be NULL */ 291 /* If there is no space then tb may be NULL */
289 if (unlikely(space == 0)) 292 if (unlikely(space == 0))
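This TTY_BUFFER_PAGE clamp is also why the serial167 and specialix hunks above could drop their tty_buffer_request_room() calls: tty_insert_flip_string() now chunks large requests internally and reports how much it actually queued, so no single oversized contiguous allocation is ever demanded. The caller-side effect, as a rough illustration against the tty-based flip API of this era:

/* sketch of a receive path relying on internal chunking */
static void ex_rx(struct tty_struct *tty, const unsigned char *data,
		  size_t len)
{
	int copied = tty_insert_flip_string(tty, data, len);

	if (copied < (int)len)
		pr_debug("dropped %zu bytes\n", len - copied);
	tty_flip_buffer_push(tty);
}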
diff --git a/drivers/char/tty_ldisc.c b/drivers/char/tty_ldisc.c
index 3f653f7d849f..500e740ec5e4 100644
--- a/drivers/char/tty_ldisc.c
+++ b/drivers/char/tty_ldisc.c
@@ -706,12 +706,13 @@ static void tty_reset_termios(struct tty_struct *tty)
706/** 706/**
707 * tty_ldisc_reinit - reinitialise the tty ldisc 707 * tty_ldisc_reinit - reinitialise the tty ldisc
708 * @tty: tty to reinit 708 * @tty: tty to reinit
709 * @ldisc: line discipline to reinitialize
709 * 710 *
710 * Switch the tty back to N_TTY line discipline and leave the 711 * Switch the tty to a line discipline and leave the ldisc
711 * ldisc state closed 712 * state closed
712 */ 713 */
713 714
714static void tty_ldisc_reinit(struct tty_struct *tty) 715static void tty_ldisc_reinit(struct tty_struct *tty, int ldisc)
715{ 716{
716 struct tty_ldisc *ld; 717 struct tty_ldisc *ld;
717 718
@@ -721,10 +722,10 @@ static void tty_ldisc_reinit(struct tty_struct *tty)
721 /* 722 /*
722 * Switch the line discipline back 723 * Switch the line discipline back
723 */ 724 */
724 ld = tty_ldisc_get(N_TTY); 725 ld = tty_ldisc_get(ldisc);
725 BUG_ON(IS_ERR(ld)); 726 BUG_ON(IS_ERR(ld));
726 tty_ldisc_assign(tty, ld); 727 tty_ldisc_assign(tty, ld);
727 tty_set_termios_ldisc(tty, N_TTY); 728 tty_set_termios_ldisc(tty, ldisc);
728} 729}
729 730
730/** 731/**
@@ -745,6 +746,8 @@ static void tty_ldisc_reinit(struct tty_struct *tty)
745void tty_ldisc_hangup(struct tty_struct *tty) 746void tty_ldisc_hangup(struct tty_struct *tty)
746{ 747{
747 struct tty_ldisc *ld; 748 struct tty_ldisc *ld;
749 int reset = tty->driver->flags & TTY_DRIVER_RESET_TERMIOS;
750 int err = 0;
748 751
749 /* 752 /*
750 * FIXME! What are the locking issues here? This may be overdoing 753
@@ -772,25 +775,32 @@ void tty_ldisc_hangup(struct tty_struct *tty)
772 wake_up_interruptible_poll(&tty->read_wait, POLLIN); 775 wake_up_interruptible_poll(&tty->read_wait, POLLIN);
773 /* 776 /*
774 * Shutdown the current line discipline, and reset it to 777 * Shutdown the current line discipline, and reset it to
775 * N_TTY. 778 * N_TTY if need be.
779 *
780 * Avoid racing set_ldisc or tty_ldisc_release
776 */ 781 */
777 if (tty->driver->flags & TTY_DRIVER_RESET_TERMIOS) { 782 mutex_lock(&tty->ldisc_mutex);
778 /* Avoid racing set_ldisc or tty_ldisc_release */ 783 tty_ldisc_halt(tty);
779 mutex_lock(&tty->ldisc_mutex); 784 /* At this point we have a closed ldisc and we want to
780 tty_ldisc_halt(tty); 785 reopen it. We could defer this to the next open but
781 if (tty->ldisc) { /* Not yet closed */ 786 it means auditing a lot of other paths so this is
782 /* Switch back to N_TTY */ 787 a FIXME */
783 tty_ldisc_reinit(tty); 788 if (tty->ldisc) { /* Not yet closed */
784 /* At this point we have a closed ldisc and we want to 789 if (reset == 0) {
785 reopen it. We could defer this to the next open but 790 tty_ldisc_reinit(tty, tty->termios->c_line);
786 it means auditing a lot of other paths so this is 791 err = tty_ldisc_open(tty, tty->ldisc);
787 a FIXME */ 792 }
793 /* If the re-open fails or we reset then go to N_TTY. The
794 N_TTY open cannot fail */
795 if (reset || err) {
796 tty_ldisc_reinit(tty, N_TTY);
788 WARN_ON(tty_ldisc_open(tty, tty->ldisc)); 797 WARN_ON(tty_ldisc_open(tty, tty->ldisc));
789 tty_ldisc_enable(tty);
790 } 798 }
791 mutex_unlock(&tty->ldisc_mutex); 799 tty_ldisc_enable(tty);
792 tty_reset_termios(tty);
793 } 800 }
801 mutex_unlock(&tty->ldisc_mutex);
802 if (reset)
803 tty_reset_termios(tty);
794} 804}
795 805
796/** 806/**
diff --git a/drivers/char/vt_ioctl.c b/drivers/char/vt_ioctl.c
index 6aa10284104a..87778dcf8727 100644
--- a/drivers/char/vt_ioctl.c
+++ b/drivers/char/vt_ioctl.c
@@ -888,7 +888,7 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
888 ret = -EFAULT; 888 ret = -EFAULT;
889 goto out; 889 goto out;
890 } 890 }
891 if (tmp.mode != VT_AUTO && tmp.mode != VT_PROCESS) { 891 if (tmp.mode != VT_AUTO && tmp.mode != VT_PROCESS && tmp.mode != VT_PROCESS_AUTO) {
892 ret = -EINVAL; 892 ret = -EINVAL;
893 goto out; 893 goto out;
894 } 894 }
@@ -1622,7 +1622,7 @@ static void complete_change_console(struct vc_data *vc)
1622 * telling it that it has acquired. Also check if it has died and 1622 * telling it that it has acquired. Also check if it has died and
1623 * clean up (similar to logic employed in change_console()) 1623 * clean up (similar to logic employed in change_console())
1624 */ 1624 */
1625 if (vc->vt_mode.mode == VT_PROCESS) { 1625 if (vc->vt_mode.mode == VT_PROCESS || vc->vt_mode.mode == VT_PROCESS_AUTO) {
1626 /* 1626 /*
1627 * Send the signal as privileged - kill_pid() will 1627 * Send the signal as privileged - kill_pid() will
1628 * tell us if the process has gone or something else 1628 * tell us if the process has gone or something else
@@ -1682,7 +1682,7 @@ void change_console(struct vc_data *new_vc)
1682 * vt to auto control. 1682 * vt to auto control.
1683 */ 1683 */
1684 vc = vc_cons[fg_console].d; 1684 vc = vc_cons[fg_console].d;
1685 if (vc->vt_mode.mode == VT_PROCESS) { 1685 if (vc->vt_mode.mode == VT_PROCESS || vc->vt_mode.mode == VT_PROCESS_AUTO) {
1686 /* 1686 /*
1687 * Send the signal as privileged - kill_pid() will 1687 * Send the signal as privileged - kill_pid() will
1688 * tell us if the process has gone or something else 1688 * tell us if the process has gone or something else
@@ -1693,27 +1693,28 @@ void change_console(struct vc_data *new_vc)
1693 */ 1693 */
1694 vc->vt_newvt = new_vc->vc_num; 1694 vc->vt_newvt = new_vc->vc_num;
1695 if (kill_pid(vc->vt_pid, vc->vt_mode.relsig, 1) == 0) { 1695 if (kill_pid(vc->vt_pid, vc->vt_mode.relsig, 1) == 0) {
1696 if(vc->vt_mode.mode == VT_PROCESS)
1697 /*
1698 * It worked. Mark the vt to switch to and
1699 * return. The process needs to send us a
1700 * VT_RELDISP ioctl to complete the switch.
1701 */
1702 return;
1703 } else {
1696 /* 1704 /*
1697 * It worked. Mark the vt to switch to and 1705 * The controlling process has died, so we revert back to
1698 * return. The process needs to send us a 1706 * normal operation. In this case, we'll also change back
1699 * VT_RELDISP ioctl to complete the switch. 1707 * to KD_TEXT mode. I'm not sure if this is strictly correct
1708 * but it saves the agony when the X server dies and the screen
1709 * remains blanked due to KD_GRAPHICS! It would be nice to do
1710 * this outside of VT_PROCESS but there is no single process
1711 * to account for and tracking tty count may be undesirable.
1700 */ 1712 */
1701 return; 1713 reset_vc(vc);
1702 } 1714 }
1703 1715
1704 /* 1716 /*
1705 * The controlling process has died, so we revert back to 1717 * Fall through to normal (VT_AUTO and VT_PROCESS_AUTO) handling of the switch...
1706 * normal operation. In this case, we'll also change back
1707 * to KD_TEXT mode. I'm not sure if this is strictly correct
1708 * but it saves the agony when the X server dies and the screen
1709 * remains blanked due to KD_GRAPHICS! It would be nice to do
1710 * this outside of VT_PROCESS but there is no single process
1711 * to account for and tracking tty count may be undesirable.
1712 */
1713 reset_vc(vc);
1714
1715 /*
1716 * Fall through to normal (VT_AUTO) handling of the switch...
1717 */ 1718 */
1718 } 1719 }
1719 1720
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index e02d74b1e892..c27f80e5d531 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -13,6 +13,22 @@ menuconfig DMADEVICES
13 DMA Device drivers supported by the configured arch, it may 13 DMA Device drivers supported by the configured arch, it may
14 be empty in some cases. 14 be empty in some cases.
15 15
16config DMADEVICES_DEBUG
17 bool "DMA Engine debugging"
18 depends on DMADEVICES != n
19 help
20 This is an option for use by developers; most people should
21 say N here. This enables DMA engine core and driver debugging.
22
23config DMADEVICES_VDEBUG
24 bool "DMA Engine verbose debugging"
25 depends on DMADEVICES_DEBUG != n
26 help
27 This is an option for use by developers; most people should
28 say N here. This enables deeper (more verbose) debugging of
29 the DMA engine core and drivers.
30
31
16if DMADEVICES 32if DMADEVICES
17 33
18comment "DMA Devices" 34comment "DMA Devices"
@@ -69,6 +85,13 @@ config FSL_DMA
69 The Elo is the DMA controller on some 82xx and 83xx parts, and the 85 The Elo is the DMA controller on some 82xx and 83xx parts, and the
70 Elo Plus is the DMA controller on 85xx and 86xx parts. 86 Elo Plus is the DMA controller on 85xx and 86xx parts.
71 87
88config MPC512X_DMA
89 tristate "Freescale MPC512x built-in DMA engine support"
90 depends on PPC_MPC512x
91 select DMA_ENGINE
92 ---help---
93 Enable support for the Freescale MPC512x built-in DMA engine.
94
72config MV_XOR 95config MV_XOR
73 bool "Marvell XOR engine support" 96 bool "Marvell XOR engine support"
74 depends on PLAT_ORION 97 depends on PLAT_ORION
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 807053d48232..22bba3d5e2b6 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -1,9 +1,17 @@
1ifeq ($(CONFIG_DMADEVICES_DEBUG),y)
2 EXTRA_CFLAGS += -DDEBUG
3endif
4ifeq ($(CONFIG_DMADEVICES_VDEBUG),y)
5 EXTRA_CFLAGS += -DVERBOSE_DEBUG
6endif
7
1obj-$(CONFIG_DMA_ENGINE) += dmaengine.o 8obj-$(CONFIG_DMA_ENGINE) += dmaengine.o
2obj-$(CONFIG_NET_DMA) += iovlock.o 9obj-$(CONFIG_NET_DMA) += iovlock.o
3obj-$(CONFIG_DMATEST) += dmatest.o 10obj-$(CONFIG_DMATEST) += dmatest.o
4obj-$(CONFIG_INTEL_IOATDMA) += ioat/ 11obj-$(CONFIG_INTEL_IOATDMA) += ioat/
5obj-$(CONFIG_INTEL_IOP_ADMA) += iop-adma.o 12obj-$(CONFIG_INTEL_IOP_ADMA) += iop-adma.o
6obj-$(CONFIG_FSL_DMA) += fsldma.o 13obj-$(CONFIG_FSL_DMA) += fsldma.o
14obj-$(CONFIG_MPC512X_DMA) += mpc512x_dma.o
7obj-$(CONFIG_MV_XOR) += mv_xor.o 15obj-$(CONFIG_MV_XOR) += mv_xor.o
8obj-$(CONFIG_DW_DMAC) += dw_dmac.o 16obj-$(CONFIG_DW_DMAC) += dw_dmac.o
9obj-$(CONFIG_AT_HDMAC) += at_hdmac.o 17obj-$(CONFIG_AT_HDMAC) += at_hdmac.o
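The new Kconfig/Makefile bits simply compile drivers/dma with -DDEBUG (and -DVERBOSE_DEBUG), which is what arms the otherwise compiled-out dev_dbg()/dev_vdbg() calls. Illustration:

#include <linux/device.h>

static void ex_probe_dbg(struct device *dev)
{
	/* emitted only when built with -DDEBUG (DMADEVICES_DEBUG) */
	dev_dbg(dev, "channel registered\n");
	/* additionally requires -DVERBOSE_DEBUG (DMADEVICES_VDEBUG) */
	dev_vdbg(dev, "per-descriptor trace\n");
}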
diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c
index 64a937262a40..1656fdcdb6c2 100644
--- a/drivers/dma/coh901318.c
+++ b/drivers/dma/coh901318.c
@@ -39,7 +39,6 @@ struct coh901318_desc {
39 unsigned int sg_len; 39 unsigned int sg_len;
40 struct coh901318_lli *data; 40 struct coh901318_lli *data;
41 enum dma_data_direction dir; 41 enum dma_data_direction dir;
42 int pending_irqs;
43 unsigned long flags; 42 unsigned long flags;
44}; 43};
45 44
@@ -72,7 +71,6 @@ struct coh901318_chan {
72 71
73 unsigned long nbr_active_done; 72 unsigned long nbr_active_done;
74 unsigned long busy; 73 unsigned long busy;
75 int pending_irqs;
76 74
77 struct coh901318_base *base; 75 struct coh901318_base *base;
78}; 76};
@@ -80,18 +78,16 @@ struct coh901318_chan {
80static void coh901318_list_print(struct coh901318_chan *cohc, 78static void coh901318_list_print(struct coh901318_chan *cohc,
81 struct coh901318_lli *lli) 79 struct coh901318_lli *lli)
82{ 80{
83 struct coh901318_lli *l; 81 struct coh901318_lli *l = lli;
84 dma_addr_t addr = virt_to_phys(lli);
85 int i = 0; 82 int i = 0;
86 83
87 while (addr) { 84 while (l) {
88 l = phys_to_virt(addr);
89 dev_vdbg(COHC_2_DEV(cohc), "i %d, lli %p, ctrl 0x%x, src 0x%x" 85 dev_vdbg(COHC_2_DEV(cohc), "i %d, lli %p, ctrl 0x%x, src 0x%x"
90 ", dst 0x%x, link 0x%x link_virt 0x%p\n", 86 ", dst 0x%x, link 0x%x virt_link_addr 0x%p\n",
91 i, l, l->control, l->src_addr, l->dst_addr, 87 i, l, l->control, l->src_addr, l->dst_addr,
92 l->link_addr, phys_to_virt(l->link_addr)); 88 l->link_addr, l->virt_link_addr);
93 i++; 89 i++;
94 addr = l->link_addr; 90 l = l->virt_link_addr;
95 } 91 }
96} 92}
97 93
@@ -125,7 +121,7 @@ static int coh901318_debugfs_read(struct file *file, char __user *buf,
125 goto err_kmalloc; 121 goto err_kmalloc;
126 tmp = dev_buf; 122 tmp = dev_buf;
127 123
128 tmp += sprintf(tmp, "DMA -- enable dma channels\n"); 124 tmp += sprintf(tmp, "DMA -- enabled dma channels\n");
129 125
130 for (i = 0; i < debugfs_dma_base->platform->max_channels; i++) 126 for (i = 0; i < debugfs_dma_base->platform->max_channels; i++)
131 if (started_channels & (1 << i)) 127 if (started_channels & (1 << i))
@@ -337,16 +333,22 @@ coh901318_desc_get(struct coh901318_chan *cohc)
337 * TODO: alloc a pile of descs instead of just one, 333 * TODO: alloc a pile of descs instead of just one,
338 * avoid many small allocations. 334 * avoid many small allocations.
339 */ 335 */
340 desc = kmalloc(sizeof(struct coh901318_desc), GFP_NOWAIT); 336 desc = kzalloc(sizeof(struct coh901318_desc), GFP_NOWAIT);
341 if (desc == NULL) 337 if (desc == NULL)
342 goto out; 338 goto out;
343 INIT_LIST_HEAD(&desc->node); 339 INIT_LIST_HEAD(&desc->node);
340 dma_async_tx_descriptor_init(&desc->desc, &cohc->chan);
344 } else { 341 } else {
345 /* Reuse an old desc. */ 342 /* Reuse an old desc. */
346 desc = list_first_entry(&cohc->free, 343 desc = list_first_entry(&cohc->free,
347 struct coh901318_desc, 344 struct coh901318_desc,
348 node); 345 node);
349 list_del(&desc->node); 346 list_del(&desc->node);
347 /* Initialize it a bit so it's not insane */
348 desc->sg = NULL;
349 desc->sg_len = 0;
350 desc->desc.callback = NULL;
351 desc->desc.callback_param = NULL;
350 } 352 }
351 353
352 out: 354 out:
@@ -364,10 +366,6 @@ static void
364coh901318_desc_submit(struct coh901318_chan *cohc, struct coh901318_desc *desc) 366coh901318_desc_submit(struct coh901318_chan *cohc, struct coh901318_desc *desc)
365{ 367{
366 list_add_tail(&desc->node, &cohc->active); 368 list_add_tail(&desc->node, &cohc->active);
367
368 BUG_ON(cohc->pending_irqs != 0);
369
370 cohc->pending_irqs = desc->pending_irqs;
371} 369}
372 370
373static struct coh901318_desc * 371static struct coh901318_desc *
@@ -592,6 +590,10 @@ static struct coh901318_desc *coh901318_queue_start(struct coh901318_chan *cohc)
592 return cohd_que; 590 return cohd_que;
593} 591}
594 592
593/*
594 * This tasklet is called from the interrupt handler to
595 * handle each descriptor (DMA job) that is sent to a channel.
596 */
595static void dma_tasklet(unsigned long data) 597static void dma_tasklet(unsigned long data)
596{ 598{
597 struct coh901318_chan *cohc = (struct coh901318_chan *) data; 599 struct coh901318_chan *cohc = (struct coh901318_chan *) data;
@@ -600,55 +602,58 @@ static void dma_tasklet(unsigned long data)
600 dma_async_tx_callback callback; 602 dma_async_tx_callback callback;
601 void *callback_param; 603 void *callback_param;
602 604
605 dev_vdbg(COHC_2_DEV(cohc), "[%s] chan_id %d"
606 " nbr_active_done %ld\n", __func__,
607 cohc->id, cohc->nbr_active_done);
608
603 spin_lock_irqsave(&cohc->lock, flags); 609 spin_lock_irqsave(&cohc->lock, flags);
604 610
605 /* get first active entry from list */ 611 /* get first active descriptor entry from list */
606 cohd_fin = coh901318_first_active_get(cohc); 612 cohd_fin = coh901318_first_active_get(cohc);
607 613
608 BUG_ON(cohd_fin->pending_irqs == 0);
609
610 if (cohd_fin == NULL) 614 if (cohd_fin == NULL)
611 goto err; 615 goto err;
612 616
613 cohd_fin->pending_irqs--; 617 /* locate callback to client */
614 cohc->completed = cohd_fin->desc.cookie; 618 callback = cohd_fin->desc.callback;
619 callback_param = cohd_fin->desc.callback_param;
615 620
616 if (cohc->nbr_active_done == 0) 621 /* sign this job as completed on the channel */
617 return; 622 cohc->completed = cohd_fin->desc.cookie;
618 623
619 if (!cohd_fin->pending_irqs) { 624 /* release the lli allocation and remove the descriptor */
620 /* release the lli allocation*/ 625 coh901318_lli_free(&cohc->base->pool, &cohd_fin->data);
621 coh901318_lli_free(&cohc->base->pool, &cohd_fin->data);
622 }
623 626
624 dev_vdbg(COHC_2_DEV(cohc), "[%s] chan_id %d pending_irqs %d" 627 /* return desc to free-list */
625 " nbr_active_done %ld\n", __func__, 628 coh901318_desc_remove(cohd_fin);
626 cohc->id, cohc->pending_irqs, cohc->nbr_active_done); 629 coh901318_desc_free(cohc, cohd_fin);
627 630
628 /* callback to client */ 631 spin_unlock_irqrestore(&cohc->lock, flags);
629 callback = cohd_fin->desc.callback;
630 callback_param = cohd_fin->desc.callback_param;
631
632 if (!cohd_fin->pending_irqs) {
633 coh901318_desc_remove(cohd_fin);
634 632
635 /* return desc to free-list */ 633 /* Call the callback when we're done */
636 coh901318_desc_free(cohc, cohd_fin); 634 if (callback)
637 } 635 callback(callback_param);
638 636
639 if (cohc->nbr_active_done) 637 spin_lock_irqsave(&cohc->lock, flags);
640 cohc->nbr_active_done--;
641 638
 639	/*
 640	 * If another interrupt fired while the tasklet was being
 641	 * scheduled, we don't get called twice, so we keep a per-channel
 642	 * counter of active jobs that tracks the number of IRQs expected
 643	 * to be handled for this channel. If more than one IRQ remains
 644	 * to be acked, we simply schedule this tasklet again.
 645	 */
646 cohc->nbr_active_done--;
642 if (cohc->nbr_active_done) { 647 if (cohc->nbr_active_done) {
648 dev_dbg(COHC_2_DEV(cohc), "scheduling tasklet again, new IRQs "
649 "came in while we were scheduling this tasklet\n");
643 if (cohc_chan_conf(cohc)->priority_high) 650 if (cohc_chan_conf(cohc)->priority_high)
644 tasklet_hi_schedule(&cohc->tasklet); 651 tasklet_hi_schedule(&cohc->tasklet);
645 else 652 else
646 tasklet_schedule(&cohc->tasklet); 653 tasklet_schedule(&cohc->tasklet);
647 } 654 }
648 spin_unlock_irqrestore(&cohc->lock, flags);
649 655
650 if (callback) 656 spin_unlock_irqrestore(&cohc->lock, flags);
651 callback(callback_param);
652 657
653 return; 658 return;
654 659
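
For context, a hedged sketch of the IRQ-to-tasklet hand-off the rework above settles on, with hypothetical names (my_chan, my_irq, my_tasklet) rather than the driver's own: the hard IRQ only bumps a per-channel counter under the lock, and the tasklet reaps one job per run, rescheduling itself while the counter stays non-zero.

#include <linux/interrupt.h>
#include <linux/spinlock.h>

struct my_chan {
	spinlock_t lock;
	unsigned long nbr_active_done;
	struct tasklet_struct tasklet;
};

static irqreturn_t my_irq(int irq, void *data)
{
	struct my_chan *c = data;

	spin_lock(&c->lock);
	c->nbr_active_done++;	/* one more finished job to reap */
	spin_unlock(&c->lock);

	tasklet_schedule(&c->tasklet);
	return IRQ_HANDLED;
}

static void my_tasklet(unsigned long data)
{
	struct my_chan *c = (struct my_chan *)data;
	unsigned long flags;

	spin_lock_irqsave(&c->lock, flags);
	/* ... complete exactly one descriptor here ... */
	c->nbr_active_done--;
	if (c->nbr_active_done)	/* more IRQs arrived in the meantime */
		tasklet_schedule(&c->tasklet);
	spin_unlock_irqrestore(&c->lock, flags);
}
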
@@ -667,16 +672,17 @@ static void dma_tc_handle(struct coh901318_chan *cohc)
667 if (!cohc->allocated) 672 if (!cohc->allocated)
668 return; 673 return;
669 674
670 BUG_ON(cohc->pending_irqs == 0); 675 spin_lock(&cohc->lock);
671 676
672 cohc->pending_irqs--;
673 cohc->nbr_active_done++; 677 cohc->nbr_active_done++;
674 678
675 if (cohc->pending_irqs == 0 && coh901318_queue_start(cohc) == NULL) 679 if (coh901318_queue_start(cohc) == NULL)
676 cohc->busy = 0; 680 cohc->busy = 0;
677 681
678 BUG_ON(list_empty(&cohc->active)); 682 BUG_ON(list_empty(&cohc->active));
679 683
684 spin_unlock(&cohc->lock);
685
680 if (cohc_chan_conf(cohc)->priority_high) 686 if (cohc_chan_conf(cohc)->priority_high)
681 tasklet_hi_schedule(&cohc->tasklet); 687 tasklet_hi_schedule(&cohc->tasklet);
682 else 688 else
@@ -870,6 +876,7 @@ coh901318_prep_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
870 struct coh901318_chan *cohc = to_coh901318_chan(chan); 876 struct coh901318_chan *cohc = to_coh901318_chan(chan);
871 int lli_len; 877 int lli_len;
872 u32 ctrl_last = cohc_chan_param(cohc)->ctrl_lli_last; 878 u32 ctrl_last = cohc_chan_param(cohc)->ctrl_lli_last;
879 int ret;
873 880
874 spin_lock_irqsave(&cohc->lock, flg); 881 spin_lock_irqsave(&cohc->lock, flg);
875 882
@@ -890,22 +897,19 @@ coh901318_prep_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
890 if (data == NULL) 897 if (data == NULL)
891 goto err; 898 goto err;
892 899
893 cohd = coh901318_desc_get(cohc); 900 ret = coh901318_lli_fill_memcpy(
894 cohd->sg = NULL; 901 &cohc->base->pool, data, src, size, dest,
895 cohd->sg_len = 0; 902 cohc_chan_param(cohc)->ctrl_lli_chained,
896 cohd->data = data; 903 ctrl_last);
897 904 if (ret)
898 cohd->pending_irqs = 905 goto err;
899 coh901318_lli_fill_memcpy(
900 &cohc->base->pool, data, src, size, dest,
901 cohc_chan_param(cohc)->ctrl_lli_chained,
902 ctrl_last);
903 cohd->flags = flags;
904 906
905 COH_DBG(coh901318_list_print(cohc, data)); 907 COH_DBG(coh901318_list_print(cohc, data));
906 908
907 dma_async_tx_descriptor_init(&cohd->desc, chan); 909 /* Pick a descriptor to handle this transfer */
908 910 cohd = coh901318_desc_get(cohc);
911 cohd->data = data;
912 cohd->flags = flags;
909 cohd->desc.tx_submit = coh901318_tx_submit; 913 cohd->desc.tx_submit = coh901318_tx_submit;
910 914
911 spin_unlock_irqrestore(&cohc->lock, flg); 915 spin_unlock_irqrestore(&cohc->lock, flg);
@@ -924,6 +928,7 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
924 struct coh901318_chan *cohc = to_coh901318_chan(chan); 928 struct coh901318_chan *cohc = to_coh901318_chan(chan);
925 struct coh901318_lli *data; 929 struct coh901318_lli *data;
926 struct coh901318_desc *cohd; 930 struct coh901318_desc *cohd;
931 const struct coh901318_params *params;
927 struct scatterlist *sg; 932 struct scatterlist *sg;
928 int len = 0; 933 int len = 0;
929 int size; 934 int size;
@@ -931,7 +936,9 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
931 u32 ctrl_chained = cohc_chan_param(cohc)->ctrl_lli_chained; 936 u32 ctrl_chained = cohc_chan_param(cohc)->ctrl_lli_chained;
932 u32 ctrl = cohc_chan_param(cohc)->ctrl_lli; 937 u32 ctrl = cohc_chan_param(cohc)->ctrl_lli;
933 u32 ctrl_last = cohc_chan_param(cohc)->ctrl_lli_last; 938 u32 ctrl_last = cohc_chan_param(cohc)->ctrl_lli_last;
939 u32 config;
934 unsigned long flg; 940 unsigned long flg;
941 int ret;
935 942
936 if (!sgl) 943 if (!sgl)
937 goto out; 944 goto out;
@@ -947,15 +954,14 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
947 /* Trigger interrupt after last lli */ 954 /* Trigger interrupt after last lli */
948 ctrl_last |= COH901318_CX_CTRL_TC_IRQ_ENABLE; 955 ctrl_last |= COH901318_CX_CTRL_TC_IRQ_ENABLE;
949 956
950 cohd = coh901318_desc_get(cohc); 957 params = cohc_chan_param(cohc);
951 cohd->sg = NULL; 958 config = params->config;
952 cohd->sg_len = 0;
953 cohd->dir = direction;
954 959
955 if (direction == DMA_TO_DEVICE) { 960 if (direction == DMA_TO_DEVICE) {
956 u32 tx_flags = COH901318_CX_CTRL_PRDD_SOURCE | 961 u32 tx_flags = COH901318_CX_CTRL_PRDD_SOURCE |
957 COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE; 962 COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE;
958 963
964 config |= COH901318_CX_CFG_RM_MEMORY_TO_PRIMARY;
959 ctrl_chained |= tx_flags; 965 ctrl_chained |= tx_flags;
960 ctrl_last |= tx_flags; 966 ctrl_last |= tx_flags;
961 ctrl |= tx_flags; 967 ctrl |= tx_flags;
@@ -963,16 +969,14 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
963 u32 rx_flags = COH901318_CX_CTRL_PRDD_DEST | 969 u32 rx_flags = COH901318_CX_CTRL_PRDD_DEST |
964 COH901318_CX_CTRL_DST_ADDR_INC_ENABLE; 970 COH901318_CX_CTRL_DST_ADDR_INC_ENABLE;
965 971
972 config |= COH901318_CX_CFG_RM_PRIMARY_TO_MEMORY;
966 ctrl_chained |= rx_flags; 973 ctrl_chained |= rx_flags;
967 ctrl_last |= rx_flags; 974 ctrl_last |= rx_flags;
968 ctrl |= rx_flags; 975 ctrl |= rx_flags;
969 } else 976 } else
970 goto err_direction; 977 goto err_direction;
971 978
972 dma_async_tx_descriptor_init(&cohd->desc, chan); 979 coh901318_set_conf(cohc, config);
973
974 cohd->desc.tx_submit = coh901318_tx_submit;
975
976 980
 977	/* The DMA only supports transmitting packets up to	 981	/* The DMA only supports transmitting packets up to
 978	 * MAX_DMA_PACKET_SIZE. Calculate the total number of	 982	 * MAX_DMA_PACKET_SIZE. Calculate the total number of
@@ -994,32 +998,37 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
994 len += factor; 998 len += factor;
995 } 999 }
996 1000
1001 pr_debug("Allocate %d lli:s for this transfer\n", len);
997 data = coh901318_lli_alloc(&cohc->base->pool, len); 1002 data = coh901318_lli_alloc(&cohc->base->pool, len);
998 1003
999 if (data == NULL) 1004 if (data == NULL)
1000 goto err_dma_alloc; 1005 goto err_dma_alloc;
1001 1006
1002	/* initialize the allocated data list */	1007	/* initialize the allocated data list */
1003 cohd->pending_irqs = 1008 ret = coh901318_lli_fill_sg(&cohc->base->pool, data, sgl, sg_len,
1004 coh901318_lli_fill_sg(&cohc->base->pool, data, sgl, sg_len, 1009 cohc_dev_addr(cohc),
1005 cohc_dev_addr(cohc), 1010 ctrl_chained,
1006 ctrl_chained, 1011 ctrl,
1007 ctrl, 1012 ctrl_last,
1008 ctrl_last, 1013 direction, COH901318_CX_CTRL_TC_IRQ_ENABLE);
1009 direction, COH901318_CX_CTRL_TC_IRQ_ENABLE); 1014 if (ret)
1010 cohd->data = data; 1015 goto err_lli_fill;
1011
1012 cohd->flags = flags;
1013 1016
1014 COH_DBG(coh901318_list_print(cohc, data)); 1017 COH_DBG(coh901318_list_print(cohc, data));
1015 1018
1019 /* Pick a descriptor to handle this transfer */
1020 cohd = coh901318_desc_get(cohc);
1021 cohd->dir = direction;
1022 cohd->flags = flags;
1023 cohd->desc.tx_submit = coh901318_tx_submit;
1024 cohd->data = data;
1025
1016 spin_unlock_irqrestore(&cohc->lock, flg); 1026 spin_unlock_irqrestore(&cohc->lock, flg);
1017 1027
1018 return &cohd->desc; 1028 return &cohd->desc;
1029 err_lli_fill:
1019 err_dma_alloc: 1030 err_dma_alloc:
1020 err_direction: 1031 err_direction:
1021 coh901318_desc_remove(cohd);
1022 coh901318_desc_free(cohc, cohd);
1023 spin_unlock_irqrestore(&cohc->lock, flg); 1032 spin_unlock_irqrestore(&cohc->lock, flg);
1024 out: 1033 out:
1025 return NULL; 1034 return NULL;
@@ -1092,9 +1101,8 @@ coh901318_terminate_all(struct dma_chan *chan)
1092 /* release the lli allocation*/ 1101 /* release the lli allocation*/
1093 coh901318_lli_free(&cohc->base->pool, &cohd->data); 1102 coh901318_lli_free(&cohc->base->pool, &cohd->data);
1094 1103
1095 coh901318_desc_remove(cohd);
1096
1097 /* return desc to free-list */ 1104 /* return desc to free-list */
1105 coh901318_desc_remove(cohd);
1098 coh901318_desc_free(cohc, cohd); 1106 coh901318_desc_free(cohc, cohd);
1099 } 1107 }
1100 1108
@@ -1102,16 +1110,14 @@ coh901318_terminate_all(struct dma_chan *chan)
1102 /* release the lli allocation*/ 1110 /* release the lli allocation*/
1103 coh901318_lli_free(&cohc->base->pool, &cohd->data); 1111 coh901318_lli_free(&cohc->base->pool, &cohd->data);
1104 1112
1105 coh901318_desc_remove(cohd);
1106
1107 /* return desc to free-list */ 1113 /* return desc to free-list */
1114 coh901318_desc_remove(cohd);
1108 coh901318_desc_free(cohc, cohd); 1115 coh901318_desc_free(cohc, cohd);
1109 } 1116 }
1110 1117
1111 1118
1112 cohc->nbr_active_done = 0; 1119 cohc->nbr_active_done = 0;
1113 cohc->busy = 0; 1120 cohc->busy = 0;
1114 cohc->pending_irqs = 0;
1115 1121
1116 spin_unlock_irqrestore(&cohc->lock, flags); 1122 spin_unlock_irqrestore(&cohc->lock, flags);
1117} 1123}
@@ -1138,7 +1144,6 @@ void coh901318_base_init(struct dma_device *dma, const int *pick_chans,
1138 1144
1139 spin_lock_init(&cohc->lock); 1145 spin_lock_init(&cohc->lock);
1140 1146
1141 cohc->pending_irqs = 0;
1142 cohc->nbr_active_done = 0; 1147 cohc->nbr_active_done = 0;
1143 cohc->busy = 0; 1148 cohc->busy = 0;
1144 INIT_LIST_HEAD(&cohc->free); 1149 INIT_LIST_HEAD(&cohc->free);
@@ -1254,12 +1259,17 @@ static int __init coh901318_probe(struct platform_device *pdev)
1254 base->dma_memcpy.device_issue_pending = coh901318_issue_pending; 1259 base->dma_memcpy.device_issue_pending = coh901318_issue_pending;
1255 base->dma_memcpy.device_terminate_all = coh901318_terminate_all; 1260 base->dma_memcpy.device_terminate_all = coh901318_terminate_all;
1256 base->dma_memcpy.dev = &pdev->dev; 1261 base->dma_memcpy.dev = &pdev->dev;
1262	/*
1263	 * This controller can only access addresses aligned to 32-bit
1264	 * (4-byte) boundaries, i.e. 2^2
1265	 */
1266 base->dma_memcpy.copy_align = 2;
1257 err = dma_async_device_register(&base->dma_memcpy); 1267 err = dma_async_device_register(&base->dma_memcpy);
1258 1268
1259 if (err) 1269 if (err)
1260 goto err_register_memcpy; 1270 goto err_register_memcpy;
1261 1271
1262 dev_dbg(&pdev->dev, "Initialized COH901318 DMA on virtual base 0x%08x\n", 1272 dev_info(&pdev->dev, "Initialized COH901318 DMA on virtual base 0x%08x\n",
1263 (u32) base->virtbase); 1273 (u32) base->virtbase);
1264 1274
1265 return err; 1275 return err;
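
The copy_align field set above is the alignment shift that dmaengine clients are expected to honour before issuing a memcpy. A minimal sketch of a client-side check (my_addrs_ok is a hypothetical helper, not part of the patch):

#include <linux/dmaengine.h>

/* copy_align == 2 means buffers must sit on 1 << 2 = 4-byte boundaries */
static bool my_addrs_ok(struct dma_device *dev, dma_addr_t src, dma_addr_t dst)
{
	u32 mask = (1 << dev->copy_align) - 1;

	return ((src | dst) & mask) == 0;
}
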
diff --git a/drivers/dma/coh901318_lli.c b/drivers/dma/coh901318_lli.c
index f5120f238a4d..71d58c1a1e86 100644
--- a/drivers/dma/coh901318_lli.c
+++ b/drivers/dma/coh901318_lli.c
@@ -74,6 +74,8 @@ coh901318_lli_alloc(struct coh901318_pool *pool, unsigned int len)
74 74
75 lli = head; 75 lli = head;
76 lli->phy_this = phy; 76 lli->phy_this = phy;
77 lli->link_addr = 0x00000000;
78 lli->virt_link_addr = 0x00000000U;
77 79
78 for (i = 1; i < len; i++) { 80 for (i = 1; i < len; i++) {
79 lli_prev = lli; 81 lli_prev = lli;
@@ -85,13 +87,13 @@ coh901318_lli_alloc(struct coh901318_pool *pool, unsigned int len)
85 87
86 DEBUGFS_POOL_COUNTER_ADD(pool, 1); 88 DEBUGFS_POOL_COUNTER_ADD(pool, 1);
87 lli->phy_this = phy; 89 lli->phy_this = phy;
90 lli->link_addr = 0x00000000;
91 lli->virt_link_addr = 0x00000000U;
88 92
89 lli_prev->link_addr = phy; 93 lli_prev->link_addr = phy;
90 lli_prev->virt_link_addr = lli; 94 lli_prev->virt_link_addr = lli;
91 } 95 }
92 96
93 lli->link_addr = 0x00000000U;
94
95 spin_unlock(&pool->lock); 97 spin_unlock(&pool->lock);
96 98
97 return head; 99 return head;
@@ -166,8 +168,7 @@ coh901318_lli_fill_memcpy(struct coh901318_pool *pool,
166 lli->src_addr = src; 168 lli->src_addr = src;
167 lli->dst_addr = dst; 169 lli->dst_addr = dst;
168 170
169 /* One irq per single transfer */ 171 return 0;
170 return 1;
171} 172}
172 173
173int 174int
@@ -223,8 +224,7 @@ coh901318_lli_fill_single(struct coh901318_pool *pool,
223 lli->src_addr = src; 224 lli->src_addr = src;
224 lli->dst_addr = dst; 225 lli->dst_addr = dst;
225 226
226 /* One irq per single transfer */ 227 return 0;
227 return 1;
228} 228}
229 229
230int 230int
@@ -240,7 +240,6 @@ coh901318_lli_fill_sg(struct coh901318_pool *pool,
240 u32 ctrl_sg; 240 u32 ctrl_sg;
241 dma_addr_t src = 0; 241 dma_addr_t src = 0;
242 dma_addr_t dst = 0; 242 dma_addr_t dst = 0;
243 int nbr_of_irq = 0;
244 u32 bytes_to_transfer; 243 u32 bytes_to_transfer;
245 u32 elem_size; 244 u32 elem_size;
246 245
@@ -269,15 +268,12 @@ coh901318_lli_fill_sg(struct coh901318_pool *pool,
269 ctrl_sg = ctrl ? ctrl : ctrl_last; 268 ctrl_sg = ctrl ? ctrl : ctrl_last;
270 269
271 270
272 if ((ctrl_sg & ctrl_irq_mask))
273 nbr_of_irq++;
274
275 if (dir == DMA_TO_DEVICE) 271 if (dir == DMA_TO_DEVICE)
276 /* increment source address */ 272 /* increment source address */
277 src = sg_dma_address(sg); 273 src = sg_phys(sg);
278 else 274 else
279 /* increment destination address */ 275 /* increment destination address */
280 dst = sg_dma_address(sg); 276 dst = sg_phys(sg);
281 277
282 bytes_to_transfer = sg_dma_len(sg); 278 bytes_to_transfer = sg_dma_len(sg);
283 279
@@ -310,8 +306,7 @@ coh901318_lli_fill_sg(struct coh901318_pool *pool,
310 } 306 }
311 spin_unlock(&pool->lock); 307 spin_unlock(&pool->lock);
312 308
313 /* There can be many IRQs per sg transfer */ 309 return 0;
314 return nbr_of_irq;
315 err: 310 err:
316 spin_unlock(&pool->lock); 311 spin_unlock(&pool->lock);
317 return -EINVAL; 312 return -EINVAL;
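
One note on the sg_dma_address() -> sg_phys() swap in the hunk above: sg_dma_address() is only valid once dma_map_sg() has run on the list, while sg_phys() always yields the CPU-physical address of the entry. A hedged illustration (pick_addr is hypothetical):

#include <linux/scatterlist.h>

static dma_addr_t pick_addr(struct scatterlist *sg, bool mapped)
{
	if (mapped)		/* i.e. after dma_map_sg() succeeded */
		return sg_dma_address(sg);
	return sg_phys(sg);	/* raw CPU-physical address */
}
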
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index e7a3230fb7d5..87399cafce37 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -284,7 +284,7 @@ struct dma_chan_tbl_ent {
284/** 284/**
285 * channel_table - percpu lookup table for memory-to-memory offload providers 285 * channel_table - percpu lookup table for memory-to-memory offload providers
286 */ 286 */
287static struct dma_chan_tbl_ent *channel_table[DMA_TX_TYPE_END]; 287static struct dma_chan_tbl_ent __percpu *channel_table[DMA_TX_TYPE_END];
288 288
289static int __init dma_channel_table_init(void) 289static int __init dma_channel_table_init(void)
290{ 290{
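
The __percpu marker added above is a sparse annotation: direct dereferences of such a pointer get flagged, and access has to go through the per-cpu accessors. A minimal sketch with a hypothetical counter, not from the patch:

#include <linux/percpu.h>

static int __percpu *my_counter;

static int __init my_counter_init(void)
{
	int cpu;

	my_counter = alloc_percpu(int);
	if (!my_counter)
		return -ENOMEM;

	for_each_possible_cpu(cpu)
		*per_cpu_ptr(my_counter, cpu) = 0;	/* never *my_counter */
	return 0;
}
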
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index 948d563941c9..6fa55fe3dd24 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -237,7 +237,7 @@ static int dmatest_func(void *data)
237 dma_cookie_t cookie; 237 dma_cookie_t cookie;
238 enum dma_status status; 238 enum dma_status status;
239 enum dma_ctrl_flags flags; 239 enum dma_ctrl_flags flags;
240 u8 pq_coefs[pq_sources]; 240 u8 pq_coefs[pq_sources + 1];
241 int ret; 241 int ret;
242 int src_cnt; 242 int src_cnt;
243 int dst_cnt; 243 int dst_cnt;
@@ -257,7 +257,7 @@ static int dmatest_func(void *data)
257 } else if (thread->type == DMA_PQ) { 257 } else if (thread->type == DMA_PQ) {
258 src_cnt = pq_sources | 1; /* force odd to ensure dst = src */ 258 src_cnt = pq_sources | 1; /* force odd to ensure dst = src */
259 dst_cnt = 2; 259 dst_cnt = 2;
260 for (i = 0; i < pq_sources; i++) 260 for (i = 0; i < src_cnt; i++)
261 pq_coefs[i] = 1; 261 pq_coefs[i] = 1;
262 } else 262 } else
263 goto err_srcs; 263 goto err_srcs;
@@ -347,7 +347,7 @@ static int dmatest_func(void *data)
347 else if (thread->type == DMA_XOR) 347 else if (thread->type == DMA_XOR)
348 tx = dev->device_prep_dma_xor(chan, 348 tx = dev->device_prep_dma_xor(chan,
349 dma_dsts[0] + dst_off, 349 dma_dsts[0] + dst_off,
350 dma_srcs, xor_sources, 350 dma_srcs, src_cnt,
351 len, flags); 351 len, flags);
352 else if (thread->type == DMA_PQ) { 352 else if (thread->type == DMA_PQ) {
353 dma_addr_t dma_pq[dst_cnt]; 353 dma_addr_t dma_pq[dst_cnt];
@@ -355,7 +355,7 @@ static int dmatest_func(void *data)
355 for (i = 0; i < dst_cnt; i++) 355 for (i = 0; i < dst_cnt; i++)
356 dma_pq[i] = dma_dsts[i] + dst_off; 356 dma_pq[i] = dma_dsts[i] + dst_off;
357 tx = dev->device_prep_dma_pq(chan, dma_pq, dma_srcs, 357 tx = dev->device_prep_dma_pq(chan, dma_pq, dma_srcs,
358 pq_sources, pq_coefs, 358 src_cnt, pq_coefs,
359 len, flags); 359 len, flags);
360 } 360 }
361 361
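
The sizing fix above exists because src_cnt is forced odd: an even pq_sources module parameter makes the loop write one more coefficient than the parameter suggests. A worked sketch with example values (not taken from the test itself):

#include <linux/types.h>

static void pq_sizing_example(void)
{
	unsigned int pq_sources = 4;		/* even module parameter */
	unsigned int src_cnt = pq_sources | 1;	/* forced odd: 5 */
	u8 pq_coefs[pq_sources + 1];		/* 5 slots, index 4 valid */
	unsigned int i;

	for (i = 0; i < src_cnt; i++)		/* writes 5 entries */
		pq_coefs[i] = 1;
}
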
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index 296f9e747fac..bbb4be5a3ff4 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -37,19 +37,19 @@
37#include <asm/fsldma.h> 37#include <asm/fsldma.h>
38#include "fsldma.h" 38#include "fsldma.h"
39 39
40static void dma_init(struct fsl_dma_chan *fsl_chan) 40static void dma_init(struct fsldma_chan *chan)
41{ 41{
42 /* Reset the channel */ 42 /* Reset the channel */
43 DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, 0, 32); 43 DMA_OUT(chan, &chan->regs->mr, 0, 32);
44 44
45 switch (fsl_chan->feature & FSL_DMA_IP_MASK) { 45 switch (chan->feature & FSL_DMA_IP_MASK) {
46 case FSL_DMA_IP_85XX: 46 case FSL_DMA_IP_85XX:
  47	/* Set the channel to the modes below:	  47	/* Set the channel to the modes below:
48 * EIE - Error interrupt enable 48 * EIE - Error interrupt enable
49 * EOSIE - End of segments interrupt enable (basic mode) 49 * EOSIE - End of segments interrupt enable (basic mode)
50 * EOLNIE - End of links interrupt enable 50 * EOLNIE - End of links interrupt enable
51 */ 51 */
52 DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, FSL_DMA_MR_EIE 52 DMA_OUT(chan, &chan->regs->mr, FSL_DMA_MR_EIE
53 | FSL_DMA_MR_EOLNIE | FSL_DMA_MR_EOSIE, 32); 53 | FSL_DMA_MR_EOLNIE | FSL_DMA_MR_EOSIE, 32);
54 break; 54 break;
55 case FSL_DMA_IP_83XX: 55 case FSL_DMA_IP_83XX:
@@ -57,170 +57,146 @@ static void dma_init(struct fsl_dma_chan *fsl_chan)
57 * EOTIE - End-of-transfer interrupt enable 57 * EOTIE - End-of-transfer interrupt enable
58 * PRC_RM - PCI read multiple 58 * PRC_RM - PCI read multiple
59 */ 59 */
60 DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, FSL_DMA_MR_EOTIE 60 DMA_OUT(chan, &chan->regs->mr, FSL_DMA_MR_EOTIE
61 | FSL_DMA_MR_PRC_RM, 32); 61 | FSL_DMA_MR_PRC_RM, 32);
62 break; 62 break;
63 } 63 }
64
65} 64}
66 65
67static void set_sr(struct fsl_dma_chan *fsl_chan, u32 val) 66static void set_sr(struct fsldma_chan *chan, u32 val)
68{ 67{
69 DMA_OUT(fsl_chan, &fsl_chan->reg_base->sr, val, 32); 68 DMA_OUT(chan, &chan->regs->sr, val, 32);
70} 69}
71 70
72static u32 get_sr(struct fsl_dma_chan *fsl_chan) 71static u32 get_sr(struct fsldma_chan *chan)
73{ 72{
74 return DMA_IN(fsl_chan, &fsl_chan->reg_base->sr, 32); 73 return DMA_IN(chan, &chan->regs->sr, 32);
75} 74}
76 75
77static void set_desc_cnt(struct fsl_dma_chan *fsl_chan, 76static void set_desc_cnt(struct fsldma_chan *chan,
78 struct fsl_dma_ld_hw *hw, u32 count) 77 struct fsl_dma_ld_hw *hw, u32 count)
79{ 78{
80 hw->count = CPU_TO_DMA(fsl_chan, count, 32); 79 hw->count = CPU_TO_DMA(chan, count, 32);
81} 80}
82 81
83static void set_desc_src(struct fsl_dma_chan *fsl_chan, 82static void set_desc_src(struct fsldma_chan *chan,
84 struct fsl_dma_ld_hw *hw, dma_addr_t src) 83 struct fsl_dma_ld_hw *hw, dma_addr_t src)
85{ 84{
86 u64 snoop_bits; 85 u64 snoop_bits;
87 86
88 snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) 87 snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
89 ? ((u64)FSL_DMA_SATR_SREADTYPE_SNOOP_READ << 32) : 0; 88 ? ((u64)FSL_DMA_SATR_SREADTYPE_SNOOP_READ << 32) : 0;
90 hw->src_addr = CPU_TO_DMA(fsl_chan, snoop_bits | src, 64); 89 hw->src_addr = CPU_TO_DMA(chan, snoop_bits | src, 64);
91} 90}
92 91
93static void set_desc_dest(struct fsl_dma_chan *fsl_chan, 92static void set_desc_dst(struct fsldma_chan *chan,
94 struct fsl_dma_ld_hw *hw, dma_addr_t dest) 93 struct fsl_dma_ld_hw *hw, dma_addr_t dst)
95{ 94{
96 u64 snoop_bits; 95 u64 snoop_bits;
97 96
98 snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) 97 snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
99 ? ((u64)FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE << 32) : 0; 98 ? ((u64)FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE << 32) : 0;
100 hw->dst_addr = CPU_TO_DMA(fsl_chan, snoop_bits | dest, 64); 99 hw->dst_addr = CPU_TO_DMA(chan, snoop_bits | dst, 64);
101} 100}
102 101
103static void set_desc_next(struct fsl_dma_chan *fsl_chan, 102static void set_desc_next(struct fsldma_chan *chan,
104 struct fsl_dma_ld_hw *hw, dma_addr_t next) 103 struct fsl_dma_ld_hw *hw, dma_addr_t next)
105{ 104{
106 u64 snoop_bits; 105 u64 snoop_bits;
107 106
108 snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX) 107 snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
109 ? FSL_DMA_SNEN : 0; 108 ? FSL_DMA_SNEN : 0;
110 hw->next_ln_addr = CPU_TO_DMA(fsl_chan, snoop_bits | next, 64); 109 hw->next_ln_addr = CPU_TO_DMA(chan, snoop_bits | next, 64);
111}
112
113static void set_cdar(struct fsl_dma_chan *fsl_chan, dma_addr_t addr)
114{
115 DMA_OUT(fsl_chan, &fsl_chan->reg_base->cdar, addr | FSL_DMA_SNEN, 64);
116} 110}
117 111
118static dma_addr_t get_cdar(struct fsl_dma_chan *fsl_chan) 112static void set_cdar(struct fsldma_chan *chan, dma_addr_t addr)
119{ 113{
120 return DMA_IN(fsl_chan, &fsl_chan->reg_base->cdar, 64) & ~FSL_DMA_SNEN; 114 DMA_OUT(chan, &chan->regs->cdar, addr | FSL_DMA_SNEN, 64);
121} 115}
122 116
123static void set_ndar(struct fsl_dma_chan *fsl_chan, dma_addr_t addr) 117static dma_addr_t get_cdar(struct fsldma_chan *chan)
124{ 118{
125 DMA_OUT(fsl_chan, &fsl_chan->reg_base->ndar, addr, 64); 119 return DMA_IN(chan, &chan->regs->cdar, 64) & ~FSL_DMA_SNEN;
126} 120}
127 121
128static dma_addr_t get_ndar(struct fsl_dma_chan *fsl_chan) 122static dma_addr_t get_ndar(struct fsldma_chan *chan)
129{ 123{
130 return DMA_IN(fsl_chan, &fsl_chan->reg_base->ndar, 64); 124 return DMA_IN(chan, &chan->regs->ndar, 64);
131} 125}
132 126
133static u32 get_bcr(struct fsl_dma_chan *fsl_chan) 127static u32 get_bcr(struct fsldma_chan *chan)
134{ 128{
135 return DMA_IN(fsl_chan, &fsl_chan->reg_base->bcr, 32); 129 return DMA_IN(chan, &chan->regs->bcr, 32);
136} 130}
137 131
138static int dma_is_idle(struct fsl_dma_chan *fsl_chan) 132static int dma_is_idle(struct fsldma_chan *chan)
139{ 133{
140 u32 sr = get_sr(fsl_chan); 134 u32 sr = get_sr(chan);
141 return (!(sr & FSL_DMA_SR_CB)) || (sr & FSL_DMA_SR_CH); 135 return (!(sr & FSL_DMA_SR_CB)) || (sr & FSL_DMA_SR_CH);
142} 136}
143 137
144static void dma_start(struct fsl_dma_chan *fsl_chan) 138static void dma_start(struct fsldma_chan *chan)
145{ 139{
146 u32 mr_set = 0; 140 u32 mode;
147 141
148 if (fsl_chan->feature & FSL_DMA_CHAN_PAUSE_EXT) { 142 mode = DMA_IN(chan, &chan->regs->mr, 32);
149 DMA_OUT(fsl_chan, &fsl_chan->reg_base->bcr, 0, 32); 143
150 mr_set |= FSL_DMA_MR_EMP_EN; 144 if ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) {
151 } else if ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) { 145 if (chan->feature & FSL_DMA_CHAN_PAUSE_EXT) {
152 DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, 146 DMA_OUT(chan, &chan->regs->bcr, 0, 32);
153 DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) 147 mode |= FSL_DMA_MR_EMP_EN;
154 & ~FSL_DMA_MR_EMP_EN, 32); 148 } else {
149 mode &= ~FSL_DMA_MR_EMP_EN;
150 }
155 } 151 }
156 152
157 if (fsl_chan->feature & FSL_DMA_CHAN_START_EXT) 153 if (chan->feature & FSL_DMA_CHAN_START_EXT)
158 mr_set |= FSL_DMA_MR_EMS_EN; 154 mode |= FSL_DMA_MR_EMS_EN;
159 else 155 else
160 mr_set |= FSL_DMA_MR_CS; 156 mode |= FSL_DMA_MR_CS;
161 157
162 DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, 158 DMA_OUT(chan, &chan->regs->mr, mode, 32);
163 DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32)
164 | mr_set, 32);
165} 159}
166 160
167static void dma_halt(struct fsl_dma_chan *fsl_chan) 161static void dma_halt(struct fsldma_chan *chan)
168{ 162{
163 u32 mode;
169 int i; 164 int i;
170 165
171 DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, 166 mode = DMA_IN(chan, &chan->regs->mr, 32);
172 DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) | FSL_DMA_MR_CA, 167 mode |= FSL_DMA_MR_CA;
173 32); 168 DMA_OUT(chan, &chan->regs->mr, mode, 32);
174 DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, 169
175 DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) & ~(FSL_DMA_MR_CS 170 mode &= ~(FSL_DMA_MR_CS | FSL_DMA_MR_EMS_EN | FSL_DMA_MR_CA);
176 | FSL_DMA_MR_EMS_EN | FSL_DMA_MR_CA), 32); 171 DMA_OUT(chan, &chan->regs->mr, mode, 32);
177 172
178 for (i = 0; i < 100; i++) { 173 for (i = 0; i < 100; i++) {
179 if (dma_is_idle(fsl_chan)) 174 if (dma_is_idle(chan))
180 break; 175 return;
176
181 udelay(10); 177 udelay(10);
182 } 178 }
183 if (i >= 100 && !dma_is_idle(fsl_chan)) 179
184 dev_err(fsl_chan->dev, "DMA halt timeout!\n"); 180 if (!dma_is_idle(chan))
181 dev_err(chan->dev, "DMA halt timeout!\n");
185} 182}
186 183
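
dma_start() and dma_halt() above, like the loop-size helpers later in this file, were reworked into an explicit read-modify-write instead of nesting DMA_IN() inside a DMA_OUT() call. The skeleton, sketched with the driver's own macros (FSL_DMA_MR_CS here is just an example bit):

u32 mode;

mode = DMA_IN(chan, &chan->regs->mr, 32);	/* read       */
mode |= FSL_DMA_MR_CS;				/* modify     */
DMA_OUT(chan, &chan->regs->mr, mode, 32);	/* write back */
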
187static void set_ld_eol(struct fsl_dma_chan *fsl_chan, 184static void set_ld_eol(struct fsldma_chan *chan,
188 struct fsl_desc_sw *desc) 185 struct fsl_desc_sw *desc)
189{ 186{
190 u64 snoop_bits; 187 u64 snoop_bits;
191 188
192 snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX) 189 snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
193 ? FSL_DMA_SNEN : 0; 190 ? FSL_DMA_SNEN : 0;
194 191
195 desc->hw.next_ln_addr = CPU_TO_DMA(fsl_chan, 192 desc->hw.next_ln_addr = CPU_TO_DMA(chan,
196 DMA_TO_CPU(fsl_chan, desc->hw.next_ln_addr, 64) | FSL_DMA_EOL 193 DMA_TO_CPU(chan, desc->hw.next_ln_addr, 64) | FSL_DMA_EOL
197 | snoop_bits, 64); 194 | snoop_bits, 64);
198} 195}
199 196
200static void append_ld_queue(struct fsl_dma_chan *fsl_chan,
201 struct fsl_desc_sw *new_desc)
202{
203 struct fsl_desc_sw *queue_tail = to_fsl_desc(fsl_chan->ld_queue.prev);
204
205 if (list_empty(&fsl_chan->ld_queue))
206 return;
207
208 /* Link to the new descriptor physical address and
209 * Enable End-of-segment interrupt for
210 * the last link descriptor.
211 * (the previous node's next link descriptor)
212 *
213 * For FSL_DMA_IP_83xx, the snoop enable bit need be set.
214 */
215 queue_tail->hw.next_ln_addr = CPU_TO_DMA(fsl_chan,
216 new_desc->async_tx.phys | FSL_DMA_EOSIE |
217 (((fsl_chan->feature & FSL_DMA_IP_MASK)
218 == FSL_DMA_IP_83XX) ? FSL_DMA_SNEN : 0), 64);
219}
220
221/** 197/**
222 * fsl_chan_set_src_loop_size - Set source address hold transfer size 198 * fsl_chan_set_src_loop_size - Set source address hold transfer size
223 * @fsl_chan : Freescale DMA channel 199 * @chan : Freescale DMA channel
224 * @size : Address loop size, 0 for disable loop 200 * @size : Address loop size, 0 for disable loop
225 * 201 *
 226	 * Set the source address hold transfer size. The source	 202	 * Set the source address hold transfer size. The source
@@ -229,29 +205,30 @@ static void append_ld_queue(struct fsl_dma_chan *fsl_chan,
229 * read data from SA, SA + 1, SA + 2, SA + 3, then loop back to SA, 205 * read data from SA, SA + 1, SA + 2, SA + 3, then loop back to SA,
230 * SA + 1 ... and so on. 206 * SA + 1 ... and so on.
231 */ 207 */
232static void fsl_chan_set_src_loop_size(struct fsl_dma_chan *fsl_chan, int size) 208static void fsl_chan_set_src_loop_size(struct fsldma_chan *chan, int size)
233{ 209{
210 u32 mode;
211
212 mode = DMA_IN(chan, &chan->regs->mr, 32);
213
234 switch (size) { 214 switch (size) {
235 case 0: 215 case 0:
236 DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, 216 mode &= ~FSL_DMA_MR_SAHE;
237 DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) &
238 (~FSL_DMA_MR_SAHE), 32);
239 break; 217 break;
240 case 1: 218 case 1:
241 case 2: 219 case 2:
242 case 4: 220 case 4:
243 case 8: 221 case 8:
244 DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, 222 mode |= FSL_DMA_MR_SAHE | (__ilog2(size) << 14);
245 DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) |
246 FSL_DMA_MR_SAHE | (__ilog2(size) << 14),
247 32);
248 break; 223 break;
249 } 224 }
225
226 DMA_OUT(chan, &chan->regs->mr, mode, 32);
250} 227}
251 228
252/** 229/**
253 * fsl_chan_set_dest_loop_size - Set destination address hold transfer size 230 * fsl_chan_set_dst_loop_size - Set destination address hold transfer size
254 * @fsl_chan : Freescale DMA channel 231 * @chan : Freescale DMA channel
255 * @size : Address loop size, 0 for disable loop 232 * @size : Address loop size, 0 for disable loop
256 * 233 *
 257	 * Set the destination address hold transfer size. The destination	 234	 * Set the destination address hold transfer size. The destination
@@ -260,29 +237,30 @@ static void fsl_chan_set_src_loop_size(struct fsl_dma_chan *fsl_chan, int size)
260 * write data to TA, TA + 1, TA + 2, TA + 3, then loop back to TA, 237 * write data to TA, TA + 1, TA + 2, TA + 3, then loop back to TA,
261 * TA + 1 ... and so on. 238 * TA + 1 ... and so on.
262 */ 239 */
263static void fsl_chan_set_dest_loop_size(struct fsl_dma_chan *fsl_chan, int size) 240static void fsl_chan_set_dst_loop_size(struct fsldma_chan *chan, int size)
264{ 241{
242 u32 mode;
243
244 mode = DMA_IN(chan, &chan->regs->mr, 32);
245
265 switch (size) { 246 switch (size) {
266 case 0: 247 case 0:
267 DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, 248 mode &= ~FSL_DMA_MR_DAHE;
268 DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) &
269 (~FSL_DMA_MR_DAHE), 32);
270 break; 249 break;
271 case 1: 250 case 1:
272 case 2: 251 case 2:
273 case 4: 252 case 4:
274 case 8: 253 case 8:
275 DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, 254 mode |= FSL_DMA_MR_DAHE | (__ilog2(size) << 16);
276 DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) |
277 FSL_DMA_MR_DAHE | (__ilog2(size) << 16),
278 32);
279 break; 255 break;
280 } 256 }
257
258 DMA_OUT(chan, &chan->regs->mr, mode, 32);
281} 259}
282 260
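
Both loop-size helpers store the hold size as its log2 in a field of the mode register. A worked example using the source-side shift from the code above (the exact field width in the 85xx register layout is an assumption here):

u32 mode = 0;
int size = 8;

mode |= FSL_DMA_MR_SAHE | (__ilog2(size) << 14);
/* __ilog2(8) == 3, so the field starting at bit 14 now holds 3 */
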
283/** 261/**
284 * fsl_chan_set_request_count - Set DMA Request Count for external control 262 * fsl_chan_set_request_count - Set DMA Request Count for external control
285 * @fsl_chan : Freescale DMA channel 263 * @chan : Freescale DMA channel
286 * @size : Number of bytes to transfer in a single request 264 * @size : Number of bytes to transfer in a single request
287 * 265 *
288 * The Freescale DMA channel can be controlled by the external signal DREQ#. 266 * The Freescale DMA channel can be controlled by the external signal DREQ#.
@@ -292,35 +270,38 @@ static void fsl_chan_set_dest_loop_size(struct fsl_dma_chan *fsl_chan, int size)
292 * 270 *
293 * A size of 0 disables external pause control. The maximum size is 1024. 271 * A size of 0 disables external pause control. The maximum size is 1024.
294 */ 272 */
295static void fsl_chan_set_request_count(struct fsl_dma_chan *fsl_chan, int size) 273static void fsl_chan_set_request_count(struct fsldma_chan *chan, int size)
296{ 274{
275 u32 mode;
276
297 BUG_ON(size > 1024); 277 BUG_ON(size > 1024);
298 DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, 278
299 DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) 279 mode = DMA_IN(chan, &chan->regs->mr, 32);
300 | ((__ilog2(size) << 24) & 0x0f000000), 280 mode |= (__ilog2(size) << 24) & 0x0f000000;
301 32); 281
282 DMA_OUT(chan, &chan->regs->mr, mode, 32);
302} 283}
303 284
304/** 285/**
305 * fsl_chan_toggle_ext_pause - Toggle channel external pause status 286 * fsl_chan_toggle_ext_pause - Toggle channel external pause status
306 * @fsl_chan : Freescale DMA channel 287 * @chan : Freescale DMA channel
307 * @enable : 0 is disabled, 1 is enabled. 288 * @enable : 0 is disabled, 1 is enabled.
308 * 289 *
309 * The Freescale DMA channel can be controlled by the external signal DREQ#. 290 * The Freescale DMA channel can be controlled by the external signal DREQ#.
310 * The DMA Request Count feature should be used in addition to this feature 291 * The DMA Request Count feature should be used in addition to this feature
311 * to set the number of bytes to transfer before pausing the channel. 292 * to set the number of bytes to transfer before pausing the channel.
312 */ 293 */
313static void fsl_chan_toggle_ext_pause(struct fsl_dma_chan *fsl_chan, int enable) 294static void fsl_chan_toggle_ext_pause(struct fsldma_chan *chan, int enable)
314{ 295{
315 if (enable) 296 if (enable)
316 fsl_chan->feature |= FSL_DMA_CHAN_PAUSE_EXT; 297 chan->feature |= FSL_DMA_CHAN_PAUSE_EXT;
317 else 298 else
318 fsl_chan->feature &= ~FSL_DMA_CHAN_PAUSE_EXT; 299 chan->feature &= ~FSL_DMA_CHAN_PAUSE_EXT;
319} 300}
320 301
321/** 302/**
322 * fsl_chan_toggle_ext_start - Toggle channel external start status 303 * fsl_chan_toggle_ext_start - Toggle channel external start status
323 * @fsl_chan : Freescale DMA channel 304 * @chan : Freescale DMA channel
324 * @enable : 0 is disabled, 1 is enabled. 305 * @enable : 0 is disabled, 1 is enabled.
325 * 306 *
 326	 * If external start is enabled, the channel can be started by an	 307	 * If external start is enabled, the channel can be started by an
@@ -328,141 +309,196 @@ static void fsl_chan_toggle_ext_pause(struct fsl_dma_chan *fsl_chan, int enable)
 328	 * transfer immediately. The DMA channel will wait for the	 309	 * transfer immediately. The DMA channel will wait for the
 329	 * control pin to be asserted.	 310	 * control pin to be asserted.
330 */ 311 */
331static void fsl_chan_toggle_ext_start(struct fsl_dma_chan *fsl_chan, int enable) 312static void fsl_chan_toggle_ext_start(struct fsldma_chan *chan, int enable)
332{ 313{
333 if (enable) 314 if (enable)
334 fsl_chan->feature |= FSL_DMA_CHAN_START_EXT; 315 chan->feature |= FSL_DMA_CHAN_START_EXT;
335 else 316 else
336 fsl_chan->feature &= ~FSL_DMA_CHAN_START_EXT; 317 chan->feature &= ~FSL_DMA_CHAN_START_EXT;
318}
319
320static void append_ld_queue(struct fsldma_chan *chan,
321 struct fsl_desc_sw *desc)
322{
323 struct fsl_desc_sw *tail = to_fsl_desc(chan->ld_pending.prev);
324
325 if (list_empty(&chan->ld_pending))
326 goto out_splice;
327
328 /*
329 * Add the hardware descriptor to the chain of hardware descriptors
330 * that already exists in memory.
331 *
332 * This will un-set the EOL bit of the existing transaction, and the
333 * last link in this transaction will become the EOL descriptor.
334 */
335 set_desc_next(chan, &tail->hw, desc->async_tx.phys);
336
337 /*
338 * Add the software descriptor and all children to the list
339 * of pending transactions
340 */
341out_splice:
342 list_splice_tail_init(&desc->tx_list, &chan->ld_pending);
337} 343}
338 344
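
append_ld_queue() above leans on list_splice_tail_init(): the call moves every node of the source list to the tail of the destination and leaves the source re-initialised and empty. A hedged sketch with hypothetical lists:

#include <linux/list.h>

static void splice_example(struct list_head *src, struct list_head *dst)
{
	list_splice_tail_init(src, dst);
	/* now: list_empty(src) is true; dst ends with src's old nodes */
}
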
339static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx) 345static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
340{ 346{
341 struct fsl_dma_chan *fsl_chan = to_fsl_chan(tx->chan); 347 struct fsldma_chan *chan = to_fsl_chan(tx->chan);
342 struct fsl_desc_sw *desc = tx_to_fsl_desc(tx); 348 struct fsl_desc_sw *desc = tx_to_fsl_desc(tx);
343 struct fsl_desc_sw *child; 349 struct fsl_desc_sw *child;
344 unsigned long flags; 350 unsigned long flags;
345 dma_cookie_t cookie; 351 dma_cookie_t cookie;
346 352
347 /* cookie increment and adding to ld_queue must be atomic */ 353 spin_lock_irqsave(&chan->desc_lock, flags);
348 spin_lock_irqsave(&fsl_chan->desc_lock, flags);
349 354
350 cookie = fsl_chan->common.cookie; 355 /*
356 * assign cookies to all of the software descriptors
357 * that make up this transaction
358 */
359 cookie = chan->common.cookie;
351 list_for_each_entry(child, &desc->tx_list, node) { 360 list_for_each_entry(child, &desc->tx_list, node) {
352 cookie++; 361 cookie++;
353 if (cookie < 0) 362 if (cookie < 0)
354 cookie = 1; 363 cookie = 1;
355 364
356 desc->async_tx.cookie = cookie; 365 child->async_tx.cookie = cookie;
357 } 366 }
358 367
359 fsl_chan->common.cookie = cookie; 368 chan->common.cookie = cookie;
360 append_ld_queue(fsl_chan, desc); 369
361 list_splice_init(&desc->tx_list, fsl_chan->ld_queue.prev); 370 /* put this transaction onto the tail of the pending queue */
371 append_ld_queue(chan, desc);
362 372
363 spin_unlock_irqrestore(&fsl_chan->desc_lock, flags); 373 spin_unlock_irqrestore(&chan->desc_lock, flags);
364 374
365 return cookie; 375 return cookie;
366} 376}
367 377
368/** 378/**
369 * fsl_dma_alloc_descriptor - Allocate descriptor from channel's DMA pool. 379 * fsl_dma_alloc_descriptor - Allocate descriptor from channel's DMA pool.
370 * @fsl_chan : Freescale DMA channel 380 * @chan : Freescale DMA channel
371 * 381 *
372 * Return - The descriptor allocated. NULL for failed. 382 * Return - The descriptor allocated. NULL for failed.
373 */ 383 */
374static struct fsl_desc_sw *fsl_dma_alloc_descriptor( 384static struct fsl_desc_sw *fsl_dma_alloc_descriptor(
375 struct fsl_dma_chan *fsl_chan) 385 struct fsldma_chan *chan)
376{ 386{
387 struct fsl_desc_sw *desc;
377 dma_addr_t pdesc; 388 dma_addr_t pdesc;
378 struct fsl_desc_sw *desc_sw; 389
379 390 desc = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &pdesc);
380 desc_sw = dma_pool_alloc(fsl_chan->desc_pool, GFP_ATOMIC, &pdesc); 391 if (!desc) {
381 if (desc_sw) { 392 dev_dbg(chan->dev, "out of memory for link desc\n");
382 memset(desc_sw, 0, sizeof(struct fsl_desc_sw)); 393 return NULL;
383 INIT_LIST_HEAD(&desc_sw->tx_list);
384 dma_async_tx_descriptor_init(&desc_sw->async_tx,
385 &fsl_chan->common);
386 desc_sw->async_tx.tx_submit = fsl_dma_tx_submit;
387 desc_sw->async_tx.phys = pdesc;
388 } 394 }
389 395
390 return desc_sw; 396 memset(desc, 0, sizeof(*desc));
397 INIT_LIST_HEAD(&desc->tx_list);
398 dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
399 desc->async_tx.tx_submit = fsl_dma_tx_submit;
400 desc->async_tx.phys = pdesc;
401
402 return desc;
391} 403}
392 404
393 405
394/** 406/**
395 * fsl_dma_alloc_chan_resources - Allocate resources for DMA channel. 407 * fsl_dma_alloc_chan_resources - Allocate resources for DMA channel.
396 * @fsl_chan : Freescale DMA channel 408 * @chan : Freescale DMA channel
397 * 409 *
398 * This function will create a dma pool for descriptor allocation. 410 * This function will create a dma pool for descriptor allocation.
399 * 411 *
400 * Return - The number of descriptors allocated. 412 * Return - The number of descriptors allocated.
401 */ 413 */
402static int fsl_dma_alloc_chan_resources(struct dma_chan *chan) 414static int fsl_dma_alloc_chan_resources(struct dma_chan *dchan)
403{ 415{
404 struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan); 416 struct fsldma_chan *chan = to_fsl_chan(dchan);
405 417
406 /* Has this channel already been allocated? */ 418 /* Has this channel already been allocated? */
407 if (fsl_chan->desc_pool) 419 if (chan->desc_pool)
408 return 1; 420 return 1;
409 421
 410	/* We need the descriptor to be aligned to 32bytes	 422	/*
 423	 * We need the descriptor to be aligned to 32 bytes
 411	 * for meeting FSL DMA specification requirement.	 424	 * to meet the FSL DMA specification requirement.
412 */ 425 */
413 fsl_chan->desc_pool = dma_pool_create("fsl_dma_engine_desc_pool", 426 chan->desc_pool = dma_pool_create("fsl_dma_engine_desc_pool",
414 fsl_chan->dev, sizeof(struct fsl_desc_sw), 427 chan->dev,
415 32, 0); 428 sizeof(struct fsl_desc_sw),
416 if (!fsl_chan->desc_pool) { 429 __alignof__(struct fsl_desc_sw), 0);
417 dev_err(fsl_chan->dev, "No memory for channel %d " 430 if (!chan->desc_pool) {
418 "descriptor dma pool.\n", fsl_chan->id); 431 dev_err(chan->dev, "unable to allocate channel %d "
419 return 0; 432 "descriptor pool\n", chan->id);
433 return -ENOMEM;
420 } 434 }
421 435
436 /* there is at least one descriptor free to be allocated */
422 return 1; 437 return 1;
423} 438}
424 439
425/** 440/**
426 * fsl_dma_free_chan_resources - Free all resources of the channel. 441 * fsldma_free_desc_list - Free all descriptors in a queue
427 * @fsl_chan : Freescale DMA channel 442 * @chan: Freescae DMA channel
443 * @list: the list to free
444 *
445 * LOCKING: must hold chan->desc_lock
428 */ 446 */
429static void fsl_dma_free_chan_resources(struct dma_chan *chan) 447static void fsldma_free_desc_list(struct fsldma_chan *chan,
448 struct list_head *list)
430{ 449{
431 struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan);
432 struct fsl_desc_sw *desc, *_desc; 450 struct fsl_desc_sw *desc, *_desc;
433 unsigned long flags;
434 451
435 dev_dbg(fsl_chan->dev, "Free all channel resources.\n"); 452 list_for_each_entry_safe(desc, _desc, list, node) {
436 spin_lock_irqsave(&fsl_chan->desc_lock, flags); 453 list_del(&desc->node);
437 list_for_each_entry_safe(desc, _desc, &fsl_chan->ld_queue, node) { 454 dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
438#ifdef FSL_DMA_LD_DEBUG 455 }
439 dev_dbg(fsl_chan->dev, 456}
440 "LD %p will be released.\n", desc); 457
441#endif 458static void fsldma_free_desc_list_reverse(struct fsldma_chan *chan,
459 struct list_head *list)
460{
461 struct fsl_desc_sw *desc, *_desc;
462
463 list_for_each_entry_safe_reverse(desc, _desc, list, node) {
442 list_del(&desc->node); 464 list_del(&desc->node);
443 /* free link descriptor */ 465 dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
444 dma_pool_free(fsl_chan->desc_pool, desc, desc->async_tx.phys);
445 } 466 }
446 spin_unlock_irqrestore(&fsl_chan->desc_lock, flags); 467}
447 dma_pool_destroy(fsl_chan->desc_pool); 468
469/**
470 * fsl_dma_free_chan_resources - Free all resources of the channel.
471 * @chan : Freescale DMA channel
472 */
473static void fsl_dma_free_chan_resources(struct dma_chan *dchan)
474{
475 struct fsldma_chan *chan = to_fsl_chan(dchan);
476 unsigned long flags;
477
478 dev_dbg(chan->dev, "Free all channel resources.\n");
479 spin_lock_irqsave(&chan->desc_lock, flags);
480 fsldma_free_desc_list(chan, &chan->ld_pending);
481 fsldma_free_desc_list(chan, &chan->ld_running);
482 spin_unlock_irqrestore(&chan->desc_lock, flags);
448 483
449 fsl_chan->desc_pool = NULL; 484 dma_pool_destroy(chan->desc_pool);
485 chan->desc_pool = NULL;
450} 486}
451 487
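
Both free-list helpers above need the _safe iterator variants because the loop body unlinks the current node. The generic shape, with a hypothetical item type:

#include <linux/list.h>
#include <linux/slab.h>

struct item {
	struct list_head node;
};

static void free_all(struct list_head *list)
{
	struct item *it, *tmp;	/* tmp caches the next node in advance */

	list_for_each_entry_safe(it, tmp, list, node) {
		list_del(&it->node);
		kfree(it);
	}
}
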
452static struct dma_async_tx_descriptor * 488static struct dma_async_tx_descriptor *
453fsl_dma_prep_interrupt(struct dma_chan *chan, unsigned long flags) 489fsl_dma_prep_interrupt(struct dma_chan *dchan, unsigned long flags)
454{ 490{
455 struct fsl_dma_chan *fsl_chan; 491 struct fsldma_chan *chan;
456 struct fsl_desc_sw *new; 492 struct fsl_desc_sw *new;
457 493
458 if (!chan) 494 if (!dchan)
459 return NULL; 495 return NULL;
460 496
461 fsl_chan = to_fsl_chan(chan); 497 chan = to_fsl_chan(dchan);
462 498
463 new = fsl_dma_alloc_descriptor(fsl_chan); 499 new = fsl_dma_alloc_descriptor(chan);
464 if (!new) { 500 if (!new) {
465 dev_err(fsl_chan->dev, "No free memory for link descriptor\n"); 501 dev_err(chan->dev, "No free memory for link descriptor\n");
466 return NULL; 502 return NULL;
467 } 503 }
468 504
@@ -473,51 +509,50 @@ fsl_dma_prep_interrupt(struct dma_chan *chan, unsigned long flags)
473 list_add_tail(&new->node, &new->tx_list); 509 list_add_tail(&new->node, &new->tx_list);
474 510
475 /* Set End-of-link to the last link descriptor of new list*/ 511 /* Set End-of-link to the last link descriptor of new list*/
476 set_ld_eol(fsl_chan, new); 512 set_ld_eol(chan, new);
477 513
478 return &new->async_tx; 514 return &new->async_tx;
479} 515}
480 516
481static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy( 517static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy(
482 struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src, 518 struct dma_chan *dchan, dma_addr_t dma_dst, dma_addr_t dma_src,
483 size_t len, unsigned long flags) 519 size_t len, unsigned long flags)
484{ 520{
485 struct fsl_dma_chan *fsl_chan; 521 struct fsldma_chan *chan;
486 struct fsl_desc_sw *first = NULL, *prev = NULL, *new; 522 struct fsl_desc_sw *first = NULL, *prev = NULL, *new;
487 struct list_head *list;
488 size_t copy; 523 size_t copy;
489 524
490 if (!chan) 525 if (!dchan)
491 return NULL; 526 return NULL;
492 527
493 if (!len) 528 if (!len)
494 return NULL; 529 return NULL;
495 530
496 fsl_chan = to_fsl_chan(chan); 531 chan = to_fsl_chan(dchan);
497 532
498 do { 533 do {
499 534
500 /* Allocate the link descriptor from DMA pool */ 535 /* Allocate the link descriptor from DMA pool */
501 new = fsl_dma_alloc_descriptor(fsl_chan); 536 new = fsl_dma_alloc_descriptor(chan);
502 if (!new) { 537 if (!new) {
503 dev_err(fsl_chan->dev, 538 dev_err(chan->dev,
504 "No free memory for link descriptor\n"); 539 "No free memory for link descriptor\n");
505 goto fail; 540 goto fail;
506 } 541 }
507#ifdef FSL_DMA_LD_DEBUG 542#ifdef FSL_DMA_LD_DEBUG
508 dev_dbg(fsl_chan->dev, "new link desc alloc %p\n", new); 543 dev_dbg(chan->dev, "new link desc alloc %p\n", new);
509#endif 544#endif
510 545
511 copy = min(len, (size_t)FSL_DMA_BCR_MAX_CNT); 546 copy = min(len, (size_t)FSL_DMA_BCR_MAX_CNT);
512 547
513 set_desc_cnt(fsl_chan, &new->hw, copy); 548 set_desc_cnt(chan, &new->hw, copy);
514 set_desc_src(fsl_chan, &new->hw, dma_src); 549 set_desc_src(chan, &new->hw, dma_src);
515 set_desc_dest(fsl_chan, &new->hw, dma_dest); 550 set_desc_dst(chan, &new->hw, dma_dst);
516 551
517 if (!first) 552 if (!first)
518 first = new; 553 first = new;
519 else 554 else
520 set_desc_next(fsl_chan, &prev->hw, new->async_tx.phys); 555 set_desc_next(chan, &prev->hw, new->async_tx.phys);
521 556
522 new->async_tx.cookie = 0; 557 new->async_tx.cookie = 0;
523 async_tx_ack(&new->async_tx); 558 async_tx_ack(&new->async_tx);
@@ -525,7 +560,7 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy(
525 prev = new; 560 prev = new;
526 len -= copy; 561 len -= copy;
527 dma_src += copy; 562 dma_src += copy;
528 dma_dest += copy; 563 dma_dst += copy;
529 564
530 /* Insert the link descriptor to the LD ring */ 565 /* Insert the link descriptor to the LD ring */
531 list_add_tail(&new->node, &first->tx_list); 566 list_add_tail(&new->node, &first->tx_list);
@@ -535,7 +570,7 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy(
535 new->async_tx.cookie = -EBUSY; 570 new->async_tx.cookie = -EBUSY;
536 571
537 /* Set End-of-link to the last link descriptor of new list*/ 572 /* Set End-of-link to the last link descriptor of new list*/
538 set_ld_eol(fsl_chan, new); 573 set_ld_eol(chan, new);
539 574
540 return &first->async_tx; 575 return &first->async_tx;
541 576
@@ -543,12 +578,7 @@ fail:
543 if (!first) 578 if (!first)
544 return NULL; 579 return NULL;
545 580
546 list = &first->tx_list; 581 fsldma_free_desc_list_reverse(chan, &first->tx_list);
547 list_for_each_entry_safe_reverse(new, prev, list, node) {
548 list_del(&new->node);
549 dma_pool_free(fsl_chan->desc_pool, new, new->async_tx.phys);
550 }
551
552 return NULL; 582 return NULL;
553} 583}
554 584
@@ -565,13 +595,12 @@ fail:
565 * chan->private variable. 595 * chan->private variable.
566 */ 596 */
567static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg( 597static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg(
568 struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len, 598 struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len,
569 enum dma_data_direction direction, unsigned long flags) 599 enum dma_data_direction direction, unsigned long flags)
570{ 600{
571 struct fsl_dma_chan *fsl_chan; 601 struct fsldma_chan *chan;
572 struct fsl_desc_sw *first = NULL, *prev = NULL, *new = NULL; 602 struct fsl_desc_sw *first = NULL, *prev = NULL, *new = NULL;
573 struct fsl_dma_slave *slave; 603 struct fsl_dma_slave *slave;
574 struct list_head *tx_list;
575 size_t copy; 604 size_t copy;
576 605
577 int i; 606 int i;
@@ -581,14 +610,14 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg(
581 struct fsl_dma_hw_addr *hw; 610 struct fsl_dma_hw_addr *hw;
582 dma_addr_t dma_dst, dma_src; 611 dma_addr_t dma_dst, dma_src;
583 612
584 if (!chan) 613 if (!dchan)
585 return NULL; 614 return NULL;
586 615
587 if (!chan->private) 616 if (!dchan->private)
588 return NULL; 617 return NULL;
589 618
590 fsl_chan = to_fsl_chan(chan); 619 chan = to_fsl_chan(dchan);
591 slave = chan->private; 620 slave = dchan->private;
592 621
593 if (list_empty(&slave->addresses)) 622 if (list_empty(&slave->addresses))
594 return NULL; 623 return NULL;
@@ -637,14 +666,14 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg(
637 } 666 }
638 667
639 /* Allocate the link descriptor from DMA pool */ 668 /* Allocate the link descriptor from DMA pool */
640 new = fsl_dma_alloc_descriptor(fsl_chan); 669 new = fsl_dma_alloc_descriptor(chan);
641 if (!new) { 670 if (!new) {
642 dev_err(fsl_chan->dev, "No free memory for " 671 dev_err(chan->dev, "No free memory for "
643 "link descriptor\n"); 672 "link descriptor\n");
644 goto fail; 673 goto fail;
645 } 674 }
646#ifdef FSL_DMA_LD_DEBUG 675#ifdef FSL_DMA_LD_DEBUG
647 dev_dbg(fsl_chan->dev, "new link desc alloc %p\n", new); 676 dev_dbg(chan->dev, "new link desc alloc %p\n", new);
648#endif 677#endif
649 678
650 /* 679 /*
@@ -671,9 +700,9 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg(
671 } 700 }
672 701
673 /* Fill in the descriptor */ 702 /* Fill in the descriptor */
674 set_desc_cnt(fsl_chan, &new->hw, copy); 703 set_desc_cnt(chan, &new->hw, copy);
675 set_desc_src(fsl_chan, &new->hw, dma_src); 704 set_desc_src(chan, &new->hw, dma_src);
676 set_desc_dest(fsl_chan, &new->hw, dma_dst); 705 set_desc_dst(chan, &new->hw, dma_dst);
677 706
678 /* 707 /*
679 * If this is not the first descriptor, chain the 708 * If this is not the first descriptor, chain the
@@ -682,7 +711,7 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg(
682 if (!first) { 711 if (!first) {
683 first = new; 712 first = new;
684 } else { 713 } else {
685 set_desc_next(fsl_chan, &prev->hw, 714 set_desc_next(chan, &prev->hw,
686 new->async_tx.phys); 715 new->async_tx.phys);
687 } 716 }
688 717
@@ -708,23 +737,23 @@ finished:
708 new->async_tx.cookie = -EBUSY; 737 new->async_tx.cookie = -EBUSY;
709 738
710 /* Set End-of-link to the last link descriptor of new list */ 739 /* Set End-of-link to the last link descriptor of new list */
711 set_ld_eol(fsl_chan, new); 740 set_ld_eol(chan, new);
712 741
713 /* Enable extra controller features */ 742 /* Enable extra controller features */
714 if (fsl_chan->set_src_loop_size) 743 if (chan->set_src_loop_size)
715 fsl_chan->set_src_loop_size(fsl_chan, slave->src_loop_size); 744 chan->set_src_loop_size(chan, slave->src_loop_size);
716 745
717 if (fsl_chan->set_dest_loop_size) 746 if (chan->set_dst_loop_size)
718 fsl_chan->set_dest_loop_size(fsl_chan, slave->dst_loop_size); 747 chan->set_dst_loop_size(chan, slave->dst_loop_size);
719 748
720 if (fsl_chan->toggle_ext_start) 749 if (chan->toggle_ext_start)
721 fsl_chan->toggle_ext_start(fsl_chan, slave->external_start); 750 chan->toggle_ext_start(chan, slave->external_start);
722 751
723 if (fsl_chan->toggle_ext_pause) 752 if (chan->toggle_ext_pause)
724 fsl_chan->toggle_ext_pause(fsl_chan, slave->external_pause); 753 chan->toggle_ext_pause(chan, slave->external_pause);
725 754
726 if (fsl_chan->set_request_count) 755 if (chan->set_request_count)
727 fsl_chan->set_request_count(fsl_chan, slave->request_count); 756 chan->set_request_count(chan, slave->request_count);
728 757
729 return &first->async_tx; 758 return &first->async_tx;
730 759
@@ -741,215 +770,216 @@ fail:
741 * 770 *
742 * We're re-using variables for the loop, oh well 771 * We're re-using variables for the loop, oh well
743 */ 772 */
744 tx_list = &first->tx_list; 773 fsldma_free_desc_list_reverse(chan, &first->tx_list);
745 list_for_each_entry_safe_reverse(new, prev, tx_list, node) {
746 list_del_init(&new->node);
747 dma_pool_free(fsl_chan->desc_pool, new, new->async_tx.phys);
748 }
749
750 return NULL; 774 return NULL;
751} 775}
752 776
753static void fsl_dma_device_terminate_all(struct dma_chan *chan) 777static void fsl_dma_device_terminate_all(struct dma_chan *dchan)
754{ 778{
755 struct fsl_dma_chan *fsl_chan; 779 struct fsldma_chan *chan;
756 struct fsl_desc_sw *desc, *tmp;
757 unsigned long flags; 780 unsigned long flags;
758 781
759 if (!chan) 782 if (!dchan)
760 return; 783 return;
761 784
762 fsl_chan = to_fsl_chan(chan); 785 chan = to_fsl_chan(dchan);
763 786
764 /* Halt the DMA engine */ 787 /* Halt the DMA engine */
765 dma_halt(fsl_chan); 788 dma_halt(chan);
766 789
767 spin_lock_irqsave(&fsl_chan->desc_lock, flags); 790 spin_lock_irqsave(&chan->desc_lock, flags);
768 791
769 /* Remove and free all of the descriptors in the LD queue */ 792 /* Remove and free all of the descriptors in the LD queue */
770 list_for_each_entry_safe(desc, tmp, &fsl_chan->ld_queue, node) { 793 fsldma_free_desc_list(chan, &chan->ld_pending);
771 list_del(&desc->node); 794 fsldma_free_desc_list(chan, &chan->ld_running);
772 dma_pool_free(fsl_chan->desc_pool, desc, desc->async_tx.phys);
773 }
774 795
775 spin_unlock_irqrestore(&fsl_chan->desc_lock, flags); 796 spin_unlock_irqrestore(&chan->desc_lock, flags);
776} 797}
777 798
778/** 799/**
779 * fsl_dma_update_completed_cookie - Update the completed cookie. 800 * fsl_dma_update_completed_cookie - Update the completed cookie.
780 * @fsl_chan : Freescale DMA channel 801 * @chan : Freescale DMA channel
802 *
803 * CONTEXT: hardirq
781 */ 804 */
782static void fsl_dma_update_completed_cookie(struct fsl_dma_chan *fsl_chan) 805static void fsl_dma_update_completed_cookie(struct fsldma_chan *chan)
783{ 806{
784 struct fsl_desc_sw *cur_desc, *desc; 807 struct fsl_desc_sw *desc;
785 dma_addr_t ld_phy; 808 unsigned long flags;
809 dma_cookie_t cookie;
786 810
787 ld_phy = get_cdar(fsl_chan) & FSL_DMA_NLDA_MASK; 811 spin_lock_irqsave(&chan->desc_lock, flags);
788 812
789 if (ld_phy) { 813 if (list_empty(&chan->ld_running)) {
790 cur_desc = NULL; 814 dev_dbg(chan->dev, "no running descriptors\n");
791 list_for_each_entry(desc, &fsl_chan->ld_queue, node) 815 goto out_unlock;
792 if (desc->async_tx.phys == ld_phy) { 816 }
793 cur_desc = desc;
794 break;
795 }
796 817
797 if (cur_desc && cur_desc->async_tx.cookie) { 818 /* Get the last descriptor, update the cookie to that */
798 if (dma_is_idle(fsl_chan)) 819 desc = to_fsl_desc(chan->ld_running.prev);
799 fsl_chan->completed_cookie = 820 if (dma_is_idle(chan))
800 cur_desc->async_tx.cookie; 821 cookie = desc->async_tx.cookie;
801 else 822 else {
802 fsl_chan->completed_cookie = 823 cookie = desc->async_tx.cookie - 1;
803 cur_desc->async_tx.cookie - 1; 824 if (unlikely(cookie < DMA_MIN_COOKIE))
804 } 825 cookie = DMA_MAX_COOKIE;
805 } 826 }
827
828 chan->completed_cookie = cookie;
829
830out_unlock:
831 spin_unlock_irqrestore(&chan->desc_lock, flags);
832}
833
834/**
835 * fsldma_desc_status - Check the status of a descriptor
836 * @chan: Freescale DMA channel
837 * @desc: DMA SW descriptor
838 *
839 * This function will return the status of the given descriptor
840 */
841static enum dma_status fsldma_desc_status(struct fsldma_chan *chan,
842 struct fsl_desc_sw *desc)
843{
844 return dma_async_is_complete(desc->async_tx.cookie,
845 chan->completed_cookie,
846 chan->common.cookie);
806} 847}
807 848
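
The cookie arithmetic in fsl_dma_update_completed_cookie() above covers the busy case: the last fully completed descriptor is one cookie behind the running one, and the decrement must not drop below the valid range. Worked values, assuming dmaengine's DMA_MIN_COOKIE is 1:

dma_cookie_t cookie = 1;		/* first valid cookie */

cookie--;				/* 0: below DMA_MIN_COOKIE */
if (unlikely(cookie < DMA_MIN_COOKIE))
	cookie = DMA_MAX_COOKIE;	/* wrap to the top of the range */
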
808/** 849/**
809 * fsl_chan_ld_cleanup - Clean up link descriptors 850 * fsl_chan_ld_cleanup - Clean up link descriptors
810 * @fsl_chan : Freescale DMA channel 851 * @chan : Freescale DMA channel
811 * 852 *
 812	 * This function cleans up the ld_queue of the DMA channel.	 853	 * This function cleans up the ld_queue of the DMA channel.
813 * If 'in_intr' is set, the function will move the link descriptor to
814 * the recycle list. Otherwise, free it directly.
815 */ 854 */
816static void fsl_chan_ld_cleanup(struct fsl_dma_chan *fsl_chan) 855static void fsl_chan_ld_cleanup(struct fsldma_chan *chan)
817{ 856{
818 struct fsl_desc_sw *desc, *_desc; 857 struct fsl_desc_sw *desc, *_desc;
819 unsigned long flags; 858 unsigned long flags;
820 859
821 spin_lock_irqsave(&fsl_chan->desc_lock, flags); 860 spin_lock_irqsave(&chan->desc_lock, flags);
822 861
823 dev_dbg(fsl_chan->dev, "chan completed_cookie = %d\n", 862 dev_dbg(chan->dev, "chan completed_cookie = %d\n", chan->completed_cookie);
824 fsl_chan->completed_cookie); 863 list_for_each_entry_safe(desc, _desc, &chan->ld_running, node) {
825 list_for_each_entry_safe(desc, _desc, &fsl_chan->ld_queue, node) {
826 dma_async_tx_callback callback; 864 dma_async_tx_callback callback;
827 void *callback_param; 865 void *callback_param;
828 866
829 if (dma_async_is_complete(desc->async_tx.cookie, 867 if (fsldma_desc_status(chan, desc) == DMA_IN_PROGRESS)
830 fsl_chan->completed_cookie, fsl_chan->common.cookie)
831 == DMA_IN_PROGRESS)
832 break; 868 break;
833 869
834 callback = desc->async_tx.callback; 870 /* Remove from the list of running transactions */
835 callback_param = desc->async_tx.callback_param;
836
837 /* Remove from ld_queue list */
838 list_del(&desc->node); 871 list_del(&desc->node);
839 872
840 dev_dbg(fsl_chan->dev, "link descriptor %p will be recycle.\n",
841 desc);
842 dma_pool_free(fsl_chan->desc_pool, desc, desc->async_tx.phys);
843
844 /* Run the link descriptor callback function */ 873 /* Run the link descriptor callback function */
874 callback = desc->async_tx.callback;
875 callback_param = desc->async_tx.callback_param;
845 if (callback) { 876 if (callback) {
846 spin_unlock_irqrestore(&fsl_chan->desc_lock, flags); 877 spin_unlock_irqrestore(&chan->desc_lock, flags);
847 dev_dbg(fsl_chan->dev, "link descriptor %p callback\n", 878 dev_dbg(chan->dev, "LD %p callback\n", desc);
848 desc);
849 callback(callback_param); 879 callback(callback_param);
850 spin_lock_irqsave(&fsl_chan->desc_lock, flags); 880 spin_lock_irqsave(&chan->desc_lock, flags);
851 } 881 }
882
883 /* Run any dependencies, then free the descriptor */
884 dma_run_dependencies(&desc->async_tx);
885 dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
852 } 886 }
853 spin_unlock_irqrestore(&fsl_chan->desc_lock, flags); 887
888 spin_unlock_irqrestore(&chan->desc_lock, flags);
854} 889}
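fsl_chan_ld_cleanup() drops desc_lock around each completion callback because a callback is allowed to submit new descriptors, which would re-acquire the same lock and deadlock. A condensed sketch of the idiom, with hypothetical chan/desc types and desc_is_complete()/free_desc() helpers standing in for the driver's:

    /* hypothetical types; only the locking shape matters here */
    static void cleanup_completed(struct chan *chan)
    {
            struct desc *desc, *tmp;
            unsigned long flags;

            spin_lock_irqsave(&chan->lock, flags);
            list_for_each_entry_safe(desc, tmp, &chan->running, node) {
                    if (!desc_is_complete(desc))
                            break;
                    list_del(&desc->node);
                    if (desc->callback) {
                            /* never call out with the lock held: the
                             * callback may submit more work and take
                             * chan->lock again */
                            spin_unlock_irqrestore(&chan->lock, flags);
                            desc->callback(desc->callback_param);
                            spin_lock_irqsave(&chan->lock, flags);
                    }
                    free_desc(chan, desc);
            }
            spin_unlock_irqrestore(&chan->lock, flags);
    }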
855 890
856/** 891/**
857 * fsl_chan_xfer_ld_queue - Transfer link descriptors in channel ld_queue. 892 * fsl_chan_xfer_ld_queue - transfer any pending transactions
858 * @fsl_chan : Freescale DMA channel 893 * @chan : Freescale DMA channel
894 *
895 * This will make sure that any pending transactions will be run.
896 * If the DMA controller is idle, it will be started. Otherwise,
897 * the DMA controller's interrupt handler will start any pending
898 * transactions when it becomes idle.
859 */ 899 */
860static void fsl_chan_xfer_ld_queue(struct fsl_dma_chan *fsl_chan) 900static void fsl_chan_xfer_ld_queue(struct fsldma_chan *chan)
861{ 901{
862 struct list_head *ld_node; 902 struct fsl_desc_sw *desc;
863 dma_addr_t next_dest_addr;
864 unsigned long flags; 903 unsigned long flags;
865 904
866 spin_lock_irqsave(&fsl_chan->desc_lock, flags); 905 spin_lock_irqsave(&chan->desc_lock, flags);
867 906
868 if (!dma_is_idle(fsl_chan)) 907 /*
908 * If the list of pending descriptors is empty, then we
909 * don't need to do any work at all
910 */
911 if (list_empty(&chan->ld_pending)) {
912 dev_dbg(chan->dev, "no pending LDs\n");
869 goto out_unlock; 913 goto out_unlock;
914 }
870 915
871 dma_halt(fsl_chan); 916 /*
917 * The DMA controller is not idle, which means the interrupt
918 * handler will start any queued transactions when it runs
919 * at the end of the current transaction
920 */
921 if (!dma_is_idle(chan)) {
922 dev_dbg(chan->dev, "DMA controller still busy\n");
923 goto out_unlock;
924 }
872 925
873 /* If there are some link descriptors 926 /*
874 * not yet transferred in the queue, we need to start them. 927 * TODO:
928 * make sure the dma_halt() function really un-wedges the
929 * controller as much as possible
875 */ 930 */
931 dma_halt(chan);
876 932
877 /* Find the first un-transferred descriptor */ 933 /*
878 for (ld_node = fsl_chan->ld_queue.next; 934 * If there are some link descriptors which have not been
879 (ld_node != &fsl_chan->ld_queue) 935 * transferred, we need to start the controller
880 && (dma_async_is_complete( 936 */
881 to_fsl_desc(ld_node)->async_tx.cookie, 937
882 fsl_chan->completed_cookie, 938 /*
883 fsl_chan->common.cookie) == DMA_SUCCESS); 939 * Move all elements from the queue of pending transactions
884 ld_node = ld_node->next); 940 * onto the list of running transactions
885 941 */
886 if (ld_node != &fsl_chan->ld_queue) { 942 desc = list_first_entry(&chan->ld_pending, struct fsl_desc_sw, node);
887 /* Get the ld start address from ld_queue */ 943 list_splice_tail_init(&chan->ld_pending, &chan->ld_running);
888 next_dest_addr = to_fsl_desc(ld_node)->async_tx.phys; 944
889 dev_dbg(fsl_chan->dev, "xfer LDs starting from 0x%llx\n", 945 /*
890 (unsigned long long)next_dest_addr); 946 * Program the descriptor's address into the DMA controller,
891 set_cdar(fsl_chan, next_dest_addr); 947 * then start the DMA transaction
892 dma_start(fsl_chan); 948 */
893 } else { 949 set_cdar(chan, desc->async_tx.phys);
894 set_cdar(fsl_chan, 0); 950 dma_start(chan);
895 set_ndar(fsl_chan, 0);
896 }
897 951
898out_unlock: 952out_unlock:
899 spin_unlock_irqrestore(&fsl_chan->desc_lock, flags); 953 spin_unlock_irqrestore(&chan->desc_lock, flags);
900} 954}
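The pending-to-running handoff hinges on list_splice_tail_init(), which moves the whole ld_pending list onto the tail of ld_running in O(1) and re-initializes ld_pending so new submissions can land immediately. A minimal sketch of the handoff, assuming the driver's types are in scope and desc_lock is held by the caller:

    #include <linux/list.h>

    /* sketch: record the head of the chain for the hardware, then move
     * every pending descriptor onto the running list in one splice;
     * caller holds desc_lock and has checked ld_pending is non-empty */
    static dma_addr_t promote_pending(struct fsldma_chan *chan)
    {
            struct fsl_desc_sw *first;

            first = list_first_entry(&chan->ld_pending,
                                     struct fsl_desc_sw, node);
            list_splice_tail_init(&chan->ld_pending, &chan->ld_running);
            return first->async_tx.phys;    /* head of the HW chain */
    }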
901 955
902/** 956/**
903 * fsl_dma_memcpy_issue_pending - Issue the DMA start command 957 * fsl_dma_memcpy_issue_pending - Issue the DMA start command
904 * @fsl_chan : Freescale DMA channel 958 * @chan : Freescale DMA channel
905 */ 959 */
906static void fsl_dma_memcpy_issue_pending(struct dma_chan *chan) 960static void fsl_dma_memcpy_issue_pending(struct dma_chan *dchan)
907{ 961{
908 struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan); 962 struct fsldma_chan *chan = to_fsl_chan(dchan);
909 963 fsl_chan_xfer_ld_queue(chan);
910#ifdef FSL_DMA_LD_DEBUG
911 struct fsl_desc_sw *ld;
912 unsigned long flags;
913
914 spin_lock_irqsave(&fsl_chan->desc_lock, flags);
915 if (list_empty(&fsl_chan->ld_queue)) {
916 spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
917 return;
918 }
919
920 dev_dbg(fsl_chan->dev, "--memcpy issue--\n");
921 list_for_each_entry(ld, &fsl_chan->ld_queue, node) {
922 int i;
923 dev_dbg(fsl_chan->dev, "Ch %d, LD %08x\n",
924 fsl_chan->id, ld->async_tx.phys);
925 for (i = 0; i < 8; i++)
926 dev_dbg(fsl_chan->dev, "LD offset %d: %08x\n",
927 i, *(((u32 *)&ld->hw) + i));
928 }
929 dev_dbg(fsl_chan->dev, "----------------\n");
930 spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
931#endif
932
933 fsl_chan_xfer_ld_queue(fsl_chan);
934} 964}
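device_issue_pending is the dmaengine hook behind dma_async_issue_pending(); the client-side flow that ends here looks roughly like the following sketch (error handling omitted, dst/src/len assumed mapped by the caller):

    /* sketch of a dmaengine memcpy client, as of this API generation */
    struct dma_async_tx_descriptor *tx;
    dma_cookie_t cookie;

    tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len, 0);
    cookie = tx->tx_submit(tx);     /* lands on ld_pending */
    dma_async_issue_pending(chan);  /* kicks fsl_chan_xfer_ld_queue() */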
935 965
936/** 966/**
937 * fsl_dma_is_complete - Determine the DMA status 967 * fsl_dma_is_complete - Determine the DMA status
938 * @fsl_chan : Freescale DMA channel 968 * @chan : Freescale DMA channel
939 */ 969 */
940static enum dma_status fsl_dma_is_complete(struct dma_chan *chan, 970static enum dma_status fsl_dma_is_complete(struct dma_chan *dchan,
941 dma_cookie_t cookie, 971 dma_cookie_t cookie,
942 dma_cookie_t *done, 972 dma_cookie_t *done,
943 dma_cookie_t *used) 973 dma_cookie_t *used)
944{ 974{
945 struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan); 975 struct fsldma_chan *chan = to_fsl_chan(dchan);
946 dma_cookie_t last_used; 976 dma_cookie_t last_used;
947 dma_cookie_t last_complete; 977 dma_cookie_t last_complete;
948 978
949 fsl_chan_ld_cleanup(fsl_chan); 979 fsl_chan_ld_cleanup(chan);
950 980
951 last_used = chan->cookie; 981 last_used = dchan->cookie;
952 last_complete = fsl_chan->completed_cookie; 982 last_complete = chan->completed_cookie;
953 983
954 if (done) 984 if (done)
955 *done = last_complete; 985 *done = last_complete;
@@ -960,32 +990,37 @@ static enum dma_status fsl_dma_is_complete(struct dma_chan *chan,
960 return dma_async_is_complete(cookie, last_complete, last_used); 990 return dma_async_is_complete(cookie, last_complete, last_used);
961} 991}
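fsl_dma_is_complete() backs device_is_tx_complete, so a client can poll a cookie until it retires; note that descriptor cleanup runs as a side effect of every poll. An illustrative sketch only, real clients normally rely on the completion callback:

    enum dma_status status;
    dma_cookie_t done, used;

    do {
            status = dma_async_is_tx_complete(chan, cookie, &done, &used);
    } while (status == DMA_IN_PROGRESS);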
962 992
963static irqreturn_t fsl_dma_chan_do_interrupt(int irq, void *data) 993/*----------------------------------------------------------------------------*/
994/* Interrupt Handling */
995/*----------------------------------------------------------------------------*/
996
997static irqreturn_t fsldma_chan_irq(int irq, void *data)
964{ 998{
965 struct fsl_dma_chan *fsl_chan = (struct fsl_dma_chan *)data; 999 struct fsldma_chan *chan = data;
966 u32 stat;
967 int update_cookie = 0; 1000 int update_cookie = 0;
968 int xfer_ld_q = 0; 1001 int xfer_ld_q = 0;
1002 u32 stat;
969 1003
970 stat = get_sr(fsl_chan); 1004 /* save and clear the status register */
971 dev_dbg(fsl_chan->dev, "event: channel %d, stat = 0x%x\n", 1005 stat = get_sr(chan);
972 fsl_chan->id, stat); 1006 set_sr(chan, stat);
973 set_sr(fsl_chan, stat); /* Clear the event register */ 1007 dev_dbg(chan->dev, "irq: channel %d, stat = 0x%x\n", chan->id, stat);
974 1008
975 stat &= ~(FSL_DMA_SR_CB | FSL_DMA_SR_CH); 1009 stat &= ~(FSL_DMA_SR_CB | FSL_DMA_SR_CH);
976 if (!stat) 1010 if (!stat)
977 return IRQ_NONE; 1011 return IRQ_NONE;
978 1012
979 if (stat & FSL_DMA_SR_TE) 1013 if (stat & FSL_DMA_SR_TE)
980 dev_err(fsl_chan->dev, "Transfer Error!\n"); 1014 dev_err(chan->dev, "Transfer Error!\n");
981 1015
982 /* Programming Error 1016 /*
1017 * Programming Error
983 * The DMA_INTERRUPT async_tx is a NULL transfer, which will 1018 * The DMA_INTERRUPT async_tx is a NULL transfer, which will
984 * trigger a PE interrupt. 1019 * trigger a PE interrupt.
985 */ 1020 */
986 if (stat & FSL_DMA_SR_PE) { 1021 if (stat & FSL_DMA_SR_PE) {
987 dev_dbg(fsl_chan->dev, "event: Programming Error INT\n"); 1022 dev_dbg(chan->dev, "irq: Programming Error INT\n");
988 if (get_bcr(fsl_chan) == 0) { 1023 if (get_bcr(chan) == 0) {
989 /* BCR register is 0, this is a DMA_INTERRUPT async_tx. 1024 /* BCR register is 0, this is a DMA_INTERRUPT async_tx.
990 * Now, update the completed cookie, and continue with the 1025 * Now, update the completed cookie, and continue with the
991 * next uncompleted transfer. 1026 * next uncompleted transfer.
@@ -996,208 +1031,296 @@ static irqreturn_t fsl_dma_chan_do_interrupt(int irq, void *data)
996 stat &= ~FSL_DMA_SR_PE; 1031 stat &= ~FSL_DMA_SR_PE;
997 } 1032 }
998 1033
999 /* If the link descriptor segment transfer finishes, 1034 /*
1035 * If the link descriptor segment transfer finishes,
1000 * we will recycle the used descriptor. 1036 * we will recycle the used descriptor.
1001 */ 1037 */
1002 if (stat & FSL_DMA_SR_EOSI) { 1038 if (stat & FSL_DMA_SR_EOSI) {
1003 dev_dbg(fsl_chan->dev, "event: End-of-segments INT\n"); 1039 dev_dbg(chan->dev, "irq: End-of-segments INT\n");
1004 dev_dbg(fsl_chan->dev, "event: clndar 0x%llx, nlndar 0x%llx\n", 1040 dev_dbg(chan->dev, "irq: clndar 0x%llx, nlndar 0x%llx\n",
1005 (unsigned long long)get_cdar(fsl_chan), 1041 (unsigned long long)get_cdar(chan),
1006 (unsigned long long)get_ndar(fsl_chan)); 1042 (unsigned long long)get_ndar(chan));
1007 stat &= ~FSL_DMA_SR_EOSI; 1043 stat &= ~FSL_DMA_SR_EOSI;
1008 update_cookie = 1; 1044 update_cookie = 1;
1009 } 1045 }
1010 1046
1011 /* For MPC8349, the EOCDI event needs to update the cookie 1047 /*
1048 * For MPC8349, the EOCDI event needs to update the cookie
1012 * and start the next transfer, if one exists. 1049 * and start the next transfer, if one exists.
1013 */ 1050 */
1014 if (stat & FSL_DMA_SR_EOCDI) { 1051 if (stat & FSL_DMA_SR_EOCDI) {
1015 dev_dbg(fsl_chan->dev, "event: End-of-Chain link INT\n"); 1052 dev_dbg(chan->dev, "irq: End-of-Chain link INT\n");
1016 stat &= ~FSL_DMA_SR_EOCDI; 1053 stat &= ~FSL_DMA_SR_EOCDI;
1017 update_cookie = 1; 1054 update_cookie = 1;
1018 xfer_ld_q = 1; 1055 xfer_ld_q = 1;
1019 } 1056 }
1020 1057
1021 /* If the current transfer is the end-of-transfer, 1058 /*
1059 * If the current transfer is the end-of-transfer,
1022 * we should clear the Channel Start bit to 1060 * we should clear the Channel Start bit to
1023 * prepare for the next transfer. 1061 * prepare for the next transfer.
1024 */ 1062 */
1025 if (stat & FSL_DMA_SR_EOLNI) { 1063 if (stat & FSL_DMA_SR_EOLNI) {
1026 dev_dbg(fsl_chan->dev, "event: End-of-link INT\n"); 1064 dev_dbg(chan->dev, "irq: End-of-link INT\n");
1027 stat &= ~FSL_DMA_SR_EOLNI; 1065 stat &= ~FSL_DMA_SR_EOLNI;
1028 xfer_ld_q = 1; 1066 xfer_ld_q = 1;
1029 } 1067 }
1030 1068
1031 if (update_cookie) 1069 if (update_cookie)
1032 fsl_dma_update_completed_cookie(fsl_chan); 1070 fsl_dma_update_completed_cookie(chan);
1033 if (xfer_ld_q) 1071 if (xfer_ld_q)
1034 fsl_chan_xfer_ld_queue(fsl_chan); 1072 fsl_chan_xfer_ld_queue(chan);
1035 if (stat) 1073 if (stat)
1036 dev_dbg(fsl_chan->dev, "event: unhandled sr 0x%02x\n", 1074 dev_dbg(chan->dev, "irq: unhandled sr 0x%02x\n", stat);
1037 stat);
1038 1075
1039 dev_dbg(fsl_chan->dev, "event: Exit\n"); 1076 dev_dbg(chan->dev, "irq: Exit\n");
1040 tasklet_schedule(&fsl_chan->tasklet); 1077 tasklet_schedule(&chan->tasklet);
1041 return IRQ_HANDLED; 1078 return IRQ_HANDLED;
1042} 1079}
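The handler snapshots the status register, writes the snapshot back to acknowledge it, then clears each bit it services so anything left over shows up in the final "unhandled sr" message. A standalone model of that peel-off pattern (bit positions are illustrative, not the real FSL_DMA_SR_* layout):

    #include <stdio.h>
    #include <stdint.h>

    #define SR_TE   (1u << 7)       /* illustrative bit positions */
    #define SR_EOSI (1u << 6)

    static void handle_status(uint32_t stat)
    {
            if (stat & SR_TE) {
                    printf("transfer error\n");
                    stat &= ~SR_TE;
            }
            if (stat & SR_EOSI) {
                    printf("end of segment\n");
                    stat &= ~SR_EOSI;
            }
            if (stat)               /* whatever survives was unhandled */
                    printf("unhandled sr 0x%x\n", stat);
    }

    int main(void)
    {
            handle_status(SR_TE | SR_EOSI | 0x1);
            return 0;
    }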
1043 1080
1044static irqreturn_t fsl_dma_do_interrupt(int irq, void *data) 1081static void dma_do_tasklet(unsigned long data)
1045{ 1082{
1046 struct fsl_dma_device *fdev = (struct fsl_dma_device *)data; 1083 struct fsldma_chan *chan = (struct fsldma_chan *)data;
1047 u32 gsr; 1084 fsl_chan_ld_cleanup(chan);
1048 int ch_nr; 1085}
1086
1087static irqreturn_t fsldma_ctrl_irq(int irq, void *data)
1088{
1089 struct fsldma_device *fdev = data;
1090 struct fsldma_chan *chan;
1091 unsigned int handled = 0;
1092 u32 gsr, mask;
1093 int i;
1049 1094
1050 gsr = (fdev->feature & FSL_DMA_BIG_ENDIAN) ? in_be32(fdev->reg_base) 1095 gsr = (fdev->feature & FSL_DMA_BIG_ENDIAN) ? in_be32(fdev->regs)
1051 : in_le32(fdev->reg_base); 1096 : in_le32(fdev->regs);
1052 ch_nr = (32 - ffs(gsr)) / 8; 1097 mask = 0xff000000;
1098 dev_dbg(fdev->dev, "IRQ: gsr 0x%.8x\n", gsr);
1099
1100 for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
1101 chan = fdev->chan[i];
1102 if (!chan)
1103 continue;
1104
1105 if (gsr & mask) {
1106 dev_dbg(fdev->dev, "IRQ: chan %d\n", chan->id);
1107 fsldma_chan_irq(irq, chan);
1108 handled++;
1109 }
1053 1110
1054 return fdev->chan[ch_nr] ? fsl_dma_chan_do_interrupt(irq, 1111 gsr &= ~mask;
1055 fdev->chan[ch_nr]) : IRQ_NONE; 1112 mask >>= 8;
1113 }
1114
1115 return IRQ_RETVAL(handled);
1056} 1116}
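fsldma_ctrl_irq() replaces the old ffs()-based single-channel lookup with a walk over all channels: each channel owns one byte of the 32-bit GSR, channel 0 in the most significant byte. A standalone model of the scan:

    #include <stdio.h>
    #include <stdint.h>

    #define MAX_CHANS 4

    int main(void)
    {
            uint32_t gsr = 0x00ab0000;      /* pretend channel 1 fired */
            uint32_t mask = 0xff000000;     /* channel 0 = MSB */
            int i;

            for (i = 0; i < MAX_CHANS; i++) {
                    if (gsr & mask)
                            printf("service channel %d\n", i);
                    gsr &= ~mask;
                    mask >>= 8;
            }
            return 0;
    }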
1057 1117
1058static void dma_do_tasklet(unsigned long data) 1118static void fsldma_free_irqs(struct fsldma_device *fdev)
1059{ 1119{
1060 struct fsl_dma_chan *fsl_chan = (struct fsl_dma_chan *)data; 1120 struct fsldma_chan *chan;
1061 fsl_chan_ld_cleanup(fsl_chan); 1121 int i;
1122
1123 if (fdev->irq != NO_IRQ) {
1124 dev_dbg(fdev->dev, "free per-controller IRQ\n");
1125 free_irq(fdev->irq, fdev);
1126 return;
1127 }
1128
1129 for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
1130 chan = fdev->chan[i];
1131 if (chan && chan->irq != NO_IRQ) {
1132 dev_dbg(fdev->dev, "free channel %d IRQ\n", chan->id);
1133 free_irq(chan->irq, chan);
1134 }
1135 }
1062} 1136}
1063 1137
1064static int __devinit fsl_dma_chan_probe(struct fsl_dma_device *fdev, 1138static int fsldma_request_irqs(struct fsldma_device *fdev)
1139{
1140 struct fsldma_chan *chan;
1141 int ret;
1142 int i;
1143
1144 /* if we have a per-controller IRQ, use that */
1145 if (fdev->irq != NO_IRQ) {
1146 dev_dbg(fdev->dev, "request per-controller IRQ\n");
1147 ret = request_irq(fdev->irq, fsldma_ctrl_irq, IRQF_SHARED,
1148 "fsldma-controller", fdev);
1149 return ret;
1150 }
1151
1152 /* no per-controller IRQ, use the per-channel IRQs */
1153 for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
1154 chan = fdev->chan[i];
1155 if (!chan)
1156 continue;
1157
1158 if (chan->irq == NO_IRQ) {
1159 dev_err(fdev->dev, "no interrupts property defined for "
1160 "DMA channel %d. Please fix your "
1161 "device tree\n", chan->id);
1162 ret = -ENODEV;
1163 goto out_unwind;
1164 }
1165
1166 dev_dbg(fdev->dev, "request channel %d IRQ\n", chan->id);
1167 ret = request_irq(chan->irq, fsldma_chan_irq, IRQF_SHARED,
1168 "fsldma-chan", chan);
1169 if (ret) {
1170 dev_err(fdev->dev, "unable to request IRQ for DMA "
1171 "channel %d\n", chan->id);
1172 goto out_unwind;
1173 }
1174 }
1175
1176 return 0;
1177
1178out_unwind:
1179 for (/* none */; i >= 0; i--) {
1180 chan = fdev->chan[i];
1181 if (!chan)
1182 continue;
1183
1184 if (chan->irq == NO_IRQ)
1185 continue;
1186
1187 free_irq(chan->irq, chan);
1188 }
1189
1190 return ret;
1191}
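The out_unwind label above is the usual acquire-N-with-rollback idiom. A generic sketch with hypothetical acquire()/release() helpers; this conservative version releases only the indices already known to have succeeded:

    static int acquire_all(struct thing *t[], int n)
    {
            int i, ret;

            for (i = 0; i < n; i++) {
                    ret = acquire(t[i]);
                    if (ret)
                            goto unwind;
            }
            return 0;

    unwind:
            while (--i >= 0)        /* roll back 0..i-1 only */
                    release(t[i]);
            return ret;
    }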
1192
1193/*----------------------------------------------------------------------------*/
1194/* OpenFirmware Subsystem */
1195/*----------------------------------------------------------------------------*/
1196
1197static int __devinit fsl_dma_chan_probe(struct fsldma_device *fdev,
1065 struct device_node *node, u32 feature, const char *compatible) 1198 struct device_node *node, u32 feature, const char *compatible)
1066{ 1199{
1067 struct fsl_dma_chan *new_fsl_chan; 1200 struct fsldma_chan *chan;
1201 struct resource res;
1068 int err; 1202 int err;
1069 1203
1070 /* alloc channel */ 1204 /* alloc channel */
1071 new_fsl_chan = kzalloc(sizeof(struct fsl_dma_chan), GFP_KERNEL); 1205 chan = kzalloc(sizeof(*chan), GFP_KERNEL);
1072 if (!new_fsl_chan) { 1206 if (!chan) {
1073 dev_err(fdev->dev, "No free memory for allocating " 1207 dev_err(fdev->dev, "no free memory for DMA channels!\n");
1074 "dma channels!\n"); 1208 err = -ENOMEM;
1075 return -ENOMEM; 1209 goto out_return;
1076 } 1210 }
1077 1211
1078 /* get dma channel register base */ 1212 /* ioremap registers for use */
1079 err = of_address_to_resource(node, 0, &new_fsl_chan->reg); 1213 chan->regs = of_iomap(node, 0);
1080 if (err) { 1214 if (!chan->regs) {
1081 dev_err(fdev->dev, "Can't get %s property 'reg'\n", 1215 dev_err(fdev->dev, "unable to ioremap registers\n");
1082 node->full_name); 1216 err = -ENOMEM;
1083 goto err_no_reg; 1217 goto out_free_chan;
1084 } 1218 }
1085 1219
1086 new_fsl_chan->feature = feature; 1220 err = of_address_to_resource(node, 0, &res);
1221 if (err) {
1222 dev_err(fdev->dev, "unable to find 'reg' property\n");
1223 goto out_iounmap_regs;
1224 }
1087 1225
1226 chan->feature = feature;
1088 if (!fdev->feature) 1227 if (!fdev->feature)
1089 fdev->feature = new_fsl_chan->feature; 1228 fdev->feature = chan->feature;
1090 1229
1091 /* If the DMA device's feature is different than its channels', 1230 /*
1092 * report the bug. 1231 * If the DMA device's feature is different than the feature
1232 * of its channels, report the bug
1093 */ 1233 */
1094 WARN_ON(fdev->feature != new_fsl_chan->feature); 1234 WARN_ON(fdev->feature != chan->feature);
1095 1235
1096 new_fsl_chan->dev = fdev->dev; 1236 chan->dev = fdev->dev;
1097 new_fsl_chan->reg_base = ioremap(new_fsl_chan->reg.start, 1237 chan->id = ((res.start - 0x100) & 0xfff) >> 7;
1098 new_fsl_chan->reg.end - new_fsl_chan->reg.start + 1); 1238 if (chan->id >= FSL_DMA_MAX_CHANS_PER_DEVICE) {
1099 1239 dev_err(fdev->dev, "too many channels for device\n");
1100 new_fsl_chan->id = ((new_fsl_chan->reg.start - 0x100) & 0xfff) >> 7;
1101 if (new_fsl_chan->id >= FSL_DMA_MAX_CHANS_PER_DEVICE) {
1102 dev_err(fdev->dev, "There is no %d channel!\n",
1103 new_fsl_chan->id);
1104 err = -EINVAL; 1240 err = -EINVAL;
1105 goto err_no_chan; 1241 goto out_iounmap_regs;
1106 } 1242 }
1107 fdev->chan[new_fsl_chan->id] = new_fsl_chan;
1108 tasklet_init(&new_fsl_chan->tasklet, dma_do_tasklet,
1109 (unsigned long)new_fsl_chan);
1110 1243
1111 /* Init the channel */ 1244 fdev->chan[chan->id] = chan;
1112 dma_init(new_fsl_chan); 1245 tasklet_init(&chan->tasklet, dma_do_tasklet, (unsigned long)chan);
1246
1247 /* Initialize the channel */
1248 dma_init(chan);
1113 1249
1114 /* Clear cdar registers */ 1250 /* Clear cdar registers */
1115 set_cdar(new_fsl_chan, 0); 1251 set_cdar(chan, 0);
1116 1252
1117 switch (new_fsl_chan->feature & FSL_DMA_IP_MASK) { 1253 switch (chan->feature & FSL_DMA_IP_MASK) {
1118 case FSL_DMA_IP_85XX: 1254 case FSL_DMA_IP_85XX:
1119 new_fsl_chan->toggle_ext_pause = fsl_chan_toggle_ext_pause; 1255 chan->toggle_ext_pause = fsl_chan_toggle_ext_pause;
1120 case FSL_DMA_IP_83XX: 1256 case FSL_DMA_IP_83XX:
1121 new_fsl_chan->toggle_ext_start = fsl_chan_toggle_ext_start; 1257 chan->toggle_ext_start = fsl_chan_toggle_ext_start;
1122 new_fsl_chan->set_src_loop_size = fsl_chan_set_src_loop_size; 1258 chan->set_src_loop_size = fsl_chan_set_src_loop_size;
1123 new_fsl_chan->set_dest_loop_size = fsl_chan_set_dest_loop_size; 1259 chan->set_dst_loop_size = fsl_chan_set_dst_loop_size;
1124 new_fsl_chan->set_request_count = fsl_chan_set_request_count; 1260 chan->set_request_count = fsl_chan_set_request_count;
1125 } 1261 }
1126 1262
1127 spin_lock_init(&new_fsl_chan->desc_lock); 1263 spin_lock_init(&chan->desc_lock);
1128 INIT_LIST_HEAD(&new_fsl_chan->ld_queue); 1264 INIT_LIST_HEAD(&chan->ld_pending);
1265 INIT_LIST_HEAD(&chan->ld_running);
1266
1267 chan->common.device = &fdev->common;
1129 1268
1130 new_fsl_chan->common.device = &fdev->common; 1269 /* find the IRQ line, if it exists in the device tree */
1270 chan->irq = irq_of_parse_and_map(node, 0);
1131 1271
1132 /* Add the channel to DMA device channel list */ 1272 /* Add the channel to DMA device channel list */
1133 list_add_tail(&new_fsl_chan->common.device_node, 1273 list_add_tail(&chan->common.device_node, &fdev->common.channels);
1134 &fdev->common.channels);
1135 fdev->common.chancnt++; 1274 fdev->common.chancnt++;
1136 1275
1137 new_fsl_chan->irq = irq_of_parse_and_map(node, 0); 1276 dev_info(fdev->dev, "#%d (%s), irq %d\n", chan->id, compatible,
1138 if (new_fsl_chan->irq != NO_IRQ) { 1277 chan->irq != NO_IRQ ? chan->irq : fdev->irq);
1139 err = request_irq(new_fsl_chan->irq,
1140 &fsl_dma_chan_do_interrupt, IRQF_SHARED,
1141 "fsldma-channel", new_fsl_chan);
1142 if (err) {
1143 dev_err(fdev->dev, "DMA channel %s request_irq error "
1144 "with return %d\n", node->full_name, err);
1145 goto err_no_irq;
1146 }
1147 }
1148
1149 dev_info(fdev->dev, "#%d (%s), irq %d\n", new_fsl_chan->id,
1150 compatible,
1151 new_fsl_chan->irq != NO_IRQ ? new_fsl_chan->irq : fdev->irq);
1152 1278
1153 return 0; 1279 return 0;
1154 1280
1155err_no_irq: 1281out_iounmap_regs:
1156 list_del(&new_fsl_chan->common.device_node); 1282 iounmap(chan->regs);
1157err_no_chan: 1283out_free_chan:
1158 iounmap(new_fsl_chan->reg_base); 1284 kfree(chan);
1159err_no_reg: 1285out_return:
1160 kfree(new_fsl_chan);
1161 return err; 1286 return err;
1162} 1287}
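The channel id is recovered from the 'reg' address: channel register blocks sit at controller base + 0x100 + id * 0x80, so the low bits encode the id. A standalone check of the arithmetic (the example base address is made up):

    #include <stdio.h>
    #include <stdint.h>

    static int chan_id(uint64_t reg_start)
    {
            return (int)(((reg_start - 0x100) & 0xfff) >> 7);
    }

    int main(void)
    {
            printf("%d\n", chan_id(0xe0000100));    /* channel 0 */
            printf("%d\n", chan_id(0xe0000200));    /* channel 2 */
            return 0;
    }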
1163 1288
1164static void fsl_dma_chan_remove(struct fsl_dma_chan *fchan) 1289static void fsl_dma_chan_remove(struct fsldma_chan *chan)
1165{ 1290{
1166 if (fchan->irq != NO_IRQ) 1291 irq_dispose_mapping(chan->irq);
1167 free_irq(fchan->irq, fchan); 1292 list_del(&chan->common.device_node);
1168 list_del(&fchan->common.device_node); 1293 iounmap(chan->regs);
1169 iounmap(fchan->reg_base); 1294 kfree(chan);
1170 kfree(fchan);
1171} 1295}
1172 1296
1173static int __devinit of_fsl_dma_probe(struct of_device *dev, 1297static int __devinit fsldma_of_probe(struct of_device *op,
1174 const struct of_device_id *match) 1298 const struct of_device_id *match)
1175{ 1299{
1176 int err; 1300 struct fsldma_device *fdev;
1177 struct fsl_dma_device *fdev;
1178 struct device_node *child; 1301 struct device_node *child;
1302 int err;
1179 1303
1180 fdev = kzalloc(sizeof(struct fsl_dma_device), GFP_KERNEL); 1304 fdev = kzalloc(sizeof(*fdev), GFP_KERNEL);
1181 if (!fdev) { 1305 if (!fdev) {
1182 dev_err(&dev->dev, "Not enough memory for 'priv'\n"); 1306 dev_err(&op->dev, "Not enough memory for 'priv'\n");
1183 return -ENOMEM; 1307 err = -ENOMEM;
1308 goto out_return;
1184 } 1309 }
1185 fdev->dev = &dev->dev; 1310
1311 fdev->dev = &op->dev;
1186 INIT_LIST_HEAD(&fdev->common.channels); 1312 INIT_LIST_HEAD(&fdev->common.channels);
1187 1313
1188 /* get DMA controller register base */ 1314 /* ioremap the registers for use */
1189 err = of_address_to_resource(dev->node, 0, &fdev->reg); 1315 fdev->regs = of_iomap(op->node, 0);
1190 if (err) { 1316 if (!fdev->regs) {
1191 dev_err(&dev->dev, "Can't get %s property 'reg'\n", 1317 dev_err(&op->dev, "unable to ioremap registers\n");
1192 dev->node->full_name); 1318 err = -ENOMEM;
1193 goto err_no_reg; 1319 goto out_free_fdev;
1194 } 1320 }
1195 1321
1196 dev_info(&dev->dev, "Probe the Freescale DMA driver for %s " 1322 /* map the channel IRQ if it exists, but don't hookup the handler yet */
1197 "controller at 0x%llx...\n", 1323 fdev->irq = irq_of_parse_and_map(op->node, 0);
1198 match->compatible, (unsigned long long)fdev->reg.start);
1199 fdev->reg_base = ioremap(fdev->reg.start, fdev->reg.end
1200 - fdev->reg.start + 1);
1201 1324
1202 dma_cap_set(DMA_MEMCPY, fdev->common.cap_mask); 1325 dma_cap_set(DMA_MEMCPY, fdev->common.cap_mask);
1203 dma_cap_set(DMA_INTERRUPT, fdev->common.cap_mask); 1326 dma_cap_set(DMA_INTERRUPT, fdev->common.cap_mask);
@@ -1210,103 +1333,111 @@ static int __devinit of_fsl_dma_probe(struct of_device *dev,
1210 fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending; 1333 fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending;
1211 fdev->common.device_prep_slave_sg = fsl_dma_prep_slave_sg; 1334 fdev->common.device_prep_slave_sg = fsl_dma_prep_slave_sg;
1212 fdev->common.device_terminate_all = fsl_dma_device_terminate_all; 1335 fdev->common.device_terminate_all = fsl_dma_device_terminate_all;
1213 fdev->common.dev = &dev->dev; 1336 fdev->common.dev = &op->dev;
1214 1337
1215 fdev->irq = irq_of_parse_and_map(dev->node, 0); 1338 dev_set_drvdata(&op->dev, fdev);
1216 if (fdev->irq != NO_IRQ) {
1217 err = request_irq(fdev->irq, &fsl_dma_do_interrupt, IRQF_SHARED,
1218 "fsldma-device", fdev);
1219 if (err) {
1220 dev_err(&dev->dev, "DMA device request_irq error "
1221 "with return %d\n", err);
1222 goto err;
1223 }
1224 }
1225
1226 dev_set_drvdata(&(dev->dev), fdev);
1227 1339
1228 /* We cannot use of_platform_bus_probe() because there is no 1340 /*
1229 * of_platform_bus_remove. Instead, we manually instantiate every DMA 1341 * We cannot use of_platform_bus_probe() because there is no
1342 * of_platform_bus_remove(). Instead, we manually instantiate every DMA
1230 * channel object. 1343 * channel object.
1231 */ 1344 */
1232 for_each_child_of_node(dev->node, child) { 1345 for_each_child_of_node(op->node, child) {
1233 if (of_device_is_compatible(child, "fsl,eloplus-dma-channel")) 1346 if (of_device_is_compatible(child, "fsl,eloplus-dma-channel")) {
1234 fsl_dma_chan_probe(fdev, child, 1347 fsl_dma_chan_probe(fdev, child,
1235 FSL_DMA_IP_85XX | FSL_DMA_BIG_ENDIAN, 1348 FSL_DMA_IP_85XX | FSL_DMA_BIG_ENDIAN,
1236 "fsl,eloplus-dma-channel"); 1349 "fsl,eloplus-dma-channel");
1237 if (of_device_is_compatible(child, "fsl,elo-dma-channel")) 1350 }
1351
1352 if (of_device_is_compatible(child, "fsl,elo-dma-channel")) {
1238 fsl_dma_chan_probe(fdev, child, 1353 fsl_dma_chan_probe(fdev, child,
1239 FSL_DMA_IP_83XX | FSL_DMA_LITTLE_ENDIAN, 1354 FSL_DMA_IP_83XX | FSL_DMA_LITTLE_ENDIAN,
1240 "fsl,elo-dma-channel"); 1355 "fsl,elo-dma-channel");
1356 }
1357 }
1358
1359 /*
1360 * Hookup the IRQ handler(s)
1361 *
1362 * If we have a per-controller interrupt, we prefer that to the
1363 * per-channel interrupts to reduce the number of shared interrupt
1364 * handlers on the same IRQ line
1365 */
1366 err = fsldma_request_irqs(fdev);
1367 if (err) {
1368 dev_err(fdev->dev, "unable to request IRQs\n");
1369 goto out_free_fdev;
1241 } 1370 }
1242 1371
1243 dma_async_device_register(&fdev->common); 1372 dma_async_device_register(&fdev->common);
1244 return 0; 1373 return 0;
1245 1374
1246err: 1375out_free_fdev:
1247 iounmap(fdev->reg_base); 1376 irq_dispose_mapping(fdev->irq);
1248err_no_reg:
1249 kfree(fdev); 1377 kfree(fdev);
1378out_return:
1250 return err; 1379 return err;
1251} 1380}
1252 1381
1253static int of_fsl_dma_remove(struct of_device *of_dev) 1382static int fsldma_of_remove(struct of_device *op)
1254{ 1383{
1255 struct fsl_dma_device *fdev; 1384 struct fsldma_device *fdev;
1256 unsigned int i; 1385 unsigned int i;
1257 1386
1258 fdev = dev_get_drvdata(&of_dev->dev); 1387 fdev = dev_get_drvdata(&op->dev);
1259
1260 dma_async_device_unregister(&fdev->common); 1388 dma_async_device_unregister(&fdev->common);
1261 1389
1262 for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) 1390 fsldma_free_irqs(fdev);
1391
1392 for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
1263 if (fdev->chan[i]) 1393 if (fdev->chan[i])
1264 fsl_dma_chan_remove(fdev->chan[i]); 1394 fsl_dma_chan_remove(fdev->chan[i]);
1395 }
1265 1396
1266 if (fdev->irq != NO_IRQ) 1397 iounmap(fdev->regs);
1267 free_irq(fdev->irq, fdev); 1398 dev_set_drvdata(&op->dev, NULL);
1268
1269 iounmap(fdev->reg_base);
1270
1271 kfree(fdev); 1399 kfree(fdev);
1272 dev_set_drvdata(&of_dev->dev, NULL);
1273 1400
1274 return 0; 1401 return 0;
1275} 1402}
1276 1403
1277static struct of_device_id of_fsl_dma_ids[] = { 1404static const struct of_device_id fsldma_of_ids[] = {
1278 { .compatible = "fsl,eloplus-dma", }, 1405 { .compatible = "fsl,eloplus-dma", },
1279 { .compatible = "fsl,elo-dma", }, 1406 { .compatible = "fsl,elo-dma", },
1280 {} 1407 {}
1281}; 1408};
1282 1409
1283static struct of_platform_driver of_fsl_dma_driver = { 1410static struct of_platform_driver fsldma_of_driver = {
1284 .name = "fsl-elo-dma", 1411 .name = "fsl-elo-dma",
1285 .match_table = of_fsl_dma_ids, 1412 .match_table = fsldma_of_ids,
1286 .probe = of_fsl_dma_probe, 1413 .probe = fsldma_of_probe,
1287 .remove = of_fsl_dma_remove, 1414 .remove = fsldma_of_remove,
1288}; 1415};
1289 1416
1290static __init int of_fsl_dma_init(void) 1417/*----------------------------------------------------------------------------*/
1418/* Module Init / Exit */
1419/*----------------------------------------------------------------------------*/
1420
1421static __init int fsldma_init(void)
1291{ 1422{
1292 int ret; 1423 int ret;
1293 1424
1294 pr_info("Freescale Elo / Elo Plus DMA driver\n"); 1425 pr_info("Freescale Elo / Elo Plus DMA driver\n");
1295 1426
1296 ret = of_register_platform_driver(&of_fsl_dma_driver); 1427 ret = of_register_platform_driver(&fsldma_of_driver);
1297 if (ret) 1428 if (ret)
1298 pr_err("fsldma: failed to register platform driver\n"); 1429 pr_err("fsldma: failed to register platform driver\n");
1299 1430
1300 return ret; 1431 return ret;
1301} 1432}
1302 1433
1303static void __exit of_fsl_dma_exit(void) 1434static void __exit fsldma_exit(void)
1304{ 1435{
1305 of_unregister_platform_driver(&of_fsl_dma_driver); 1436 of_unregister_platform_driver(&fsldma_of_driver);
1306} 1437}
1307 1438
1308subsys_initcall(of_fsl_dma_init); 1439subsys_initcall(fsldma_init);
1309module_exit(of_fsl_dma_exit); 1440module_exit(fsldma_exit);
1310 1441
1311MODULE_DESCRIPTION("Freescale Elo / Elo Plus DMA driver"); 1442MODULE_DESCRIPTION("Freescale Elo / Elo Plus DMA driver");
1312MODULE_LICENSE("GPL"); 1443MODULE_LICENSE("GPL");
diff --git a/drivers/dma/fsldma.h b/drivers/dma/fsldma.h
index 0df14cbb8ca3..cb4d6ff51597 100644
--- a/drivers/dma/fsldma.h
+++ b/drivers/dma/fsldma.h
@@ -92,11 +92,9 @@ struct fsl_desc_sw {
92 struct list_head node; 92 struct list_head node;
93 struct list_head tx_list; 93 struct list_head tx_list;
94 struct dma_async_tx_descriptor async_tx; 94 struct dma_async_tx_descriptor async_tx;
95 struct list_head *ld;
96 void *priv;
97} __attribute__((aligned(32))); 95} __attribute__((aligned(32)));
98 96
99struct fsl_dma_chan_regs { 97struct fsldma_chan_regs {
100 u32 mr; /* 0x00 - Mode Register */ 98 u32 mr; /* 0x00 - Mode Register */
101 u32 sr; /* 0x04 - Status Register */ 99 u32 sr; /* 0x04 - Status Register */
102 u64 cdar; /* 0x08 - Current descriptor address register */ 100 u64 cdar; /* 0x08 - Current descriptor address register */
@@ -106,20 +104,19 @@ struct fsl_dma_chan_regs {
106 u64 ndar; /* 0x24 - Next Descriptor Address Register */ 104 u64 ndar; /* 0x24 - Next Descriptor Address Register */
107}; 105};
108 106
109struct fsl_dma_chan; 107struct fsldma_chan;
110#define FSL_DMA_MAX_CHANS_PER_DEVICE 4 108#define FSL_DMA_MAX_CHANS_PER_DEVICE 4
111 109
112struct fsl_dma_device { 110struct fsldma_device {
113 void __iomem *reg_base; /* DGSR register base */ 111 void __iomem *regs; /* DGSR register base */
114 struct resource reg; /* Resource for register */
115 struct device *dev; 112 struct device *dev;
116 struct dma_device common; 113 struct dma_device common;
117 struct fsl_dma_chan *chan[FSL_DMA_MAX_CHANS_PER_DEVICE]; 114 struct fsldma_chan *chan[FSL_DMA_MAX_CHANS_PER_DEVICE];
118 u32 feature; /* The same as DMA channels */ 115 u32 feature; /* The same as DMA channels */
119 int irq; /* Channel IRQ */ 116 int irq; /* Channel IRQ */
120}; 117};
121 118
122/* Define macros for fsl_dma_chan->feature property */ 119/* Define macros for fsldma_chan->feature property */
123#define FSL_DMA_LITTLE_ENDIAN 0x00000000 120#define FSL_DMA_LITTLE_ENDIAN 0x00000000
124#define FSL_DMA_BIG_ENDIAN 0x00000001 121#define FSL_DMA_BIG_ENDIAN 0x00000001
125 122
@@ -130,28 +127,28 @@ struct fsl_dma_device {
130#define FSL_DMA_CHAN_PAUSE_EXT 0x00001000 127#define FSL_DMA_CHAN_PAUSE_EXT 0x00001000
131#define FSL_DMA_CHAN_START_EXT 0x00002000 128#define FSL_DMA_CHAN_START_EXT 0x00002000
132 129
133struct fsl_dma_chan { 130struct fsldma_chan {
134 struct fsl_dma_chan_regs __iomem *reg_base; 131 struct fsldma_chan_regs __iomem *regs;
135 dma_cookie_t completed_cookie; /* The maximum cookie completed */ 132 dma_cookie_t completed_cookie; /* The maximum cookie completed */
136 spinlock_t desc_lock; /* Descriptor operation lock */ 133 spinlock_t desc_lock; /* Descriptor operation lock */
137 struct list_head ld_queue; /* Link descriptors queue */ 134 struct list_head ld_pending; /* Link descriptors queue */
135 struct list_head ld_running; /* Link descriptors queue */
138 struct dma_chan common; /* DMA common channel */ 136 struct dma_chan common; /* DMA common channel */
139 struct dma_pool *desc_pool; /* Descriptors pool */ 137 struct dma_pool *desc_pool; /* Descriptors pool */
140 struct device *dev; /* Channel device */ 138 struct device *dev; /* Channel device */
141 struct resource reg; /* Resource for register */
142 int irq; /* Channel IRQ */ 139 int irq; /* Channel IRQ */
143 int id; /* Raw id of this channel */ 140 int id; /* Raw id of this channel */
144 struct tasklet_struct tasklet; 141 struct tasklet_struct tasklet;
145 u32 feature; 142 u32 feature;
146 143
147 void (*toggle_ext_pause)(struct fsl_dma_chan *fsl_chan, int enable); 144 void (*toggle_ext_pause)(struct fsldma_chan *fsl_chan, int enable);
148 void (*toggle_ext_start)(struct fsl_dma_chan *fsl_chan, int enable); 145 void (*toggle_ext_start)(struct fsldma_chan *fsl_chan, int enable);
149 void (*set_src_loop_size)(struct fsl_dma_chan *fsl_chan, int size); 146 void (*set_src_loop_size)(struct fsldma_chan *fsl_chan, int size);
150 void (*set_dest_loop_size)(struct fsl_dma_chan *fsl_chan, int size); 147 void (*set_dst_loop_size)(struct fsldma_chan *fsl_chan, int size);
151 void (*set_request_count)(struct fsl_dma_chan *fsl_chan, int size); 148 void (*set_request_count)(struct fsldma_chan *fsl_chan, int size);
152}; 149};
153 150
154#define to_fsl_chan(chan) container_of(chan, struct fsl_dma_chan, common) 151#define to_fsl_chan(chan) container_of(chan, struct fsldma_chan, common)
155#define to_fsl_desc(lh) container_of(lh, struct fsl_desc_sw, node) 152#define to_fsl_desc(lh) container_of(lh, struct fsl_desc_sw, node)
156#define tx_to_fsl_desc(tx) container_of(tx, struct fsl_desc_sw, async_tx) 153#define tx_to_fsl_desc(tx) container_of(tx, struct fsl_desc_sw, async_tx)
157 154
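The to_fsl_chan()/to_fsl_desc() macros above are container_of() in disguise: given a pointer to an embedded member, they recover the enclosing structure by subtracting the member's offset. A simplified standalone demonstration (the kernel's macro adds a typeof check this version omits):

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct common { int cookie; };
    struct chan   { int id; struct common common; };

    int main(void)
    {
            struct chan c = { .id = 3 };
            struct common *p = &c.common;   /* what the core hands back */

            printf("%d\n", container_of(p, struct chan, common)->id);
            return 0;
    }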
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index dcc4ab78b32b..5d0e42b263df 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -94,16 +94,12 @@ static irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data)
94 return IRQ_HANDLED; 94 return IRQ_HANDLED;
95} 95}
96 96
97static void ioat1_cleanup_tasklet(unsigned long data);
98
99/* common channel initialization */ 97/* common channel initialization */
100void ioat_init_channel(struct ioatdma_device *device, 98void ioat_init_channel(struct ioatdma_device *device, struct ioat_chan_common *chan, int idx)
101 struct ioat_chan_common *chan, int idx,
102 void (*timer_fn)(unsigned long),
103 void (*tasklet)(unsigned long),
104 unsigned long ioat)
105{ 99{
106 struct dma_device *dma = &device->common; 100 struct dma_device *dma = &device->common;
101 struct dma_chan *c = &chan->common;
102 unsigned long data = (unsigned long) c;
107 103
108 chan->device = device; 104 chan->device = device;
109 chan->reg_base = device->reg_base + (0x80 * (idx + 1)); 105 chan->reg_base = device->reg_base + (0x80 * (idx + 1));
@@ -112,14 +108,12 @@ void ioat_init_channel(struct ioatdma_device *device,
112 list_add_tail(&chan->common.device_node, &dma->channels); 108 list_add_tail(&chan->common.device_node, &dma->channels);
113 device->idx[idx] = chan; 109 device->idx[idx] = chan;
114 init_timer(&chan->timer); 110 init_timer(&chan->timer);
115 chan->timer.function = timer_fn; 111 chan->timer.function = device->timer_fn;
116 chan->timer.data = ioat; 112 chan->timer.data = data;
117 tasklet_init(&chan->cleanup_task, tasklet, ioat); 113 tasklet_init(&chan->cleanup_task, device->cleanup_fn, data);
118 tasklet_disable(&chan->cleanup_task); 114 tasklet_disable(&chan->cleanup_task);
119} 115}
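The ioat change moves the version-specific timer and cleanup callbacks into struct ioatdma_device, so the shared channel-init path no longer takes per-version arguments. A minimal sketch of that dispatch shape (names are illustrative, not the driver's):

    #include <stdio.h>

    struct device {
            void (*timer_fn)(unsigned long data);
            void (*cleanup_fn)(unsigned long data);
    };

    static void v2_timer(unsigned long d)   { printf("v2 timer %lu\n", d); }
    static void v2_cleanup(unsigned long d) { printf("v2 cleanup %lu\n", d); }

    /* common init reads the callbacks from the device instead of
     * taking them as parameters */
    static void init_channel(struct device *dev, unsigned long chan)
    {
            dev->timer_fn(chan);
            dev->cleanup_fn(chan);
    }

    int main(void)
    {
            struct device d = { v2_timer, v2_cleanup };
            init_channel(&d, 1);
            return 0;
    }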
120 116
121static void ioat1_timer_event(unsigned long data);
122
123/** 117/**
124 * ioat1_dma_enumerate_channels - find and initialize the device's channels 118 * ioat1_dma_enumerate_channels - find and initialize the device's channels
125 * @device: the device to be enumerated 119 * @device: the device to be enumerated
@@ -155,10 +149,7 @@ static int ioat1_enumerate_channels(struct ioatdma_device *device)
155 if (!ioat) 149 if (!ioat)
156 break; 150 break;
157 151
158 ioat_init_channel(device, &ioat->base, i, 152 ioat_init_channel(device, &ioat->base, i);
159 ioat1_timer_event,
160 ioat1_cleanup_tasklet,
161 (unsigned long) ioat);
162 ioat->xfercap = xfercap; 153 ioat->xfercap = xfercap;
163 spin_lock_init(&ioat->desc_lock); 154 spin_lock_init(&ioat->desc_lock);
164 INIT_LIST_HEAD(&ioat->free_desc); 155 INIT_LIST_HEAD(&ioat->free_desc);
@@ -532,12 +523,12 @@ ioat1_dma_prep_memcpy(struct dma_chan *c, dma_addr_t dma_dest,
532 return &desc->txd; 523 return &desc->txd;
533} 524}
534 525
535static void ioat1_cleanup_tasklet(unsigned long data) 526static void ioat1_cleanup_event(unsigned long data)
536{ 527{
537 struct ioat_dma_chan *chan = (void *)data; 528 struct ioat_dma_chan *ioat = to_ioat_chan((void *) data);
538 529
539 ioat1_cleanup(chan); 530 ioat1_cleanup(ioat);
540 writew(IOAT_CHANCTRL_RUN, chan->base.reg_base + IOAT_CHANCTRL_OFFSET); 531 writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
541} 532}
542 533
543void ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags, 534void ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags,
@@ -687,7 +678,7 @@ static void ioat1_cleanup(struct ioat_dma_chan *ioat)
687 678
688static void ioat1_timer_event(unsigned long data) 679static void ioat1_timer_event(unsigned long data)
689{ 680{
690 struct ioat_dma_chan *ioat = (void *) data; 681 struct ioat_dma_chan *ioat = to_ioat_chan((void *) data);
691 struct ioat_chan_common *chan = &ioat->base; 682 struct ioat_chan_common *chan = &ioat->base;
692 683
693 dev_dbg(to_dev(chan), "%s: state: %lx\n", __func__, chan->state); 684 dev_dbg(to_dev(chan), "%s: state: %lx\n", __func__, chan->state);
@@ -734,16 +725,17 @@ static void ioat1_timer_event(unsigned long data)
734 spin_unlock_bh(&chan->cleanup_lock); 725 spin_unlock_bh(&chan->cleanup_lock);
735} 726}
736 727
737static enum dma_status 728enum dma_status
738ioat1_dma_is_complete(struct dma_chan *c, dma_cookie_t cookie, 729ioat_is_dma_complete(struct dma_chan *c, dma_cookie_t cookie,
739 dma_cookie_t *done, dma_cookie_t *used) 730 dma_cookie_t *done, dma_cookie_t *used)
740{ 731{
741 struct ioat_dma_chan *ioat = to_ioat_chan(c); 732 struct ioat_chan_common *chan = to_chan_common(c);
733 struct ioatdma_device *device = chan->device;
742 734
743 if (ioat_is_complete(c, cookie, done, used) == DMA_SUCCESS) 735 if (ioat_is_complete(c, cookie, done, used) == DMA_SUCCESS)
744 return DMA_SUCCESS; 736 return DMA_SUCCESS;
745 737
746 ioat1_cleanup(ioat); 738 device->cleanup_fn((unsigned long) c);
747 739
748 return ioat_is_complete(c, cookie, done, used); 740 return ioat_is_complete(c, cookie, done, used);
749} 741}
@@ -1199,12 +1191,14 @@ int __devinit ioat1_dma_probe(struct ioatdma_device *device, int dca)
1199 device->intr_quirk = ioat1_intr_quirk; 1191 device->intr_quirk = ioat1_intr_quirk;
1200 device->enumerate_channels = ioat1_enumerate_channels; 1192 device->enumerate_channels = ioat1_enumerate_channels;
1201 device->self_test = ioat_dma_self_test; 1193 device->self_test = ioat_dma_self_test;
1194 device->timer_fn = ioat1_timer_event;
1195 device->cleanup_fn = ioat1_cleanup_event;
1202 dma = &device->common; 1196 dma = &device->common;
1203 dma->device_prep_dma_memcpy = ioat1_dma_prep_memcpy; 1197 dma->device_prep_dma_memcpy = ioat1_dma_prep_memcpy;
1204 dma->device_issue_pending = ioat1_dma_memcpy_issue_pending; 1198 dma->device_issue_pending = ioat1_dma_memcpy_issue_pending;
1205 dma->device_alloc_chan_resources = ioat1_dma_alloc_chan_resources; 1199 dma->device_alloc_chan_resources = ioat1_dma_alloc_chan_resources;
1206 dma->device_free_chan_resources = ioat1_dma_free_chan_resources; 1200 dma->device_free_chan_resources = ioat1_dma_free_chan_resources;
1207 dma->device_is_tx_complete = ioat1_dma_is_complete; 1201 dma->device_is_tx_complete = ioat_is_dma_complete;
1208 1202
1209 err = ioat_probe(device); 1203 err = ioat_probe(device);
1210 if (err) 1204 if (err)
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
index bbc3e78ef333..4f747a254074 100644
--- a/drivers/dma/ioat/dma.h
+++ b/drivers/dma/ioat/dma.h
@@ -61,7 +61,7 @@
61 * @intr_quirk: interrupt setup quirk (for ioat_v1 devices) 61 * @intr_quirk: interrupt setup quirk (for ioat_v1 devices)
62 * @enumerate_channels: hw version specific channel enumeration 62 * @enumerate_channels: hw version specific channel enumeration
63 * @reset_hw: hw version specific channel (re)initialization 63 * @reset_hw: hw version specific channel (re)initialization
64 * @cleanup_tasklet: select between the v2 and v3 cleanup routines 64 * @cleanup_fn: select between the v2 and v3 cleanup routines
65 * @timer_fn: select between the v2 and v3 timer watchdog routines 65 * @timer_fn: select between the v2 and v3 timer watchdog routines
66 * @self_test: hardware version specific self test for each supported op type 66 * @self_test: hardware version specific self test for each supported op type
67 * 67 *
@@ -80,7 +80,7 @@ struct ioatdma_device {
80 void (*intr_quirk)(struct ioatdma_device *device); 80 void (*intr_quirk)(struct ioatdma_device *device);
81 int (*enumerate_channels)(struct ioatdma_device *device); 81 int (*enumerate_channels)(struct ioatdma_device *device);
82 int (*reset_hw)(struct ioat_chan_common *chan); 82 int (*reset_hw)(struct ioat_chan_common *chan);
83 void (*cleanup_tasklet)(unsigned long data); 83 void (*cleanup_fn)(unsigned long data);
84 void (*timer_fn)(unsigned long data); 84 void (*timer_fn)(unsigned long data);
85 int (*self_test)(struct ioatdma_device *device); 85 int (*self_test)(struct ioatdma_device *device);
86}; 86};
@@ -337,10 +337,9 @@ struct dca_provider * __devinit ioat_dca_init(struct pci_dev *pdev,
337 void __iomem *iobase); 337 void __iomem *iobase);
338unsigned long ioat_get_current_completion(struct ioat_chan_common *chan); 338unsigned long ioat_get_current_completion(struct ioat_chan_common *chan);
339void ioat_init_channel(struct ioatdma_device *device, 339void ioat_init_channel(struct ioatdma_device *device,
340 struct ioat_chan_common *chan, int idx, 340 struct ioat_chan_common *chan, int idx);
341 void (*timer_fn)(unsigned long), 341enum dma_status ioat_is_dma_complete(struct dma_chan *c, dma_cookie_t cookie,
342 void (*tasklet)(unsigned long), 342 dma_cookie_t *done, dma_cookie_t *used);
343 unsigned long ioat);
344void ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags, 343void ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags,
345 size_t len, struct ioat_dma_descriptor *hw); 344 size_t len, struct ioat_dma_descriptor *hw);
346bool ioat_cleanup_preamble(struct ioat_chan_common *chan, 345bool ioat_cleanup_preamble(struct ioat_chan_common *chan,
diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c
index 5cc37afe2bc1..1ed5d66d7dca 100644
--- a/drivers/dma/ioat/dma_v2.c
+++ b/drivers/dma/ioat/dma_v2.c
@@ -51,48 +51,40 @@ MODULE_PARM_DESC(ioat_ring_max_alloc_order,
51 51
52void __ioat2_issue_pending(struct ioat2_dma_chan *ioat) 52void __ioat2_issue_pending(struct ioat2_dma_chan *ioat)
53{ 53{
54 void * __iomem reg_base = ioat->base.reg_base; 54 struct ioat_chan_common *chan = &ioat->base;
55 55
56 ioat->pending = 0;
57 ioat->dmacount += ioat2_ring_pending(ioat); 56 ioat->dmacount += ioat2_ring_pending(ioat);
58 ioat->issued = ioat->head; 57 ioat->issued = ioat->head;
59 /* make descriptor updates globally visible before notifying channel */ 58 /* make descriptor updates globally visible before notifying channel */
60 wmb(); 59 wmb();
61 writew(ioat->dmacount, reg_base + IOAT_CHAN_DMACOUNT_OFFSET); 60 writew(ioat->dmacount, chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET);
62 dev_dbg(to_dev(&ioat->base), 61 dev_dbg(to_dev(chan),
63 "%s: head: %#x tail: %#x issued: %#x count: %#x\n", 62 "%s: head: %#x tail: %#x issued: %#x count: %#x\n",
64 __func__, ioat->head, ioat->tail, ioat->issued, ioat->dmacount); 63 __func__, ioat->head, ioat->tail, ioat->issued, ioat->dmacount);
65} 64}
66 65
67void ioat2_issue_pending(struct dma_chan *chan) 66void ioat2_issue_pending(struct dma_chan *c)
68{ 67{
69 struct ioat2_dma_chan *ioat = to_ioat2_chan(chan); 68 struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
70 69
71 spin_lock_bh(&ioat->ring_lock); 70 if (ioat2_ring_pending(ioat)) {
72 if (ioat->pending == 1) 71 spin_lock_bh(&ioat->ring_lock);
73 __ioat2_issue_pending(ioat); 72 __ioat2_issue_pending(ioat);
74 spin_unlock_bh(&ioat->ring_lock); 73 spin_unlock_bh(&ioat->ring_lock);
74 }
75} 75}
76 76
77/** 77/**
78 * ioat2_update_pending - log pending descriptors 78 * ioat2_update_pending - log pending descriptors
79 * @ioat: ioat2+ channel 79 * @ioat: ioat2+ channel
80 * 80 *
81 * set pending to '1' unless pending is already set to '2', pending == 2 81 * Check if the number of unsubmitted descriptors has exceeded the
82 * indicates that submission is temporarily blocked due to an in-flight 82 * watermark. Called with ring_lock held
83 * reset. If we are already above the ioat_pending_level threshold then
84 * just issue pending.
85 *
86 * called with ring_lock held
87 */ 83 */
88static void ioat2_update_pending(struct ioat2_dma_chan *ioat) 84static void ioat2_update_pending(struct ioat2_dma_chan *ioat)
89{ 85{
90 if (unlikely(ioat->pending == 2)) 86 if (ioat2_ring_pending(ioat) > ioat_pending_level)
91 return;
92 else if (ioat2_ring_pending(ioat) > ioat_pending_level)
93 __ioat2_issue_pending(ioat); 87 __ioat2_issue_pending(ioat);
94 else
95 ioat->pending = 1;
96} 88}
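With the 'pending' flag gone, ioat2_update_pending() derives everything from head/issued: the doorbell is written only once more than ioat_pending_level descriptors are queued. A standalone model of the watermark (the level value is illustrative):

    #include <stdio.h>

    #define PENDING_LEVEL 4         /* illustrative watermark */

    struct ring { unsigned head, issued; };

    static unsigned ring_pending(const struct ring *r)
    {
            return r->head - r->issued;
    }

    static void update_pending(struct ring *r)
    {
            if (ring_pending(r) > PENDING_LEVEL) {
                    r->issued = r->head;    /* doorbell write */
                    printf("doorbell at head %u\n", r->head);
            }
    }

    int main(void)
    {
            struct ring r = { 0, 0 };
            int i;

            for (i = 0; i < 10; i++) {
                    r.head++;
                    update_pending(&r);
            }
            return 0;
    }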
97 89
98static void __ioat2_start_null_desc(struct ioat2_dma_chan *ioat) 90static void __ioat2_start_null_desc(struct ioat2_dma_chan *ioat)
@@ -166,7 +158,7 @@ static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete)
166 seen_current = true; 158 seen_current = true;
167 } 159 }
168 ioat->tail += i; 160 ioat->tail += i;
169 BUG_ON(!seen_current); /* no active descs have written a completion? */ 161 BUG_ON(active && !seen_current); /* no active descs have written a completion? */
170 162
171 chan->last_completion = phys_complete; 163 chan->last_completion = phys_complete;
172 if (ioat->head == ioat->tail) { 164 if (ioat->head == ioat->tail) {
@@ -207,9 +199,9 @@ static void ioat2_cleanup(struct ioat2_dma_chan *ioat)
207 spin_unlock_bh(&chan->cleanup_lock); 199 spin_unlock_bh(&chan->cleanup_lock);
208} 200}
209 201
210void ioat2_cleanup_tasklet(unsigned long data) 202void ioat2_cleanup_event(unsigned long data)
211{ 203{
212 struct ioat2_dma_chan *ioat = (void *) data; 204 struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
213 205
214 ioat2_cleanup(ioat); 206 ioat2_cleanup(ioat);
215 writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET); 207 writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
@@ -291,7 +283,7 @@ static void ioat2_restart_channel(struct ioat2_dma_chan *ioat)
291 283
292void ioat2_timer_event(unsigned long data) 284void ioat2_timer_event(unsigned long data)
293{ 285{
294 struct ioat2_dma_chan *ioat = (void *) data; 286 struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
295 struct ioat_chan_common *chan = &ioat->base; 287 struct ioat_chan_common *chan = &ioat->base;
296 288
297 spin_lock_bh(&chan->cleanup_lock); 289 spin_lock_bh(&chan->cleanup_lock);
@@ -397,10 +389,7 @@ int ioat2_enumerate_channels(struct ioatdma_device *device)
397 if (!ioat) 389 if (!ioat)
398 break; 390 break;
399 391
400 ioat_init_channel(device, &ioat->base, i, 392 ioat_init_channel(device, &ioat->base, i);
401 device->timer_fn,
402 device->cleanup_tasklet,
403 (unsigned long) ioat);
404 ioat->xfercap_log = xfercap_log; 393 ioat->xfercap_log = xfercap_log;
405 spin_lock_init(&ioat->ring_lock); 394 spin_lock_init(&ioat->ring_lock);
406 if (device->reset_hw(&ioat->base)) { 395 if (device->reset_hw(&ioat->base)) {
@@ -546,7 +535,6 @@ int ioat2_alloc_chan_resources(struct dma_chan *c)
546 ioat->head = 0; 535 ioat->head = 0;
547 ioat->issued = 0; 536 ioat->issued = 0;
548 ioat->tail = 0; 537 ioat->tail = 0;
549 ioat->pending = 0;
550 ioat->alloc_order = order; 538 ioat->alloc_order = order;
551 spin_unlock_bh(&ioat->ring_lock); 539 spin_unlock_bh(&ioat->ring_lock);
552 540
@@ -701,7 +689,7 @@ int ioat2_alloc_and_lock(u16 *idx, struct ioat2_dma_chan *ioat, int num_descs)
701 689
702 mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT); 690 mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
703 spin_unlock_bh(&chan->cleanup_lock); 691 spin_unlock_bh(&chan->cleanup_lock);
704 device->timer_fn((unsigned long) ioat); 692 device->timer_fn((unsigned long) &chan->common);
705 } else 693 } else
706 spin_unlock_bh(&chan->cleanup_lock); 694 spin_unlock_bh(&chan->cleanup_lock);
707 return -ENOMEM; 695 return -ENOMEM;
@@ -785,7 +773,7 @@ void ioat2_free_chan_resources(struct dma_chan *c)
785 773
786 tasklet_disable(&chan->cleanup_task); 774 tasklet_disable(&chan->cleanup_task);
787 del_timer_sync(&chan->timer); 775 del_timer_sync(&chan->timer);
788 device->cleanup_tasklet((unsigned long) ioat); 776 device->cleanup_fn((unsigned long) c);
789 device->reset_hw(chan); 777 device->reset_hw(chan);
790 778
791 spin_lock_bh(&ioat->ring_lock); 779 spin_lock_bh(&ioat->ring_lock);
@@ -815,25 +803,9 @@ void ioat2_free_chan_resources(struct dma_chan *c)
815 803
816 chan->last_completion = 0; 804 chan->last_completion = 0;
817 chan->completion_dma = 0; 805 chan->completion_dma = 0;
818 ioat->pending = 0;
819 ioat->dmacount = 0; 806 ioat->dmacount = 0;
820} 807}
821 808
822enum dma_status
823ioat2_is_complete(struct dma_chan *c, dma_cookie_t cookie,
824 dma_cookie_t *done, dma_cookie_t *used)
825{
826 struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
827 struct ioatdma_device *device = ioat->base.device;
828
829 if (ioat_is_complete(c, cookie, done, used) == DMA_SUCCESS)
830 return DMA_SUCCESS;
831
832 device->cleanup_tasklet((unsigned long) ioat);
833
834 return ioat_is_complete(c, cookie, done, used);
835}
836
837static ssize_t ring_size_show(struct dma_chan *c, char *page) 809static ssize_t ring_size_show(struct dma_chan *c, char *page)
838{ 810{
839 struct ioat2_dma_chan *ioat = to_ioat2_chan(c); 811 struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
@@ -874,7 +846,7 @@ int __devinit ioat2_dma_probe(struct ioatdma_device *device, int dca)
874 846
875 device->enumerate_channels = ioat2_enumerate_channels; 847 device->enumerate_channels = ioat2_enumerate_channels;
876 device->reset_hw = ioat2_reset_hw; 848 device->reset_hw = ioat2_reset_hw;
877 device->cleanup_tasklet = ioat2_cleanup_tasklet; 849 device->cleanup_fn = ioat2_cleanup_event;
878 device->timer_fn = ioat2_timer_event; 850 device->timer_fn = ioat2_timer_event;
879 device->self_test = ioat_dma_self_test; 851 device->self_test = ioat_dma_self_test;
880 dma = &device->common; 852 dma = &device->common;
@@ -882,7 +854,7 @@ int __devinit ioat2_dma_probe(struct ioatdma_device *device, int dca)
882 dma->device_issue_pending = ioat2_issue_pending; 854 dma->device_issue_pending = ioat2_issue_pending;
883 dma->device_alloc_chan_resources = ioat2_alloc_chan_resources; 855 dma->device_alloc_chan_resources = ioat2_alloc_chan_resources;
884 dma->device_free_chan_resources = ioat2_free_chan_resources; 856 dma->device_free_chan_resources = ioat2_free_chan_resources;
885 dma->device_is_tx_complete = ioat2_is_complete; 857 dma->device_is_tx_complete = ioat_is_dma_complete;
886 858
887 err = ioat_probe(device); 859 err = ioat_probe(device);
888 if (err) 860 if (err)
diff --git a/drivers/dma/ioat/dma_v2.h b/drivers/dma/ioat/dma_v2.h
index 3afad8da43cc..ef2871fd7868 100644
--- a/drivers/dma/ioat/dma_v2.h
+++ b/drivers/dma/ioat/dma_v2.h
@@ -47,7 +47,6 @@ extern int ioat_ring_alloc_order;
47 * @head: allocated index 47 * @head: allocated index
48 * @issued: hardware notification point 48 * @issued: hardware notification point
49 * @tail: cleanup index 49 * @tail: cleanup index
50 * @pending: lock free indicator for issued != head
51 * @dmacount: identical to 'head' except for occasionally resetting to zero 50 * @dmacount: identical to 'head' except for occasionally resetting to zero
52 * @alloc_order: log2 of the number of allocated descriptors 51 * @alloc_order: log2 of the number of allocated descriptors
53 * @ring: software ring buffer implementation of hardware ring 52 * @ring: software ring buffer implementation of hardware ring
@@ -61,7 +60,6 @@ struct ioat2_dma_chan {
61 u16 tail; 60 u16 tail;
62 u16 dmacount; 61 u16 dmacount;
63 u16 alloc_order; 62 u16 alloc_order;
64 int pending;
65 struct ioat_ring_ent **ring; 63 struct ioat_ring_ent **ring;
66 spinlock_t ring_lock; 64 spinlock_t ring_lock;
67}; 65};
@@ -178,12 +176,10 @@ ioat2_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest,
178void ioat2_issue_pending(struct dma_chan *chan); 176void ioat2_issue_pending(struct dma_chan *chan);
179int ioat2_alloc_chan_resources(struct dma_chan *c); 177int ioat2_alloc_chan_resources(struct dma_chan *c);
180void ioat2_free_chan_resources(struct dma_chan *c); 178void ioat2_free_chan_resources(struct dma_chan *c);
181enum dma_status ioat2_is_complete(struct dma_chan *c, dma_cookie_t cookie,
182 dma_cookie_t *done, dma_cookie_t *used);
183void __ioat2_restart_chan(struct ioat2_dma_chan *ioat); 179void __ioat2_restart_chan(struct ioat2_dma_chan *ioat);
184bool reshape_ring(struct ioat2_dma_chan *ioat, int order); 180bool reshape_ring(struct ioat2_dma_chan *ioat, int order);
185void __ioat2_issue_pending(struct ioat2_dma_chan *ioat); 181void __ioat2_issue_pending(struct ioat2_dma_chan *ioat);
186void ioat2_cleanup_tasklet(unsigned long data); 182void ioat2_cleanup_event(unsigned long data);
187void ioat2_timer_event(unsigned long data); 183void ioat2_timer_event(unsigned long data);
188int ioat2_quiesce(struct ioat_chan_common *chan, unsigned long tmo); 184int ioat2_quiesce(struct ioat_chan_common *chan, unsigned long tmo);
189int ioat2_reset_sync(struct ioat_chan_common *chan, unsigned long tmo); 185int ioat2_reset_sync(struct ioat_chan_common *chan, unsigned long tmo);
diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
index 9908c9e94b2d..26febc56dab1 100644
--- a/drivers/dma/ioat/dma_v3.c
+++ b/drivers/dma/ioat/dma_v3.c
@@ -293,17 +293,25 @@ static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete)
293 } 293 }
294 } 294 }
295 ioat->tail += i; 295 ioat->tail += i;
296 BUG_ON(!seen_current); /* no active descs have written a completion? */ 296 BUG_ON(active && !seen_current); /* no active descs have written a completion? */
297 chan->last_completion = phys_complete; 297 chan->last_completion = phys_complete;
298 if (ioat->head == ioat->tail) { 298
299 active = ioat2_ring_active(ioat);
300 if (active == 0) {
299 dev_dbg(to_dev(chan), "%s: cancel completion timeout\n", 301 dev_dbg(to_dev(chan), "%s: cancel completion timeout\n",
300 __func__); 302 __func__);
301 clear_bit(IOAT_COMPLETION_PENDING, &chan->state); 303 clear_bit(IOAT_COMPLETION_PENDING, &chan->state);
302 mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT); 304 mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
303 } 305 }
306 /* 5 microsecond delay per pending descriptor */
307 writew(min((5 * active), IOAT_INTRDELAY_MASK),
308 chan->device->reg_base + IOAT_INTRDELAY_OFFSET);
304} 309}
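The v3 cleanup path now programs INTRDELAY in proportion to the outstanding work: 5 microseconds per pending descriptor, clamped to the register's maximum. A standalone sketch of the clamp (the mask value here is illustrative, not the real IOAT_INTRDELAY_MASK):

    #include <stdio.h>

    #define INTRDELAY_MASK 0x3fff   /* illustrative maximum */

    static unsigned intr_delay(unsigned active)
    {
            unsigned delay = 5 * active;    /* 5 us per descriptor */

            return delay < INTRDELAY_MASK ? delay : INTRDELAY_MASK;
    }

    int main(void)
    {
            printf("%u\n", intr_delay(3));      /* 15 */
            printf("%u\n", intr_delay(100000)); /* clamped */
            return 0;
    }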
305 310
306static void ioat3_cleanup(struct ioat2_dma_chan *ioat) 311/* try to cleanup, but yield (via spin_trylock) to incoming submissions
312 * with the expectation that we will immediately poll again shortly
313 */
314static void ioat3_cleanup_poll(struct ioat2_dma_chan *ioat)
307{ 315{
308 struct ioat_chan_common *chan = &ioat->base; 316 struct ioat_chan_common *chan = &ioat->base;
309 unsigned long phys_complete; 317 unsigned long phys_complete;
@@ -329,29 +337,41 @@ static void ioat3_cleanup(struct ioat2_dma_chan *ioat)
329 spin_unlock_bh(&chan->cleanup_lock); 337 spin_unlock_bh(&chan->cleanup_lock);
330} 338}
331 339
332static void ioat3_cleanup_tasklet(unsigned long data) 340/* run cleanup now because we already delayed the interrupt via INTRDELAY */
341static void ioat3_cleanup_sync(struct ioat2_dma_chan *ioat)
342{
343 struct ioat_chan_common *chan = &ioat->base;
344 unsigned long phys_complete;
345
346 prefetch(chan->completion);
347
348 spin_lock_bh(&chan->cleanup_lock);
349 if (!ioat_cleanup_preamble(chan, &phys_complete)) {
350 spin_unlock_bh(&chan->cleanup_lock);
351 return;
352 }
353 spin_lock_bh(&ioat->ring_lock);
354
355 __cleanup(ioat, phys_complete);
356
357 spin_unlock_bh(&ioat->ring_lock);
358 spin_unlock_bh(&chan->cleanup_lock);
359}
360
361static void ioat3_cleanup_event(unsigned long data)
333{ 362{
334 struct ioat2_dma_chan *ioat = (void *) data; 363 struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
335 364
336 ioat3_cleanup(ioat); 365 ioat3_cleanup_sync(ioat);
337 writew(IOAT_CHANCTRL_RUN | IOAT3_CHANCTRL_COMPL_DCA_EN, 366 writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
338 ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
339} 367}
340 368
341static void ioat3_restart_channel(struct ioat2_dma_chan *ioat) 369static void ioat3_restart_channel(struct ioat2_dma_chan *ioat)
342{ 370{
343 struct ioat_chan_common *chan = &ioat->base; 371 struct ioat_chan_common *chan = &ioat->base;
344 unsigned long phys_complete; 372 unsigned long phys_complete;
345 u32 status;
346
347 status = ioat_chansts(chan);
348 if (is_ioat_active(status) || is_ioat_idle(status))
349 ioat_suspend(chan);
350 while (is_ioat_active(status) || is_ioat_idle(status)) {
351 status = ioat_chansts(chan);
352 cpu_relax();
353 }
354 373
374 ioat2_quiesce(chan, 0);
355 if (ioat_cleanup_preamble(chan, &phys_complete)) 375 if (ioat_cleanup_preamble(chan, &phys_complete))
356 __cleanup(ioat, phys_complete); 376 __cleanup(ioat, phys_complete);
357 377
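The hunk above splits the old ioat3_cleanup() into two entry points: ioat3_cleanup_poll() for the completion-polling path, which per its comment yields to submitters via a trylock, and ioat3_cleanup_sync() for the tasklet path, which takes the locks unconditionally because the hardware already delayed the interrupt. A minimal sketch of the yielding poll variant, using the driver's helper names; the body is illustrative, not the exact upstream code:

	/* Sketch of the poll path described in the comment above. */
	static void cleanup_poll_sketch(struct ioat2_dma_chan *ioat)
	{
		struct ioat_chan_common *chan = &ioat->base;
		unsigned long phys_complete;

		if (!spin_trylock_bh(&chan->cleanup_lock))
			return;		/* a submitter holds it; we poll again shortly */

		if (ioat_cleanup_preamble(chan, &phys_complete))
			__cleanup(ioat, phys_complete);

		spin_unlock_bh(&chan->cleanup_lock);
	}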
@@ -360,7 +380,7 @@ static void ioat3_restart_channel(struct ioat2_dma_chan *ioat)
360 380
361static void ioat3_timer_event(unsigned long data) 381static void ioat3_timer_event(unsigned long data)
362{ 382{
363 struct ioat2_dma_chan *ioat = (void *) data; 383 struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
364 struct ioat_chan_common *chan = &ioat->base; 384 struct ioat_chan_common *chan = &ioat->base;
365 385
366 spin_lock_bh(&chan->cleanup_lock); 386 spin_lock_bh(&chan->cleanup_lock);
@@ -426,7 +446,7 @@ ioat3_is_complete(struct dma_chan *c, dma_cookie_t cookie,
426 if (ioat_is_complete(c, cookie, done, used) == DMA_SUCCESS) 446 if (ioat_is_complete(c, cookie, done, used) == DMA_SUCCESS)
427 return DMA_SUCCESS; 447 return DMA_SUCCESS;
428 448
429 ioat3_cleanup(ioat); 449 ioat3_cleanup_poll(ioat);
430 450
431 return ioat_is_complete(c, cookie, done, used); 451 return ioat_is_complete(c, cookie, done, used);
432} 452}
@@ -1239,11 +1259,11 @@ int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca)
1239 1259
1240 if (is_raid_device) { 1260 if (is_raid_device) {
1241 dma->device_is_tx_complete = ioat3_is_complete; 1261 dma->device_is_tx_complete = ioat3_is_complete;
1242 device->cleanup_tasklet = ioat3_cleanup_tasklet; 1262 device->cleanup_fn = ioat3_cleanup_event;
1243 device->timer_fn = ioat3_timer_event; 1263 device->timer_fn = ioat3_timer_event;
1244 } else { 1264 } else {
1245 dma->device_is_tx_complete = ioat2_is_complete; 1265 dma->device_is_tx_complete = ioat_is_dma_complete;
1246 device->cleanup_tasklet = ioat2_cleanup_tasklet; 1266 device->cleanup_fn = ioat2_cleanup_event;
1247 device->timer_fn = ioat2_timer_event; 1267 device->timer_fn = ioat2_timer_event;
1248 } 1268 }
1249 1269
diff --git a/drivers/dma/ioat/registers.h b/drivers/dma/ioat/registers.h
index e8ae63baf588..1391798542b6 100644
--- a/drivers/dma/ioat/registers.h
+++ b/drivers/dma/ioat/registers.h
@@ -60,7 +60,7 @@
60#define IOAT_PERPORTOFFSET_OFFSET 0x0A /* 16-bit */ 60#define IOAT_PERPORTOFFSET_OFFSET 0x0A /* 16-bit */
61 61
62#define IOAT_INTRDELAY_OFFSET 0x0C /* 16-bit */ 62#define IOAT_INTRDELAY_OFFSET 0x0C /* 16-bit */
63#define IOAT_INTRDELAY_INT_DELAY_MASK 0x3FFF /* Interrupt Delay Time */ 63#define IOAT_INTRDELAY_MASK 0x3FFF /* Interrupt Delay Time */
64#define IOAT_INTRDELAY_COALESE_SUPPORT 0x8000 /* Interrupt Coalescing Supported */ 64#define IOAT_INTRDELAY_COALESE_SUPPORT 0x8000 /* Interrupt Coalescing Supported */
65 65
66#define IOAT_DEVICE_STATUS_OFFSET 0x0E /* 16-bit */ 66#define IOAT_DEVICE_STATUS_OFFSET 0x0E /* 16-bit */
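The rename to IOAT_INTRDELAY_MASK pairs with the new coalescing write in __cleanup() above: the delay field is only 14 bits wide, so the computed per-descriptor delay must be clamped to it. A hedged sketch of that write, factored into a helper for clarity:

	/* Illustration only: 5 us of interrupt delay per in-flight
	 * descriptor, clamped to the 14-bit field defined above. */
	static void set_intr_delay(struct ioat_chan_common *chan, u16 active)
	{
		writew(min_t(u16, 5 * active, IOAT_INTRDELAY_MASK),
		       chan->device->reg_base + IOAT_INTRDELAY_OFFSET);
	}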
diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c
index e80bae1673fa..2a446397c884 100644
--- a/drivers/dma/ipu/ipu_idmac.c
+++ b/drivers/dma/ipu/ipu_idmac.c
@@ -348,6 +348,7 @@ static void ipu_ch_param_set_size(union chan_param_mem *params,
348 break; 348 break;
349 case IPU_PIX_FMT_BGRA32: 349 case IPU_PIX_FMT_BGRA32:
350 case IPU_PIX_FMT_BGR32: 350 case IPU_PIX_FMT_BGR32:
351 case IPU_PIX_FMT_ABGR32:
351 params->ip.bpp = 0; 352 params->ip.bpp = 0;
352 params->ip.pfs = 4; 353 params->ip.pfs = 4;
353 params->ip.npb = 7; 354 params->ip.npb = 7;
@@ -376,20 +377,6 @@ static void ipu_ch_param_set_size(union chan_param_mem *params,
376 params->ip.wid2 = 7; /* Blue bit width - 1 */ 377 params->ip.wid2 = 7; /* Blue bit width - 1 */
377 params->ip.wid3 = 7; /* Alpha bit width - 1 */ 378 params->ip.wid3 = 7; /* Alpha bit width - 1 */
378 break; 379 break;
379 case IPU_PIX_FMT_ABGR32:
380 params->ip.bpp = 0;
381 params->ip.pfs = 4;
382 params->ip.npb = 7;
383 params->ip.sat = 2; /* SAT = 32-bit access */
384 params->ip.ofs0 = 8; /* Red bit offset */
385 params->ip.ofs1 = 16; /* Green bit offset */
386 params->ip.ofs2 = 24; /* Blue bit offset */
387 params->ip.ofs3 = 0; /* Alpha bit offset */
388 params->ip.wid0 = 7; /* Red bit width - 1 */
389 params->ip.wid1 = 7; /* Green bit width - 1 */
390 params->ip.wid2 = 7; /* Blue bit width - 1 */
391 params->ip.wid3 = 7; /* Alpha bit width - 1 */
392 break;
393 case IPU_PIX_FMT_UYVY: 380 case IPU_PIX_FMT_UYVY:
394 params->ip.bpp = 2; 381 params->ip.bpp = 2;
395 params->ip.pfs = 6; 382 params->ip.pfs = 6;
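The ipu_idmac change folds IPU_PIX_FMT_ABGR32 into the existing BGRA32/BGR32 case instead of keeping a duplicated parameter block. Grouped case labels are the idiomatic way to share one body across formats the hardware is programmed identically for; the shared values shown in the first hunk look like this in isolation:

	/* Shape of the consolidation; the shared body is the one in the
	 * first hunk above, shown here out of context. */
	switch (pixel_fmt) {
	case IPU_PIX_FMT_BGRA32:
	case IPU_PIX_FMT_BGR32:
	case IPU_PIX_FMT_ABGR32:
		params->ip.bpp = 0;	/* 32-bit pixels */
		params->ip.pfs = 4;
		params->ip.npb = 7;
		break;
	}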
diff --git a/drivers/dma/mpc512x_dma.c b/drivers/dma/mpc512x_dma.c
new file mode 100644
index 000000000000..3fdf1f46bd63
--- /dev/null
+++ b/drivers/dma/mpc512x_dma.c
@@ -0,0 +1,800 @@
1/*
2 * Copyright (C) Freescale Semiconductor, Inc. 2007, 2008.
3 * Copyright (C) Semihalf 2009
4 *
5 * Written by Piotr Ziecik <kosmo@semihalf.com>. Hardware description
6 * (defines, structures and comments) was taken from MPC5121 DMA driver
7 * written by Hongjun Chen <hong-jun.chen@freescale.com>.
8 *
9 * Approved as an OSADL project by a majority of OSADL members and funded
10 * by OSADL membership fees in 2009; for details see www.osadl.org.
11 *
12 * This program is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License as published by the Free
14 * Software Foundation; either version 2 of the License, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful, but WITHOUT
18 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
19 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
20 * more details.
21 *
22 * You should have received a copy of the GNU General Public License along with
23 * this program; if not, write to the Free Software Foundation, Inc., 59
24 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
25 *
26 * The full GNU General Public License is included in this distribution in the
27 * file called COPYING.
28 */
29
30/*
31 * This is the initial version of the MPC5121 DMA driver. Only memory-to-memory
32 * transfers are supported (tested using the dmatest module).
33 */
34
35#include <linux/module.h>
36#include <linux/dmaengine.h>
37#include <linux/dma-mapping.h>
38#include <linux/interrupt.h>
39#include <linux/io.h>
40#include <linux/of_device.h>
41#include <linux/of_platform.h>
42
43#include <linux/random.h>
44
45/* Number of DMA Transfer descriptors allocated per channel */
46#define MPC_DMA_DESCRIPTORS 64
47
48/* Macro definitions */
49#define MPC_DMA_CHANNELS 64
50#define MPC_DMA_TCD_OFFSET 0x1000
51
52/* Arbitration mode of group and channel */
53#define MPC_DMA_DMACR_EDCG (1 << 31)
54#define MPC_DMA_DMACR_ERGA (1 << 3)
55#define MPC_DMA_DMACR_ERCA (1 << 2)
56
57/* Error codes */
58#define MPC_DMA_DMAES_VLD (1 << 31)
59#define MPC_DMA_DMAES_GPE (1 << 15)
60#define MPC_DMA_DMAES_CPE (1 << 14)
61#define MPC_DMA_DMAES_ERRCHN(err) \
62 (((err) >> 8) & 0x3f)
63#define MPC_DMA_DMAES_SAE (1 << 7)
64#define MPC_DMA_DMAES_SOE (1 << 6)
65#define MPC_DMA_DMAES_DAE (1 << 5)
66#define MPC_DMA_DMAES_DOE (1 << 4)
67#define MPC_DMA_DMAES_NCE (1 << 3)
68#define MPC_DMA_DMAES_SGE (1 << 2)
69#define MPC_DMA_DMAES_SBE (1 << 1)
70#define MPC_DMA_DMAES_DBE (1 << 0)
71
72#define MPC_DMA_TSIZE_1 0x00
73#define MPC_DMA_TSIZE_2 0x01
74#define MPC_DMA_TSIZE_4 0x02
75#define MPC_DMA_TSIZE_16 0x04
76#define MPC_DMA_TSIZE_32 0x05
77
78/* MPC5121 DMA engine registers */
79struct __attribute__ ((__packed__)) mpc_dma_regs {
80 /* 0x00 */
81 u32 dmacr; /* DMA control register */
82 u32 dmaes; /* DMA error status */
83 /* 0x08 */
84 u32 dmaerqh; /* DMA enable request high(channels 63~32) */
85 u32 dmaerql; /* DMA enable request low(channels 31~0) */
86 u32 dmaeeih; /* DMA enable error interrupt high(ch63~32) */
87 u32 dmaeeil; /* DMA enable error interrupt low(ch31~0) */
88 /* 0x18 */
89 u8 dmaserq; /* DMA set enable request */
90 u8 dmacerq; /* DMA clear enable request */
91 u8 dmaseei; /* DMA set enable error interrupt */
92 u8 dmaceei; /* DMA clear enable error interrupt */
93 /* 0x1c */
94 u8 dmacint; /* DMA clear interrupt request */
95 u8 dmacerr; /* DMA clear error */
96 u8 dmassrt; /* DMA set start bit */
97 u8 dmacdne; /* DMA clear DONE status bit */
98 /* 0x20 */
99 u32 dmainth; /* DMA interrupt request high(ch63~32) */
100 u32 dmaintl; /* DMA interrupt request low(ch31~0) */
101 u32 dmaerrh; /* DMA error high(ch63~32) */
102 u32 dmaerrl; /* DMA error low(ch31~0) */
103 /* 0x30 */
104 u32 dmahrsh; /* DMA hw request status high(ch63~32) */
105 u32 dmahrsl; /* DMA hardware request status low(ch31~0) */
106 u32 dmaihsa; /* DMA interrupt high select AXE(ch63~32) */
107 u32 dmailsa; /* DMA interrupt low select AXE(ch31~0) */
108 /* 0x40 ~ 0xff */
109 u32 reserve0[48]; /* Reserved */
110 /* 0x100 */
111 u8 dchpri[MPC_DMA_CHANNELS];
112 /* DMA channels(0~63) priority */
113};
114
115struct __attribute__ ((__packed__)) mpc_dma_tcd {
116 /* 0x00 */
117 u32 saddr; /* Source address */
118
119 u32 smod:5; /* Source address modulo */
120 u32 ssize:3; /* Source data transfer size */
121 u32 dmod:5; /* Destination address modulo */
122 u32 dsize:3; /* Destination data transfer size */
123 u32 soff:16; /* Signed source address offset */
124
125 /* 0x08 */
126 u32 nbytes; /* Inner "minor" byte count */
127 u32 slast; /* Last source address adjustment */
128 u32 daddr; /* Destination address */
129
130 /* 0x14 */
131 u32 citer_elink:1; /* Enable channel-to-channel linking on
132 * minor loop complete
133 */
134 u32 citer_linkch:6; /* Link channel for minor loop complete */
135 u32 citer:9; /* Current "major" iteration count */
136 u32 doff:16; /* Signed destination address offset */
137
138 /* 0x18 */
139 u32 dlast_sga; /* Last Destination address adjustment/scatter
140 * gather address
141 */
142
143 /* 0x1c */
144 u32 biter_elink:1; /* Enable channel-to-channel linking on major
145 * loop complete
146 */
147 u32 biter_linkch:6;
148 u32 biter:9; /* Beginning "major" iteration count */
149 u32 bwc:2; /* Bandwidth control */
150 u32 major_linkch:6; /* Link channel number */
151 u32 done:1; /* Channel done */
152 u32 active:1; /* Channel active */
153 u32 major_elink:1; /* Enable channel-to-channel linking on major
154 * loop complete
155 */
156 u32 e_sg:1; /* Enable scatter/gather processing */
157 u32 d_req:1; /* Disable request */
158 u32 int_half:1; /* Enable an interrupt when major counter is
159 * half complete
160 */
161 u32 int_maj:1; /* Enable an interrupt when major iteration
162 * count completes
163 */
164 u32 start:1; /* Channel start */
165};
166
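struct mpc_dma_tcd mirrors a hardware transfer-control descriptor bit for bit, which is why it is declared packed and why every bitfield is position-sensitive. When editing such a structure it is worth pinning the size at build time; a small sketch, not part of the driver:

	#include <linux/kernel.h>	/* BUILD_BUG_ON() */

	static inline void mpc_dma_tcd_layout_check(void)
	{
		/* eight 32-bit words, 0x00..0x1f, per the offset comments */
		BUILD_BUG_ON(sizeof(struct mpc_dma_tcd) != 0x20);
	}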
167struct mpc_dma_desc {
168 struct dma_async_tx_descriptor desc;
169 struct mpc_dma_tcd *tcd;
170 dma_addr_t tcd_paddr;
171 int error;
172 struct list_head node;
173};
174
175struct mpc_dma_chan {
176 struct dma_chan chan;
177 struct list_head free;
178 struct list_head prepared;
179 struct list_head queued;
180 struct list_head active;
181 struct list_head completed;
182 struct mpc_dma_tcd *tcd;
183 dma_addr_t tcd_paddr;
184 dma_cookie_t completed_cookie;
185
186 /* Lock for this structure */
187 spinlock_t lock;
188};
189
190struct mpc_dma {
191 struct dma_device dma;
192 struct tasklet_struct tasklet;
193 struct mpc_dma_chan channels[MPC_DMA_CHANNELS];
194 struct mpc_dma_regs __iomem *regs;
195 struct mpc_dma_tcd __iomem *tcd;
196 int irq;
197 uint error_status;
198
199 /* Lock for error_status field in this structure */
200 spinlock_t error_status_lock;
201};
202
203#define DRV_NAME "mpc512x_dma"
204
205/* Convert struct dma_chan to struct mpc_dma_chan */
206static inline struct mpc_dma_chan *dma_chan_to_mpc_dma_chan(struct dma_chan *c)
207{
208 return container_of(c, struct mpc_dma_chan, chan);
209}
210
211/* Convert struct dma_chan to struct mpc_dma */
212static inline struct mpc_dma *dma_chan_to_mpc_dma(struct dma_chan *c)
213{
214 struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(c);
215 return container_of(mchan, struct mpc_dma, channels[c->chan_id]);
216}
217
218/*
219 * Execute all queued DMA descriptors.
220 *
221 * The following requirements must be met when calling mpc_dma_execute():
222 * a) mchan->lock is acquired,
223 * b) mchan->active list is empty,
224 * c) mchan->queued list contains at least one entry.
225 */
226static void mpc_dma_execute(struct mpc_dma_chan *mchan)
227{
228 struct mpc_dma *mdma = dma_chan_to_mpc_dma(&mchan->chan);
229 struct mpc_dma_desc *first = NULL;
230 struct mpc_dma_desc *prev = NULL;
231 struct mpc_dma_desc *mdesc;
232 int cid = mchan->chan.chan_id;
233
234 /* Move all queued descriptors to active list */
235 list_splice_tail_init(&mchan->queued, &mchan->active);
236
237 /* Chain descriptors into one transaction */
238 list_for_each_entry(mdesc, &mchan->active, node) {
239 if (!first)
240 first = mdesc;
241
242 if (!prev) {
243 prev = mdesc;
244 continue;
245 }
246
247 prev->tcd->dlast_sga = mdesc->tcd_paddr;
248 prev->tcd->e_sg = 1;
249 mdesc->tcd->start = 1;
250
251 prev = mdesc;
252 }
253
254 prev->tcd->start = 0;
255 prev->tcd->int_maj = 1;
256
257 /* Send first descriptor in chain into hardware */
258 memcpy_toio(&mdma->tcd[cid], first->tcd, sizeof(struct mpc_dma_tcd));
259 out_8(&mdma->regs->dmassrt, cid);
260}
261
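mpc_dma_execute() turns the queued list into one hardware transaction through scatter/gather linking: each TCD's dlast_sga holds the bus address of the next TCD and e_sg tells the engine to reload it automatically, so only the first descriptor is ever copied to the channel registers. A hypothetical two-descriptor chain using the driver's field names:

	/* Hypothetical two-entry chain; only `first` reaches hardware. */
	static void chain_pair(struct mpc_dma_desc *first,
			       struct mpc_dma_desc *second)
	{
		first->tcd->dlast_sga = second->tcd_paddr; /* next TCD's bus address */
		first->tcd->e_sg = 1;		/* auto-reload the next TCD */
		second->tcd->int_maj = 1;	/* interrupt when the tail completes */
	}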
262/* Handle interrupt on one half of DMA controller (32 channels) */
263static void mpc_dma_irq_process(struct mpc_dma *mdma, u32 is, u32 es, int off)
264{
265 struct mpc_dma_chan *mchan;
266 struct mpc_dma_desc *mdesc;
267 u32 status = is | es;
268 int ch;
269
270 while ((ch = fls(status) - 1) >= 0) {
271 status &= ~(1 << ch);
272 mchan = &mdma->channels[ch + off];
273
274 spin_lock(&mchan->lock);
275
276 /* Check error status */
277 if (es & (1 << ch))
278 list_for_each_entry(mdesc, &mchan->active, node)
279 mdesc->error = -EIO;
280
281 /* Execute queued descriptors */
282 list_splice_tail_init(&mchan->active, &mchan->completed);
283 if (!list_empty(&mchan->queued))
284 mpc_dma_execute(mchan);
285
286 spin_unlock(&mchan->lock);
287 }
288}
289
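The `while ((ch = fls(status) - 1) >= 0)` loop is the usual set-bit walk: fls() returns the one-based index of the highest set bit (0 when none is set), so clearing each bit as it is handled visits every pending channel exactly once, highest first. A standalone sketch of the idiom:

	/* Illustration of the fls() walk; handle_channel() is a
	 * hypothetical per-channel hook, not a driver function. */
	static void walk_pending(u32 pending)
	{
		int ch;

		while ((ch = fls(pending) - 1) >= 0) {
			pending &= ~(1u << ch);
			handle_channel(ch);
		}
	}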
290/* Interrupt handler */
291static irqreturn_t mpc_dma_irq(int irq, void *data)
292{
293 struct mpc_dma *mdma = data;
294 uint es;
295
296 /* Save error status register */
297 es = in_be32(&mdma->regs->dmaes);
298 spin_lock(&mdma->error_status_lock);
299 if ((es & MPC_DMA_DMAES_VLD) && mdma->error_status == 0)
300 mdma->error_status = es;
301 spin_unlock(&mdma->error_status_lock);
302
303 /* Handle interrupt on each channel */
304 mpc_dma_irq_process(mdma, in_be32(&mdma->regs->dmainth),
305 in_be32(&mdma->regs->dmaerrh), 32);
306 mpc_dma_irq_process(mdma, in_be32(&mdma->regs->dmaintl),
307 in_be32(&mdma->regs->dmaerrl), 0);
308
309 /* Ack interrupt on all channels */
310 out_be32(&mdma->regs->dmainth, 0xFFFFFFFF);
311 out_be32(&mdma->regs->dmaintl, 0xFFFFFFFF);
312 out_be32(&mdma->regs->dmaerrh, 0xFFFFFFFF);
313 out_be32(&mdma->regs->dmaerrl, 0xFFFFFFFF);
314
315 /* Schedule tasklet */
316 tasklet_schedule(&mdma->tasklet);
317
318 return IRQ_HANDLED;
319}
320
321/* DMA Tasklet */
322static void mpc_dma_tasklet(unsigned long data)
323{
324 struct mpc_dma *mdma = (void *)data;
325 dma_cookie_t last_cookie = 0;
326 struct mpc_dma_chan *mchan;
327 struct mpc_dma_desc *mdesc;
328 struct dma_async_tx_descriptor *desc;
329 unsigned long flags;
330 LIST_HEAD(list);
331 uint es;
332 int i;
333
334 spin_lock_irqsave(&mdma->error_status_lock, flags);
335 es = mdma->error_status;
336 mdma->error_status = 0;
337 spin_unlock_irqrestore(&mdma->error_status_lock, flags);
338
339 /* Print nice error report */
340 if (es) {
341 dev_err(mdma->dma.dev,
342 "Hardware reported following error(s) on channel %u:\n",
343 MPC_DMA_DMAES_ERRCHN(es));
344
345 if (es & MPC_DMA_DMAES_GPE)
346 dev_err(mdma->dma.dev, "- Group Priority Error\n");
347 if (es & MPC_DMA_DMAES_CPE)
348 dev_err(mdma->dma.dev, "- Channel Priority Error\n");
349 if (es & MPC_DMA_DMAES_SAE)
350 dev_err(mdma->dma.dev, "- Source Address Error\n");
351 if (es & MPC_DMA_DMAES_SOE)
352 dev_err(mdma->dma.dev, "- Source Offset"
353 " Configuration Error\n");
354 if (es & MPC_DMA_DMAES_DAE)
355 dev_err(mdma->dma.dev, "- Destination Address"
356 " Error\n");
357 if (es & MPC_DMA_DMAES_DOE)
358 dev_err(mdma->dma.dev, "- Destination Offset"
359 " Configuration Error\n");
360 if (es & MPC_DMA_DMAES_NCE)
361 dev_err(mdma->dma.dev, "- NBytes/Citer"
362 " Configuration Error\n");
363 if (es & MPC_DMA_DMAES_SGE)
364 dev_err(mdma->dma.dev, "- Scatter/Gather"
365 " Configuration Error\n");
366 if (es & MPC_DMA_DMAES_SBE)
367 dev_err(mdma->dma.dev, "- Source Bus Error\n");
368 if (es & MPC_DMA_DMAES_DBE)
369 dev_err(mdma->dma.dev, "- Destination Bus Error\n");
370 }
371
372 for (i = 0; i < mdma->dma.chancnt; i++) {
373 mchan = &mdma->channels[i];
374
375 /* Get all completed descriptors */
376 spin_lock_irqsave(&mchan->lock, flags);
377 if (!list_empty(&mchan->completed))
378 list_splice_tail_init(&mchan->completed, &list);
379 spin_unlock_irqrestore(&mchan->lock, flags);
380
381 if (list_empty(&list))
382 continue;
383
384 /* Execute callbacks and run dependencies */
385 list_for_each_entry(mdesc, &list, node) {
386 desc = &mdesc->desc;
387
388 if (desc->callback)
389 desc->callback(desc->callback_param);
390
391 last_cookie = desc->cookie;
392 dma_run_dependencies(desc);
393 }
394
395 /* Free descriptors */
396 spin_lock_irqsave(&mchan->lock, flags);
397 list_splice_tail_init(&list, &mchan->free);
398 mchan->completed_cookie = last_cookie;
399 spin_unlock_irqrestore(&mchan->lock, flags);
400 }
401}
402
403/* Submit descriptor to hardware */
404static dma_cookie_t mpc_dma_tx_submit(struct dma_async_tx_descriptor *txd)
405{
406 struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(txd->chan);
407 struct mpc_dma_desc *mdesc;
408 unsigned long flags;
409 dma_cookie_t cookie;
410
411 mdesc = container_of(txd, struct mpc_dma_desc, desc);
412
413 spin_lock_irqsave(&mchan->lock, flags);
414
415 /* Move descriptor to queue */
416 list_move_tail(&mdesc->node, &mchan->queued);
417
418 /* If channel is idle, execute all queued descriptors */
419 if (list_empty(&mchan->active))
420 mpc_dma_execute(mchan);
421
422 /* Update cookie */
423 cookie = mchan->chan.cookie + 1;
424 if (cookie <= 0)
425 cookie = 1;
426
427 mchan->chan.cookie = cookie;
428 mdesc->desc.cookie = cookie;
429
430 spin_unlock_irqrestore(&mchan->lock, flags);
431
432 return cookie;
433}
434
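The cookie update follows the dmaengine convention: cookies are signed, grow monotonically, and non-positive values are reserved for error states, so a wrap restarts at 1. As a standalone helper (a sketch; the driver does this inline under mchan->lock):

	static dma_cookie_t assign_cookie(struct dma_chan *chan,
					  struct dma_async_tx_descriptor *txd)
	{
		dma_cookie_t cookie = chan->cookie + 1;

		if (cookie <= 0)	/* wrapped: skip reserved values */
			cookie = 1;

		chan->cookie = cookie;
		txd->cookie = cookie;
		return cookie;
	}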
435/* Alloc channel resources */
436static int mpc_dma_alloc_chan_resources(struct dma_chan *chan)
437{
438 struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan);
439 struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
440 struct mpc_dma_desc *mdesc;
441 struct mpc_dma_tcd *tcd;
442 dma_addr_t tcd_paddr;
443 unsigned long flags;
444 LIST_HEAD(descs);
445 int i;
446
447 /* Alloc DMA memory for Transfer Control Descriptors */
448 tcd = dma_alloc_coherent(mdma->dma.dev,
449 MPC_DMA_DESCRIPTORS * sizeof(struct mpc_dma_tcd),
450 &tcd_paddr, GFP_KERNEL);
451 if (!tcd)
452 return -ENOMEM;
453
454 /* Alloc descriptors for this channel */
455 for (i = 0; i < MPC_DMA_DESCRIPTORS; i++) {
456 mdesc = kzalloc(sizeof(struct mpc_dma_desc), GFP_KERNEL);
457 if (!mdesc) {
458 dev_notice(mdma->dma.dev, "Memory allocation error. "
459 "Allocated only %u descriptors\n", i);
460 break;
461 }
462
463 dma_async_tx_descriptor_init(&mdesc->desc, chan);
464 mdesc->desc.flags = DMA_CTRL_ACK;
465 mdesc->desc.tx_submit = mpc_dma_tx_submit;
466
467 mdesc->tcd = &tcd[i];
468 mdesc->tcd_paddr = tcd_paddr + (i * sizeof(struct mpc_dma_tcd));
469
470 list_add_tail(&mdesc->node, &descs);
471 }
472
473 /* Return error only if no descriptors were allocated */
474 if (i == 0) {
475 dma_free_coherent(mdma->dma.dev,
476 MPC_DMA_DESCRIPTORS * sizeof(struct mpc_dma_tcd),
477 tcd, tcd_paddr);
478 return -ENOMEM;
479 }
480
481 spin_lock_irqsave(&mchan->lock, flags);
482 mchan->tcd = tcd;
483 mchan->tcd_paddr = tcd_paddr;
484 list_splice_tail_init(&descs, &mchan->free);
485 spin_unlock_irqrestore(&mchan->lock, flags);
486
487 /* Enable Error Interrupt */
488 out_8(&mdma->regs->dmaseei, chan->chan_id);
489
490 return 0;
491}
492
493/* Free channel resources */
494static void mpc_dma_free_chan_resources(struct dma_chan *chan)
495{
496 struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan);
497 struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
498 struct mpc_dma_desc *mdesc, *tmp;
499 struct mpc_dma_tcd *tcd;
500 dma_addr_t tcd_paddr;
501 unsigned long flags;
502 LIST_HEAD(descs);
503
504 spin_lock_irqsave(&mchan->lock, flags);
505
506 /* Channel must be idle */
507 BUG_ON(!list_empty(&mchan->prepared));
508 BUG_ON(!list_empty(&mchan->queued));
509 BUG_ON(!list_empty(&mchan->active));
510 BUG_ON(!list_empty(&mchan->completed));
511
512 /* Move data */
513 list_splice_tail_init(&mchan->free, &descs);
514 tcd = mchan->tcd;
515 tcd_paddr = mchan->tcd_paddr;
516
517 spin_unlock_irqrestore(&mchan->lock, flags);
518
519 /* Free DMA memory used by descriptors */
520 dma_free_coherent(mdma->dma.dev,
521 MPC_DMA_DESCRIPTORS * sizeof(struct mpc_dma_tcd),
522 tcd, tcd_paddr);
523
524 /* Free descriptors */
525 list_for_each_entry_safe(mdesc, tmp, &descs, node)
526 kfree(mdesc);
527
528 /* Disable Error Interrupt */
529 out_8(&mdma->regs->dmaceei, chan->chan_id);
530}
531
532/* Send all pending descriptors to hardware */
533static void mpc_dma_issue_pending(struct dma_chan *chan)
534{
535 /*
536 * We are posting descriptors to the hardware as soon as
537 * they are ready, so this function does nothing.
538 */
539}
540
541/* Check request completion status */
542static enum dma_status
543mpc_dma_is_tx_complete(struct dma_chan *chan, dma_cookie_t cookie,
544 dma_cookie_t *done, dma_cookie_t *used)
545{
546 struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
547 unsigned long flags;
548 dma_cookie_t last_used;
549 dma_cookie_t last_complete;
550
551 spin_lock_irqsave(&mchan->lock, flags);
552 last_used = mchan->chan.cookie;
553 last_complete = mchan->completed_cookie;
554 spin_unlock_irqrestore(&mchan->lock, flags);
555
556 if (done)
557 *done = last_complete;
558
559 if (used)
560 *used = last_used;
561
562 return dma_async_is_complete(cookie, last_complete, last_used);
563}
564
565/* Prepare descriptor for memory to memory copy */
566static struct dma_async_tx_descriptor *
567mpc_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
568 size_t len, unsigned long flags)
569{
570 struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
571 struct mpc_dma_desc *mdesc = NULL;
572 struct mpc_dma_tcd *tcd;
573 unsigned long iflags;
574
575 /* Get free descriptor */
576 spin_lock_irqsave(&mchan->lock, iflags);
577 if (!list_empty(&mchan->free)) {
578 mdesc = list_first_entry(&mchan->free, struct mpc_dma_desc,
579 node);
580 list_del(&mdesc->node);
581 }
582 spin_unlock_irqrestore(&mchan->lock, iflags);
583
584 if (!mdesc)
585 return NULL;
586
587 mdesc->error = 0;
588 tcd = mdesc->tcd;
589
590 /* Prepare Transfer Control Descriptor for this transaction */
591 memset(tcd, 0, sizeof(struct mpc_dma_tcd));
592
593 if (IS_ALIGNED(src | dst | len, 32)) {
594 tcd->ssize = MPC_DMA_TSIZE_32;
595 tcd->dsize = MPC_DMA_TSIZE_32;
596 tcd->soff = 32;
597 tcd->doff = 32;
598 } else if (IS_ALIGNED(src | dst | len, 16)) {
599 tcd->ssize = MPC_DMA_TSIZE_16;
600 tcd->dsize = MPC_DMA_TSIZE_16;
601 tcd->soff = 16;
602 tcd->doff = 16;
603 } else if (IS_ALIGNED(src | dst | len, 4)) {
604 tcd->ssize = MPC_DMA_TSIZE_4;
605 tcd->dsize = MPC_DMA_TSIZE_4;
606 tcd->soff = 4;
607 tcd->doff = 4;
608 } else if (IS_ALIGNED(src | dst | len, 2)) {
609 tcd->ssize = MPC_DMA_TSIZE_2;
610 tcd->dsize = MPC_DMA_TSIZE_2;
611 tcd->soff = 2;
612 tcd->doff = 2;
613 } else {
614 tcd->ssize = MPC_DMA_TSIZE_1;
615 tcd->dsize = MPC_DMA_TSIZE_1;
616 tcd->soff = 1;
617 tcd->doff = 1;
618 }
619
620 tcd->saddr = src;
621 tcd->daddr = dst;
622 tcd->nbytes = len;
623 tcd->biter = 1;
624 tcd->citer = 1;
625
626 /* Place descriptor in prepared list */
627 spin_lock_irqsave(&mchan->lock, iflags);
628 list_add_tail(&mdesc->node, &mchan->prepared);
629 spin_unlock_irqrestore(&mchan->lock, iflags);
630
631 return &mdesc->desc;
632}
633
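The alignment ladder relies on src | dst | len being aligned to N exactly when all three operands are, so one IS_ALIGNED() test per candidate size finds the widest access the engine may use. An equivalent sketch, factored out with the driver's constants:

	static int mpc_dma_pick_tsize(dma_addr_t src, dma_addr_t dst,
				      size_t len)
	{
		unsigned long v = src | dst | len;	/* aligned iff all are */

		if (IS_ALIGNED(v, 32))
			return MPC_DMA_TSIZE_32;
		if (IS_ALIGNED(v, 16))
			return MPC_DMA_TSIZE_16;
		if (IS_ALIGNED(v, 4))
			return MPC_DMA_TSIZE_4;
		if (IS_ALIGNED(v, 2))
			return MPC_DMA_TSIZE_2;
		return MPC_DMA_TSIZE_1;
	}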
634static int __devinit mpc_dma_probe(struct of_device *op,
635 const struct of_device_id *match)
636{
637 struct device_node *dn = op->node;
638 struct device *dev = &op->dev;
639 struct dma_device *dma;
640 struct mpc_dma *mdma;
641 struct mpc_dma_chan *mchan;
642 struct resource res;
643 ulong regs_start, regs_size;
644 int retval, i;
645
646 mdma = devm_kzalloc(dev, sizeof(struct mpc_dma), GFP_KERNEL);
647 if (!mdma) {
648 dev_err(dev, "Memory exhausted!\n");
649 return -ENOMEM;
650 }
651
652 mdma->irq = irq_of_parse_and_map(dn, 0);
653 if (mdma->irq == NO_IRQ) {
654 dev_err(dev, "Error mapping IRQ!\n");
655 return -EINVAL;
656 }
657
658 retval = of_address_to_resource(dn, 0, &res);
659 if (retval) {
660 dev_err(dev, "Error parsing memory region!\n");
661 return retval;
662 }
663
664 regs_start = res.start;
665 regs_size = res.end - res.start + 1;
666
667 if (!devm_request_mem_region(dev, regs_start, regs_size, DRV_NAME)) {
668 dev_err(dev, "Error requesting memory region!\n");
669 return -EBUSY;
670 }
671
672 mdma->regs = devm_ioremap(dev, regs_start, regs_size);
673 if (!mdma->regs) {
674 dev_err(dev, "Error mapping memory region!\n");
675 return -ENOMEM;
676 }
677
678 mdma->tcd = (struct mpc_dma_tcd *)((u8 *)(mdma->regs)
679 + MPC_DMA_TCD_OFFSET);
680
681 retval = devm_request_irq(dev, mdma->irq, &mpc_dma_irq, 0, DRV_NAME,
682 mdma);
683 if (retval) {
684 dev_err(dev, "Error requesting IRQ!\n");
685 return -EINVAL;
686 }
687
688 spin_lock_init(&mdma->error_status_lock);
689
690 dma = &mdma->dma;
691 dma->dev = dev;
692 dma->chancnt = MPC_DMA_CHANNELS;
693 dma->device_alloc_chan_resources = mpc_dma_alloc_chan_resources;
694 dma->device_free_chan_resources = mpc_dma_free_chan_resources;
695 dma->device_issue_pending = mpc_dma_issue_pending;
696 dma->device_is_tx_complete = mpc_dma_is_tx_complete;
697 dma->device_prep_dma_memcpy = mpc_dma_prep_memcpy;
698
699 INIT_LIST_HEAD(&dma->channels);
700 dma_cap_set(DMA_MEMCPY, dma->cap_mask);
701
702 for (i = 0; i < dma->chancnt; i++) {
703 mchan = &mdma->channels[i];
704
705 mchan->chan.device = dma;
706 mchan->chan.chan_id = i;
707 mchan->chan.cookie = 1;
708 mchan->completed_cookie = mchan->chan.cookie;
709
710 INIT_LIST_HEAD(&mchan->free);
711 INIT_LIST_HEAD(&mchan->prepared);
712 INIT_LIST_HEAD(&mchan->queued);
713 INIT_LIST_HEAD(&mchan->active);
714 INIT_LIST_HEAD(&mchan->completed);
715
716 spin_lock_init(&mchan->lock);
717 list_add_tail(&mchan->chan.device_node, &dma->channels);
718 }
719
720 tasklet_init(&mdma->tasklet, mpc_dma_tasklet, (unsigned long)mdma);
721
722 /*
723 * Configure DMA Engine:
724 * - Dynamic clock,
725 * - Round-robin group arbitration,
726 * - Round-robin channel arbitration.
727 */
728 out_be32(&mdma->regs->dmacr, MPC_DMA_DMACR_EDCG |
729 MPC_DMA_DMACR_ERGA | MPC_DMA_DMACR_ERCA);
730
731 /* Disable hardware DMA requests */
732 out_be32(&mdma->regs->dmaerqh, 0);
733 out_be32(&mdma->regs->dmaerql, 0);
734
735 /* Disable error interrupts */
736 out_be32(&mdma->regs->dmaeeih, 0);
737 out_be32(&mdma->regs->dmaeeil, 0);
738
739 /* Clear interrupt status */
740 out_be32(&mdma->regs->dmainth, 0xFFFFFFFF);
741 out_be32(&mdma->regs->dmaintl, 0xFFFFFFFF);
742 out_be32(&mdma->regs->dmaerrh, 0xFFFFFFFF);
743 out_be32(&mdma->regs->dmaerrl, 0xFFFFFFFF);
744
745 /* Route interrupts to IPIC */
746 out_be32(&mdma->regs->dmaihsa, 0);
747 out_be32(&mdma->regs->dmailsa, 0);
748
749 /* Register DMA engine */
750 dev_set_drvdata(dev, mdma);
751 retval = dma_async_device_register(dma);
752 if (retval) {
753 devm_free_irq(dev, mdma->irq, mdma);
754 irq_dispose_mapping(mdma->irq);
755 }
756
757 return retval;
758}
759
760static int __devexit mpc_dma_remove(struct of_device *op)
761{
762 struct device *dev = &op->dev;
763 struct mpc_dma *mdma = dev_get_drvdata(dev);
764
765 dma_async_device_unregister(&mdma->dma);
766 devm_free_irq(dev, mdma->irq, mdma);
767 irq_dispose_mapping(mdma->irq);
768
769 return 0;
770}
771
772static struct of_device_id mpc_dma_match[] = {
773 { .compatible = "fsl,mpc5121-dma", },
774 {},
775};
776
777static struct of_platform_driver mpc_dma_driver = {
778 .match_table = mpc_dma_match,
779 .probe = mpc_dma_probe,
780 .remove = __devexit_p(mpc_dma_remove),
781 .driver = {
782 .name = DRV_NAME,
783 .owner = THIS_MODULE,
784 },
785};
786
787static int __init mpc_dma_init(void)
788{
789 return of_register_platform_driver(&mpc_dma_driver);
790}
791module_init(mpc_dma_init);
792
793static void __exit mpc_dma_exit(void)
794{
795 of_unregister_platform_driver(&mpc_dma_driver);
796}
797module_exit(mpc_dma_exit);
798
799MODULE_LICENSE("GPL");
800MODULE_AUTHOR("Piotr Ziecik <kosmo@semihalf.com>");
diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c
index 0a3478e910f0..e69d87f24a25 100644
--- a/drivers/dma/ppc4xx/adma.c
+++ b/drivers/dma/ppc4xx/adma.c
@@ -4940,7 +4940,7 @@ out_free:
4940 return ret; 4940 return ret;
4941} 4941}
4942 4942
4943static struct of_device_id __devinitdata ppc440spe_adma_of_match[] = { 4943static const struct of_device_id ppc440spe_adma_of_match[] __devinitconst = {
4944 { .compatible = "ibm,dma-440spe", }, 4944 { .compatible = "ibm,dma-440spe", },
4945 { .compatible = "amcc,xor-accelerator", }, 4945 { .compatible = "amcc,xor-accelerator", },
4946 {}, 4946 {},
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 3391e6739d06..cf17dbb8014f 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -13,7 +13,7 @@ module_param(report_gart_errors, int, 0644);
13static int ecc_enable_override; 13static int ecc_enable_override;
14module_param(ecc_enable_override, int, 0644); 14module_param(ecc_enable_override, int, 0644);
15 15
16static struct msr *msrs; 16static struct msr __percpu *msrs;
17 17
18/* Lookup table for all possible MC control instances */ 18/* Lookup table for all possible MC control instances */
19struct amd64_pvt; 19struct amd64_pvt;
@@ -2553,14 +2553,14 @@ static int amd64_toggle_ecc_err_reporting(struct amd64_pvt *pvt, bool on)
2553 2553
2554 if (on) { 2554 if (on) {
2555 if (reg->l & K8_MSR_MCGCTL_NBE) 2555 if (reg->l & K8_MSR_MCGCTL_NBE)
2556 pvt->flags.ecc_report = 1; 2556 pvt->flags.nb_mce_enable = 1;
2557 2557
2558 reg->l |= K8_MSR_MCGCTL_NBE; 2558 reg->l |= K8_MSR_MCGCTL_NBE;
2559 } else { 2559 } else {
2560 /* 2560 /*
2561 * Turn off ECC reporting only when it was off before 2561 * Turn off NB MCE reporting only when it was off before
2562 */ 2562 */
2563 if (!pvt->flags.ecc_report) 2563 if (!pvt->flags.nb_mce_enable)
2564 reg->l &= ~K8_MSR_MCGCTL_NBE; 2564 reg->l &= ~K8_MSR_MCGCTL_NBE;
2565 } 2565 }
2566 } 2566 }
@@ -2571,22 +2571,11 @@ static int amd64_toggle_ecc_err_reporting(struct amd64_pvt *pvt, bool on)
2571 return 0; 2571 return 0;
2572} 2572}
2573 2573
2574/*
2575 * Only if 'ecc_enable_override' is set AND BIOS had ECC disabled, do "we"
2576 * enable it.
2577 */
2578static void amd64_enable_ecc_error_reporting(struct mem_ctl_info *mci) 2574static void amd64_enable_ecc_error_reporting(struct mem_ctl_info *mci)
2579{ 2575{
2580 struct amd64_pvt *pvt = mci->pvt_info; 2576 struct amd64_pvt *pvt = mci->pvt_info;
2581 u32 value, mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn; 2577 u32 value, mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn;
2582 2578
2583 if (!ecc_enable_override)
2584 return;
2585
2586 amd64_printk(KERN_WARNING,
2587 "'ecc_enable_override' parameter is active, "
2588 "Enabling AMD ECC hardware now: CAUTION\n");
2589
2590 amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCTL, &value); 2579 amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCTL, &value);
2591 2580
2592 /* turn on UECCn and CECCEn bits */ 2581 /* turn on UECCn and CECCEn bits */
@@ -2611,6 +2600,8 @@ static void amd64_enable_ecc_error_reporting(struct mem_ctl_info *mci)
2611 "This node reports that DRAM ECC is " 2600 "This node reports that DRAM ECC is "
2612 "currently Disabled; ENABLING now\n"); 2601 "currently Disabled; ENABLING now\n");
2613 2602
2603 pvt->flags.nb_ecc_prev = 0;
2604
2614 /* Attempt to turn on DRAM ECC Enable */ 2605 /* Attempt to turn on DRAM ECC Enable */
2615 value |= K8_NBCFG_ECC_ENABLE; 2606 value |= K8_NBCFG_ECC_ENABLE;
2616 pci_write_config_dword(pvt->misc_f3_ctl, K8_NBCFG, value); 2607 pci_write_config_dword(pvt->misc_f3_ctl, K8_NBCFG, value);
@@ -2625,7 +2616,10 @@ static void amd64_enable_ecc_error_reporting(struct mem_ctl_info *mci)
2625 amd64_printk(KERN_DEBUG, 2616 amd64_printk(KERN_DEBUG,
2626 "Hardware accepted DRAM ECC Enable\n"); 2617 "Hardware accepted DRAM ECC Enable\n");
2627 } 2618 }
2619 } else {
2620 pvt->flags.nb_ecc_prev = 1;
2628 } 2621 }
2622
2629 debugf0("NBCFG(2)= 0x%x CHIPKILL= %s ECC_ENABLE= %s\n", value, 2623 debugf0("NBCFG(2)= 0x%x CHIPKILL= %s ECC_ENABLE= %s\n", value,
2630 (value & K8_NBCFG_CHIPKILL) ? "Enabled" : "Disabled", 2624 (value & K8_NBCFG_CHIPKILL) ? "Enabled" : "Disabled",
2631 (value & K8_NBCFG_ECC_ENABLE) ? "Enabled" : "Disabled"); 2625 (value & K8_NBCFG_ECC_ENABLE) ? "Enabled" : "Disabled");
@@ -2644,12 +2638,18 @@ static void amd64_restore_ecc_error_reporting(struct amd64_pvt *pvt)
2644 value &= ~mask; 2638 value &= ~mask;
2645 value |= pvt->old_nbctl; 2639 value |= pvt->old_nbctl;
2646 2640
2647 /* restore the NB Enable MCGCTL bit */
2648 pci_write_config_dword(pvt->misc_f3_ctl, K8_NBCTL, value); 2641 pci_write_config_dword(pvt->misc_f3_ctl, K8_NBCTL, value);
2649 2642
2643 /* restore previous BIOS DRAM ECC "off" setting which we force-enabled */
2644 if (!pvt->flags.nb_ecc_prev) {
2645 amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCFG, &value);
2646 value &= ~K8_NBCFG_ECC_ENABLE;
2647 pci_write_config_dword(pvt->misc_f3_ctl, K8_NBCFG, value);
2648 }
2649
2650 /* restore the NB Enable MCGCTL bit */
2650 if (amd64_toggle_ecc_err_reporting(pvt, OFF)) 2651 if (amd64_toggle_ecc_err_reporting(pvt, OFF))
2651 amd64_printk(KERN_WARNING, "Error restoring ECC reporting over " 2652 amd64_printk(KERN_WARNING, "Error restoring NB MCGCTL settings!\n");
2652 "MCGCTL!\n");
2653} 2653}
2654 2654
2655/* 2655/*
@@ -2690,8 +2690,9 @@ static int amd64_check_ecc_enabled(struct amd64_pvt *pvt)
2690 if (!ecc_enable_override) { 2690 if (!ecc_enable_override) {
2691 amd64_printk(KERN_NOTICE, "%s", ecc_msg); 2691 amd64_printk(KERN_NOTICE, "%s", ecc_msg);
2692 return -ENODEV; 2692 return -ENODEV;
2693 } else {
2694 amd64_printk(KERN_WARNING, "Forcing ECC checking on!\n");
2693 } 2695 }
2694 ecc_enable_override = 0;
2695 } 2696 }
2696 2697
2697 return 0; 2698 return 0;
diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h
index 41bc561e5981..0d4bf5638243 100644
--- a/drivers/edac/amd64_edac.h
+++ b/drivers/edac/amd64_edac.h
@@ -487,7 +487,8 @@ struct amd64_pvt {
487 /* misc settings */ 487 /* misc settings */
488 struct flags { 488 struct flags {
489 unsigned long cf8_extcfg:1; 489 unsigned long cf8_extcfg:1;
490 unsigned long ecc_report:1; 490 unsigned long nb_mce_enable:1;
491 unsigned long nb_ecc_prev:1;
491 } flags; 492 } flags;
492}; 493};
493 494
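The new nb_ecc_prev bit records whether the BIOS had DRAM ECC enabled before the driver force-enabled it, so teardown restores the BIOS setting instead of unconditionally leaving ECC on. The save/restore pattern, condensed from the hunks above (field and constant names are the patch's; the exact control flow differs slightly):

	/* probe path: remember the BIOS state before forcing ECC on */
	pvt->flags.nb_ecc_prev = !!(value & K8_NBCFG_ECC_ENABLE);

	/* remove path: only clear the enable bit this driver set itself */
	if (!pvt->flags.nb_ecc_prev) {
		amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCFG, &value);
		value &= ~K8_NBCFG_ECC_ENABLE;
		pci_write_config_dword(pvt->misc_f3_ctl, K8_NBCFG, value);
	}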
diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
index 4eeaed57e219..8be720b278b7 100644
--- a/drivers/firewire/core-cdev.c
+++ b/drivers/firewire/core-cdev.c
@@ -25,6 +25,7 @@
25#include <linux/firewire.h> 25#include <linux/firewire.h>
26#include <linux/firewire-cdev.h> 26#include <linux/firewire-cdev.h>
27#include <linux/idr.h> 27#include <linux/idr.h>
28#include <linux/irqflags.h>
28#include <linux/jiffies.h> 29#include <linux/jiffies.h>
29#include <linux/kernel.h> 30#include <linux/kernel.h>
30#include <linux/kref.h> 31#include <linux/kref.h>
@@ -32,7 +33,6 @@
32#include <linux/module.h> 33#include <linux/module.h>
33#include <linux/mutex.h> 34#include <linux/mutex.h>
34#include <linux/poll.h> 35#include <linux/poll.h>
35#include <linux/preempt.h>
36#include <linux/sched.h> 36#include <linux/sched.h>
37#include <linux/spinlock.h> 37#include <linux/spinlock.h>
38#include <linux/string.h> 38#include <linux/string.h>
@@ -368,39 +368,56 @@ void fw_device_cdev_remove(struct fw_device *device)
368 for_each_client(device, wake_up_client); 368 for_each_client(device, wake_up_client);
369} 369}
370 370
371static int ioctl_get_info(struct client *client, void *buffer) 371union ioctl_arg {
372 struct fw_cdev_get_info get_info;
373 struct fw_cdev_send_request send_request;
374 struct fw_cdev_allocate allocate;
375 struct fw_cdev_deallocate deallocate;
376 struct fw_cdev_send_response send_response;
377 struct fw_cdev_initiate_bus_reset initiate_bus_reset;
378 struct fw_cdev_add_descriptor add_descriptor;
379 struct fw_cdev_remove_descriptor remove_descriptor;
380 struct fw_cdev_create_iso_context create_iso_context;
381 struct fw_cdev_queue_iso queue_iso;
382 struct fw_cdev_start_iso start_iso;
383 struct fw_cdev_stop_iso stop_iso;
384 struct fw_cdev_get_cycle_timer get_cycle_timer;
385 struct fw_cdev_allocate_iso_resource allocate_iso_resource;
386 struct fw_cdev_send_stream_packet send_stream_packet;
387 struct fw_cdev_get_cycle_timer2 get_cycle_timer2;
388};
389
390static int ioctl_get_info(struct client *client, union ioctl_arg *arg)
372{ 391{
373 struct fw_cdev_get_info *get_info = buffer; 392 struct fw_cdev_get_info *a = &arg->get_info;
374 struct fw_cdev_event_bus_reset bus_reset; 393 struct fw_cdev_event_bus_reset bus_reset;
375 unsigned long ret = 0; 394 unsigned long ret = 0;
376 395
377 client->version = get_info->version; 396 client->version = a->version;
378 get_info->version = FW_CDEV_VERSION; 397 a->version = FW_CDEV_VERSION;
379 get_info->card = client->device->card->index; 398 a->card = client->device->card->index;
380 399
381 down_read(&fw_device_rwsem); 400 down_read(&fw_device_rwsem);
382 401
383 if (get_info->rom != 0) { 402 if (a->rom != 0) {
384 void __user *uptr = u64_to_uptr(get_info->rom); 403 size_t want = a->rom_length;
385 size_t want = get_info->rom_length;
386 size_t have = client->device->config_rom_length * 4; 404 size_t have = client->device->config_rom_length * 4;
387 405
388 ret = copy_to_user(uptr, client->device->config_rom, 406 ret = copy_to_user(u64_to_uptr(a->rom),
389 min(want, have)); 407 client->device->config_rom, min(want, have));
390 } 408 }
391 get_info->rom_length = client->device->config_rom_length * 4; 409 a->rom_length = client->device->config_rom_length * 4;
392 410
393 up_read(&fw_device_rwsem); 411 up_read(&fw_device_rwsem);
394 412
395 if (ret != 0) 413 if (ret != 0)
396 return -EFAULT; 414 return -EFAULT;
397 415
398 client->bus_reset_closure = get_info->bus_reset_closure; 416 client->bus_reset_closure = a->bus_reset_closure;
399 if (get_info->bus_reset != 0) { 417 if (a->bus_reset != 0) {
400 void __user *uptr = u64_to_uptr(get_info->bus_reset);
401
402 fill_bus_reset_event(&bus_reset, client); 418 fill_bus_reset_event(&bus_reset, client);
403 if (copy_to_user(uptr, &bus_reset, sizeof(bus_reset))) 419 if (copy_to_user(u64_to_uptr(a->bus_reset),
420 &bus_reset, sizeof(bus_reset)))
404 return -EFAULT; 421 return -EFAULT;
405 } 422 }
406 423
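Collecting every per-command struct into union ioctl_arg lets one dispatcher copy the user argument in once, call the handler on the kernel copy, and copy the possibly updated argument back out, instead of each handler doing its own copy_{from,to}_user(). A sketch of that dispatch shape; the ioctl_handlers[] table and the size checks are assumptions here, since they are not shown in this hunk:

	static int dispatch_ioctl_sketch(struct client *client,
					 unsigned int cmd, void __user *uarg)
	{
		union ioctl_arg buffer;	/* one buffer fits every command */
		int ret;

		if (_IOC_SIZE(cmd) > sizeof(buffer))
			return -EINVAL;

		if ((_IOC_DIR(cmd) & _IOC_WRITE) &&
		    copy_from_user(&buffer, uarg, _IOC_SIZE(cmd)))
			return -EFAULT;

		ret = ioctl_handlers[_IOC_NR(cmd)](client, &buffer);
		if (ret < 0)
			return ret;

		if ((_IOC_DIR(cmd) & _IOC_READ) &&
		    copy_to_user(uarg, &buffer, _IOC_SIZE(cmd)))
			return -EFAULT;

		return ret;
	}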
@@ -571,11 +588,9 @@ static int init_request(struct client *client,
571 return ret; 588 return ret;
572} 589}
573 590
574static int ioctl_send_request(struct client *client, void *buffer) 591static int ioctl_send_request(struct client *client, union ioctl_arg *arg)
575{ 592{
576 struct fw_cdev_send_request *request = buffer; 593 switch (arg->send_request.tcode) {
577
578 switch (request->tcode) {
579 case TCODE_WRITE_QUADLET_REQUEST: 594 case TCODE_WRITE_QUADLET_REQUEST:
580 case TCODE_WRITE_BLOCK_REQUEST: 595 case TCODE_WRITE_BLOCK_REQUEST:
581 case TCODE_READ_QUADLET_REQUEST: 596 case TCODE_READ_QUADLET_REQUEST:
@@ -592,7 +607,7 @@ static int ioctl_send_request(struct client *client, void *buffer)
592 return -EINVAL; 607 return -EINVAL;
593 } 608 }
594 609
595 return init_request(client, request, client->device->node_id, 610 return init_request(client, &arg->send_request, client->device->node_id,
596 client->device->max_speed); 611 client->device->max_speed);
597} 612}
598 613
@@ -683,9 +698,9 @@ static void release_address_handler(struct client *client,
683 kfree(r); 698 kfree(r);
684} 699}
685 700
686static int ioctl_allocate(struct client *client, void *buffer) 701static int ioctl_allocate(struct client *client, union ioctl_arg *arg)
687{ 702{
688 struct fw_cdev_allocate *request = buffer; 703 struct fw_cdev_allocate *a = &arg->allocate;
689 struct address_handler_resource *r; 704 struct address_handler_resource *r;
690 struct fw_address_region region; 705 struct fw_address_region region;
691 int ret; 706 int ret;
@@ -694,13 +709,13 @@ static int ioctl_allocate(struct client *client, void *buffer)
694 if (r == NULL) 709 if (r == NULL)
695 return -ENOMEM; 710 return -ENOMEM;
696 711
697 region.start = request->offset; 712 region.start = a->offset;
698 region.end = request->offset + request->length; 713 region.end = a->offset + a->length;
699 r->handler.length = request->length; 714 r->handler.length = a->length;
700 r->handler.address_callback = handle_request; 715 r->handler.address_callback = handle_request;
701 r->handler.callback_data = r; 716 r->handler.callback_data = r;
702 r->closure = request->closure; 717 r->closure = a->closure;
703 r->client = client; 718 r->client = client;
704 719
705 ret = fw_core_add_address_handler(&r->handler, &region); 720 ret = fw_core_add_address_handler(&r->handler, &region);
706 if (ret < 0) { 721 if (ret < 0) {
@@ -714,27 +729,25 @@ static int ioctl_allocate(struct client *client, void *buffer)
714 release_address_handler(client, &r->resource); 729 release_address_handler(client, &r->resource);
715 return ret; 730 return ret;
716 } 731 }
717 request->handle = r->resource.handle; 732 a->handle = r->resource.handle;
718 733
719 return 0; 734 return 0;
720} 735}
721 736
722static int ioctl_deallocate(struct client *client, void *buffer) 737static int ioctl_deallocate(struct client *client, union ioctl_arg *arg)
723{ 738{
724 struct fw_cdev_deallocate *request = buffer; 739 return release_client_resource(client, arg->deallocate.handle,
725
726 return release_client_resource(client, request->handle,
727 release_address_handler, NULL); 740 release_address_handler, NULL);
728} 741}
729 742
730static int ioctl_send_response(struct client *client, void *buffer) 743static int ioctl_send_response(struct client *client, union ioctl_arg *arg)
731{ 744{
732 struct fw_cdev_send_response *request = buffer; 745 struct fw_cdev_send_response *a = &arg->send_response;
733 struct client_resource *resource; 746 struct client_resource *resource;
734 struct inbound_transaction_resource *r; 747 struct inbound_transaction_resource *r;
735 int ret = 0; 748 int ret = 0;
736 749
737 if (release_client_resource(client, request->handle, 750 if (release_client_resource(client, a->handle,
738 release_request, &resource) < 0) 751 release_request, &resource) < 0)
739 return -EINVAL; 752 return -EINVAL;
740 753
@@ -743,28 +756,24 @@ static int ioctl_send_response(struct client *client, void *buffer)
743 if (is_fcp_request(r->request)) 756 if (is_fcp_request(r->request))
744 goto out; 757 goto out;
745 758
746 if (request->length < r->length) 759 if (a->length < r->length)
747 r->length = request->length; 760 r->length = a->length;
748 if (copy_from_user(r->data, u64_to_uptr(request->data), r->length)) { 761 if (copy_from_user(r->data, u64_to_uptr(a->data), r->length)) {
749 ret = -EFAULT; 762 ret = -EFAULT;
750 kfree(r->request); 763 kfree(r->request);
751 goto out; 764 goto out;
752 } 765 }
753 fw_send_response(client->device->card, r->request, request->rcode); 766 fw_send_response(client->device->card, r->request, a->rcode);
754 out: 767 out:
755 kfree(r); 768 kfree(r);
756 769
757 return ret; 770 return ret;
758} 771}
759 772
760static int ioctl_initiate_bus_reset(struct client *client, void *buffer) 773static int ioctl_initiate_bus_reset(struct client *client, union ioctl_arg *arg)
761{ 774{
762 struct fw_cdev_initiate_bus_reset *request = buffer; 775 return fw_core_initiate_bus_reset(client->device->card,
763 int short_reset; 776 arg->initiate_bus_reset.type == FW_CDEV_SHORT_RESET);
764
765 short_reset = (request->type == FW_CDEV_SHORT_RESET);
766
767 return fw_core_initiate_bus_reset(client->device->card, short_reset);
768} 777}
769 778
770static void release_descriptor(struct client *client, 779static void release_descriptor(struct client *client,
@@ -777,9 +786,9 @@ static void release_descriptor(struct client *client,
777 kfree(r); 786 kfree(r);
778} 787}
779 788
780static int ioctl_add_descriptor(struct client *client, void *buffer) 789static int ioctl_add_descriptor(struct client *client, union ioctl_arg *arg)
781{ 790{
782 struct fw_cdev_add_descriptor *request = buffer; 791 struct fw_cdev_add_descriptor *a = &arg->add_descriptor;
783 struct descriptor_resource *r; 792 struct descriptor_resource *r;
784 int ret; 793 int ret;
785 794
@@ -787,22 +796,21 @@ static int ioctl_add_descriptor(struct client *client, void *buffer)
787 if (!client->device->is_local) 796 if (!client->device->is_local)
788 return -ENOSYS; 797 return -ENOSYS;
789 798
790 if (request->length > 256) 799 if (a->length > 256)
791 return -EINVAL; 800 return -EINVAL;
792 801
793 r = kmalloc(sizeof(*r) + request->length * 4, GFP_KERNEL); 802 r = kmalloc(sizeof(*r) + a->length * 4, GFP_KERNEL);
794 if (r == NULL) 803 if (r == NULL)
795 return -ENOMEM; 804 return -ENOMEM;
796 805
797 if (copy_from_user(r->data, 806 if (copy_from_user(r->data, u64_to_uptr(a->data), a->length * 4)) {
798 u64_to_uptr(request->data), request->length * 4)) {
799 ret = -EFAULT; 807 ret = -EFAULT;
800 goto failed; 808 goto failed;
801 } 809 }
802 810
803 r->descriptor.length = request->length; 811 r->descriptor.length = a->length;
804 r->descriptor.immediate = request->immediate; 812 r->descriptor.immediate = a->immediate;
805 r->descriptor.key = request->key; 813 r->descriptor.key = a->key;
806 r->descriptor.data = r->data; 814 r->descriptor.data = r->data;
807 815
808 ret = fw_core_add_descriptor(&r->descriptor); 816 ret = fw_core_add_descriptor(&r->descriptor);
@@ -815,7 +823,7 @@ static int ioctl_add_descriptor(struct client *client, void *buffer)
815 fw_core_remove_descriptor(&r->descriptor); 823 fw_core_remove_descriptor(&r->descriptor);
816 goto failed; 824 goto failed;
817 } 825 }
818 request->handle = r->resource.handle; 826 a->handle = r->resource.handle;
819 827
820 return 0; 828 return 0;
821 failed: 829 failed:
@@ -824,11 +832,9 @@ static int ioctl_add_descriptor(struct client *client, void *buffer)
824 return ret; 832 return ret;
825} 833}
826 834
827static int ioctl_remove_descriptor(struct client *client, void *buffer) 835static int ioctl_remove_descriptor(struct client *client, union ioctl_arg *arg)
828{ 836{
829 struct fw_cdev_remove_descriptor *request = buffer; 837 return release_client_resource(client, arg->remove_descriptor.handle,
830
831 return release_client_resource(client, request->handle,
832 release_descriptor, NULL); 838 release_descriptor, NULL);
833} 839}
834 840
@@ -851,49 +857,44 @@ static void iso_callback(struct fw_iso_context *context, u32 cycle,
851 sizeof(e->interrupt) + header_length, NULL, 0); 857 sizeof(e->interrupt) + header_length, NULL, 0);
852} 858}
853 859
854static int ioctl_create_iso_context(struct client *client, void *buffer) 860static int ioctl_create_iso_context(struct client *client, union ioctl_arg *arg)
855{ 861{
856 struct fw_cdev_create_iso_context *request = buffer; 862 struct fw_cdev_create_iso_context *a = &arg->create_iso_context;
857 struct fw_iso_context *context; 863 struct fw_iso_context *context;
858 864
859 /* We only support one context at this time. */ 865 /* We only support one context at this time. */
860 if (client->iso_context != NULL) 866 if (client->iso_context != NULL)
861 return -EBUSY; 867 return -EBUSY;
862 868
863 if (request->channel > 63) 869 if (a->channel > 63)
864 return -EINVAL; 870 return -EINVAL;
865 871
866 switch (request->type) { 872 switch (a->type) {
867 case FW_ISO_CONTEXT_RECEIVE: 873 case FW_ISO_CONTEXT_RECEIVE:
868 if (request->header_size < 4 || (request->header_size & 3)) 874 if (a->header_size < 4 || (a->header_size & 3))
869 return -EINVAL; 875 return -EINVAL;
870
871 break; 876 break;
872 877
873 case FW_ISO_CONTEXT_TRANSMIT: 878 case FW_ISO_CONTEXT_TRANSMIT:
874 if (request->speed > SCODE_3200) 879 if (a->speed > SCODE_3200)
875 return -EINVAL; 880 return -EINVAL;
876
877 break; 881 break;
878 882
879 default: 883 default:
880 return -EINVAL; 884 return -EINVAL;
881 } 885 }
882 886
883 context = fw_iso_context_create(client->device->card, 887 context = fw_iso_context_create(client->device->card, a->type,
884 request->type, 888 a->channel, a->speed, a->header_size,
885 request->channel, 889 iso_callback, client);
886 request->speed,
887 request->header_size,
888 iso_callback, client);
889 if (IS_ERR(context)) 890 if (IS_ERR(context))
890 return PTR_ERR(context); 891 return PTR_ERR(context);
891 892
892 client->iso_closure = request->closure; 893 client->iso_closure = a->closure;
893 client->iso_context = context; 894 client->iso_context = context;
894 895
895 /* We only support one context at this time. */ 896 /* We only support one context at this time. */
896 request->handle = 0; 897 a->handle = 0;
897 898
898 return 0; 899 return 0;
899} 900}
@@ -906,9 +907,9 @@ static int ioctl_create_iso_context(struct client *client, void *buffer)
906#define GET_SY(v) (((v) >> 20) & 0x0f) 907#define GET_SY(v) (((v) >> 20) & 0x0f)
907#define GET_HEADER_LENGTH(v) (((v) >> 24) & 0xff) 908#define GET_HEADER_LENGTH(v) (((v) >> 24) & 0xff)
908 909
909static int ioctl_queue_iso(struct client *client, void *buffer) 910static int ioctl_queue_iso(struct client *client, union ioctl_arg *arg)
910{ 911{
911 struct fw_cdev_queue_iso *request = buffer; 912 struct fw_cdev_queue_iso *a = &arg->queue_iso;
912 struct fw_cdev_iso_packet __user *p, *end, *next; 913 struct fw_cdev_iso_packet __user *p, *end, *next;
913 struct fw_iso_context *ctx = client->iso_context; 914 struct fw_iso_context *ctx = client->iso_context;
914 unsigned long payload, buffer_end, header_length; 915 unsigned long payload, buffer_end, header_length;
@@ -919,7 +920,7 @@ static int ioctl_queue_iso(struct client *client, void *buffer)
919 u8 header[256]; 920 u8 header[256];
920 } u; 921 } u;
921 922
922 if (ctx == NULL || request->handle != 0) 923 if (ctx == NULL || a->handle != 0)
923 return -EINVAL; 924 return -EINVAL;
924 925
925 /* 926 /*
@@ -929,23 +930,23 @@ static int ioctl_queue_iso(struct client *client, void *buffer)
929 * set them both to 0, which will still let packets with 930 * set them both to 0, which will still let packets with
930 * payload_length == 0 through. In other words, if no packets 931 * payload_length == 0 through. In other words, if no packets
931 * use the indirect payload, the iso buffer need not be mapped 932 * use the indirect payload, the iso buffer need not be mapped
932 * and the request->data pointer is ignored. 933 * and the a->data pointer is ignored.
933 */ 934 */
934 935
935 payload = (unsigned long)request->data - client->vm_start; 936 payload = (unsigned long)a->data - client->vm_start;
936 buffer_end = client->buffer.page_count << PAGE_SHIFT; 937 buffer_end = client->buffer.page_count << PAGE_SHIFT;
937 if (request->data == 0 || client->buffer.pages == NULL || 938 if (a->data == 0 || client->buffer.pages == NULL ||
938 payload >= buffer_end) { 939 payload >= buffer_end) {
939 payload = 0; 940 payload = 0;
940 buffer_end = 0; 941 buffer_end = 0;
941 } 942 }
942 943
943 p = (struct fw_cdev_iso_packet __user *)u64_to_uptr(request->packets); 944 p = (struct fw_cdev_iso_packet __user *)u64_to_uptr(a->packets);
944 945
945 if (!access_ok(VERIFY_READ, p, request->size)) 946 if (!access_ok(VERIFY_READ, p, a->size))
946 return -EFAULT; 947 return -EFAULT;
947 948
948 end = (void __user *)p + request->size; 949 end = (void __user *)p + a->size;
949 count = 0; 950 count = 0;
950 while (p < end) { 951 while (p < end) {
951 if (get_user(control, &p->control)) 952 if (get_user(control, &p->control))
@@ -995,61 +996,78 @@ static int ioctl_queue_iso(struct client *client, void *buffer)
995 count++; 996 count++;
996 } 997 }
997 998
998 request->size -= uptr_to_u64(p) - request->packets; 999 a->size -= uptr_to_u64(p) - a->packets;
999 request->packets = uptr_to_u64(p); 1000 a->packets = uptr_to_u64(p);
1000 request->data = client->vm_start + payload; 1001 a->data = client->vm_start + payload;
1001 1002
1002 return count; 1003 return count;
1003} 1004}
1004 1005
1005static int ioctl_start_iso(struct client *client, void *buffer) 1006static int ioctl_start_iso(struct client *client, union ioctl_arg *arg)
1006{ 1007{
1007 struct fw_cdev_start_iso *request = buffer; 1008 struct fw_cdev_start_iso *a = &arg->start_iso;
1008 1009
1009 if (client->iso_context == NULL || request->handle != 0) 1010 if (client->iso_context == NULL || a->handle != 0)
1010 return -EINVAL; 1011 return -EINVAL;
1011 1012
1012 if (client->iso_context->type == FW_ISO_CONTEXT_RECEIVE) { 1013 if (client->iso_context->type == FW_ISO_CONTEXT_RECEIVE &&
1013 if (request->tags == 0 || request->tags > 15) 1014 (a->tags == 0 || a->tags > 15 || a->sync > 15))
1014 return -EINVAL; 1015 return -EINVAL;
1015
1016 if (request->sync > 15)
1017 return -EINVAL;
1018 }
1019 1016
1020 return fw_iso_context_start(client->iso_context, request->cycle, 1017 return fw_iso_context_start(client->iso_context,
1021 request->sync, request->tags); 1018 a->cycle, a->sync, a->tags);
1022} 1019}
1023 1020
1024static int ioctl_stop_iso(struct client *client, void *buffer) 1021static int ioctl_stop_iso(struct client *client, union ioctl_arg *arg)
1025{ 1022{
1026 struct fw_cdev_stop_iso *request = buffer; 1023 struct fw_cdev_stop_iso *a = &arg->stop_iso;
1027 1024
1028 if (client->iso_context == NULL || request->handle != 0) 1025 if (client->iso_context == NULL || a->handle != 0)
1029 return -EINVAL; 1026 return -EINVAL;
1030 1027
1031 return fw_iso_context_stop(client->iso_context); 1028 return fw_iso_context_stop(client->iso_context);
1032} 1029}
1033 1030
1034static int ioctl_get_cycle_timer(struct client *client, void *buffer) 1031static int ioctl_get_cycle_timer2(struct client *client, union ioctl_arg *arg)
1035{ 1032{
1036 struct fw_cdev_get_cycle_timer *request = buffer; 1033 struct fw_cdev_get_cycle_timer2 *a = &arg->get_cycle_timer2;
1037 struct fw_card *card = client->device->card; 1034 struct fw_card *card = client->device->card;
1038 unsigned long long bus_time; 1035 struct timespec ts = {0, 0};
1039 struct timeval tv; 1036 u32 cycle_time;
1040 unsigned long flags; 1037 int ret = 0;
1038
1039 local_irq_disable();
1040
1041 cycle_time = card->driver->get_cycle_time(card);
1041 1042
1042 preempt_disable(); 1043 switch (a->clk_id) {
1043 local_irq_save(flags); 1044 case CLOCK_REALTIME: getnstimeofday(&ts); break;
1045 case CLOCK_MONOTONIC: do_posix_clock_monotonic_gettime(&ts); break;
1046 case CLOCK_MONOTONIC_RAW: getrawmonotonic(&ts); break;
1047 default:
1048 ret = -EINVAL;
1049 }
1044 1050
1045 bus_time = card->driver->get_bus_time(card); 1051 local_irq_enable();
1046 do_gettimeofday(&tv);
1047 1052
1048 local_irq_restore(flags); 1053 a->tv_sec = ts.tv_sec;
1049 preempt_enable(); 1054 a->tv_nsec = ts.tv_nsec;
1055 a->cycle_timer = cycle_time;
1056
1057 return ret;
1058}
1059
1060static int ioctl_get_cycle_timer(struct client *client, union ioctl_arg *arg)
1061{
1062 struct fw_cdev_get_cycle_timer *a = &arg->get_cycle_timer;
1063 struct fw_cdev_get_cycle_timer2 ct2;
1064
1065 ct2.clk_id = CLOCK_REALTIME;
1066 ioctl_get_cycle_timer2(client, (union ioctl_arg *)&ct2);
1067
1068 a->local_time = ct2.tv_sec * USEC_PER_SEC + ct2.tv_nsec / NSEC_PER_USEC;
1069 a->cycle_timer = ct2.cycle_timer;
1050 1070
1051 request->local_time = tv.tv_sec * 1000000ULL + tv.tv_usec;
1052 request->cycle_timer = bus_time & 0xffffffff;
1053 return 0; 1071 return 0;
1054} 1072}
1055 1073
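The new GET_CYCLE_TIMER2 handler samples the controller's isochronous cycle timer and a selectable POSIX clock back to back with local interrupts off, so userspace gets two readings from nearly the same instant; the old GET_CYCLE_TIMER ioctl is kept as a CLOCK_REALTIME wrapper around it. A minimal userspace sketch of a caller, assuming the <linux/firewire-cdev.h> definitions added by this series and an existing /dev/fw0 node:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <time.h>
#include <linux/firewire-cdev.h>

int main(void)
{
	struct fw_cdev_get_cycle_timer2 ct = { .clk_id = CLOCK_MONOTONIC_RAW };
	int fd = open("/dev/fw0", O_RDWR);

	if (fd < 0 || ioctl(fd, FW_CDEV_IOC_GET_CYCLE_TIMER2, &ct) < 0) {
		perror("FW_CDEV_IOC_GET_CYCLE_TIMER2");
		return 1;
	}
	/* cycle_timer is the raw seconds:count:offset register value */
	printf("host time %lld.%09d s, cycle timer %08x\n",
	       (long long)ct.tv_sec, (int)ct.tv_nsec, ct.cycle_timer);
	return 0;
}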
@@ -1220,33 +1238,32 @@ static int init_iso_resource(struct client *client,
1220 return ret; 1238 return ret;
1221} 1239}
1222 1240
1223static int ioctl_allocate_iso_resource(struct client *client, void *buffer) 1241static int ioctl_allocate_iso_resource(struct client *client,
1242 union ioctl_arg *arg)
1224{ 1243{
1225 struct fw_cdev_allocate_iso_resource *request = buffer; 1244 return init_iso_resource(client,
1226 1245 &arg->allocate_iso_resource, ISO_RES_ALLOC);
1227 return init_iso_resource(client, request, ISO_RES_ALLOC);
1228} 1246}
1229 1247
1230static int ioctl_deallocate_iso_resource(struct client *client, void *buffer) 1248static int ioctl_deallocate_iso_resource(struct client *client,
1249 union ioctl_arg *arg)
1231{ 1250{
1232 struct fw_cdev_deallocate *request = buffer; 1251 return release_client_resource(client,
1233 1252 arg->deallocate.handle, release_iso_resource, NULL);
1234 return release_client_resource(client, request->handle,
1235 release_iso_resource, NULL);
1236} 1253}
1237 1254
1238static int ioctl_allocate_iso_resource_once(struct client *client, void *buffer) 1255static int ioctl_allocate_iso_resource_once(struct client *client,
1256 union ioctl_arg *arg)
1239{ 1257{
1240 struct fw_cdev_allocate_iso_resource *request = buffer; 1258 return init_iso_resource(client,
1241 1259 &arg->allocate_iso_resource, ISO_RES_ALLOC_ONCE);
1242 return init_iso_resource(client, request, ISO_RES_ALLOC_ONCE);
1243} 1260}
1244 1261
1245static int ioctl_deallocate_iso_resource_once(struct client *client, void *buffer) 1262static int ioctl_deallocate_iso_resource_once(struct client *client,
1263 union ioctl_arg *arg)
1246{ 1264{
1247 struct fw_cdev_allocate_iso_resource *request = buffer; 1265 return init_iso_resource(client,
1248 1266 &arg->allocate_iso_resource, ISO_RES_DEALLOC_ONCE);
1249 return init_iso_resource(client, request, ISO_RES_DEALLOC_ONCE);
1250} 1267}
1251 1268
1252/* 1269/*
@@ -1254,16 +1271,17 @@ static int ioctl_deallocate_iso_resource_once(struct client *client, void *buffe
1254 * limited by the device's link speed, the local node's link speed, 1271 * limited by the device's link speed, the local node's link speed,
1255 * and all PHY port speeds between the two links. 1272 * and all PHY port speeds between the two links.
1256 */ 1273 */
1257static int ioctl_get_speed(struct client *client, void *buffer) 1274static int ioctl_get_speed(struct client *client, union ioctl_arg *arg)
1258{ 1275{
1259 return client->device->max_speed; 1276 return client->device->max_speed;
1260} 1277}
1261 1278
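Note that ioctl_get_speed() fills no output struct; dispatch_ioctl() passes non-negative handler returns straight back, so the speed code becomes the ioctl() return value itself. A userspace sketch, assuming the FW_CDEV_IOC_GET_SPEED definition from <linux/firewire-cdev.h> and an already opened fd:

	int speed = ioctl(fd, FW_CDEV_IOC_GET_SPEED);

	if (speed >= 0)
		printf("max speed code: %d\n", speed);	/* e.g. SCODE_400 */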
1262static int ioctl_send_broadcast_request(struct client *client, void *buffer) 1279static int ioctl_send_broadcast_request(struct client *client,
1280 union ioctl_arg *arg)
1263{ 1281{
1264 struct fw_cdev_send_request *request = buffer; 1282 struct fw_cdev_send_request *a = &arg->send_request;
1265 1283
1266 switch (request->tcode) { 1284 switch (a->tcode) {
1267 case TCODE_WRITE_QUADLET_REQUEST: 1285 case TCODE_WRITE_QUADLET_REQUEST:
1268 case TCODE_WRITE_BLOCK_REQUEST: 1286 case TCODE_WRITE_BLOCK_REQUEST:
1269 break; 1287 break;
@@ -1272,36 +1290,36 @@ static int ioctl_send_broadcast_request(struct client *client, void *buffer)
1272 } 1290 }
1273 1291
1274 /* Security policy: Only allow accesses to Units Space. */ 1292 /* Security policy: Only allow accesses to Units Space. */
1275 if (request->offset < CSR_REGISTER_BASE + CSR_CONFIG_ROM_END) 1293 if (a->offset < CSR_REGISTER_BASE + CSR_CONFIG_ROM_END)
1276 return -EACCES; 1294 return -EACCES;
1277 1295
1278 return init_request(client, request, LOCAL_BUS | 0x3f, SCODE_100); 1296 return init_request(client, a, LOCAL_BUS | 0x3f, SCODE_100);
1279} 1297}
1280 1298
1281static int ioctl_send_stream_packet(struct client *client, void *buffer) 1299static int ioctl_send_stream_packet(struct client *client, union ioctl_arg *arg)
1282{ 1300{
1283 struct fw_cdev_send_stream_packet *p = buffer; 1301 struct fw_cdev_send_stream_packet *a = &arg->send_stream_packet;
1284 struct fw_cdev_send_request request; 1302 struct fw_cdev_send_request request;
1285 int dest; 1303 int dest;
1286 1304
1287 if (p->speed > client->device->card->link_speed || 1305 if (a->speed > client->device->card->link_speed ||
1288 p->length > 1024 << p->speed) 1306 a->length > 1024 << a->speed)
1289 return -EIO; 1307 return -EIO;
1290 1308
1291 if (p->tag > 3 || p->channel > 63 || p->sy > 15) 1309 if (a->tag > 3 || a->channel > 63 || a->sy > 15)
1292 return -EINVAL; 1310 return -EINVAL;
1293 1311
1294 dest = fw_stream_packet_destination_id(p->tag, p->channel, p->sy); 1312 dest = fw_stream_packet_destination_id(a->tag, a->channel, a->sy);
1295 request.tcode = TCODE_STREAM_DATA; 1313 request.tcode = TCODE_STREAM_DATA;
1296 request.length = p->length; 1314 request.length = a->length;
1297 request.closure = p->closure; 1315 request.closure = a->closure;
1298 request.data = p->data; 1316 request.data = a->data;
1299 request.generation = p->generation; 1317 request.generation = a->generation;
1300 1318
1301 return init_request(client, &request, dest, p->speed); 1319 return init_request(client, &request, dest, a->speed);
1302} 1320}
1303 1321
1304static int (* const ioctl_handlers[])(struct client *client, void *buffer) = { 1322static int (* const ioctl_handlers[])(struct client *, union ioctl_arg *) = {
1305 ioctl_get_info, 1323 ioctl_get_info,
1306 ioctl_send_request, 1324 ioctl_send_request,
1307 ioctl_allocate, 1325 ioctl_allocate,
@@ -1322,47 +1340,35 @@ static int (* const ioctl_handlers[])(struct client *client, void *buffer) = {
1322 ioctl_get_speed, 1340 ioctl_get_speed,
1323 ioctl_send_broadcast_request, 1341 ioctl_send_broadcast_request,
1324 ioctl_send_stream_packet, 1342 ioctl_send_stream_packet,
1343 ioctl_get_cycle_timer2,
1325}; 1344};
1326 1345
1327static int dispatch_ioctl(struct client *client, 1346static int dispatch_ioctl(struct client *client,
1328 unsigned int cmd, void __user *arg) 1347 unsigned int cmd, void __user *arg)
1329{ 1348{
1330 char buffer[sizeof(union { 1349 union ioctl_arg buffer;
1331 struct fw_cdev_get_info _00;
1332 struct fw_cdev_send_request _01;
1333 struct fw_cdev_allocate _02;
1334 struct fw_cdev_deallocate _03;
1335 struct fw_cdev_send_response _04;
1336 struct fw_cdev_initiate_bus_reset _05;
1337 struct fw_cdev_add_descriptor _06;
1338 struct fw_cdev_remove_descriptor _07;
1339 struct fw_cdev_create_iso_context _08;
1340 struct fw_cdev_queue_iso _09;
1341 struct fw_cdev_start_iso _0a;
1342 struct fw_cdev_stop_iso _0b;
1343 struct fw_cdev_get_cycle_timer _0c;
1344 struct fw_cdev_allocate_iso_resource _0d;
1345 struct fw_cdev_send_stream_packet _13;
1346 })];
1347 int ret; 1350 int ret;
1348 1351
1352 if (fw_device_is_shutdown(client->device))
1353 return -ENODEV;
1354
1349 if (_IOC_TYPE(cmd) != '#' || 1355 if (_IOC_TYPE(cmd) != '#' ||
1350 _IOC_NR(cmd) >= ARRAY_SIZE(ioctl_handlers)) 1356 _IOC_NR(cmd) >= ARRAY_SIZE(ioctl_handlers))
1351 return -EINVAL; 1357 return -EINVAL;
1352 1358
1353 if (_IOC_DIR(cmd) & _IOC_WRITE) { 1359 if (_IOC_DIR(cmd) & _IOC_WRITE) {
1354 if (_IOC_SIZE(cmd) > sizeof(buffer) || 1360 if (_IOC_SIZE(cmd) > sizeof(buffer) ||
1355 copy_from_user(buffer, arg, _IOC_SIZE(cmd))) 1361 copy_from_user(&buffer, arg, _IOC_SIZE(cmd)))
1356 return -EFAULT; 1362 return -EFAULT;
1357 } 1363 }
1358 1364
1359 ret = ioctl_handlers[_IOC_NR(cmd)](client, buffer); 1365 ret = ioctl_handlers[_IOC_NR(cmd)](client, &buffer);
1360 if (ret < 0) 1366 if (ret < 0)
1361 return ret; 1367 return ret;
1362 1368
1363 if (_IOC_DIR(cmd) & _IOC_READ) { 1369 if (_IOC_DIR(cmd) & _IOC_READ) {
1364 if (_IOC_SIZE(cmd) > sizeof(buffer) || 1370 if (_IOC_SIZE(cmd) > sizeof(buffer) ||
1365 copy_to_user(arg, buffer, _IOC_SIZE(cmd))) 1371 copy_to_user(arg, &buffer, _IOC_SIZE(cmd)))
1366 return -EFAULT; 1372 return -EFAULT;
1367 } 1373 }
1368 1374
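With every handler now taking the same union ioctl_arg, dispatch_ioctl() can derive direction and payload size entirely from the command number and use one stack buffer for both copy-in and copy-out. A generic sketch of the same shape, with hypothetical my_arg, my_handlers and MY_IOC_TYPE standing in for the firewire-specific names:

static long my_ioctl(struct my_client *client, unsigned int cmd,
		     void __user *uarg)
{
	union my_arg buf;	/* one stack buffer sized for every command */
	long ret;

	if (_IOC_TYPE(cmd) != MY_IOC_TYPE ||
	    _IOC_NR(cmd) >= ARRAY_SIZE(my_handlers) ||
	    _IOC_SIZE(cmd) > sizeof(buf))
		return -EINVAL;

	/* copy the input arguments in, if the command has any */
	if ((_IOC_DIR(cmd) & _IOC_WRITE) &&
	    copy_from_user(&buf, uarg, _IOC_SIZE(cmd)))
		return -EFAULT;

	ret = my_handlers[_IOC_NR(cmd)](client, &buf);
	if (ret < 0)
		return ret;

	/* copy the (possibly updated) arguments back out */
	if ((_IOC_DIR(cmd) & _IOC_READ) &&
	    copy_to_user(uarg, &buf, _IOC_SIZE(cmd)))
		return -EFAULT;

	return ret;
}

Hoisting the fw_device_is_shutdown() check into dispatch_ioctl() also lets the plain and compat entry points below shrink to one-liners.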
@@ -1372,24 +1378,14 @@ static int dispatch_ioctl(struct client *client,
1372static long fw_device_op_ioctl(struct file *file, 1378static long fw_device_op_ioctl(struct file *file,
1373 unsigned int cmd, unsigned long arg) 1379 unsigned int cmd, unsigned long arg)
1374{ 1380{
1375 struct client *client = file->private_data; 1381 return dispatch_ioctl(file->private_data, cmd, (void __user *)arg);
1376
1377 if (fw_device_is_shutdown(client->device))
1378 return -ENODEV;
1379
1380 return dispatch_ioctl(client, cmd, (void __user *) arg);
1381} 1382}
1382 1383
1383#ifdef CONFIG_COMPAT 1384#ifdef CONFIG_COMPAT
1384static long fw_device_op_compat_ioctl(struct file *file, 1385static long fw_device_op_compat_ioctl(struct file *file,
1385 unsigned int cmd, unsigned long arg) 1386 unsigned int cmd, unsigned long arg)
1386{ 1387{
1387 struct client *client = file->private_data; 1388 return dispatch_ioctl(file->private_data, cmd, compat_ptr(arg));
1388
1389 if (fw_device_is_shutdown(client->device))
1390 return -ENODEV;
1391
1392 return dispatch_ioctl(client, cmd, compat_ptr(arg));
1393} 1389}
1394#endif 1390#endif
1395 1391
diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
index 9d0dfcbe2c1c..014cabd3afda 100644
--- a/drivers/firewire/core-device.c
+++ b/drivers/firewire/core-device.c
@@ -18,6 +18,7 @@
18 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 18 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 */ 19 */
20 20
21#include <linux/bug.h>
21#include <linux/ctype.h> 22#include <linux/ctype.h>
22#include <linux/delay.h> 23#include <linux/delay.h>
23#include <linux/device.h> 24#include <linux/device.h>
@@ -43,7 +44,7 @@
43 44
44#include "core.h" 45#include "core.h"
45 46
46void fw_csr_iterator_init(struct fw_csr_iterator *ci, u32 * p) 47void fw_csr_iterator_init(struct fw_csr_iterator *ci, const u32 *p)
47{ 48{
48 ci->p = p + 1; 49 ci->p = p + 1;
49 ci->end = ci->p + (p[0] >> 16); 50 ci->end = ci->p + (p[0] >> 16);
@@ -59,9 +60,76 @@ int fw_csr_iterator_next(struct fw_csr_iterator *ci, int *key, int *value)
59} 60}
60EXPORT_SYMBOL(fw_csr_iterator_next); 61EXPORT_SYMBOL(fw_csr_iterator_next);
61 62
63static const u32 *search_leaf(const u32 *directory, int search_key)
64{
65 struct fw_csr_iterator ci;
66 int last_key = 0, key, value;
67
68 fw_csr_iterator_init(&ci, directory);
69 while (fw_csr_iterator_next(&ci, &key, &value)) {
70 if (last_key == search_key &&
71 key == (CSR_DESCRIPTOR | CSR_LEAF))
72 return ci.p - 1 + value;
73
74 last_key = key;
75 }
76
77 return NULL;
78}
79
80static int textual_leaf_to_string(const u32 *block, char *buf, size_t size)
81{
82 unsigned int quadlets, i;
83 char c;
84
85 if (!size || !buf)
86 return -EINVAL;
87
88 quadlets = min(block[0] >> 16, 256U);
89 if (quadlets < 2)
90 return -ENODATA;
91
92 if (block[1] != 0 || block[2] != 0)
93 /* unknown language/character set */
94 return -ENODATA;
95
96 block += 3;
97 quadlets -= 2;
98 for (i = 0; i < quadlets * 4 && i < size - 1; i++) {
99 c = block[i / 4] >> (24 - 8 * (i % 4));
100 if (c == '\0')
101 break;
102 buf[i] = c;
103 }
104 buf[i] = '\0';
105
106 return i;
107}
108
109/**
110 * fw_csr_string - reads a string from the configuration ROM
111 * @directory: e.g. root directory or unit directory
112 * @key: the key of the preceding directory entry
113 * @buf: where to put the string
114 * @size: size of @buf, in bytes
115 *
116 * The string is taken from a minimal ASCII text descriptor leaf after
117 * the immediate entry with @key. The string is zero-terminated.
118 * Returns strlen(buf) or a negative error code.
119 */
120int fw_csr_string(const u32 *directory, int key, char *buf, size_t size)
121{
122 const u32 *leaf = search_leaf(directory, key);
123 if (!leaf)
124 return -ENOENT;
125
126 return textual_leaf_to_string(leaf, buf, size);
127}
128EXPORT_SYMBOL(fw_csr_string);
129
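fw_csr_string() gives drivers a single call for pulling vendor or model strings out of a directory; the expression block[i / 4] >> (24 - 8 * (i % 4)) unpacks the big-endian quadlets one byte at a time. A sketch of a caller, assuming a struct fw_unit *unit as handed to a firewire driver's probe routine:

	char model[64];
	int ret;

	/* CSR_MODEL is the immediate entry preceding the descriptor leaf */
	ret = fw_csr_string(unit->directory, CSR_MODEL, model, sizeof(model));
	if (ret >= 0)
		dev_info(&unit->device, "model name: %s\n", model);
	else if (ret == -ENOENT)
		dev_info(&unit->device, "no model name in config ROM\n");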
62static bool is_fw_unit(struct device *dev); 130static bool is_fw_unit(struct device *dev);
63 131
64static int match_unit_directory(u32 *directory, u32 match_flags, 132static int match_unit_directory(const u32 *directory, u32 match_flags,
65 const struct ieee1394_device_id *id) 133 const struct ieee1394_device_id *id)
66{ 134{
67 struct fw_csr_iterator ci; 135 struct fw_csr_iterator ci;
@@ -195,7 +263,7 @@ static ssize_t show_immediate(struct device *dev,
195 struct config_rom_attribute *attr = 263 struct config_rom_attribute *attr =
196 container_of(dattr, struct config_rom_attribute, attr); 264 container_of(dattr, struct config_rom_attribute, attr);
197 struct fw_csr_iterator ci; 265 struct fw_csr_iterator ci;
198 u32 *dir; 266 const u32 *dir;
199 int key, value, ret = -ENOENT; 267 int key, value, ret = -ENOENT;
200 268
201 down_read(&fw_device_rwsem); 269 down_read(&fw_device_rwsem);
@@ -226,10 +294,10 @@ static ssize_t show_text_leaf(struct device *dev,
226{ 294{
227 struct config_rom_attribute *attr = 295 struct config_rom_attribute *attr =
228 container_of(dattr, struct config_rom_attribute, attr); 296 container_of(dattr, struct config_rom_attribute, attr);
229 struct fw_csr_iterator ci; 297 const u32 *dir;
230 u32 *dir, *block = NULL, *p, *end; 298 size_t bufsize;
231 int length, key, value, last_key = 0, ret = -ENOENT; 299 char dummy_buf[2];
232 char *b; 300 int ret;
233 301
234 down_read(&fw_device_rwsem); 302 down_read(&fw_device_rwsem);
235 303
@@ -238,40 +306,23 @@ static ssize_t show_text_leaf(struct device *dev,
238 else 306 else
239 dir = fw_device(dev)->config_rom + 5; 307 dir = fw_device(dev)->config_rom + 5;
240 308
241 fw_csr_iterator_init(&ci, dir); 309 if (buf) {
242 while (fw_csr_iterator_next(&ci, &key, &value)) { 310 bufsize = PAGE_SIZE - 1;
243 if (attr->key == last_key && 311 } else {
244 key == (CSR_DESCRIPTOR | CSR_LEAF)) 312 buf = dummy_buf;
245 block = ci.p - 1 + value; 313 bufsize = 1;
246 last_key = key;
247 } 314 }
248 315
249 if (block == NULL) 316 ret = fw_csr_string(dir, attr->key, buf, bufsize);
250 goto out;
251
252 length = min(block[0] >> 16, 256U);
253 if (length < 3)
254 goto out;
255
256 if (block[1] != 0 || block[2] != 0)
257 /* Unknown encoding. */
258 goto out;
259 317
260 if (buf == NULL) { 318 if (ret >= 0) {
261 ret = length * 4; 319 /* Strip trailing whitespace and add newline. */
262 goto out; 320 while (ret > 0 && isspace(buf[ret - 1]))
321 ret--;
322 strcpy(buf + ret, "\n");
323 ret++;
263 } 324 }
264 325
265 b = buf;
266 end = &block[length + 1];
267 for (p = &block[3]; p < end; p++, b += 4)
268 * (u32 *) b = (__force u32) __cpu_to_be32(*p);
269
270 /* Strip trailing whitespace and add newline. */
271 while (b--, (isspace(*b) || *b == '\0') && b > buf);
272 strcpy(b + 1, "\n");
273 ret = b + 2 - buf;
274 out:
275 up_read(&fw_device_rwsem); 326 up_read(&fw_device_rwsem);
276 327
277 return ret; 328 return ret;
@@ -371,7 +422,7 @@ static ssize_t guid_show(struct device *dev,
371 return ret; 422 return ret;
372} 423}
373 424
374static int units_sprintf(char *buf, u32 *directory) 425static int units_sprintf(char *buf, const u32 *directory)
375{ 426{
376 struct fw_csr_iterator ci; 427 struct fw_csr_iterator ci;
377 int key, value; 428 int key, value;
@@ -441,28 +492,29 @@ static int read_rom(struct fw_device *device,
441 return rcode; 492 return rcode;
442} 493}
443 494
444#define READ_BIB_ROM_SIZE 256 495#define MAX_CONFIG_ROM_SIZE 256
445#define READ_BIB_STACK_SIZE 16
446 496
447/* 497/*
448 * Read the bus info block, perform a speed probe, and read all of the rest of 498 * Read the bus info block, perform a speed probe, and read all of the rest of
449 * the config ROM. We do all this with a cached bus generation. If the bus 499 * the config ROM. We do all this with a cached bus generation. If the bus
450 * generation changes under us, read_bus_info_block will fail and get retried. 500 * generation changes under us, read_config_rom will fail and get retried.
451 * It's better to start all over in this case because the node from which we 501 * It's better to start all over in this case because the node from which we
452 * are reading the ROM may have changed the ROM during the reset. 502 * are reading the ROM may have changed the ROM during the reset.
453 */ 503 */
454static int read_bus_info_block(struct fw_device *device, int generation) 504static int read_config_rom(struct fw_device *device, int generation)
455{ 505{
456 u32 *rom, *stack, *old_rom, *new_rom; 506 const u32 *old_rom, *new_rom;
507 u32 *rom, *stack;
457 u32 sp, key; 508 u32 sp, key;
458 int i, end, length, ret = -1; 509 int i, end, length, ret = -1;
459 510
460 rom = kmalloc(sizeof(*rom) * READ_BIB_ROM_SIZE + 511 rom = kmalloc(sizeof(*rom) * MAX_CONFIG_ROM_SIZE +
461 sizeof(*stack) * READ_BIB_STACK_SIZE, GFP_KERNEL); 512 sizeof(*stack) * MAX_CONFIG_ROM_SIZE, GFP_KERNEL);
462 if (rom == NULL) 513 if (rom == NULL)
463 return -ENOMEM; 514 return -ENOMEM;
464 515
465 stack = &rom[READ_BIB_ROM_SIZE]; 516 stack = &rom[MAX_CONFIG_ROM_SIZE];
517 memset(rom, 0, sizeof(*rom) * MAX_CONFIG_ROM_SIZE);
466 518
467 device->max_speed = SCODE_100; 519 device->max_speed = SCODE_100;
468 520
@@ -529,40 +581,54 @@ static int read_bus_info_block(struct fw_device *device, int generation)
529 */ 581 */
530 key = stack[--sp]; 582 key = stack[--sp];
531 i = key & 0xffffff; 583 i = key & 0xffffff;
532 if (i >= READ_BIB_ROM_SIZE) 584 if (WARN_ON(i >= MAX_CONFIG_ROM_SIZE))
533 /*
534 * The reference points outside the standard
535 * config rom area, something's fishy.
536 */
537 goto out; 585 goto out;
538 586
539 /* Read header quadlet for the block to get the length. */ 587 /* Read header quadlet for the block to get the length. */
540 if (read_rom(device, generation, i, &rom[i]) != RCODE_COMPLETE) 588 if (read_rom(device, generation, i, &rom[i]) != RCODE_COMPLETE)
541 goto out; 589 goto out;
542 end = i + (rom[i] >> 16) + 1; 590 end = i + (rom[i] >> 16) + 1;
543 i++; 591 if (end > MAX_CONFIG_ROM_SIZE) {
544 if (end > READ_BIB_ROM_SIZE)
545 /* 592 /*
546 * This block extends outside standard config 593 * This block extends outside the config ROM which is
547 * area (and the array we're reading it 594 * a firmware bug. Ignore this whole block, i.e.
548 * into). That's broken, so ignore this 595 * simply set a fake block length of 0.
549 * device.
550 */ 596 */
551 goto out; 597 fw_error("skipped invalid ROM block %x at %llx\n",
598 rom[i],
599 i * 4 | CSR_REGISTER_BASE | CSR_CONFIG_ROM);
600 rom[i] = 0;
601 end = i;
602 }
603 i++;
552 604
553 /* 605 /*
554 * Now read in the block. If this is a directory 606 * Now read in the block. If this is a directory
555 * block, check the entries as we read them to see if 607 * block, check the entries as we read them to see if
556 * it references another block, and push it in that case. 608 * it references another block, and push it in that case.
557 */ 609 */
558 while (i < end) { 610 for (; i < end; i++) {
559 if (read_rom(device, generation, i, &rom[i]) != 611 if (read_rom(device, generation, i, &rom[i]) !=
560 RCODE_COMPLETE) 612 RCODE_COMPLETE)
561 goto out; 613 goto out;
562 if ((key >> 30) == 3 && (rom[i] >> 30) > 1 && 614
563 sp < READ_BIB_STACK_SIZE) 615 if ((key >> 30) != 3 || (rom[i] >> 30) < 2)
564 stack[sp++] = i + rom[i]; 616 continue;
565 i++; 617 /*
618 * Offset points outside the ROM. May be a firmware
619 * bug or an Extended ROM entry (IEEE 1212-2001 clause
620 * 7.7.18). Simply overwrite this pointer here by a
621 * fake immediate entry so that later iterators over
622 * the ROM don't have to check offsets all the time.
623 */
624 if (i + (rom[i] & 0xffffff) >= MAX_CONFIG_ROM_SIZE) {
625 fw_error("skipped unsupported ROM entry %x at %llx\n",
626 rom[i],
627 i * 4 | CSR_REGISTER_BASE | CSR_CONFIG_ROM);
628 rom[i] = 0;
629 continue;
630 }
631 stack[sp++] = i + rom[i];
566 } 632 }
567 if (length < i) 633 if (length < i)
568 length = i; 634 length = i;
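The reworked reader keeps the iterative depth-first walk: references are pushed with their entry type preserved in the top two bits, so that i = key & 0xffffff recovers the offset on pop and key >> 30 == 3 identifies directory blocks. A simplified sketch of that traversal over an in-memory ROM image (no bus reads, retries, or the firmware-bug patching above):

#include <stdint.h>

static void walk_rom(const uint32_t *rom, unsigned int rom_size)
{
	uint32_t stack[16], key, entry;
	unsigned int sp = 0, i, end;

	stack[sp++] = (3u << 30) | 5;	/* root directory at quadlet 5 */
	while (sp > 0) {
		key = stack[--sp];
		i = key & 0xffffff;
		if (i >= rom_size)
			continue;
		end = i + (rom[i] >> 16) + 1;	/* header holds block length */
		for (i++; i < end && i < rom_size; i++) {
			entry = rom[i];
			/* within a directory, entry types 2 and 3 point to
			 * a leaf or subdirectory at quadlet i + offset */
			if ((key >> 30) == 3 && (entry >> 30) >= 2 && sp < 16)
				stack[sp++] = (entry & 0xc0000000) |
					((i + (entry & 0xffffff)) & 0xffffff);
		}
	}
}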
@@ -905,7 +971,7 @@ static void fw_device_init(struct work_struct *work)
905 * device. 971 * device.
906 */ 972 */
907 973
908 if (read_bus_info_block(device, device->generation) < 0) { 974 if (read_config_rom(device, device->generation) < 0) {
909 if (device->config_rom_retries < MAX_RETRIES && 975 if (device->config_rom_retries < MAX_RETRIES &&
910 atomic_read(&device->state) == FW_DEVICE_INITIALIZING) { 976 atomic_read(&device->state) == FW_DEVICE_INITIALIZING) {
911 device->config_rom_retries++; 977 device->config_rom_retries++;
@@ -1022,7 +1088,7 @@ enum {
1022}; 1088};
1023 1089
1024/* Reread and compare bus info block and header of root directory */ 1090/* Reread and compare bus info block and header of root directory */
1025static int reread_bus_info_block(struct fw_device *device, int generation) 1091static int reread_config_rom(struct fw_device *device, int generation)
1026{ 1092{
1027 u32 q; 1093 u32 q;
1028 int i; 1094 int i;
@@ -1048,7 +1114,7 @@ static void fw_device_refresh(struct work_struct *work)
1048 struct fw_card *card = device->card; 1114 struct fw_card *card = device->card;
1049 int node_id = device->node_id; 1115 int node_id = device->node_id;
1050 1116
1051 switch (reread_bus_info_block(device, device->generation)) { 1117 switch (reread_config_rom(device, device->generation)) {
1052 case REREAD_BIB_ERROR: 1118 case REREAD_BIB_ERROR:
1053 if (device->config_rom_retries < MAX_RETRIES / 2 && 1119 if (device->config_rom_retries < MAX_RETRIES / 2 &&
1054 atomic_read(&device->state) == FW_DEVICE_INITIALIZING) { 1120 atomic_read(&device->state) == FW_DEVICE_INITIALIZING) {
@@ -1082,7 +1148,7 @@ static void fw_device_refresh(struct work_struct *work)
1082 */ 1148 */
1083 device_for_each_child(&device->device, NULL, shutdown_unit); 1149 device_for_each_child(&device->device, NULL, shutdown_unit);
1084 1150
1085 if (read_bus_info_block(device, device->generation) < 0) { 1151 if (read_config_rom(device, device->generation) < 0) {
1086 if (device->config_rom_retries < MAX_RETRIES && 1152 if (device->config_rom_retries < MAX_RETRIES &&
1087 atomic_read(&device->state) == FW_DEVICE_INITIALIZING) { 1153 atomic_read(&device->state) == FW_DEVICE_INITIALIZING) {
1088 device->config_rom_retries++; 1154 device->config_rom_retries++;
diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
index 495849eb13cc..673b03f8b4ec 100644
--- a/drivers/firewire/core-transaction.c
+++ b/drivers/firewire/core-transaction.c
@@ -921,23 +921,15 @@ static void handle_registers(struct fw_card *card, struct fw_request *request,
921 void *payload, size_t length, void *callback_data) 921 void *payload, size_t length, void *callback_data)
922{ 922{
923 int reg = offset & ~CSR_REGISTER_BASE; 923 int reg = offset & ~CSR_REGISTER_BASE;
924 unsigned long long bus_time;
925 __be32 *data = payload; 924 __be32 *data = payload;
926 int rcode = RCODE_COMPLETE; 925 int rcode = RCODE_COMPLETE;
927 926
928 switch (reg) { 927 switch (reg) {
929 case CSR_CYCLE_TIME: 928 case CSR_CYCLE_TIME:
930 case CSR_BUS_TIME: 929 if (TCODE_IS_READ_REQUEST(tcode) && length == 4)
931 if (!TCODE_IS_READ_REQUEST(tcode) || length != 4) { 930 *data = cpu_to_be32(card->driver->get_cycle_time(card));
932 rcode = RCODE_TYPE_ERROR;
933 break;
934 }
935
936 bus_time = card->driver->get_bus_time(card);
937 if (reg == CSR_CYCLE_TIME)
938 *data = cpu_to_be32(bus_time);
939 else 931 else
940 *data = cpu_to_be32(bus_time >> 25); 932 rcode = RCODE_TYPE_ERROR;
941 break; 933 break;
942 934
943 case CSR_BROADCAST_CHANNEL: 935 case CSR_BROADCAST_CHANNEL:
@@ -968,6 +960,9 @@ static void handle_registers(struct fw_card *card, struct fw_request *request,
968 case CSR_BUSY_TIMEOUT: 960 case CSR_BUSY_TIMEOUT:
969 /* FIXME: Implement this. */ 961 /* FIXME: Implement this. */
970 962
963 case CSR_BUS_TIME:
964 /* Useless without initialization by the bus manager. */
965
971 default: 966 default:
972 rcode = RCODE_ADDRESS_ERROR; 967 rcode = RCODE_ADDRESS_ERROR;
973 break; 968 break;
diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
index ed3b1a765c00..fb0321300cce 100644
--- a/drivers/firewire/core.h
+++ b/drivers/firewire/core.h
@@ -70,7 +70,7 @@ struct fw_card_driver {
70 int (*enable_phys_dma)(struct fw_card *card, 70 int (*enable_phys_dma)(struct fw_card *card,
71 int node_id, int generation); 71 int node_id, int generation);
72 72
73 u64 (*get_bus_time)(struct fw_card *card); 73 u32 (*get_cycle_time)(struct fw_card *card);
74 74
75 struct fw_iso_context * 75 struct fw_iso_context *
76 (*allocate_iso_context)(struct fw_card *card, 76 (*allocate_iso_context)(struct fw_card *card,
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index 43ebf337b131..75dc6988cffd 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -38,7 +38,6 @@
38#include <linux/spinlock.h> 38#include <linux/spinlock.h>
39#include <linux/string.h> 39#include <linux/string.h>
40 40
41#include <asm/atomic.h>
42#include <asm/byteorder.h> 41#include <asm/byteorder.h>
43#include <asm/page.h> 42#include <asm/page.h>
44#include <asm/system.h> 43#include <asm/system.h>
@@ -73,20 +72,6 @@ struct descriptor {
73 __le16 transfer_status; 72 __le16 transfer_status;
74} __attribute__((aligned(16))); 73} __attribute__((aligned(16)));
75 74
76struct db_descriptor {
77 __le16 first_size;
78 __le16 control;
79 __le16 second_req_count;
80 __le16 first_req_count;
81 __le32 branch_address;
82 __le16 second_res_count;
83 __le16 first_res_count;
84 __le32 reserved0;
85 __le32 first_buffer;
86 __le32 second_buffer;
87 __le32 reserved1;
88} __attribute__((aligned(16)));
89
90#define CONTROL_SET(regs) (regs) 75#define CONTROL_SET(regs) (regs)
91#define CONTROL_CLEAR(regs) ((regs) + 4) 76#define CONTROL_CLEAR(regs) ((regs) + 4)
92#define COMMAND_PTR(regs) ((regs) + 12) 77#define COMMAND_PTR(regs) ((regs) + 12)
@@ -181,31 +166,16 @@ struct fw_ohci {
181 struct fw_card card; 166 struct fw_card card;
182 167
183 __iomem char *registers; 168 __iomem char *registers;
184 dma_addr_t self_id_bus;
185 __le32 *self_id_cpu;
186 struct tasklet_struct bus_reset_tasklet;
187 int node_id; 169 int node_id;
188 int generation; 170 int generation;
189 int request_generation; /* for timestamping incoming requests */ 171 int request_generation; /* for timestamping incoming requests */
190 atomic_t bus_seconds; 172 unsigned quirks;
191
192 bool use_dualbuffer;
193 bool old_uninorth;
194 bool bus_reset_packet_quirk;
195 173
196 /* 174 /*
197 * Spinlock for accessing fw_ohci data. Never call out of 175 * Spinlock for accessing fw_ohci data. Never call out of
198 * this driver with this lock held. 176 * this driver with this lock held.
199 */ 177 */
200 spinlock_t lock; 178 spinlock_t lock;
201 u32 self_id_buffer[512];
202
203 /* Config rom buffers */
204 __be32 *config_rom;
205 dma_addr_t config_rom_bus;
206 __be32 *next_config_rom;
207 dma_addr_t next_config_rom_bus;
208 __be32 next_header;
209 179
210 struct ar_context ar_request_ctx; 180 struct ar_context ar_request_ctx;
211 struct ar_context ar_response_ctx; 181 struct ar_context ar_response_ctx;
@@ -217,6 +187,18 @@ struct fw_ohci {
217 u64 ir_context_channels; 187 u64 ir_context_channels;
218 u32 ir_context_mask; 188 u32 ir_context_mask;
219 struct iso_context *ir_context_list; 189 struct iso_context *ir_context_list;
190
191 __be32 *config_rom;
192 dma_addr_t config_rom_bus;
193 __be32 *next_config_rom;
194 dma_addr_t next_config_rom_bus;
195 __be32 next_header;
196
197 __le32 *self_id_cpu;
198 dma_addr_t self_id_bus;
199 struct tasklet_struct bus_reset_tasklet;
200
201 u32 self_id_buffer[512];
220}; 202};
221 203
222static inline struct fw_ohci *fw_ohci(struct fw_card *card) 204static inline struct fw_ohci *fw_ohci(struct fw_card *card)
@@ -249,6 +231,30 @@ static inline struct fw_ohci *fw_ohci(struct fw_card *card)
249 231
250static char ohci_driver_name[] = KBUILD_MODNAME; 232static char ohci_driver_name[] = KBUILD_MODNAME;
251 233
234#define QUIRK_CYCLE_TIMER 1
235#define QUIRK_RESET_PACKET 2
236#define QUIRK_BE_HEADERS 4
237
238/* In case of multiple matches in ohci_quirks[], only the first one is used. */
239static const struct {
240 unsigned short vendor, device, flags;
241} ohci_quirks[] = {
242 {PCI_VENDOR_ID_TI, PCI_ANY_ID, QUIRK_RESET_PACKET},
243 {PCI_VENDOR_ID_AL, PCI_ANY_ID, QUIRK_CYCLE_TIMER},
244 {PCI_VENDOR_ID_NEC, PCI_ANY_ID, QUIRK_CYCLE_TIMER},
245 {PCI_VENDOR_ID_VIA, PCI_ANY_ID, QUIRK_CYCLE_TIMER},
246 {PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_FW, QUIRK_BE_HEADERS},
247};
248
249/* This overrides anything that was found in ohci_quirks[]. */
250static int param_quirks;
251module_param_named(quirks, param_quirks, int, 0644);
252MODULE_PARM_DESC(quirks, "Chip quirks (default = 0"
253 ", nonatomic cycle timer = " __stringify(QUIRK_CYCLE_TIMER)
254 ", reset packet generation = " __stringify(QUIRK_RESET_PACKET)
255 ", AR/selfID endianess = " __stringify(QUIRK_BE_HEADERS)
256 ")");
257
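Since param_quirks overrides the table outright, a suspected chip bug can be tested without rebuilding the driver, and the 0644 mode also exposes the value under /sys/module/firewire_ohci/parameters/quirks (new settings only affect controllers probed afterwards). For example, to force only the nonatomic cycle timer workaround:

	modprobe firewire-ohci quirks=1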
252#ifdef CONFIG_FIREWIRE_OHCI_DEBUG 258#ifdef CONFIG_FIREWIRE_OHCI_DEBUG
253 259
254#define OHCI_PARAM_DEBUG_AT_AR 1 260#define OHCI_PARAM_DEBUG_AT_AR 1
@@ -275,7 +281,7 @@ static void log_irqs(u32 evt)
275 !(evt & OHCI1394_busReset)) 281 !(evt & OHCI1394_busReset))
276 return; 282 return;
277 283
278 fw_notify("IRQ %08x%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n", evt, 284 fw_notify("IRQ %08x%s%s%s%s%s%s%s%s%s%s%s%s%s\n", evt,
279 evt & OHCI1394_selfIDComplete ? " selfID" : "", 285 evt & OHCI1394_selfIDComplete ? " selfID" : "",
280 evt & OHCI1394_RQPkt ? " AR_req" : "", 286 evt & OHCI1394_RQPkt ? " AR_req" : "",
281 evt & OHCI1394_RSPkt ? " AR_resp" : "", 287 evt & OHCI1394_RSPkt ? " AR_resp" : "",
@@ -285,7 +291,6 @@ static void log_irqs(u32 evt)
285 evt & OHCI1394_isochTx ? " IT" : "", 291 evt & OHCI1394_isochTx ? " IT" : "",
286 evt & OHCI1394_postedWriteErr ? " postedWriteErr" : "", 292 evt & OHCI1394_postedWriteErr ? " postedWriteErr" : "",
287 evt & OHCI1394_cycleTooLong ? " cycleTooLong" : "", 293 evt & OHCI1394_cycleTooLong ? " cycleTooLong" : "",
288 evt & OHCI1394_cycle64Seconds ? " cycle64Seconds" : "",
289 evt & OHCI1394_cycleInconsistent ? " cycleInconsistent" : "", 294 evt & OHCI1394_cycleInconsistent ? " cycleInconsistent" : "",
290 evt & OHCI1394_regAccessFail ? " regAccessFail" : "", 295 evt & OHCI1394_regAccessFail ? " regAccessFail" : "",
291 evt & OHCI1394_busReset ? " busReset" : "", 296 evt & OHCI1394_busReset ? " busReset" : "",
@@ -293,8 +298,7 @@ static void log_irqs(u32 evt)
293 OHCI1394_RSPkt | OHCI1394_reqTxComplete | 298 OHCI1394_RSPkt | OHCI1394_reqTxComplete |
294 OHCI1394_respTxComplete | OHCI1394_isochRx | 299 OHCI1394_respTxComplete | OHCI1394_isochRx |
295 OHCI1394_isochTx | OHCI1394_postedWriteErr | 300 OHCI1394_isochTx | OHCI1394_postedWriteErr |
296 OHCI1394_cycleTooLong | OHCI1394_cycle64Seconds | 301 OHCI1394_cycleTooLong | OHCI1394_cycleInconsistent |
297 OHCI1394_cycleInconsistent |
298 OHCI1394_regAccessFail | OHCI1394_busReset) 302 OHCI1394_regAccessFail | OHCI1394_busReset)
299 ? " ?" : ""); 303 ? " ?" : "");
300} 304}
@@ -524,7 +528,7 @@ static void ar_context_release(struct ar_context *ctx)
524 528
525#if defined(CONFIG_PPC_PMAC) && defined(CONFIG_PPC32) 529#if defined(CONFIG_PPC_PMAC) && defined(CONFIG_PPC32)
526#define cond_le32_to_cpu(v) \ 530#define cond_le32_to_cpu(v) \
527 (ohci->old_uninorth ? (__force __u32)(v) : le32_to_cpu(v)) 531 (ohci->quirks & QUIRK_BE_HEADERS ? (__force __u32)(v) : le32_to_cpu(v))
528#else 532#else
529#define cond_le32_to_cpu(v) le32_to_cpu(v) 533#define cond_le32_to_cpu(v) le32_to_cpu(v)
530#endif 534#endif
@@ -605,7 +609,7 @@ static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
605 * at a slightly incorrect time (in bus_reset_tasklet). 609 * at a slightly incorrect time (in bus_reset_tasklet).
606 */ 610 */
607 if (evt == OHCI1394_evt_bus_reset) { 611 if (evt == OHCI1394_evt_bus_reset) {
608 if (!ohci->bus_reset_packet_quirk) 612 if (!(ohci->quirks & QUIRK_RESET_PACKET))
609 ohci->request_generation = (p.header[2] >> 16) & 0xff; 613 ohci->request_generation = (p.header[2] >> 16) & 0xff;
610 } else if (ctx == &ohci->ar_request_ctx) { 614 } else if (ctx == &ohci->ar_request_ctx) {
611 fw_core_handle_request(&ohci->card, &p); 615 fw_core_handle_request(&ohci->card, &p);
@@ -1329,7 +1333,7 @@ static void bus_reset_tasklet(unsigned long data)
1329 context_stop(&ohci->at_response_ctx); 1333 context_stop(&ohci->at_response_ctx);
1330 reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset); 1334 reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);
1331 1335
1332 if (ohci->bus_reset_packet_quirk) 1336 if (ohci->quirks & QUIRK_RESET_PACKET)
1333 ohci->request_generation = generation; 1337 ohci->request_generation = generation;
1334 1338
1335 /* 1339 /*
@@ -1384,7 +1388,7 @@ static void bus_reset_tasklet(unsigned long data)
1384static irqreturn_t irq_handler(int irq, void *data) 1388static irqreturn_t irq_handler(int irq, void *data)
1385{ 1389{
1386 struct fw_ohci *ohci = data; 1390 struct fw_ohci *ohci = data;
1387 u32 event, iso_event, cycle_time; 1391 u32 event, iso_event;
1388 int i; 1392 int i;
1389 1393
1390 event = reg_read(ohci, OHCI1394_IntEventClear); 1394 event = reg_read(ohci, OHCI1394_IntEventClear);
@@ -1454,12 +1458,6 @@ static irqreturn_t irq_handler(int irq, void *data)
1454 fw_notify("isochronous cycle inconsistent\n"); 1458 fw_notify("isochronous cycle inconsistent\n");
1455 } 1459 }
1456 1460
1457 if (event & OHCI1394_cycle64Seconds) {
1458 cycle_time = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
1459 if ((cycle_time & 0x80000000) == 0)
1460 atomic_inc(&ohci->bus_seconds);
1461 }
1462
1463 return IRQ_HANDLED; 1461 return IRQ_HANDLED;
1464} 1462}
1465 1463
@@ -1553,8 +1551,7 @@ static int ohci_enable(struct fw_card *card,
1553 OHCI1394_reqTxComplete | OHCI1394_respTxComplete | 1551 OHCI1394_reqTxComplete | OHCI1394_respTxComplete |
1554 OHCI1394_isochRx | OHCI1394_isochTx | 1552 OHCI1394_isochRx | OHCI1394_isochTx |
1555 OHCI1394_postedWriteErr | OHCI1394_cycleTooLong | 1553 OHCI1394_postedWriteErr | OHCI1394_cycleTooLong |
1556 OHCI1394_cycleInconsistent | 1554 OHCI1394_cycleInconsistent | OHCI1394_regAccessFail |
1557 OHCI1394_cycle64Seconds | OHCI1394_regAccessFail |
1558 OHCI1394_masterIntEnable); 1555 OHCI1394_masterIntEnable);
1559 if (param_debug & OHCI_PARAM_DEBUG_BUSRESETS) 1556 if (param_debug & OHCI_PARAM_DEBUG_BUSRESETS)
1560 reg_write(ohci, OHCI1394_IntMaskSet, OHCI1394_busReset); 1557 reg_write(ohci, OHCI1394_IntMaskSet, OHCI1394_busReset);
@@ -1794,16 +1791,61 @@ static int ohci_enable_phys_dma(struct fw_card *card,
1794#endif /* CONFIG_FIREWIRE_OHCI_REMOTE_DMA */ 1791#endif /* CONFIG_FIREWIRE_OHCI_REMOTE_DMA */
1795} 1792}
1796 1793
1797static u64 ohci_get_bus_time(struct fw_card *card) 1794static u32 cycle_timer_ticks(u32 cycle_timer)
1798{ 1795{
1799 struct fw_ohci *ohci = fw_ohci(card); 1796 u32 ticks;
1800 u32 cycle_time;
1801 u64 bus_time;
1802 1797
1803 cycle_time = reg_read(ohci, OHCI1394_IsochronousCycleTimer); 1798 ticks = cycle_timer & 0xfff;
1804 bus_time = ((u64)atomic_read(&ohci->bus_seconds) << 32) | cycle_time; 1799 ticks += 3072 * ((cycle_timer >> 12) & 0x1fff);
1800 ticks += (3072 * 8000) * (cycle_timer >> 25);
1801
1802 return ticks;
1803}
1804
1805/*
1806 * Some controllers exhibit one or more of the following bugs when updating the
1807 * iso cycle timer register:
1808 * - When the lowest six bits are wrapping around to zero, a read that happens
1809 * at the same time will return garbage in the lowest ten bits.
1810 * - When the cycleOffset field wraps around to zero, the cycleCount field is
1811 * not incremented for about 60 ns.
1812 * - Occasionally, the entire register reads zero.
1813 *
1814 * To catch these, we read the register three times and ensure that the
1815 * difference between each two consecutive reads is approximately the same, i.e.
1816 * less than twice the other. Furthermore, any negative difference indicates an
1817 * error. (A PCI read should take at least 20 ticks of the 24.576 MHz timer to
1818 * execute, so we have enough precision to compute the ratio of the differences.)
1819 */
1820static u32 ohci_get_cycle_time(struct fw_card *card)
1821{
1822 struct fw_ohci *ohci = fw_ohci(card);
1823 u32 c0, c1, c2;
1824 u32 t0, t1, t2;
1825 s32 diff01, diff12;
1826 int i;
1805 1827
1806 return bus_time; 1828 c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
1829
1830 if (ohci->quirks & QUIRK_CYCLE_TIMER) {
1831 i = 0;
1832 c1 = c2;
1833 c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
1834 do {
1835 c0 = c1;
1836 c1 = c2;
1837 c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
1838 t0 = cycle_timer_ticks(c0);
1839 t1 = cycle_timer_ticks(c1);
1840 t2 = cycle_timer_ticks(c2);
1841 diff01 = t1 - t0;
1842 diff12 = t2 - t1;
1843 } while ((diff01 <= 0 || diff12 <= 0 ||
1844 diff01 / diff12 >= 2 || diff12 / diff01 >= 2)
1845 && i++ < 20);
1846 }
1847
1848 return c2;
1807} 1849}
1808 1850
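The cycle timer register packs a 7-bit seconds field, a 13-bit cycle count (8000 cycles per second) and a 12-bit offset (3072 ticks of the 24.576 MHz clock per cycle); cycle_timer_ticks() flattens that into one monotonically comparable tick count so the three reads can be sanity-checked. A standalone sketch of the same decomposition:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t ct = 0x12345678;		/* example register value */
	uint32_t sec    =  ct >> 25;		/* 7-bit seconds */
	uint32_t count  = (ct >> 12) & 0x1fff;	/* 0..7999 cycles */
	uint32_t offset =  ct & 0xfff;		/* 0..3071 ticks */
	uint64_t ticks  = offset + 3072ull * count + 3072ull * 8000 * sec;

	printf("%us + %u cycles + %u ticks = %llu ticks @ 24.576 MHz\n",
	       sec, count, offset, (unsigned long long)ticks);
	return 0;
}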
1809static void copy_iso_headers(struct iso_context *ctx, void *p) 1851static void copy_iso_headers(struct iso_context *ctx, void *p)
@@ -1828,52 +1870,6 @@ static void copy_iso_headers(struct iso_context *ctx, void *p)
1828 ctx->header_length += ctx->base.header_size; 1870 ctx->header_length += ctx->base.header_size;
1829} 1871}
1830 1872
1831static int handle_ir_dualbuffer_packet(struct context *context,
1832 struct descriptor *d,
1833 struct descriptor *last)
1834{
1835 struct iso_context *ctx =
1836 container_of(context, struct iso_context, context);
1837 struct db_descriptor *db = (struct db_descriptor *) d;
1838 __le32 *ir_header;
1839 size_t header_length;
1840 void *p, *end;
1841
1842 if (db->first_res_count != 0 && db->second_res_count != 0) {
1843 if (ctx->excess_bytes <= le16_to_cpu(db->second_req_count)) {
1844 /* This descriptor isn't done yet, stop iteration. */
1845 return 0;
1846 }
1847 ctx->excess_bytes -= le16_to_cpu(db->second_req_count);
1848 }
1849
1850 header_length = le16_to_cpu(db->first_req_count) -
1851 le16_to_cpu(db->first_res_count);
1852
1853 p = db + 1;
1854 end = p + header_length;
1855 while (p < end) {
1856 copy_iso_headers(ctx, p);
1857 ctx->excess_bytes +=
1858 (le32_to_cpu(*(__le32 *)(p + 4)) >> 16) & 0xffff;
1859 p += max(ctx->base.header_size, (size_t)8);
1860 }
1861
1862 ctx->excess_bytes -= le16_to_cpu(db->second_req_count) -
1863 le16_to_cpu(db->second_res_count);
1864
1865 if (le16_to_cpu(db->control) & DESCRIPTOR_IRQ_ALWAYS) {
1866 ir_header = (__le32 *) (db + 1);
1867 ctx->base.callback(&ctx->base,
1868 le32_to_cpu(ir_header[0]) & 0xffff,
1869 ctx->header_length, ctx->header,
1870 ctx->base.callback_data);
1871 ctx->header_length = 0;
1872 }
1873
1874 return 1;
1875}
1876
1877static int handle_ir_packet_per_buffer(struct context *context, 1873static int handle_ir_packet_per_buffer(struct context *context,
1878 struct descriptor *d, 1874 struct descriptor *d,
1879 struct descriptor *last) 1875 struct descriptor *last)
@@ -1960,10 +1956,7 @@ static struct fw_iso_context *ohci_allocate_iso_context(struct fw_card *card,
1960 channels = &ohci->ir_context_channels; 1956 channels = &ohci->ir_context_channels;
1961 mask = &ohci->ir_context_mask; 1957 mask = &ohci->ir_context_mask;
1962 list = ohci->ir_context_list; 1958 list = ohci->ir_context_list;
1963 if (ohci->use_dualbuffer) 1959 callback = handle_ir_packet_per_buffer;
1964 callback = handle_ir_dualbuffer_packet;
1965 else
1966 callback = handle_ir_packet_per_buffer;
1967 } 1960 }
1968 1961
1969 spin_lock_irqsave(&ohci->lock, flags); 1962 spin_lock_irqsave(&ohci->lock, flags);
@@ -2026,8 +2019,6 @@ static int ohci_start_iso(struct fw_iso_context *base,
2026 } else { 2019 } else {
2027 index = ctx - ohci->ir_context_list; 2020 index = ctx - ohci->ir_context_list;
2028 control = IR_CONTEXT_ISOCH_HEADER; 2021 control = IR_CONTEXT_ISOCH_HEADER;
2029 if (ohci->use_dualbuffer)
2030 control |= IR_CONTEXT_DUAL_BUFFER_MODE;
2031 match = (tags << 28) | (sync << 8) | ctx->base.channel; 2022 match = (tags << 28) | (sync << 8) | ctx->base.channel;
2032 if (cycle >= 0) { 2023 if (cycle >= 0) {
2033 match |= (cycle & 0x07fff) << 12; 2024 match |= (cycle & 0x07fff) << 12;
@@ -2188,92 +2179,6 @@ static int ohci_queue_iso_transmit(struct fw_iso_context *base,
2188 return 0; 2179 return 0;
2189} 2180}
2190 2181
2191static int ohci_queue_iso_receive_dualbuffer(struct fw_iso_context *base,
2192 struct fw_iso_packet *packet,
2193 struct fw_iso_buffer *buffer,
2194 unsigned long payload)
2195{
2196 struct iso_context *ctx = container_of(base, struct iso_context, base);
2197 struct db_descriptor *db = NULL;
2198 struct descriptor *d;
2199 struct fw_iso_packet *p;
2200 dma_addr_t d_bus, page_bus;
2201 u32 z, header_z, length, rest;
2202 int page, offset, packet_count, header_size;
2203
2204 /*
2205 * FIXME: Cycle lost behavior should be configurable: lose
2206 * packet, retransmit or terminate..
2207 */
2208
2209 p = packet;
2210 z = 2;
2211
2212 /*
2213 * The OHCI controller puts the isochronous header and trailer in the
2214 * buffer, so we need at least 8 bytes.
2215 */
2216 packet_count = p->header_length / ctx->base.header_size;
2217 header_size = packet_count * max(ctx->base.header_size, (size_t)8);
2218
2219 /* Get header size in number of descriptors. */
2220 header_z = DIV_ROUND_UP(header_size, sizeof(*d));
2221 page = payload >> PAGE_SHIFT;
2222 offset = payload & ~PAGE_MASK;
2223 rest = p->payload_length;
2224 /*
2225 * The controllers I've tested have not worked correctly when
2226 * second_req_count is zero. Rather than do something we know won't
2227 * work, return an error
2228 */
2229 if (rest == 0)
2230 return -EINVAL;
2231
2232 while (rest > 0) {
2233 d = context_get_descriptors(&ctx->context,
2234 z + header_z, &d_bus);
2235 if (d == NULL)
2236 return -ENOMEM;
2237
2238 db = (struct db_descriptor *) d;
2239 db->control = cpu_to_le16(DESCRIPTOR_STATUS |
2240 DESCRIPTOR_BRANCH_ALWAYS);
2241 db->first_size =
2242 cpu_to_le16(max(ctx->base.header_size, (size_t)8));
2243 if (p->skip && rest == p->payload_length) {
2244 db->control |= cpu_to_le16(DESCRIPTOR_WAIT);
2245 db->first_req_count = db->first_size;
2246 } else {
2247 db->first_req_count = cpu_to_le16(header_size);
2248 }
2249 db->first_res_count = db->first_req_count;
2250 db->first_buffer = cpu_to_le32(d_bus + sizeof(*db));
2251
2252 if (p->skip && rest == p->payload_length)
2253 length = 4;
2254 else if (offset + rest < PAGE_SIZE)
2255 length = rest;
2256 else
2257 length = PAGE_SIZE - offset;
2258
2259 db->second_req_count = cpu_to_le16(length);
2260 db->second_res_count = db->second_req_count;
2261 page_bus = page_private(buffer->pages[page]);
2262 db->second_buffer = cpu_to_le32(page_bus + offset);
2263
2264 if (p->interrupt && length == rest)
2265 db->control |= cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS);
2266
2267 context_append(&ctx->context, d, z, header_z);
2268 offset = (offset + length) & ~PAGE_MASK;
2269 rest -= length;
2270 if (offset == 0)
2271 page++;
2272 }
2273
2274 return 0;
2275}
2276
2277static int ohci_queue_iso_receive_packet_per_buffer(struct fw_iso_context *base, 2182static int ohci_queue_iso_receive_packet_per_buffer(struct fw_iso_context *base,
2278 struct fw_iso_packet *packet, 2183 struct fw_iso_packet *packet,
2279 struct fw_iso_buffer *buffer, 2184 struct fw_iso_buffer *buffer,
@@ -2364,9 +2269,6 @@ static int ohci_queue_iso(struct fw_iso_context *base,
2364 spin_lock_irqsave(&ctx->context.ohci->lock, flags); 2269 spin_lock_irqsave(&ctx->context.ohci->lock, flags);
2365 if (base->type == FW_ISO_CONTEXT_TRANSMIT) 2270 if (base->type == FW_ISO_CONTEXT_TRANSMIT)
2366 ret = ohci_queue_iso_transmit(base, packet, buffer, payload); 2271 ret = ohci_queue_iso_transmit(base, packet, buffer, payload);
2367 else if (ctx->context.ohci->use_dualbuffer)
2368 ret = ohci_queue_iso_receive_dualbuffer(base, packet,
2369 buffer, payload);
2370 else 2272 else
2371 ret = ohci_queue_iso_receive_packet_per_buffer(base, packet, 2273 ret = ohci_queue_iso_receive_packet_per_buffer(base, packet,
2372 buffer, payload); 2274 buffer, payload);
@@ -2383,7 +2285,7 @@ static const struct fw_card_driver ohci_driver = {
2383 .send_response = ohci_send_response, 2285 .send_response = ohci_send_response,
2384 .cancel_packet = ohci_cancel_packet, 2286 .cancel_packet = ohci_cancel_packet,
2385 .enable_phys_dma = ohci_enable_phys_dma, 2287 .enable_phys_dma = ohci_enable_phys_dma,
2386 .get_bus_time = ohci_get_bus_time, 2288 .get_cycle_time = ohci_get_cycle_time,
2387 2289
2388 .allocate_iso_context = ohci_allocate_iso_context, 2290 .allocate_iso_context = ohci_allocate_iso_context,
2389 .free_iso_context = ohci_free_iso_context, 2291 .free_iso_context = ohci_free_iso_context,
@@ -2421,17 +2323,13 @@ static void ohci_pmac_off(struct pci_dev *dev)
2421#define ohci_pmac_off(dev) 2323#define ohci_pmac_off(dev)
2422#endif /* CONFIG_PPC_PMAC */ 2324#endif /* CONFIG_PPC_PMAC */
2423 2325
2424#define PCI_VENDOR_ID_AGERE PCI_VENDOR_ID_ATT
2425#define PCI_DEVICE_ID_AGERE_FW643 0x5901
2426#define PCI_DEVICE_ID_TI_TSB43AB23 0x8024
2427
2428static int __devinit pci_probe(struct pci_dev *dev, 2326static int __devinit pci_probe(struct pci_dev *dev,
2429 const struct pci_device_id *ent) 2327 const struct pci_device_id *ent)
2430{ 2328{
2431 struct fw_ohci *ohci; 2329 struct fw_ohci *ohci;
2432 u32 bus_options, max_receive, link_speed, version; 2330 u32 bus_options, max_receive, link_speed, version;
2433 u64 guid; 2331 u64 guid;
2434 int err; 2332 int i, err, n_ir, n_it;
2435 size_t size; 2333 size_t size;
2436 2334
2437 ohci = kzalloc(sizeof(*ohci), GFP_KERNEL); 2335 ohci = kzalloc(sizeof(*ohci), GFP_KERNEL);
@@ -2472,36 +2370,15 @@ static int __devinit pci_probe(struct pci_dev *dev,
2472 goto fail_iomem; 2370 goto fail_iomem;
2473 } 2371 }
2474 2372
2475 version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff; 2373 for (i = 0; i < ARRAY_SIZE(ohci_quirks); i++)
2476#if 0 2374 if (ohci_quirks[i].vendor == dev->vendor &&
2477 /* FIXME: make it a context option or remove dual-buffer mode */ 2375 (ohci_quirks[i].device == dev->device ||
2478 ohci->use_dualbuffer = version >= OHCI_VERSION_1_1; 2376 ohci_quirks[i].device == (unsigned short)PCI_ANY_ID)) {
2479#endif 2377 ohci->quirks = ohci_quirks[i].flags;
2480 2378 break;
2481 /* dual-buffer mode is broken if more than one IR context is active */ 2379 }
2482 if (dev->vendor == PCI_VENDOR_ID_AGERE && 2380 if (param_quirks)
2483 dev->device == PCI_DEVICE_ID_AGERE_FW643) 2381 ohci->quirks = param_quirks;
2484 ohci->use_dualbuffer = false;
2485
2486 /* dual-buffer mode is broken */
2487 if (dev->vendor == PCI_VENDOR_ID_RICOH &&
2488 dev->device == PCI_DEVICE_ID_RICOH_R5C832)
2489 ohci->use_dualbuffer = false;
2490
2491/* x86-32 currently doesn't use highmem for dma_alloc_coherent */
2492#if !defined(CONFIG_X86_32)
2493 /* dual-buffer mode is broken with descriptor addresses above 2G */
2494 if (dev->vendor == PCI_VENDOR_ID_TI &&
2495 (dev->device == PCI_DEVICE_ID_TI_TSB43AB22 ||
2496 dev->device == PCI_DEVICE_ID_TI_TSB43AB23))
2497 ohci->use_dualbuffer = false;
2498#endif
2499
2500#if defined(CONFIG_PPC_PMAC) && defined(CONFIG_PPC32)
2501 ohci->old_uninorth = dev->vendor == PCI_VENDOR_ID_APPLE &&
2502 dev->device == PCI_DEVICE_ID_APPLE_UNI_N_FW;
2503#endif
2504 ohci->bus_reset_packet_quirk = dev->vendor == PCI_VENDOR_ID_TI;
2505 2382
2506 ar_context_init(&ohci->ar_request_ctx, ohci, 2383 ar_context_init(&ohci->ar_request_ctx, ohci,
2507 OHCI1394_AsReqRcvContextControlSet); 2384 OHCI1394_AsReqRcvContextControlSet);
@@ -2516,17 +2393,19 @@ static int __devinit pci_probe(struct pci_dev *dev,
2516 OHCI1394_AsRspTrContextControlSet, handle_at_packet); 2393 OHCI1394_AsRspTrContextControlSet, handle_at_packet);
2517 2394
2518 reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, ~0); 2395 reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, ~0);
2519 ohci->it_context_mask = reg_read(ohci, OHCI1394_IsoRecvIntMaskSet); 2396 ohci->ir_context_channels = ~0ULL;
2397 ohci->ir_context_mask = reg_read(ohci, OHCI1394_IsoRecvIntMaskSet);
2520 reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, ~0); 2398 reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, ~0);
2521 size = sizeof(struct iso_context) * hweight32(ohci->it_context_mask); 2399 n_ir = hweight32(ohci->ir_context_mask);
2522 ohci->it_context_list = kzalloc(size, GFP_KERNEL); 2400 size = sizeof(struct iso_context) * n_ir;
2401 ohci->ir_context_list = kzalloc(size, GFP_KERNEL);
2523 2402
2524 reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, ~0); 2403 reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, ~0);
2525 ohci->ir_context_channels = ~0ULL; 2404 ohci->it_context_mask = reg_read(ohci, OHCI1394_IsoXmitIntMaskSet);
2526 ohci->ir_context_mask = reg_read(ohci, OHCI1394_IsoXmitIntMaskSet);
2527 reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, ~0); 2405 reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, ~0);
2528 size = sizeof(struct iso_context) * hweight32(ohci->ir_context_mask); 2406 n_it = hweight32(ohci->it_context_mask);
2529 ohci->ir_context_list = kzalloc(size, GFP_KERNEL); 2407 size = sizeof(struct iso_context) * n_it;
2408 ohci->it_context_list = kzalloc(size, GFP_KERNEL);
2530 2409
2531 if (ohci->it_context_list == NULL || ohci->ir_context_list == NULL) { 2410 if (ohci->it_context_list == NULL || ohci->ir_context_list == NULL) {
2532 err = -ENOMEM; 2411 err = -ENOMEM;
@@ -2553,8 +2432,11 @@ static int __devinit pci_probe(struct pci_dev *dev,
2553 if (err) 2432 if (err)
2554 goto fail_self_id; 2433 goto fail_self_id;
2555 2434
2556 fw_notify("Added fw-ohci device %s, OHCI version %x.%x\n", 2435 version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff;
2557 dev_name(&dev->dev), version >> 16, version & 0xff); 2436 fw_notify("Added fw-ohci device %s, OHCI v%x.%x, "
2437 "%d IR + %d IT contexts, quirks 0x%x\n",
2438 dev_name(&dev->dev), version >> 16, version & 0xff,
2439 n_ir, n_it, ohci->quirks);
2558 2440
2559 return 0; 2441 return 0;
2560 2442
@@ -2662,7 +2544,7 @@ static int pci_resume(struct pci_dev *dev)
2662} 2544}
2663#endif 2545#endif
2664 2546
2665static struct pci_device_id pci_table[] = { 2547static const struct pci_device_id pci_table[] = {
2666 { PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_FIREWIRE_OHCI, ~0) }, 2548 { PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_FIREWIRE_OHCI, ~0) },
2667 { } 2549 { }
2668}; 2550};
diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c
index 70fef40cd22f..ca264f2fdf0c 100644
--- a/drivers/firewire/sbp2.c
+++ b/drivers/firewire/sbp2.c
@@ -1014,7 +1014,8 @@ static int sbp2_add_logical_unit(struct sbp2_target *tgt, int lun_entry)
1014 return 0; 1014 return 0;
1015} 1015}
1016 1016
1017static int sbp2_scan_logical_unit_dir(struct sbp2_target *tgt, u32 *directory) 1017static int sbp2_scan_logical_unit_dir(struct sbp2_target *tgt,
1018 const u32 *directory)
1018{ 1019{
1019 struct fw_csr_iterator ci; 1020 struct fw_csr_iterator ci;
1020 int key, value; 1021 int key, value;
@@ -1027,7 +1028,7 @@ static int sbp2_scan_logical_unit_dir(struct sbp2_target *tgt, u32 *directory)
1027 return 0; 1028 return 0;
1028} 1029}
1029 1030
1030static int sbp2_scan_unit_dir(struct sbp2_target *tgt, u32 *directory, 1031static int sbp2_scan_unit_dir(struct sbp2_target *tgt, const u32 *directory,
1031 u32 *model, u32 *firmware_revision) 1032 u32 *model, u32 *firmware_revision)
1032{ 1033{
1033 struct fw_csr_iterator ci; 1034 struct fw_csr_iterator ci;
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 39c5aa75b8f1..abe3f446ca48 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -4,7 +4,7 @@
4 4
5ccflags-y := -Iinclude/drm 5ccflags-y := -Iinclude/drm
6 6
7drm-y := drm_auth.o drm_bufs.o drm_cache.o \ 7drm-y := drm_auth.o drm_buffer.o drm_bufs.o drm_cache.o \
8 drm_context.o drm_dma.o drm_drawable.o \ 8 drm_context.o drm_dma.o drm_drawable.o \
9 drm_drv.o drm_fops.o drm_gem.o drm_ioctl.o drm_irq.o \ 9 drm_drv.o drm_fops.o drm_gem.o drm_ioctl.o drm_irq.o \
10 drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \ 10 drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \
diff --git a/drivers/gpu/drm/drm_buffer.c b/drivers/gpu/drm/drm_buffer.c
new file mode 100644
index 000000000000..55d03ed05000
--- /dev/null
+++ b/drivers/gpu/drm/drm_buffer.c
@@ -0,0 +1,184 @@
1/**************************************************************************
2 *
3 * Copyright 2010 Pauli Nieminen.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 *
27 **************************************************************************/
28/*
29 * Multipart buffer for copying data which is larger than the page size.
30 *
31 * Authors:
32 * Pauli Nieminen <suokkos-at-gmail-dot-com>
33 */
34
35#include "drm_buffer.h"
36
37/**
38 * Allocate the drm buffer object.
39 *
40 * buf: Pointer to a pointer where the object is stored.
41 * size: The number of bytes to allocate.
42 */
43int drm_buffer_alloc(struct drm_buffer **buf, int size)
44{
45 int nr_pages = size / PAGE_SIZE + 1;
46 int idx;
47
48 	/* Allocating the pointer table at the end of the structure makes
49 	 * drm_buffer variable-sized. */
50 *buf = kzalloc(sizeof(struct drm_buffer) + nr_pages*sizeof(char *),
51 GFP_KERNEL);
52
53 if (*buf == NULL) {
54 DRM_ERROR("Failed to allocate drm buffer object to hold"
55 " %d bytes in %d pages.\n",
56 size, nr_pages);
57 return -ENOMEM;
58 }
59
60 (*buf)->size = size;
61
62 for (idx = 0; idx < nr_pages; ++idx) {
63
64 (*buf)->data[idx] =
65 kmalloc(min(PAGE_SIZE, size - idx * PAGE_SIZE),
66 GFP_KERNEL);
67
68
69 if ((*buf)->data[idx] == NULL) {
70 DRM_ERROR("Failed to allocate %dth page for drm"
71 " buffer with %d bytes and %d pages.\n",
72 idx + 1, size, nr_pages);
73 goto error_out;
74 }
75
76 }
77
78 return 0;
79
80error_out:
81
82 /* Only last element can be null pointer so check for it first. */
83 if ((*buf)->data[idx])
84 kfree((*buf)->data[idx]);
85
86 for (--idx; idx >= 0; --idx)
87 kfree((*buf)->data[idx]);
88
89 kfree(*buf);
90 return -ENOMEM;
91}
92EXPORT_SYMBOL(drm_buffer_alloc);
93
94/**
95 * Copy the user data to the beginning of the buffer and reset the
96 * processing iterator.
97 *
98 * user_data: A pointer to the data that is copied to the buffer.
99 * size: The number of bytes to copy.
100 */
101int drm_buffer_copy_from_user(struct drm_buffer *buf,
102 void __user *user_data, int size)
103{
104 int nr_pages = size / PAGE_SIZE + 1;
105 int idx;
106
107 if (size > buf->size) {
108 DRM_ERROR("Requesting to copy %d bytes to a drm buffer with"
109 " %d bytes space\n",
110 size, buf->size);
111 return -EFAULT;
112 }
113
114 for (idx = 0; idx < nr_pages; ++idx) {
115
116 if (DRM_COPY_FROM_USER(buf->data[idx],
117 user_data + idx * PAGE_SIZE,
118 min(PAGE_SIZE, size - idx * PAGE_SIZE))) {
119 DRM_ERROR("Failed to copy user data (%p) to drm buffer"
120 " (%p) %dth page.\n",
121 user_data, buf, idx);
122 return -EFAULT;
123
124 }
125 }
126 buf->iterator = 0;
127 return 0;
128}
129EXPORT_SYMBOL(drm_buffer_copy_from_user);
130
131/**
132 * Free the drm buffer object
133 */
134void drm_buffer_free(struct drm_buffer *buf)
135{
136
137 if (buf != NULL) {
138
139 int nr_pages = buf->size / PAGE_SIZE + 1;
140 int idx;
141 for (idx = 0; idx < nr_pages; ++idx)
142 kfree(buf->data[idx]);
143
144 kfree(buf);
145 }
146}
147EXPORT_SYMBOL(drm_buffer_free);
148
149/**
150 * Read an object from a buffer that may be split into multiple parts. If the
151 * object is not split, the function simply returns a pointer to it inside the
152 * buffer. If it is split, the object's data is copied into the stack object
153 * supplied by the caller.
154 *
155 * The processing location of the buffer is also advanced to the next byte
156 * after the object.
157 *
158 * objsize: The size of the object in bytes.
159 * stack_obj: A pointer to a memory location where object can be copied.
160 */
void *drm_buffer_read_object(struct drm_buffer *buf,
		int objsize, void *stack_obj)
{
	int idx = drm_buffer_index(buf);
	int page = drm_buffer_page(buf);
	void *obj = NULL;

	if (idx + objsize <= PAGE_SIZE) {
		obj = &buf->data[page][idx];
	} else {
		/* The object is split which forces copy to temporary object.*/
		int beginsz = PAGE_SIZE - idx;
		memcpy(stack_obj, &buf->data[page][idx], beginsz);

		memcpy(stack_obj + beginsz, &buf->data[page + 1][0],
				objsize - beginsz);

		obj = stack_obj;
	}

	drm_buffer_advance(buf, objsize);
	return obj;
}
EXPORT_SYMBOL(drm_buffer_read_object);
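
Usage sketch (illustrative, not part of this file): a hypothetical caller pulls a
command stream into a drm_buffer and walks it object by object. struct drm_cmd,
example_parse_stream and the sizes are invented; the drm_buffer_page()/index()/
advance() helpers used above are declared in drm_buffer.h.

struct drm_cmd { u32 opcode; u32 arg; };

static int example_parse_stream(void __user *user_data, int size)
{
	struct drm_buffer *buf;
	struct drm_cmd stack_cmd, *cmd;
	int ret = drm_buffer_alloc(&buf, size);

	if (ret)
		return ret;

	ret = drm_buffer_copy_from_user(buf, user_data, size);
	if (ret)
		goto out;

	/* A copy lands in stack_cmd only when the object straddles a page
	 * boundary; either way cmd points at a contiguous drm_cmd. */
	cmd = drm_buffer_read_object(buf, sizeof(*cmd), &stack_cmd);
	/* ... dispatch on cmd->opcode ... */
out:
	drm_buffer_free(buf);
	return ret;
}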
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 7d0f00a935fa..f2aaf39be398 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -836,11 +836,7 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
 		mode_changed = true;
 	} else if (set->fb == NULL) {
 		mode_changed = true;
-	} else if ((set->fb->bits_per_pixel !=
-		    set->crtc->fb->bits_per_pixel) ||
-		   set->fb->depth != set->crtc->fb->depth)
-		fb_changed = true;
-	else
+	} else
 		fb_changed = true;
 	}
 
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 766c46875a20..f3c58e2bd75c 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -125,28 +125,28 @@ static struct drm_ioctl_desc drm_ioctls[] = {
 
 	DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_update_drawable_info, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
 
-	DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, 0),
-	DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH|DRM_UNLOCKED),
 
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources, DRM_MASTER|DRM_CONTROL_ALLOW),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, DRM_MASTER|DRM_CONTROL_ALLOW),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETCRTC, drm_mode_setcrtc, DRM_MASTER|DRM_CONTROL_ALLOW),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR, drm_mode_cursor_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETGAMMA, drm_mode_gamma_get_ioctl, DRM_MASTER),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETGAMMA, drm_mode_gamma_set_ioctl, DRM_MASTER),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETENCODER, drm_mode_getencoder, DRM_MASTER|DRM_CONTROL_ALLOW),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCONNECTOR, drm_mode_getconnector, DRM_MASTER|DRM_CONTROL_ALLOW),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATTACHMODE, drm_mode_attachmode_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_DETACHMODE, drm_mode_detachmode_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPERTY, drm_mode_getproperty_ioctl, DRM_MASTER | DRM_CONTROL_ALLOW),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPROPERTY, drm_mode_connector_property_set_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_MASTER|DRM_CONTROL_ALLOW),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_MASTER|DRM_CONTROL_ALLOW),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_MASTER|DRM_CONTROL_ALLOW),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW)
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETCRTC, drm_mode_setcrtc, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR, drm_mode_cursor_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETGAMMA, drm_mode_gamma_get_ioctl, DRM_MASTER|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETGAMMA, drm_mode_gamma_set_ioctl, DRM_MASTER|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETENCODER, drm_mode_getencoder, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCONNECTOR, drm_mode_getconnector, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATTACHMODE, drm_mode_attachmode_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_DETACHMODE, drm_mode_detachmode_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPERTY, drm_mode_getproperty_ioctl, DRM_MASTER | DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPROPERTY, drm_mode_connector_property_set_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED)
 };
 
 #define DRM_CORE_IOCTL_COUNT	ARRAY_SIZE( drm_ioctls )
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index ab6c97330412..f97e7c42ac8e 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -60,8 +60,7 @@
 #define EDID_QUIRK_FIRST_DETAILED_PREFERRED	(1 << 5)
 /* use +hsync +vsync for detailed mode */
 #define EDID_QUIRK_DETAILED_SYNC_PP		(1 << 6)
-/* define the number of Extension EDID block */
-#define MAX_EDID_EXT_NUM 4
+
 
 #define LEVEL_DMT	0
 #define LEVEL_GTF	1
@@ -114,14 +113,14 @@ static const u8 edid_header[] = {
 };
 
 /**
- * edid_is_valid - sanity check EDID data
+ * drm_edid_is_valid - sanity check EDID data
  * @edid: EDID data
  *
  * Sanity check the EDID block by looking at the header, the version number
  * and the checksum. Return 0 if the EDID doesn't check out, or 1 if it's
  * valid.
  */
-static bool edid_is_valid(struct edid *edid)
+bool drm_edid_is_valid(struct edid *edid)
 {
 	int i, score = 0;
 	u8 csum = 0;
@@ -163,6 +162,7 @@ bad:
 	}
 	return 0;
 }
+EXPORT_SYMBOL(drm_edid_is_valid);
 
 /**
  * edid_vendor - match a string against EDID's obfuscated vendor field
@@ -1112,8 +1112,8 @@ static int add_detailed_info_eedid(struct drm_connector *connector,
 	}
 
 	/* Chose real EDID extension number */
-	edid_ext_num = edid->extensions > MAX_EDID_EXT_NUM ?
-		MAX_EDID_EXT_NUM : edid->extensions;
+	edid_ext_num = edid->extensions > DRM_MAX_EDID_EXT_NUM ?
+		DRM_MAX_EDID_EXT_NUM : edid->extensions;
 
 	/* Find CEA extension */
 	for (i = 0; i < edid_ext_num; i++) {
@@ -1195,7 +1195,7 @@ static int drm_ddc_read_edid(struct drm_connector *connector,
 	for (i = 0; i < 4; i++) {
 		if (drm_do_probe_ddc_edid(adapter, buf, len))
 			return -1;
-		if (edid_is_valid((struct edid *)buf))
+		if (drm_edid_is_valid((struct edid *)buf))
 			return 0;
 	}
 
@@ -1220,7 +1220,7 @@ struct edid *drm_get_edid(struct drm_connector *connector,
 	int ret;
 	struct edid *edid;
 
-	edid = kmalloc(EDID_LENGTH * (MAX_EDID_EXT_NUM + 1),
+	edid = kmalloc(EDID_LENGTH * (DRM_MAX_EDID_EXT_NUM + 1),
 		       GFP_KERNEL);
 	if (edid == NULL) {
 		dev_warn(&connector->dev->pdev->dev,
@@ -1238,14 +1238,14 @@ struct edid *drm_get_edid(struct drm_connector *connector,
 	if (edid->extensions != 0) {
 		int edid_ext_num = edid->extensions;
 
-		if (edid_ext_num > MAX_EDID_EXT_NUM) {
+		if (edid_ext_num > DRM_MAX_EDID_EXT_NUM) {
 			dev_warn(&connector->dev->pdev->dev,
 				 "The number of extension(%d) is "
 				 "over max (%d), actually read number (%d)\n",
-				 edid_ext_num, MAX_EDID_EXT_NUM,
-				 MAX_EDID_EXT_NUM);
+				 edid_ext_num, DRM_MAX_EDID_EXT_NUM,
+				 DRM_MAX_EDID_EXT_NUM);
 			/* Reset EDID extension number to be read */
-			edid_ext_num = MAX_EDID_EXT_NUM;
+			edid_ext_num = DRM_MAX_EDID_EXT_NUM;
 		}
 		/* Read EDID including extensions too */
 		ret = drm_ddc_read_edid(connector, adapter, (char *)edid,
@@ -1288,8 +1288,8 @@ bool drm_detect_hdmi_monitor(struct edid *edid)
 		goto end;
 
 	/* Chose real EDID extension number */
-	edid_ext_num = edid->extensions > MAX_EDID_EXT_NUM ?
-		MAX_EDID_EXT_NUM : edid->extensions;
+	edid_ext_num = edid->extensions > DRM_MAX_EDID_EXT_NUM ?
+		DRM_MAX_EDID_EXT_NUM : edid->extensions;
 
 	/* Find CEA extension */
 	for (i = 0; i < edid_ext_num; i++) {
@@ -1346,7 +1346,7 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
 	if (edid == NULL) {
 		return 0;
 	}
-	if (!edid_is_valid(edid)) {
+	if (!drm_edid_is_valid(edid)) {
 		dev_warn(&connector->dev->pdev->dev, "%s: EDID invalid.\n",
 			 drm_get_connector_name(connector));
 		return 0;
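
Note: the checksum test that drm_edid_is_valid relies on is a plain byte sum
over the 128-byte base block; a standalone sketch of that rule (not the
driver's exact loop):

static bool edid_block_checksum_ok(const u8 *block)
{
	u8 csum = 0;
	int i;

	for (i = 0; i < 128; i++)	/* EDID_LENGTH */
		csum += block[i];

	/* All 128 bytes, checksum byte included, must sum to 0 mod 256. */
	return csum == 0;
}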
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 0f9e90552dc4..50549703584f 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -27,6 +27,7 @@
  * Dave Airlie <airlied@linux.ie>
  * Jesse Barnes <jesse.barnes@intel.com>
  */
+#include <linux/kernel.h>
 #include <linux/sysrq.h>
 #include <linux/fb.h>
 #include "drmP.h"
@@ -50,21 +51,6 @@ int drm_fb_helper_add_connector(struct drm_connector *connector)
 }
 EXPORT_SYMBOL(drm_fb_helper_add_connector);
 
-static int my_atoi(const char *name)
-{
-	int val = 0;
-
-	for (;; name++) {
-		switch (*name) {
-		case '0' ... '9':
-			val = 10*val+(*name-'0');
-			break;
-		default:
-			return val;
-		}
-	}
-}
-
 /**
  * drm_fb_helper_connector_parse_command_line - parse command line for connector
  * @connector - connector to parse line for
@@ -111,7 +97,7 @@ static bool drm_fb_helper_connector_parse_command_line(struct drm_connector *con
 			namelen = i;
 			if (!refresh_specified && !bpp_specified &&
 			    !yres_specified) {
-				refresh = my_atoi(&name[i+1]);
+				refresh = simple_strtol(&name[i+1], NULL, 10);
 				refresh_specified = 1;
 				if (cvt || rb)
 					cvt = 0;
@@ -121,7 +107,7 @@ static bool drm_fb_helper_connector_parse_command_line(struct drm_connector *con
 		case '-':
 			namelen = i;
 			if (!bpp_specified && !yres_specified) {
-				bpp = my_atoi(&name[i+1]);
+				bpp = simple_strtol(&name[i+1], NULL, 10);
 				bpp_specified = 1;
 				if (cvt || rb)
 					cvt = 0;
@@ -130,7 +116,7 @@ static bool drm_fb_helper_connector_parse_command_line(struct drm_connector *con
 			break;
 		case 'x':
 			if (!yres_specified) {
-				yres = my_atoi(&name[i+1]);
+				yres = simple_strtol(&name[i+1], NULL, 10);
 				yres_specified = 1;
 			} else
 				goto done;
@@ -170,7 +156,7 @@ static bool drm_fb_helper_connector_parse_command_line(struct drm_connector *con
 		}
 	}
 	if (i < 0 && yres_specified) {
-		xres = my_atoi(name);
+		xres = simple_strtol(name, NULL, 10);
 		res_specified = 1;
 	}
 done:
@@ -694,7 +680,7 @@ int drm_fb_helper_set_par(struct fb_info *info)
 	int i;
 
 	if (var->pixclock != 0) {
-		DRM_ERROR("PIXEL CLCOK SET\n");
+		DRM_ERROR("PIXEL CLOCK SET\n");
 		return -EINVAL;
 	}
 
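
Note: simple_strtol() is declared in <linux/kernel.h>, which is why the new
include appears above; unlike the removed my_atoi() it can also report where
parsing stopped. A sketch of the idiom on a local mode string (the values and
variables are illustrative, not the helper's actual state):

	const char *opt = "1024x768@60";
	char *end;
	long xres = simple_strtol(opt, &end, 10);	/* 1024, end -> "x768@60" */
	long yres = simple_strtol(end + 1, &end, 10);	/* 768,  end -> "@60" */
	long refresh = simple_strtol(end + 1, NULL, 10);	/* 60 */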
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 8bf3770f294e..aa89d4b0b4c4 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -192,9 +192,7 @@ drm_gem_handle_delete(struct drm_file *filp, u32 handle)
 	idr_remove(&filp->object_idr, handle);
 	spin_unlock(&filp->table_lock);
 
-	mutex_lock(&dev->struct_mutex);
-	drm_gem_object_handle_unreference(obj);
-	mutex_unlock(&dev->struct_mutex);
+	drm_gem_object_handle_unreference_unlocked(obj);
 
 	return 0;
 }
@@ -325,9 +323,7 @@ again:
 	}
 
 err:
-	mutex_lock(&dev->struct_mutex);
-	drm_gem_object_unreference(obj);
-	mutex_unlock(&dev->struct_mutex);
+	drm_gem_object_unreference_unlocked(obj);
 	return ret;
 }
 
@@ -358,9 +354,7 @@ drm_gem_open_ioctl(struct drm_device *dev, void *data,
 		return -ENOENT;
 
 	ret = drm_gem_handle_create(file_priv, obj, &handle);
-	mutex_lock(&dev->struct_mutex);
-	drm_gem_object_unreference(obj);
-	mutex_unlock(&dev->struct_mutex);
+	drm_gem_object_unreference_unlocked(obj);
 	if (ret)
 		return ret;
 
@@ -390,7 +384,7 @@ drm_gem_object_release_handle(int id, void *ptr, void *data)
 {
 	struct drm_gem_object *obj = ptr;
 
-	drm_gem_object_handle_unreference(obj);
+	drm_gem_object_handle_unreference_unlocked(obj);
 
 	return 0;
 }
@@ -403,16 +397,25 @@ drm_gem_object_release_handle(int id, void *ptr, void *data)
 void
 drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
 {
-	mutex_lock(&dev->struct_mutex);
 	idr_for_each(&file_private->object_idr,
 		     &drm_gem_object_release_handle, NULL);
 
 	idr_destroy(&file_private->object_idr);
-	mutex_unlock(&dev->struct_mutex);
+}
+
+static void
+drm_gem_object_free_common(struct drm_gem_object *obj)
+{
+	struct drm_device *dev = obj->dev;
+	fput(obj->filp);
+	atomic_dec(&dev->object_count);
+	atomic_sub(obj->size, &dev->object_memory);
+	kfree(obj);
 }
 
 /**
  * Called after the last reference to the object has been lost.
+ * Must be called holding struct_mutex
  *
  * Frees the object
  */
@@ -427,14 +430,40 @@ drm_gem_object_free(struct kref *kref)
 	if (dev->driver->gem_free_object != NULL)
 		dev->driver->gem_free_object(obj);
 
-	fput(obj->filp);
-	atomic_dec(&dev->object_count);
-	atomic_sub(obj->size, &dev->object_memory);
-	kfree(obj);
+	drm_gem_object_free_common(obj);
 }
 EXPORT_SYMBOL(drm_gem_object_free);
 
 /**
+ * Called after the last reference to the object has been lost.
+ * Must be called without holding struct_mutex
+ *
+ * Frees the object
+ */
+void
+drm_gem_object_free_unlocked(struct kref *kref)
+{
+	struct drm_gem_object *obj = (struct drm_gem_object *) kref;
+	struct drm_device *dev = obj->dev;
+
+	if (dev->driver->gem_free_object_unlocked != NULL)
+		dev->driver->gem_free_object_unlocked(obj);
+	else if (dev->driver->gem_free_object != NULL) {
+		mutex_lock(&dev->struct_mutex);
+		dev->driver->gem_free_object(obj);
+		mutex_unlock(&dev->struct_mutex);
+	}
+
+	drm_gem_object_free_common(obj);
+}
+EXPORT_SYMBOL(drm_gem_object_free_unlocked);
+
+static void drm_gem_object_ref_bug(struct kref *list_kref)
+{
+	BUG();
+}
+
+/**
  * Called after the last handle to the object has been closed
 *
 * Removes any name for the object. Note that this must be
@@ -458,8 +487,10 @@ drm_gem_object_handle_free(struct kref *kref)
 		/*
 		 * The object name held a reference to this object, drop
 		 * that now.
+		 *
+		 * This cannot be the last reference, since the handle holds one too.
 		 */
-		drm_gem_object_unreference(obj);
+		kref_put(&obj->refcount, drm_gem_object_ref_bug);
 	} else
 		spin_unlock(&dev->object_name_lock);
 
@@ -477,11 +508,8 @@ EXPORT_SYMBOL(drm_gem_vm_open);
 void drm_gem_vm_close(struct vm_area_struct *vma)
 {
 	struct drm_gem_object *obj = vma->vm_private_data;
-	struct drm_device *dev = obj->dev;
 
-	mutex_lock(&dev->struct_mutex);
-	drm_gem_object_unreference(obj);
-	mutex_unlock(&dev->struct_mutex);
+	drm_gem_object_unreference_unlocked(obj);
 }
 EXPORT_SYMBOL(drm_gem_vm_close);
 
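
Note: every hunk in this file repeats one conversion; a sketch of the
before/after calling convention (the comments state the locking rule the new
helpers assume, based on the code above):

	/* Before this patch: every final unreference was wrapped by hand. */
	mutex_lock(&dev->struct_mutex);
	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	/* After: the _unlocked variants take struct_mutex themselves if the
	 * reference count actually hits zero, so the caller must NOT be
	 * holding it. */
	drm_gem_object_unreference_unlocked(obj);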
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index a894ade03093..1376dfe44c95 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -162,7 +162,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
 	struct drm_device *dev = node->minor->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
 
-	if (!IS_IRONLAKE(dev)) {
+	if (!HAS_PCH_SPLIT(dev)) {
 		seq_printf(m, "Interrupt enable: %08x\n",
 			   I915_READ(IER));
 		seq_printf(m, "Interrupt identity: %08x\n",
@@ -350,6 +350,36 @@ static int i915_ringbuffer_info(struct seq_file *m, void *data)
 	return 0;
 }
 
+static const char *pin_flag(int pinned)
+{
+	if (pinned > 0)
+		return " P";
+	else if (pinned < 0)
+		return " p";
+	else
+		return "";
+}
+
+static const char *tiling_flag(int tiling)
+{
+	switch (tiling) {
+	default:
+	case I915_TILING_NONE: return "";
+	case I915_TILING_X: return " X";
+	case I915_TILING_Y: return " Y";
+	}
+}
+
+static const char *dirty_flag(int dirty)
+{
+	return dirty ? " dirty" : "";
+}
+
+static const char *purgeable_flag(int purgeable)
+{
+	return purgeable ? " purgeable" : "";
+}
+
 static int i915_error_state(struct seq_file *m, void *unused)
 {
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -357,6 +387,7 @@ static int i915_error_state(struct seq_file *m, void *unused)
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_error_state *error;
 	unsigned long flags;
+	int i, page, offset, elt;
 
 	spin_lock_irqsave(&dev_priv->error_lock, flags);
 	if (!dev_priv->first_error) {
@@ -368,6 +399,7 @@ static int i915_error_state(struct seq_file *m, void *unused)
 
 	seq_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
 		   error->time.tv_usec);
+	seq_printf(m, "PCI ID: 0x%04x\n", dev->pci_device);
 	seq_printf(m, "EIR: 0x%08x\n", error->eir);
 	seq_printf(m, " PGTBL_ER: 0x%08x\n", error->pgtbl_er);
 	seq_printf(m, " INSTPM: 0x%08x\n", error->instpm);
@@ -379,6 +411,59 @@ static int i915_error_state(struct seq_file *m, void *unused)
 	seq_printf(m, " INSTPS: 0x%08x\n", error->instps);
 	seq_printf(m, " INSTDONE1: 0x%08x\n", error->instdone1);
 	}
+	seq_printf(m, "seqno: 0x%08x\n", error->seqno);
+
+	if (error->active_bo_count) {
+		seq_printf(m, "Buffers [%d]:\n", error->active_bo_count);
+
+		for (i = 0; i < error->active_bo_count; i++) {
+			seq_printf(m, " %08x %8zd %08x %08x %08x%s%s%s%s",
+				   error->active_bo[i].gtt_offset,
+				   error->active_bo[i].size,
+				   error->active_bo[i].read_domains,
+				   error->active_bo[i].write_domain,
+				   error->active_bo[i].seqno,
+				   pin_flag(error->active_bo[i].pinned),
+				   tiling_flag(error->active_bo[i].tiling),
+				   dirty_flag(error->active_bo[i].dirty),
+				   purgeable_flag(error->active_bo[i].purgeable));
+
+			if (error->active_bo[i].name)
+				seq_printf(m, " (name: %d)", error->active_bo[i].name);
+			if (error->active_bo[i].fence_reg != I915_FENCE_REG_NONE)
+				seq_printf(m, " (fence: %d)", error->active_bo[i].fence_reg);
+
+			seq_printf(m, "\n");
+		}
+	}
+
+	for (i = 0; i < ARRAY_SIZE(error->batchbuffer); i++) {
+		if (error->batchbuffer[i]) {
+			struct drm_i915_error_object *obj = error->batchbuffer[i];
+
+			seq_printf(m, "--- gtt_offset = 0x%08x\n", obj->gtt_offset);
+			offset = 0;
+			for (page = 0; page < obj->page_count; page++) {
+				for (elt = 0; elt < PAGE_SIZE/4; elt++) {
+					seq_printf(m, "%08x : %08x\n", offset, obj->pages[page][elt]);
+					offset += 4;
+				}
+			}
+		}
+	}
+
+	if (error->ringbuffer) {
+		struct drm_i915_error_object *obj = error->ringbuffer;
+
+		seq_printf(m, "--- ringbuffer = 0x%08x\n", obj->gtt_offset);
+		offset = 0;
+		for (page = 0; page < obj->page_count; page++) {
+			for (elt = 0; elt < PAGE_SIZE/4; elt++) {
+				seq_printf(m, "%08x : %08x\n", offset, obj->pages[page][elt]);
+				offset += 4;
+			}
+		}
+	}
 
 out:
 	spin_unlock_irqrestore(&dev_priv->error_lock, flags);
@@ -386,6 +471,165 @@ out:
 	return 0;
 }
 
+static int i915_rstdby_delays(struct seq_file *m, void *unused)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	u16 crstanddelay = I915_READ16(CRSTANDVID);
+
+	seq_printf(m, "w/ctx: %d, w/o ctx: %d\n", (crstanddelay >> 8) & 0x3f, (crstanddelay & 0x3f));
+
+	return 0;
+}
+
+static int i915_cur_delayinfo(struct seq_file *m, void *unused)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	u16 rgvswctl = I915_READ16(MEMSWCTL);
+
+	seq_printf(m, "Last command: 0x%01x\n", (rgvswctl >> 13) & 0x3);
+	seq_printf(m, "Command status: %d\n", (rgvswctl >> 12) & 1);
+	seq_printf(m, "P%d DELAY 0x%02x\n", (rgvswctl >> 8) & 0xf,
+		   rgvswctl & 0x3f);
+
+	return 0;
+}
+
+static int i915_delayfreq_table(struct seq_file *m, void *unused)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	u32 delayfreq;
+	int i;
+
+	for (i = 0; i < 16; i++) {
+		delayfreq = I915_READ(PXVFREQ_BASE + i * 4);
+		seq_printf(m, "P%02dVIDFREQ: 0x%08x\n", i, delayfreq);
+	}
+
+	return 0;
+}
+
+static inline int MAP_TO_MV(int map)
+{
+	return 1250 - (map * 25);
+}
+
+static int i915_inttoext_table(struct seq_file *m, void *unused)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	u32 inttoext;
+	int i;
+
+	for (i = 1; i <= 32; i++) {
+		inttoext = I915_READ(INTTOEXT_BASE_ILK + i * 4);
+		seq_printf(m, "INTTOEXT%02d: 0x%08x\n", i, inttoext);
+	}
+
+	return 0;
+}
+
+static int i915_drpc_info(struct seq_file *m, void *unused)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	u32 rgvmodectl = I915_READ(MEMMODECTL);
+
+	seq_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ?
+		   "yes" : "no");
+	seq_printf(m, "Boost freq: %d\n",
+		   (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
+		   MEMMODE_BOOST_FREQ_SHIFT);
+	seq_printf(m, "HW control enabled: %s\n",
+		   rgvmodectl & MEMMODE_HWIDLE_EN ? "yes" : "no");
+	seq_printf(m, "SW control enabled: %s\n",
+		   rgvmodectl & MEMMODE_SWMODE_EN ? "yes" : "no");
+	seq_printf(m, "Gated voltage change: %s\n",
+		   rgvmodectl & MEMMODE_RCLK_GATE ? "yes" : "no");
+	seq_printf(m, "Starting frequency: P%d\n",
+		   (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
+	seq_printf(m, "Max frequency: P%d\n",
+		   (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
+	seq_printf(m, "Min frequency: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
+
+	return 0;
+}
+
+static int i915_fbc_status(struct seq_file *m, void *unused)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct drm_crtc *crtc;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	bool fbc_enabled = false;
+
+	if (!dev_priv->display.fbc_enabled) {
+		seq_printf(m, "FBC unsupported on this chipset\n");
+		return 0;
+	}
+
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+		if (!crtc->enabled)
+			continue;
+		if (dev_priv->display.fbc_enabled(crtc))
+			fbc_enabled = true;
+	}
+
+	if (fbc_enabled) {
+		seq_printf(m, "FBC enabled\n");
+	} else {
+		seq_printf(m, "FBC disabled: ");
+		switch (dev_priv->no_fbc_reason) {
+		case FBC_STOLEN_TOO_SMALL:
+			seq_printf(m, "not enough stolen memory");
+			break;
+		case FBC_UNSUPPORTED_MODE:
+			seq_printf(m, "mode not supported");
+			break;
+		case FBC_MODE_TOO_LARGE:
+			seq_printf(m, "mode too large");
+			break;
+		case FBC_BAD_PLANE:
+			seq_printf(m, "FBC unsupported on plane");
+			break;
+		case FBC_NOT_TILED:
+			seq_printf(m, "scanout buffer not tiled");
+			break;
+		default:
+			seq_printf(m, "unknown reason");
+		}
+		seq_printf(m, "\n");
+	}
+	return 0;
+}
+
+static int i915_sr_status(struct seq_file *m, void *unused)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	bool sr_enabled = false;
+
+	if (IS_I965G(dev) || IS_I945G(dev) || IS_I945GM(dev))
+		sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
+	else if (IS_I915GM(dev))
+		sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
+	else if (IS_PINEVIEW(dev))
+		sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
+
+	seq_printf(m, "self-refresh: %s\n", sr_enabled ? "enabled" :
+		   "disabled");
+
+	return 0;
+}
+
 static int
 i915_wedged_open(struct inode *inode,
 		 struct file *filp)
@@ -503,6 +747,13 @@ static struct drm_info_list i915_debugfs_list[] = {
 	{"i915_ringbuffer_info", i915_ringbuffer_info, 0},
 	{"i915_batchbuffers", i915_batchbuffer_info, 0},
 	{"i915_error_state", i915_error_state, 0},
+	{"i915_rstdby_delays", i915_rstdby_delays, 0},
+	{"i915_cur_delayinfo", i915_cur_delayinfo, 0},
+	{"i915_delayfreq_table", i915_delayfreq_table, 0},
+	{"i915_inttoext_table", i915_inttoext_table, 0},
+	{"i915_drpc_info", i915_drpc_info, 0},
+	{"i915_fbc_status", i915_fbc_status, 0},
+	{"i915_sr_status", i915_sr_status, 0},
 };
 #define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
 
508 759
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 2307f98349f7..8bfc0bbf13e6 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -35,6 +35,9 @@
 #include "i915_drv.h"
 #include "i915_trace.h"
 #include <linux/vgaarb.h>
+#include <linux/acpi.h>
+#include <linux/pnp.h>
+#include <linux/vga_switcheroo.h>
 
 /* Really want an OS-independent resettable timer. Would like to have
  * this loop run for (eg) 3 sec, but have the timer reset every time
@@ -933,6 +936,120 @@ static int i915_get_bridge_dev(struct drm_device *dev)
 	return 0;
 }
 
+#define MCHBAR_I915 0x44
+#define MCHBAR_I965 0x48
+#define MCHBAR_SIZE (4*4096)
+
+#define DEVEN_REG 0x54
+#define DEVEN_MCHBAR_EN (1 << 28)
+
+/* Allocate space for the MCH regs if needed, return nonzero on error */
+static int
+intel_alloc_mchbar_resource(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	int reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915;
+	u32 temp_lo, temp_hi = 0;
+	u64 mchbar_addr;
+	int ret = 0;
+
+	if (IS_I965G(dev))
+		pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
+	pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo);
+	mchbar_addr = ((u64)temp_hi << 32) | temp_lo;
+
+	/* If ACPI doesn't have it, assume we need to allocate it ourselves */
+#ifdef CONFIG_PNP
+	if (mchbar_addr &&
+	    pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE)) {
+		ret = 0;
+		goto out;
+	}
+#endif
+
+	/* Get some space for it */
+	ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus, &dev_priv->mch_res,
+				     MCHBAR_SIZE, MCHBAR_SIZE,
+				     PCIBIOS_MIN_MEM,
+				     0, pcibios_align_resource,
+				     dev_priv->bridge_dev);
+	if (ret) {
+		DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret);
+		dev_priv->mch_res.start = 0;
+		goto out;
+	}
+
+	if (IS_I965G(dev))
+		pci_write_config_dword(dev_priv->bridge_dev, reg + 4,
+				       upper_32_bits(dev_priv->mch_res.start));
+
+	pci_write_config_dword(dev_priv->bridge_dev, reg,
+			       lower_32_bits(dev_priv->mch_res.start));
+out:
+	return ret;
+}
+
+/* Setup MCHBAR if possible, return true if we should disable it again */
+static void
+intel_setup_mchbar(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	int mchbar_reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915;
+	u32 temp;
+	bool enabled;
+
+	dev_priv->mchbar_need_disable = false;
+
+	if (IS_I915G(dev) || IS_I915GM(dev)) {
+		pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
+		enabled = !!(temp & DEVEN_MCHBAR_EN);
+	} else {
+		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
+		enabled = temp & 1;
+	}
+
+	/* If it's already enabled, don't have to do anything */
+	if (enabled)
+		return;
+
+	if (intel_alloc_mchbar_resource(dev))
+		return;
+
+	dev_priv->mchbar_need_disable = true;
+
+	/* Space is allocated or reserved, so enable it. */
+	if (IS_I915G(dev) || IS_I915GM(dev)) {
+		pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG,
+				       temp | DEVEN_MCHBAR_EN);
+	} else {
+		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
+		pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1);
+	}
+}
+
+static void
+intel_teardown_mchbar(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	int mchbar_reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915;
+	u32 temp;
+
+	if (dev_priv->mchbar_need_disable) {
+		if (IS_I915G(dev) || IS_I915GM(dev)) {
+			pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
+			temp &= ~DEVEN_MCHBAR_EN;
+			pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG, temp);
+		} else {
+			pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
+			temp &= ~1;
+			pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp);
+		}
+	}
+
+	if (dev_priv->mch_res.start)
+		release_resource(&dev_priv->mch_res);
+}
+
 /**
  * i915_probe_agp - get AGP bootup configuration
  * @pdev: PCI device
@@ -978,59 +1095,123 @@ static int i915_probe_agp(struct drm_device *dev, uint32_t *aperture_size,
 	 * Some of the preallocated space is taken by the GTT
 	 * and popup. GTT is 1K per MB of aperture size, and popup is 4K.
 	 */
-	if (IS_G4X(dev) || IS_PINEVIEW(dev) || IS_IRONLAKE(dev))
+	if (IS_G4X(dev) || IS_PINEVIEW(dev) || IS_IRONLAKE(dev) || IS_GEN6(dev))
 		overhead = 4096;
 	else
 		overhead = (*aperture_size / 1024) + 4096;
 
-	switch (tmp & INTEL_GMCH_GMS_MASK) {
-	case INTEL_855_GMCH_GMS_DISABLED:
-		DRM_ERROR("video memory is disabled\n");
-		return -1;
-	case INTEL_855_GMCH_GMS_STOLEN_1M:
-		stolen = 1 * 1024 * 1024;
-		break;
-	case INTEL_855_GMCH_GMS_STOLEN_4M:
-		stolen = 4 * 1024 * 1024;
-		break;
-	case INTEL_855_GMCH_GMS_STOLEN_8M:
-		stolen = 8 * 1024 * 1024;
-		break;
-	case INTEL_855_GMCH_GMS_STOLEN_16M:
-		stolen = 16 * 1024 * 1024;
-		break;
-	case INTEL_855_GMCH_GMS_STOLEN_32M:
-		stolen = 32 * 1024 * 1024;
-		break;
-	case INTEL_915G_GMCH_GMS_STOLEN_48M:
-		stolen = 48 * 1024 * 1024;
-		break;
-	case INTEL_915G_GMCH_GMS_STOLEN_64M:
-		stolen = 64 * 1024 * 1024;
-		break;
-	case INTEL_GMCH_GMS_STOLEN_128M:
-		stolen = 128 * 1024 * 1024;
-		break;
-	case INTEL_GMCH_GMS_STOLEN_256M:
-		stolen = 256 * 1024 * 1024;
-		break;
-	case INTEL_GMCH_GMS_STOLEN_96M:
-		stolen = 96 * 1024 * 1024;
-		break;
-	case INTEL_GMCH_GMS_STOLEN_160M:
-		stolen = 160 * 1024 * 1024;
-		break;
-	case INTEL_GMCH_GMS_STOLEN_224M:
-		stolen = 224 * 1024 * 1024;
-		break;
-	case INTEL_GMCH_GMS_STOLEN_352M:
-		stolen = 352 * 1024 * 1024;
-		break;
-	default:
-		DRM_ERROR("unexpected GMCH_GMS value: 0x%02x\n",
-			  tmp & INTEL_GMCH_GMS_MASK);
-		return -1;
+	if (IS_GEN6(dev)) {
+		/* SNB has memory control reg at 0x50.w */
+		pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &tmp);
+
+		switch (tmp & SNB_GMCH_GMS_STOLEN_MASK) {
+		case INTEL_855_GMCH_GMS_DISABLED:
+			DRM_ERROR("video memory is disabled\n");
+			return -1;
+		case SNB_GMCH_GMS_STOLEN_32M:
+			stolen = 32 * 1024 * 1024;
+			break;
+		case SNB_GMCH_GMS_STOLEN_64M:
+			stolen = 64 * 1024 * 1024;
+			break;
+		case SNB_GMCH_GMS_STOLEN_96M:
+			stolen = 96 * 1024 * 1024;
+			break;
+		case SNB_GMCH_GMS_STOLEN_128M:
+			stolen = 128 * 1024 * 1024;
+			break;
+		case SNB_GMCH_GMS_STOLEN_160M:
+			stolen = 160 * 1024 * 1024;
+			break;
+		case SNB_GMCH_GMS_STOLEN_192M:
+			stolen = 192 * 1024 * 1024;
+			break;
+		case SNB_GMCH_GMS_STOLEN_224M:
+			stolen = 224 * 1024 * 1024;
+			break;
+		case SNB_GMCH_GMS_STOLEN_256M:
+			stolen = 256 * 1024 * 1024;
+			break;
+		case SNB_GMCH_GMS_STOLEN_288M:
+			stolen = 288 * 1024 * 1024;
+			break;
+		case SNB_GMCH_GMS_STOLEN_320M:
+			stolen = 320 * 1024 * 1024;
+			break;
+		case SNB_GMCH_GMS_STOLEN_352M:
+			stolen = 352 * 1024 * 1024;
+			break;
+		case SNB_GMCH_GMS_STOLEN_384M:
+			stolen = 384 * 1024 * 1024;
+			break;
+		case SNB_GMCH_GMS_STOLEN_416M:
+			stolen = 416 * 1024 * 1024;
+			break;
+		case SNB_GMCH_GMS_STOLEN_448M:
+			stolen = 448 * 1024 * 1024;
+			break;
+		case SNB_GMCH_GMS_STOLEN_480M:
+			stolen = 480 * 1024 * 1024;
+			break;
+		case SNB_GMCH_GMS_STOLEN_512M:
+			stolen = 512 * 1024 * 1024;
+			break;
+		default:
+			DRM_ERROR("unexpected GMCH_GMS value: 0x%02x\n",
+				  tmp & SNB_GMCH_GMS_STOLEN_MASK);
+			return -1;
+		}
+	} else {
+		switch (tmp & INTEL_GMCH_GMS_MASK) {
+		case INTEL_855_GMCH_GMS_DISABLED:
+			DRM_ERROR("video memory is disabled\n");
+			return -1;
+		case INTEL_855_GMCH_GMS_STOLEN_1M:
+			stolen = 1 * 1024 * 1024;
+			break;
+		case INTEL_855_GMCH_GMS_STOLEN_4M:
+			stolen = 4 * 1024 * 1024;
+			break;
+		case INTEL_855_GMCH_GMS_STOLEN_8M:
+			stolen = 8 * 1024 * 1024;
+			break;
+		case INTEL_855_GMCH_GMS_STOLEN_16M:
+			stolen = 16 * 1024 * 1024;
+			break;
+		case INTEL_855_GMCH_GMS_STOLEN_32M:
+			stolen = 32 * 1024 * 1024;
+			break;
+		case INTEL_915G_GMCH_GMS_STOLEN_48M:
+			stolen = 48 * 1024 * 1024;
+			break;
+		case INTEL_915G_GMCH_GMS_STOLEN_64M:
+			stolen = 64 * 1024 * 1024;
+			break;
+		case INTEL_GMCH_GMS_STOLEN_128M:
+			stolen = 128 * 1024 * 1024;
+			break;
+		case INTEL_GMCH_GMS_STOLEN_256M:
+			stolen = 256 * 1024 * 1024;
+			break;
+		case INTEL_GMCH_GMS_STOLEN_96M:
+			stolen = 96 * 1024 * 1024;
+			break;
+		case INTEL_GMCH_GMS_STOLEN_160M:
+			stolen = 160 * 1024 * 1024;
+			break;
+		case INTEL_GMCH_GMS_STOLEN_224M:
+			stolen = 224 * 1024 * 1024;
+			break;
+		case INTEL_GMCH_GMS_STOLEN_352M:
+			stolen = 352 * 1024 * 1024;
+			break;
+		default:
+			DRM_ERROR("unexpected GMCH_GMS value: 0x%02x\n",
+				  tmp & INTEL_GMCH_GMS_MASK);
+			return -1;
+		}
 	}
+
 	*preallocated_size = stolen - overhead;
 	*start = overhead;
 
@@ -1064,7 +1245,7 @@ static unsigned long i915_gtt_to_phys(struct drm_device *dev,
 	int gtt_offset, gtt_size;
 
 	if (IS_I965G(dev)) {
-		if (IS_G4X(dev) || IS_IRONLAKE(dev)) {
+		if (IS_G4X(dev) || IS_IRONLAKE(dev) || IS_GEN6(dev)) {
 			gtt_offset = 2*1024*1024;
 			gtt_size = 2*1024*1024;
 		} else {
@@ -1133,6 +1314,7 @@ static void i915_setup_compression(struct drm_device *dev, int size)
 	/* Leave 1M for line length buffer & misc. */
 	compressed_fb = drm_mm_search_free(&dev_priv->vram, size, 4096, 0);
 	if (!compressed_fb) {
+		dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
 		i915_warn_stolen(dev);
 		return;
 	}
@@ -1140,6 +1322,7 @@ static void i915_setup_compression(struct drm_device *dev, int size)
 	compressed_fb = drm_mm_get_block(compressed_fb, size, 4096);
 	if (!compressed_fb) {
 		i915_warn_stolen(dev);
+		dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
 		return;
 	}
 
@@ -1199,6 +1382,32 @@ static unsigned int i915_vga_set_decode(void *cookie, bool state)
 	return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
 }
 
+static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
+{
+	struct drm_device *dev = pci_get_drvdata(pdev);
+	pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
+	if (state == VGA_SWITCHEROO_ON) {
+		printk(KERN_INFO "i915: switched on\n");
+		/* i915 resume handler doesn't set to D0 */
+		pci_set_power_state(dev->pdev, PCI_D0);
+		i915_resume(dev);
+	} else {
+		printk(KERN_ERR "i915: switched off\n");
+		i915_suspend(dev, pmm);
+	}
+}
+
+static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
+{
+	struct drm_device *dev = pci_get_drvdata(pdev);
+	bool can_switch;
+
+	spin_lock(&dev->count_lock);
+	can_switch = (dev->open_count == 0);
+	spin_unlock(&dev->count_lock);
+	return can_switch;
+}
+
 static int i915_load_modeset_init(struct drm_device *dev,
 				  unsigned long prealloc_start,
 				  unsigned long prealloc_size,
@@ -1260,6 +1469,12 @@ static int i915_load_modeset_init(struct drm_device *dev,
 	if (ret)
 		goto destroy_ringbuffer;
 
+	ret = vga_switcheroo_register_client(dev->pdev,
+					     i915_switcheroo_set_state,
+					     i915_switcheroo_can_switch);
+	if (ret)
+		goto destroy_ringbuffer;
+
 	intel_modeset_init(dev);
 
 	ret = drm_irq_install(dev);
@@ -1281,7 +1496,9 @@ static int i915_load_modeset_init(struct drm_device *dev,
 	return 0;
 
 destroy_ringbuffer:
+	mutex_lock(&dev->struct_mutex);
 	i915_gem_cleanup_ringbuffer(dev);
+	mutex_unlock(&dev->struct_mutex);
 out:
 	return ret;
 }
@@ -1445,11 +1662,14 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 
 	dev->driver->get_vblank_counter = i915_get_vblank_counter;
 	dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
-	if (IS_G4X(dev) || IS_IRONLAKE(dev)) {
+	if (IS_G4X(dev) || IS_IRONLAKE(dev) || IS_GEN6(dev)) {
 		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
 		dev->driver->get_vblank_counter = gm45_get_vblank_counter;
 	}
 
+	/* Try to make sure MCHBAR is enabled before poking at it */
+	intel_setup_mchbar(dev);
+
 	i915_gem_load(dev);
 
 	/* Init HWS */
@@ -1523,6 +1743,8 @@ int i915_driver_unload(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
+	i915_destroy_error_state(dev);
+
 	destroy_workqueue(dev_priv->wq);
 	del_timer_sync(&dev_priv->hangcheck_timer);
 
@@ -1544,6 +1766,7 @@ int i915_driver_unload(struct drm_device *dev)
 		dev_priv->child_dev_num = 0;
 	}
 	drm_irq_uninstall(dev);
+	vga_switcheroo_unregister_client(dev->pdev);
 	vga_client_register(dev->pdev, NULL, NULL, NULL);
 }
 
@@ -1569,6 +1792,8 @@ int i915_driver_unload(struct drm_device *dev)
 		intel_cleanup_overlay(dev);
 	}
 
+	intel_teardown_mchbar(dev);
+
 	pci_dev_put(dev_priv->bridge_dev);
 	kfree(dev->dev_private);
 
@@ -1611,6 +1836,7 @@ void i915_driver_lastclose(struct drm_device * dev)
 
 	if (!dev_priv || drm_core_check_feature(dev, DRIVER_MODESET)) {
 		drm_fb_helper_restore();
+		vga_switcheroo_process_delayed_switch();
 		return;
 	}
 
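
Note: the sixteen Sandybridge stolen-memory cases added above form a uniform
32MB ladder. Assuming the decoded GMS field value n runs 1 (STOLEN_32M)
through 16 (STOLEN_512M) -- the SNB_GMCH_* defines themselves are not shown
in this patch -- the whole table reduces to:

static unsigned long snb_stolen_bytes(unsigned int n)
{
	return n * 32UL * 1024 * 1024;	/* 32MB, 64MB, ..., 512MB */
}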
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index cf4cb3e9a0c2..1b2e95455c05 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -49,6 +49,7 @@ unsigned int i915_lvds_downclock = 0;
 module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400);
 
 static struct drm_driver driver;
+extern int intel_agp_enabled;
 
 #define INTEL_VGA_DEVICE(id, info) {		\
 	.class = PCI_CLASS_DISPLAY_VGA << 8,	\
@@ -136,6 +137,16 @@ const static struct intel_device_info intel_ironlake_m_info = {
 	.has_hotplug = 1,
 };
 
+const static struct intel_device_info intel_sandybridge_d_info = {
+	.is_i965g = 1, .is_i9xx = 1, .need_gfx_hws = 1,
+	.has_hotplug = 1,
+};
+
+const static struct intel_device_info intel_sandybridge_m_info = {
+	.is_i965g = 1, .is_mobile = 1, .is_i9xx = 1, .need_gfx_hws = 1,
+	.has_hotplug = 1,
+};
+
 const static struct pci_device_id pciidlist[] = {
 	INTEL_VGA_DEVICE(0x3577, &intel_i830_info),
 	INTEL_VGA_DEVICE(0x2562, &intel_845g_info),
@@ -167,6 +178,8 @@ const static struct pci_device_id pciidlist[] = {
 	INTEL_VGA_DEVICE(0xa011, &intel_pineview_info),
 	INTEL_VGA_DEVICE(0x0042, &intel_ironlake_d_info),
 	INTEL_VGA_DEVICE(0x0046, &intel_ironlake_m_info),
+	INTEL_VGA_DEVICE(0x0102, &intel_sandybridge_d_info),
+	INTEL_VGA_DEVICE(0x0106, &intel_sandybridge_m_info),
 	{0, 0, 0}
 };
 
@@ -201,7 +214,7 @@ static int i915_drm_freeze(struct drm_device *dev)
 	return 0;
 }
 
-static int i915_suspend(struct drm_device *dev, pm_message_t state)
+int i915_suspend(struct drm_device *dev, pm_message_t state)
 {
 	int error;
 
@@ -255,7 +268,7 @@ static int i915_drm_thaw(struct drm_device *dev)
 	return error;
 }
 
-static int i915_resume(struct drm_device *dev)
+int i915_resume(struct drm_device *dev)
 {
 	if (pci_enable_device(dev->pdev))
 		return -EIO;
@@ -546,6 +559,11 @@ static struct drm_driver driver = {
 
 static int __init i915_init(void)
 {
+	if (!intel_agp_enabled) {
+		DRM_ERROR("drm/i915 can't work without intel_agp module!\n");
+		return -ENODEV;
+	}
+
 	driver.num_ioctls = i915_max_ioctl;
 
 	i915_gem_shrinker_init();
@@ -571,6 +589,11 @@ static int __init i915_init(void)
 	driver.driver_features &= ~DRIVER_MODESET;
 #endif
 
+	if (!(driver.driver_features & DRIVER_MODESET)) {
+		driver.suspend = i915_suspend;
+		driver.resume = i915_resume;
+	}
+
 	return drm_init(&driver);
 }
 
576 599
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index b99b6a841d95..979439cfb827 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -150,7 +150,27 @@ struct drm_i915_error_state {
150 u32 instps; 150 u32 instps;
151 u32 instdone1; 151 u32 instdone1;
152 u32 seqno; 152 u32 seqno;
153 u64 bbaddr;
153 struct timeval time; 154 struct timeval time;
155 struct drm_i915_error_object {
156 int page_count;
157 u32 gtt_offset;
158 u32 *pages[0];
159 } *ringbuffer, *batchbuffer[2];
160 struct drm_i915_error_buffer {
161 size_t size;
162 u32 name;
163 u32 seqno;
164 u32 gtt_offset;
165 u32 read_domains;
166 u32 write_domain;
167 u32 fence_reg;
168 s32 pinned:2;
169 u32 tiling:2;
170 u32 dirty:1;
171 u32 purgeable:1;
172 } *active_bo;
173 u32 active_bo_count;
154}; 174};
155 175
156struct drm_i915_display_funcs { 176struct drm_i915_display_funcs {
@@ -192,6 +212,14 @@ struct intel_device_info {
192 u8 cursor_needs_physical : 1; 212 u8 cursor_needs_physical : 1;
193}; 213};
194 214
215enum no_fbc_reason {
216 FBC_STOLEN_TOO_SMALL, /* not enough space to hold compressed buffers */
217 FBC_UNSUPPORTED_MODE, /* interlace or doublescanned mode */
218 FBC_MODE_TOO_LARGE, /* mode too large for compression */
219 FBC_BAD_PLANE, /* fbc not supported on plane */
220 FBC_NOT_TILED, /* buffer not tiled */
221};
222
195typedef struct drm_i915_private { 223typedef struct drm_i915_private {
196 struct drm_device *dev; 224 struct drm_device *dev;
197 225
@@ -452,6 +480,7 @@ typedef struct drm_i915_private {
452 u32 savePIPEB_DATA_N1; 480 u32 savePIPEB_DATA_N1;
453 u32 savePIPEB_LINK_M1; 481 u32 savePIPEB_LINK_M1;
454 u32 savePIPEB_LINK_N1; 482 u32 savePIPEB_LINK_N1;
483 u32 saveMCHBAR_RENDER_STANDBY;
455 484
456 struct { 485 struct {
457 struct drm_mm gtt_space; 486 struct drm_mm gtt_space;
@@ -590,6 +619,14 @@ typedef struct drm_i915_private {
590 int child_dev_num; 619 int child_dev_num;
591 struct child_device_config *child_dev; 620 struct child_device_config *child_dev;
592 struct drm_connector *int_lvds_connector; 621 struct drm_connector *int_lvds_connector;
622
623 bool mchbar_need_disable;
624
625 u8 cur_delay;
626 u8 min_delay;
627 u8 max_delay;
628
629 enum no_fbc_reason no_fbc_reason;
593} drm_i915_private_t; 630} drm_i915_private_t;
594 631
595/** driver private structure attached to each drm_gem_object */ 632/** driver private structure attached to each drm_gem_object */
@@ -736,6 +773,8 @@ extern unsigned int i915_fbpercrtc;
736extern unsigned int i915_powersave; 773extern unsigned int i915_powersave;
737extern unsigned int i915_lvds_downclock; 774extern unsigned int i915_lvds_downclock;
738 775
776extern int i915_suspend(struct drm_device *dev, pm_message_t state);
777extern int i915_resume(struct drm_device *dev);
 extern void i915_save_display(struct drm_device *dev);
 extern void i915_restore_display(struct drm_device *dev);
 extern int i915_master_create(struct drm_device *dev, struct drm_master *master);
@@ -761,6 +800,7 @@ extern int i965_reset(struct drm_device *dev, u8 flags);
 
 /* i915_irq.c */
 void i915_hangcheck_elapsed(unsigned long data);
+void i915_destroy_error_state(struct drm_device *dev);
 extern int i915_irq_emit(struct drm_device *dev, void *data,
                          struct drm_file *file_priv);
 extern int i915_irq_wait(struct drm_device *dev, void *data,
@@ -897,7 +937,8 @@ void i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj);
 void i915_gem_object_save_bit_17_swizzle(struct drm_gem_object *obj);
 bool i915_tiling_ok(struct drm_device *dev, int stride, int size,
                     int tiling_mode);
-bool i915_obj_fenceable(struct drm_device *dev, struct drm_gem_object *obj);
+bool i915_gem_object_fence_offset_ok(struct drm_gem_object *obj,
+                                     int tiling_mode);
 
 /* i915_gem_debug.c */
 void i915_gem_dump_object(struct drm_gem_object *obj, int len,
@@ -1026,7 +1067,7 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
 #define IS_845G(dev) ((dev)->pci_device == 0x2562)
 #define IS_I85X(dev) ((dev)->pci_device == 0x3582)
 #define IS_I865G(dev) ((dev)->pci_device == 0x2572)
-#define IS_I8XX(dev) (INTEL_INFO(dev)->is_i8xx)
+#define IS_GEN2(dev) (INTEL_INFO(dev)->is_i8xx)
 #define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g)
 #define IS_I915GM(dev) ((dev)->pci_device == 0x2592)
 #define IS_I945G(dev) ((dev)->pci_device == 0x2772)
@@ -1045,8 +1086,29 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
 #define IS_I9XX(dev) (INTEL_INFO(dev)->is_i9xx)
 #define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile)
 
+#define IS_GEN3(dev) (IS_I915G(dev) || \
+                      IS_I915GM(dev) || \
+                      IS_I945G(dev) || \
+                      IS_I945GM(dev) || \
+                      IS_G33(dev) || \
+                      IS_PINEVIEW(dev))
+#define IS_GEN4(dev) ((dev)->pci_device == 0x2972 || \
+                      (dev)->pci_device == 0x2982 || \
+                      (dev)->pci_device == 0x2992 || \
+                      (dev)->pci_device == 0x29A2 || \
+                      (dev)->pci_device == 0x2A02 || \
+                      (dev)->pci_device == 0x2A12 || \
+                      (dev)->pci_device == 0x2E02 || \
+                      (dev)->pci_device == 0x2E12 || \
+                      (dev)->pci_device == 0x2E22 || \
+                      (dev)->pci_device == 0x2E32 || \
+                      (dev)->pci_device == 0x2A42 || \
+                      (dev)->pci_device == 0x2E42)
+
 #define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)
 
+#define IS_GEN6(dev) ((dev)->pci_device == 0x0102)
+
 /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
  * rows, which changed the alignment requirements and fence programming.
  */
@@ -1067,6 +1129,9 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
 #define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)
 #define I915_HAS_RC6(dev) (INTEL_INFO(dev)->has_rc6)
 
+#define HAS_PCH_SPLIT(dev) (IS_IRONLAKE(dev) || \
+                            IS_GEN6(dev))
+
 #define PRIMARY_RINGBUFFER_SIZE (128*1024)
 
 #endif
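The header hunks above replace ad-hoc chipset checks with generation macros: IS_GEN2 reuses the old is_i8xx flag, IS_GEN3/IS_GEN4 enumerate known device IDs, and IS_GEN6 at this point matches a single Sandybridge PCI ID (0x0102). A rough standalone sketch of the same ID-to-generation mapping follows; the table is copied from the macros, but intel_gen_from_pci_id is a hypothetical helper, not a driver function:

#include <stdio.h>

/* Hypothetical stand-in for the driver's generation tests: maps a PCI
 * device ID to a GPU generation the way IS_GEN4()/IS_GEN6() do above.
 * The ID list mirrors the macros; anything else returns 0 (unknown). */
static int intel_gen_from_pci_id(unsigned short id)
{
    static const unsigned short gen4_ids[] = {
        0x2972, 0x2982, 0x2992, 0x29A2, 0x2A02, 0x2A12,
        0x2E02, 0x2E12, 0x2E22, 0x2E32, 0x2A42, 0x2E42,
    };
    unsigned int i;

    if (id == 0x0102)           /* the one ID IS_GEN6() knows about */
        return 6;
    for (i = 0; i < sizeof(gen4_ids) / sizeof(gen4_ids[0]); i++)
        if (id == gen4_ids[i])  /* IS_GEN4() above */
            return 4;
    return 0;
}

int main(void)
{
    printf("0x2A42 -> gen %d\n", intel_gen_from_pci_id(0x2A42));
    printf("0x0102 -> gen %d\n", intel_gen_from_pci_id(0x0102));
    return 0;
}

Keying features off a generation rather than individual IDs is what lets HAS_PCH_SPLIT() cover both Ironlake and Gen6 with one test.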
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index ec8a0d7ffa39..fba37e9f775d 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -128,9 +128,7 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data,
         return -ENOMEM;
 
     ret = drm_gem_handle_create(file_priv, obj, &handle);
-    mutex_lock(&dev->struct_mutex);
-    drm_gem_object_handle_unreference(obj);
-    mutex_unlock(&dev->struct_mutex);
+    drm_gem_object_handle_unreference_unlocked(obj);
 
     if (ret)
         return ret;
@@ -488,7 +486,7 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
      */
     if (args->offset > obj->size || args->size > obj->size ||
         args->offset + args->size > obj->size) {
-        drm_gem_object_unreference(obj);
+        drm_gem_object_unreference_unlocked(obj);
         return -EINVAL;
     }
 
@@ -501,7 +499,7 @@
                 file_priv);
     }
 
-    drm_gem_object_unreference(obj);
+    drm_gem_object_unreference_unlocked(obj);
 
     return ret;
 }
@@ -961,7 +959,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
      */
     if (args->offset > obj->size || args->size > obj->size ||
         args->offset + args->size > obj->size) {
-        drm_gem_object_unreference(obj);
+        drm_gem_object_unreference_unlocked(obj);
         return -EINVAL;
     }
 
@@ -995,7 +993,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
         DRM_INFO("pwrite failed %d\n", ret);
 #endif
 
-    drm_gem_object_unreference(obj);
+    drm_gem_object_unreference_unlocked(obj);
 
     return ret;
 }
@@ -1138,9 +1136,7 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
                    PROT_READ | PROT_WRITE, MAP_SHARED,
                    args->offset);
     up_write(&current->mm->mmap_sem);
-    mutex_lock(&dev->struct_mutex);
-    drm_gem_object_unreference(obj);
-    mutex_unlock(&dev->struct_mutex);
+    drm_gem_object_unreference_unlocked(obj);
     if (IS_ERR((void *)addr))
         return addr;
 
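Each hunk above collapses a mutex_lock/drm_gem_object_unreference/mutex_unlock triple into a single drm_gem_object_unreference_unlocked() call. A minimal sketch of why the two spellings are equivalent, using pthreads and invented obj/dev types; this models the locking contract only, not the DRM API:

#include <pthread.h>
#include <stdlib.h>

struct dev { pthread_mutex_t struct_mutex; };
struct obj { struct dev *dev; int refcount; };

/* Old calling convention: the caller already holds dev->struct_mutex. */
static void obj_unreference(struct obj *o)
{
    if (--o->refcount == 0)
        free(o);
}

/* New convention: the helper folds the lock/unlock pair in, as
 * drm_gem_object_unreference_unlocked() does for the ioctls above. */
static void obj_unreference_unlocked(struct obj *o)
{
    struct dev *d = o->dev;     /* read before o may be freed */

    pthread_mutex_lock(&d->struct_mutex);
    obj_unreference(o);
    pthread_mutex_unlock(&d->struct_mutex);
}

int main(void)
{
    struct dev d;
    struct obj *o = malloc(sizeof(*o));

    pthread_mutex_init(&d.struct_mutex, NULL);
    o->dev = &d;
    o->refcount = 1;
    obj_unreference_unlocked(o);    /* takes the lock, frees o */
    return 0;
}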
@@ -1562,6 +1558,38 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
     i915_verify_inactive(dev, __FILE__, __LINE__);
 }
 
+static void
+i915_gem_process_flushing_list(struct drm_device *dev,
+                               uint32_t flush_domains, uint32_t seqno)
+{
+    drm_i915_private_t *dev_priv = dev->dev_private;
+    struct drm_i915_gem_object *obj_priv, *next;
+
+    list_for_each_entry_safe(obj_priv, next,
+                             &dev_priv->mm.gpu_write_list,
+                             gpu_write_list) {
+        struct drm_gem_object *obj = obj_priv->obj;
+
+        if ((obj->write_domain & flush_domains) ==
+            obj->write_domain) {
+            uint32_t old_write_domain = obj->write_domain;
+
+            obj->write_domain = 0;
+            list_del_init(&obj_priv->gpu_write_list);
+            i915_gem_object_move_to_active(obj, seqno);
+
+            /* update the fence lru list */
+            if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
+                list_move_tail(&obj_priv->fence_list,
+                               &dev_priv->mm.fence_list);
+
+            trace_i915_gem_object_change_domain(obj,
+                                                obj->read_domains,
+                                                old_write_domain);
+        }
+    }
+}
+
 /**
  * Creates a new sequence number, emitting a write of it to the status page
  * plus an interrupt, which will trigger i915_user_interrupt_handler.
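The new i915_gem_process_flushing_list() walks the gpu_write_list with list_for_each_entry_safe() because each matching entry is unlinked mid-walk. A toy model of that idiom on a singly linked list; the types are illustrative, and freeing stands in for moving the entry to another list:

#include <stdlib.h>

struct node { unsigned write_domain; struct node *next; };

/* Sketch of the list_for_each_entry_safe() pattern: the next pointer is
 * cached before the body runs, so the current entry can be unlinked
 * (cf. list_del_init()) without losing our place in the walk. */
static void process_flushing(struct node **head, unsigned flush_domains)
{
    struct node **prev = head, *n, *next;

    for (n = *head; n != NULL; n = next) {
        next = n->next;     /* saved up front, like the _safe macro */
        if ((n->write_domain & flush_domains) == n->write_domain) {
            *prev = next;           /* unlink from this list */
            n->write_domain = 0;
            free(n);                /* stand-in for move-to-active */
        } else {
            prev = &n->next;
        }
    }
}

int main(void)
{
    struct node *a = malloc(sizeof(*a)), *b = malloc(sizeof(*b));

    a->write_domain = 0x2; a->next = b;
    b->write_domain = 0x4; b->next = NULL;
    process_flushing(&a, 0x2);  /* frees the first node, keeps the second */
    free(a);                    /* a now points at the surviving node */
    return 0;
}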
@@ -1620,29 +1648,8 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
     /* Associate any objects on the flushing list matching the write
      * domain we're flushing with our flush.
      */
-    if (flush_domains != 0) {
-        struct drm_i915_gem_object *obj_priv, *next;
-
-        list_for_each_entry_safe(obj_priv, next,
-                                 &dev_priv->mm.gpu_write_list,
-                                 gpu_write_list) {
-            struct drm_gem_object *obj = obj_priv->obj;
-
-            if ((obj->write_domain & flush_domains) ==
-                obj->write_domain) {
-                uint32_t old_write_domain = obj->write_domain;
-
-                obj->write_domain = 0;
-                list_del_init(&obj_priv->gpu_write_list);
-                i915_gem_object_move_to_active(obj, seqno);
-
-                trace_i915_gem_object_change_domain(obj,
-                                                    obj->read_domains,
-                                                    old_write_domain);
-            }
-        }
-
-    }
+    if (flush_domains != 0)
+        i915_gem_process_flushing_list(dev, flush_domains, seqno);
 
     if (!dev_priv->mm.suspended) {
         mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
@@ -1822,7 +1829,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible)
         return -EIO;
 
     if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) {
-        if (IS_IRONLAKE(dev))
+        if (HAS_PCH_SPLIT(dev))
             ier = I915_READ(DEIER) | I915_READ(GTIER);
         else
             ier = I915_READ(IER);
@@ -1991,6 +1998,7 @@ int
 i915_gem_object_unbind(struct drm_gem_object *obj)
 {
     struct drm_device *dev = obj->dev;
+    drm_i915_private_t *dev_priv = dev->dev_private;
     struct drm_i915_gem_object *obj_priv = obj->driver_private;
     int ret = 0;
 
@@ -2046,8 +2054,10 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
     }
 
     /* Remove ourselves from the LRU list if present. */
+    spin_lock(&dev_priv->mm.active_list_lock);
     if (!list_empty(&obj_priv->list))
         list_del_init(&obj_priv->list);
+    spin_unlock(&dev_priv->mm.active_list_lock);
 
     if (i915_gem_object_is_purgeable(obj_priv))
         i915_gem_object_truncate(obj);
@@ -2085,11 +2095,34 @@ i915_gem_find_inactive_object(struct drm_device *dev, int min_size)
 }
 
 static int
+i915_gpu_idle(struct drm_device *dev)
+{
+    drm_i915_private_t *dev_priv = dev->dev_private;
+    bool lists_empty;
+    uint32_t seqno;
+
+    spin_lock(&dev_priv->mm.active_list_lock);
+    lists_empty = list_empty(&dev_priv->mm.flushing_list) &&
+                  list_empty(&dev_priv->mm.active_list);
+    spin_unlock(&dev_priv->mm.active_list_lock);
+
+    if (lists_empty)
+        return 0;
+
+    /* Flush everything onto the inactive list. */
+    i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+    seqno = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS);
+    if (seqno == 0)
+        return -ENOMEM;
+
+    return i915_wait_request(dev, seqno);
+}
+
+static int
 i915_gem_evict_everything(struct drm_device *dev)
 {
     drm_i915_private_t *dev_priv = dev->dev_private;
     int ret;
-    uint32_t seqno;
     bool lists_empty;
 
     spin_lock(&dev_priv->mm.active_list_lock);
@@ -2102,12 +2135,7 @@ i915_gem_evict_everything(struct drm_device *dev)
         return -ENOSPC;
 
     /* Flush everything (on to the inactive lists) and evict */
-    i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
-    seqno = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS);
-    if (seqno == 0)
-        return -ENOMEM;
-
-    ret = i915_wait_request(dev, seqno);
+    ret = i915_gpu_idle(dev);
     if (ret)
         return ret;
 
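i915_gpu_idle() now owns the quiesce sequence that i915_gem_evict_everything() used to open-code: flush all GPU domains, emit a request to obtain a seqno, then wait for that seqno to retire. A stub-level sketch of the control flow; the three helpers merely stand in for i915_gem_flush/i915_add_request/i915_wait_request:

/* Toy model of the sequence factored into i915_gpu_idle() above. */
static unsigned int hw_seqno;

static void gpu_flush(void)             { /* emit a GPU cache flush */ }
static unsigned int add_request(void)   { return ++hw_seqno; }
static int wait_request(unsigned int s) { (void)s; return 0; /* block */ }

static int gpu_idle(int lists_empty)
{
    unsigned int seqno;

    if (lists_empty)
        return 0;       /* nothing in flight, already idle */

    gpu_flush();
    seqno = add_request();
    if (seqno == 0)
        return -1;      /* -ENOMEM in the driver */
    return wait_request(seqno);
}

int main(void)
{
    return gpu_idle(0); /* work in flight: flush, request, wait */
}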
@@ -2265,6 +2293,28 @@ i915_gem_object_get_pages(struct drm_gem_object *obj,
     return 0;
 }
 
+static void sandybridge_write_fence_reg(struct drm_i915_fence_reg *reg)
+{
+    struct drm_gem_object *obj = reg->obj;
+    struct drm_device *dev = obj->dev;
+    drm_i915_private_t *dev_priv = dev->dev_private;
+    struct drm_i915_gem_object *obj_priv = obj->driver_private;
+    int regnum = obj_priv->fence_reg;
+    uint64_t val;
+
+    val = (uint64_t)((obj_priv->gtt_offset + obj->size - 4096) &
+                     0xfffff000) << 32;
+    val |= obj_priv->gtt_offset & 0xfffff000;
+    val |= (uint64_t)((obj_priv->stride / 128) - 1) <<
+           SANDYBRIDGE_FENCE_PITCH_SHIFT;
+
+    if (obj_priv->tiling_mode == I915_TILING_Y)
+        val |= 1 << I965_FENCE_TILING_Y_SHIFT;
+    val |= I965_FENCE_REG_VALID;
+
+    I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (regnum * 8), val);
+}
+
 static void i965_write_fence_reg(struct drm_i915_fence_reg *reg)
 {
     struct drm_gem_object *obj = reg->obj;
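sandybridge_write_fence_reg() packs the whole fence into one 64-bit write: the object's last page in the high bits, the pitch in 128-byte units minus one starting at bit 32 (SANDYBRIDGE_FENCE_PITCH_SHIFT, defined as 32 in the i915_reg.h hunk further below), the start page in bits 31:12, plus Y-tiling and valid bits. A pure-function model of the encoding, with the constants inlined for illustration:

#include <stdint.h>
#include <stdio.h>

#define SNB_FENCE_PITCH_SHIFT 32        /* SANDYBRIDGE_FENCE_PITCH_SHIFT */
#define FENCE_TILING_Y (1ull << 1)      /* I965_FENCE_TILING_Y_SHIFT */
#define FENCE_VALID    (1ull << 0)      /* I965_FENCE_REG_VALID */

/* Model of the value built above. Because the end address is masked to
 * 0xfffff000 before the shift, its low bits leave room for the pitch
 * field at bit 32; start address sits in bits 31:12. */
static uint64_t snb_fence_value(uint32_t gtt_offset, uint32_t size,
                                uint32_t stride, int tiling_y)
{
    uint64_t val;

    val  = (uint64_t)((gtt_offset + size - 4096) & 0xfffff000) << 32;
    val |= gtt_offset & 0xfffff000;
    val |= (uint64_t)(stride / 128 - 1) << SNB_FENCE_PITCH_SHIFT;
    if (tiling_y)
        val |= FENCE_TILING_Y;
    return val | FENCE_VALID;
}

int main(void)
{
    /* a 64KiB object at GTT offset 1MiB with a 512-byte Y-tiled pitch */
    printf("0x%016llx\n",
           (unsigned long long)snb_fence_value(0x100000, 0x10000, 512, 1));
    return 0;
}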
@@ -2361,6 +2411,58 @@ static void i830_write_fence_reg(struct drm_i915_fence_reg *reg)
     I915_WRITE(FENCE_REG_830_0 + (regnum * 4), val);
 }
 
+static int i915_find_fence_reg(struct drm_device *dev)
+{
+    struct drm_i915_fence_reg *reg = NULL;
+    struct drm_i915_gem_object *obj_priv = NULL;
+    struct drm_i915_private *dev_priv = dev->dev_private;
+    struct drm_gem_object *obj = NULL;
+    int i, avail, ret;
+
+    /* First try to find a free reg */
+    avail = 0;
+    for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
+        reg = &dev_priv->fence_regs[i];
+        if (!reg->obj)
+            return i;
+
+        obj_priv = reg->obj->driver_private;
+        if (!obj_priv->pin_count)
+            avail++;
+    }
+
+    if (avail == 0)
+        return -ENOSPC;
+
+    /* None available, try to steal one or wait for a user to finish */
+    i = I915_FENCE_REG_NONE;
+    list_for_each_entry(obj_priv, &dev_priv->mm.fence_list,
+                        fence_list) {
+        obj = obj_priv->obj;
+
+        if (obj_priv->pin_count)
+            continue;
+
+        /* found one! */
+        i = obj_priv->fence_reg;
+        break;
+    }
+
+    BUG_ON(i == I915_FENCE_REG_NONE);
+
+    /* We only have a reference on obj from the active list. put_fence_reg
+     * might drop that one, causing a use-after-free in it. So hold a
+     * private reference to obj like the other callers of put_fence_reg
+     * (set_tiling ioctl) do. */
+    drm_gem_object_reference(obj);
+    ret = i915_gem_object_put_fence_reg(obj);
+    drm_gem_object_unreference(obj);
+    if (ret != 0)
+        return ret;
+
+    return i;
+}
+
 /**
  * i915_gem_object_get_fence_reg - set up a fence reg for an object
  * @obj: object to map through a fence reg
@@ -2381,8 +2483,7 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
     struct drm_i915_private *dev_priv = dev->dev_private;
     struct drm_i915_gem_object *obj_priv = obj->driver_private;
     struct drm_i915_fence_reg *reg = NULL;
-    struct drm_i915_gem_object *old_obj_priv = NULL;
-    int i, ret, avail;
+    int ret;
 
     /* Just update our place in the LRU if our fence is getting used. */
     if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
@@ -2410,86 +2511,27 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
         break;
     }
 
-    /* First try to find a free reg */
-    avail = 0;
-    for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
-        reg = &dev_priv->fence_regs[i];
-        if (!reg->obj)
-            break;
-
-        old_obj_priv = reg->obj->driver_private;
-        if (!old_obj_priv->pin_count)
-            avail++;
-    }
-
-    /* None available, try to steal one or wait for a user to finish */
-    if (i == dev_priv->num_fence_regs) {
-        struct drm_gem_object *old_obj = NULL;
-
-        if (avail == 0)
-            return -ENOSPC;
-
-        list_for_each_entry(old_obj_priv, &dev_priv->mm.fence_list,
-                            fence_list) {
-            old_obj = old_obj_priv->obj;
-
-            if (old_obj_priv->pin_count)
-                continue;
-
-            /* Take a reference, as otherwise the wait_rendering
-             * below may cause the object to get freed out from
-             * under us.
-             */
-            drm_gem_object_reference(old_obj);
-
-            /* i915 uses fences for GPU access to tiled buffers */
-            if (IS_I965G(dev) || !old_obj_priv->active)
-                break;
-
-            /* This brings the object to the head of the LRU if it
-             * had been written to. The only way this should
-             * result in us waiting longer than the expected
-             * optimal amount of time is if there was a
-             * fence-using buffer later that was read-only.
-             */
-            i915_gem_object_flush_gpu_write_domain(old_obj);
-            ret = i915_gem_object_wait_rendering(old_obj);
-            if (ret != 0) {
-                drm_gem_object_unreference(old_obj);
-                return ret;
-            }
-
-            break;
-        }
-
-        /*
-         * Zap this virtual mapping so we can set up a fence again
-         * for this object next time we need it.
-         */
-        i915_gem_release_mmap(old_obj);
-
-        i = old_obj_priv->fence_reg;
-        reg = &dev_priv->fence_regs[i];
-
-        old_obj_priv->fence_reg = I915_FENCE_REG_NONE;
-        list_del_init(&old_obj_priv->fence_list);
-
-        drm_gem_object_unreference(old_obj);
-    }
+    ret = i915_find_fence_reg(dev);
+    if (ret < 0)
+        return ret;
 
-    obj_priv->fence_reg = i;
+    obj_priv->fence_reg = ret;
+    reg = &dev_priv->fence_regs[obj_priv->fence_reg];
     list_add_tail(&obj_priv->fence_list, &dev_priv->mm.fence_list);
 
     reg->obj = obj;
 
-    if (IS_I965G(dev))
+    if (IS_GEN6(dev))
+        sandybridge_write_fence_reg(reg);
+    else if (IS_I965G(dev))
         i965_write_fence_reg(reg);
     else if (IS_I9XX(dev))
         i915_write_fence_reg(reg);
     else
         i830_write_fence_reg(reg);
 
-    trace_i915_gem_object_get_fence(obj, i, obj_priv->tiling_mode);
+    trace_i915_gem_object_get_fence(obj, obj_priv->fence_reg,
+                                    obj_priv->tiling_mode);
 
     return 0;
 }
@@ -2508,9 +2550,12 @@ i915_gem_clear_fence_reg(struct drm_gem_object *obj)
     drm_i915_private_t *dev_priv = dev->dev_private;
     struct drm_i915_gem_object *obj_priv = obj->driver_private;
 
-    if (IS_I965G(dev))
+    if (IS_GEN6(dev)) {
+        I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 +
+                     (obj_priv->fence_reg * 8), 0);
+    } else if (IS_I965G(dev)) {
         I915_WRITE64(FENCE_REG_965_0 + (obj_priv->fence_reg * 8), 0);
-    else {
+    } else {
         uint32_t fence_reg;
 
         if (obj_priv->fence_reg < 8)
@@ -2544,6 +2589,12 @@ i915_gem_object_put_fence_reg(struct drm_gem_object *obj)
     if (obj_priv->fence_reg == I915_FENCE_REG_NONE)
         return 0;
 
+    /* If we've changed tiling, GTT-mappings of the object
+     * need to re-fault to ensure that the correct fence register
+     * setup is in place.
+     */
+    i915_gem_release_mmap(obj);
+
     /* On the i915, GPU access to tiled buffers is via a fence,
      * therefore we must wait for any outstanding access to complete
      * before clearing the fence.
@@ -2552,12 +2603,12 @@ i915_gem_object_put_fence_reg(struct drm_gem_object *obj)
         int ret;
 
         i915_gem_object_flush_gpu_write_domain(obj);
-        i915_gem_object_flush_gtt_write_domain(obj);
         ret = i915_gem_object_wait_rendering(obj);
         if (ret != 0)
             return ret;
     }
 
+    i915_gem_object_flush_gtt_write_domain(obj);
     i915_gem_clear_fence_reg (obj);
 
     return 0;
@@ -2697,7 +2748,6 @@ static void
 i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
 {
     struct drm_device *dev = obj->dev;
-    uint32_t seqno;
     uint32_t old_write_domain;
 
     if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
@@ -2706,9 +2756,8 @@ i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
     /* Queue the GPU write cache flushing we need. */
     old_write_domain = obj->write_domain;
     i915_gem_flush(dev, 0, obj->write_domain);
-    seqno = i915_add_request(dev, NULL, obj->write_domain);
+    (void) i915_add_request(dev, NULL, obj->write_domain);
     BUG_ON(obj->write_domain);
-    i915_gem_object_move_to_active(obj, seqno);
 
     trace_i915_gem_object_change_domain(obj,
                                         obj->read_domains,
@@ -3247,7 +3296,8 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
         obj_priv->tiling_mode != I915_TILING_NONE;
 
     /* Check fence reg constraints and rebind if necessary */
-    if (need_fence && !i915_obj_fenceable(dev, obj))
+    if (need_fence && !i915_gem_object_fence_offset_ok(obj,
+                                                       obj_priv->tiling_mode))
         i915_gem_object_unbind(obj);
 
     /* Choose the GTT offset for our buffer and put it there. */
@@ -3317,6 +3367,16 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
     }
 
     /* Validate that the target is in a valid r/w GPU domain */
+    if (reloc->write_domain & (reloc->write_domain - 1)) {
+        DRM_ERROR("reloc with multiple write domains: "
+                  "obj %p target %d offset %d "
+                  "read %08x write %08x",
+                  obj, reloc->target_handle,
+                  (int) reloc->offset,
+                  reloc->read_domains,
+                  reloc->write_domain);
+        return -EINVAL;
+    }
     if (reloc->write_domain & I915_GEM_DOMAIN_CPU ||
         reloc->read_domains & I915_GEM_DOMAIN_CPU) {
         DRM_ERROR("reloc with read/write CPU domains: "
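The added validation rejects relocations that name more than one write domain using the classic clear-lowest-set-bit test: x & (x - 1) is zero exactly when x has at most one bit set. In isolation:

#include <stdio.h>
#include <stdint.h>

/* Clearing the lowest set bit leaves zero iff at most one bit was set,
 * i.e. at most one write domain was requested. */
static int has_multiple_bits(uint32_t write_domain)
{
    return (write_domain & (write_domain - 1)) != 0;
}

int main(void)
{
    printf("%d %d %d\n",
           has_multiple_bits(0x0),   /* no write domain: ok   -> 0 */
           has_multiple_bits(0x2),   /* one domain: ok        -> 0 */
           has_multiple_bits(0x6));  /* two domains: rejected -> 1 */
    return 0;
}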
@@ -4445,8 +4505,7 @@ int
 i915_gem_idle(struct drm_device *dev)
 {
     drm_i915_private_t *dev_priv = dev->dev_private;
-    uint32_t seqno, cur_seqno, last_seqno;
-    int stuck, ret;
+    int ret;
 
     mutex_lock(&dev->struct_mutex);
 
@@ -4455,115 +4514,36 @@ i915_gem_idle(struct drm_device *dev)
         return 0;
     }
 
-    /* Hack! Don't let anybody do execbuf while we don't control the chip.
-     * We need to replace this with a semaphore, or something.
-     */
-    dev_priv->mm.suspended = 1;
-    del_timer(&dev_priv->hangcheck_timer);
-
-    /* Cancel the retire work handler, wait for it to finish if running
-     */
-    mutex_unlock(&dev->struct_mutex);
-    cancel_delayed_work_sync(&dev_priv->mm.retire_work);
-    mutex_lock(&dev->struct_mutex);
-
-    i915_kernel_lost_context(dev);
-
-    /* Flush the GPU along with all non-CPU write domains
-     */
-    i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
-    seqno = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS);
-
-    if (seqno == 0) {
+    ret = i915_gpu_idle(dev);
+    if (ret) {
         mutex_unlock(&dev->struct_mutex);
-        return -ENOMEM;
+        return ret;
     }
 
-    dev_priv->mm.waiting_gem_seqno = seqno;
-    last_seqno = 0;
-    stuck = 0;
-    for (;;) {
-        cur_seqno = i915_get_gem_seqno(dev);
-        if (i915_seqno_passed(cur_seqno, seqno))
-            break;
-        if (last_seqno == cur_seqno) {
-            if (stuck++ > 100) {
-                DRM_ERROR("hardware wedged\n");
-                atomic_set(&dev_priv->mm.wedged, 1);
-                DRM_WAKEUP(&dev_priv->irq_queue);
-                break;
-            }
+    /* Under UMS, be paranoid and evict. */
+    if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
+        ret = i915_gem_evict_from_inactive_list(dev);
+        if (ret) {
+            mutex_unlock(&dev->struct_mutex);
+            return ret;
         }
-        msleep(10);
-        last_seqno = cur_seqno;
-    }
-    dev_priv->mm.waiting_gem_seqno = 0;
-
-    i915_gem_retire_requests(dev);
-
-    spin_lock(&dev_priv->mm.active_list_lock);
-    if (!atomic_read(&dev_priv->mm.wedged)) {
-        /* Active and flushing should now be empty as we've
-         * waited for a sequence higher than any pending execbuffer
-         */
-        WARN_ON(!list_empty(&dev_priv->mm.active_list));
-        WARN_ON(!list_empty(&dev_priv->mm.flushing_list));
-        /* Request should now be empty as we've also waited
-         * for the last request in the list
-         */
-        WARN_ON(!list_empty(&dev_priv->mm.request_list));
     }
 
-    /* Empty the active and flushing lists to inactive. If there's
-     * anything left at this point, it means that we're wedged and
-     * nothing good's going to happen by leaving them there. So strip
-     * the GPU domains and just stuff them onto inactive.
+    /* Hack! Don't let anybody do execbuf while we don't control the chip.
+     * We need to replace this with a semaphore, or something.
+     * And not confound mm.suspended!
      */
-    while (!list_empty(&dev_priv->mm.active_list)) {
-        struct drm_gem_object *obj;
-        uint32_t old_write_domain;
-
-        obj = list_first_entry(&dev_priv->mm.active_list,
-                               struct drm_i915_gem_object,
-                               list)->obj;
-        old_write_domain = obj->write_domain;
-        obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
-        i915_gem_object_move_to_inactive(obj);
-
-        trace_i915_gem_object_change_domain(obj,
-                                            obj->read_domains,
-                                            old_write_domain);
-    }
-    spin_unlock(&dev_priv->mm.active_list_lock);
-
-    while (!list_empty(&dev_priv->mm.flushing_list)) {
-        struct drm_gem_object *obj;
-        uint32_t old_write_domain;
-
-        obj = list_first_entry(&dev_priv->mm.flushing_list,
-                               struct drm_i915_gem_object,
-                               list)->obj;
-        old_write_domain = obj->write_domain;
-        obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
-        i915_gem_object_move_to_inactive(obj);
-
-        trace_i915_gem_object_change_domain(obj,
-                                            obj->read_domains,
-                                            old_write_domain);
-    }
-
-
-    /* Move all inactive buffers out of the GTT. */
-    ret = i915_gem_evict_from_inactive_list(dev);
-    WARN_ON(!list_empty(&dev_priv->mm.inactive_list));
-    if (ret) {
-        mutex_unlock(&dev->struct_mutex);
-        return ret;
-    }
+    dev_priv->mm.suspended = 1;
+    del_timer(&dev_priv->hangcheck_timer);
 
+    i915_kernel_lost_context(dev);
     i915_gem_cleanup_ringbuffer(dev);
+
     mutex_unlock(&dev->struct_mutex);
 
+    /* Cancel the retire work handler, which should be idle now. */
+    cancel_delayed_work_sync(&dev_priv->mm.retire_work);
+
     return 0;
 }
 
@@ -4607,8 +4587,13 @@ i915_gem_init_hws(struct drm_device *dev)
     }
     dev_priv->hws_obj = obj;
     memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
-    I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
-    I915_READ(HWS_PGA); /* posting read */
+    if (IS_GEN6(dev)) {
+        I915_WRITE(HWS_PGA_GEN6, dev_priv->status_gfx_addr);
+        I915_READ(HWS_PGA_GEN6); /* posting read */
+    } else {
+        I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
+        I915_READ(HWS_PGA); /* posting read */
+    }
     DRM_DEBUG_DRIVER("hws offset: 0x%08x\n", dev_priv->status_gfx_addr);
 
     return 0;
@@ -4850,7 +4835,8 @@ i915_gem_load(struct drm_device *dev)
     spin_unlock(&shrink_list_lock);
 
     /* Old X drivers will take 0-2 for front, back, depth buffers */
-    dev_priv->fence_reg_start = 3;
+    if (!drm_core_check_feature(dev, DRIVER_MODESET))
+        dev_priv->fence_reg_start = 3;
 
     if (IS_I965G(dev) || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
         dev_priv->num_fence_regs = 16;
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index df278b2685bf..b5c55d88ff76 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -25,8 +25,6 @@
  *
  */
 
-#include <linux/acpi.h>
-#include <linux/pnp.h>
 #include "linux/string.h"
 #include "linux/bitops.h"
 #include "drmP.h"
@@ -83,120 +81,6 @@
  * to match what the GPU expects.
  */
 
-#define MCHBAR_I915 0x44
-#define MCHBAR_I965 0x48
-#define MCHBAR_SIZE (4*4096)
-
-#define DEVEN_REG 0x54
-#define DEVEN_MCHBAR_EN (1 << 28)
-
-/* Allocate space for the MCH regs if needed, return nonzero on error */
-static int
-intel_alloc_mchbar_resource(struct drm_device *dev)
-{
-    drm_i915_private_t *dev_priv = dev->dev_private;
-    int reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915;
-    u32 temp_lo, temp_hi = 0;
-    u64 mchbar_addr;
-    int ret = 0;
-
-    if (IS_I965G(dev))
-        pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
-    pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo);
-    mchbar_addr = ((u64)temp_hi << 32) | temp_lo;
-
-    /* If ACPI doesn't have it, assume we need to allocate it ourselves */
-#ifdef CONFIG_PNP
-    if (mchbar_addr &&
-        pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE)) {
-        ret = 0;
-        goto out;
-    }
-#endif
-
-    /* Get some space for it */
-    ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus, &dev_priv->mch_res,
-                                 MCHBAR_SIZE, MCHBAR_SIZE,
-                                 PCIBIOS_MIN_MEM,
-                                 0, pcibios_align_resource,
-                                 dev_priv->bridge_dev);
-    if (ret) {
-        DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret);
-        dev_priv->mch_res.start = 0;
-        goto out;
-    }
-
-    if (IS_I965G(dev))
-        pci_write_config_dword(dev_priv->bridge_dev, reg + 4,
-                               upper_32_bits(dev_priv->mch_res.start));
-
-    pci_write_config_dword(dev_priv->bridge_dev, reg,
-                           lower_32_bits(dev_priv->mch_res.start));
-out:
-    return ret;
-}
-
-/* Setup MCHBAR if possible, return true if we should disable it again */
-static bool
-intel_setup_mchbar(struct drm_device *dev)
-{
-    drm_i915_private_t *dev_priv = dev->dev_private;
-    int mchbar_reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915;
-    u32 temp;
-    bool need_disable = false, enabled;
-
-    if (IS_I915G(dev) || IS_I915GM(dev)) {
-        pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
-        enabled = !!(temp & DEVEN_MCHBAR_EN);
-    } else {
-        pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
-        enabled = temp & 1;
-    }
-
-    /* If it's already enabled, don't have to do anything */
-    if (enabled)
-        goto out;
-
-    if (intel_alloc_mchbar_resource(dev))
-        goto out;
-
-    need_disable = true;
-
-    /* Space is allocated or reserved, so enable it. */
-    if (IS_I915G(dev) || IS_I915GM(dev)) {
-        pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG,
-                               temp | DEVEN_MCHBAR_EN);
-    } else {
-        pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
-        pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1);
-    }
-out:
-    return need_disable;
-}
-
-static void
-intel_teardown_mchbar(struct drm_device *dev, bool disable)
-{
-    drm_i915_private_t *dev_priv = dev->dev_private;
-    int mchbar_reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915;
-    u32 temp;
-
-    if (disable) {
-        if (IS_I915G(dev) || IS_I915GM(dev)) {
-            pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
-            temp &= ~DEVEN_MCHBAR_EN;
-            pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG, temp);
-        } else {
-            pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
-            temp &= ~1;
-            pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp);
-        }
-    }
-
-    if (dev_priv->mch_res.start)
-        release_resource(&dev_priv->mch_res);
-}
-
 /**
  * Detects bit 6 swizzling of address lookup between IGD access and CPU
  * access through main memory.
@@ -207,9 +91,8 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
     drm_i915_private_t *dev_priv = dev->dev_private;
     uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
     uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
-    bool need_disable;
 
-    if (IS_IRONLAKE(dev)) {
+    if (IS_IRONLAKE(dev) || IS_GEN6(dev)) {
        /* On Ironlake whatever DRAM config, GPU always do
         * same swizzling setup.
         */
@@ -224,9 +107,6 @@
     } else if (IS_MOBILE(dev)) {
        uint32_t dcc;
 
-       /* Try to make sure MCHBAR is enabled before poking at it */
-       need_disable = intel_setup_mchbar(dev);
-
        /* On mobile 9xx chipsets, channel interleave by the CPU is
         * determined by DCC. For single-channel, neither the CPU
         * nor the GPU do swizzling. For dual channel interleaved,
@@ -266,8 +146,6 @@
            swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
            swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
        }
-
-       intel_teardown_mchbar(dev, need_disable);
     } else {
        /* The 965, G33, and newer, have a very flexible memory
         * configuration. It will enable dual-channel mode
@@ -302,39 +180,6 @@
     dev_priv->mm.bit_6_swizzle_y = swizzle_y;
 }
 
-
-/**
- * Returns whether an object is currently fenceable. If not, it may need
- * to be unbound and have its pitch adjusted.
- */
-bool
-i915_obj_fenceable(struct drm_device *dev, struct drm_gem_object *obj)
-{
-    struct drm_i915_gem_object *obj_priv = obj->driver_private;
-
-    if (IS_I965G(dev)) {
-        /* The 965 can have fences at any page boundary. */
-        if (obj->size & 4095)
-            return false;
-        return true;
-    } else if (IS_I9XX(dev)) {
-        if (obj_priv->gtt_offset & ~I915_FENCE_START_MASK)
-            return false;
-    } else {
-        if (obj_priv->gtt_offset & ~I830_FENCE_START_MASK)
-            return false;
-    }
-
-    /* Power of two sized... */
-    if (obj->size & (obj->size - 1))
-        return false;
-
-    /* Objects must be size aligned as well */
-    if (obj_priv->gtt_offset & (obj->size - 1))
-        return false;
-    return true;
-}
-
 /* Check pitch constraints for all chips & tiling formats */
 bool
 i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
@@ -391,7 +236,7 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
     return true;
 }
 
-static bool
+bool
 i915_gem_object_fence_offset_ok(struct drm_gem_object *obj, int tiling_mode)
 {
     struct drm_device *dev = obj->dev;
@@ -438,9 +283,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
     obj_priv = obj->driver_private;
 
     if (!i915_tiling_ok(dev, args->stride, obj->size, args->tiling_mode)) {
-        mutex_lock(&dev->struct_mutex);
-        drm_gem_object_unreference(obj);
-        mutex_unlock(&dev->struct_mutex);
+        drm_gem_object_unreference_unlocked(obj);
         return -EINVAL;
     }
 
@@ -493,12 +336,6 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
             goto err;
         }
 
-        /* If we've changed tiling, GTT-mappings of the object
-         * need to re-fault to ensure that the correct fence register
-         * setup is in place.
-         */
-        i915_gem_release_mmap(obj);
-
         obj_priv->tiling_mode = args->tiling_mode;
         obj_priv->stride = args->stride;
     }
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index a17d6bdfe63e..5388354da0d1 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -166,7 +166,7 @@ void intel_enable_asle (struct drm_device *dev)
 {
     drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 
-    if (IS_IRONLAKE(dev))
+    if (HAS_PCH_SPLIT(dev))
         ironlake_enable_display_irq(dev_priv, DE_GSE);
     else
         i915_enable_pipestat(dev_priv, 1,
@@ -269,6 +269,57 @@ static void i915_hotplug_work_func(struct work_struct *work)
         drm_sysfs_hotplug_event(dev);
 }
 
+static void i915_handle_rps_change(struct drm_device *dev)
+{
+    drm_i915_private_t *dev_priv = dev->dev_private;
+    u32 busy_up, busy_down, max_avg, min_avg;
+    u16 rgvswctl;
+    u8 new_delay = dev_priv->cur_delay;
+
+    I915_WRITE(MEMINTRSTS, I915_READ(MEMINTRSTS) & ~MEMINT_EVAL_CHG);
+    busy_up = I915_READ(RCPREVBSYTUPAVG);
+    busy_down = I915_READ(RCPREVBSYTDNAVG);
+    max_avg = I915_READ(RCBMAXAVG);
+    min_avg = I915_READ(RCBMINAVG);
+
+    /* Handle RCS change request from hw */
+    if (busy_up > max_avg) {
+        if (dev_priv->cur_delay != dev_priv->max_delay)
+            new_delay = dev_priv->cur_delay - 1;
+        if (new_delay < dev_priv->max_delay)
+            new_delay = dev_priv->max_delay;
+    } else if (busy_down < min_avg) {
+        if (dev_priv->cur_delay != dev_priv->min_delay)
+            new_delay = dev_priv->cur_delay + 1;
+        if (new_delay > dev_priv->min_delay)
+            new_delay = dev_priv->min_delay;
+    }
+
+    DRM_DEBUG("rps change requested: %d -> %d\n",
+              dev_priv->cur_delay, new_delay);
+
+    rgvswctl = I915_READ(MEMSWCTL);
+    if (rgvswctl & MEMCTL_CMD_STS) {
+        DRM_ERROR("gpu busy, RCS change rejected\n");
+        return; /* still busy with another command */
+    }
+
+    /* Program the new state */
+    rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
+               (new_delay << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
+    I915_WRITE(MEMSWCTL, rgvswctl);
+    POSTING_READ(MEMSWCTL);
+
+    rgvswctl |= MEMCTL_CMD_STS;
+    I915_WRITE(MEMSWCTL, rgvswctl);
+
+    dev_priv->cur_delay = new_delay;
+
+    DRM_DEBUG("rps changed\n");
+
+    return;
+}
+
 irqreturn_t ironlake_irq_handler(struct drm_device *dev)
 {
     drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
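i915_handle_rps_change() steps cur_delay one unit toward whatever the hardware's busy counters request. Note the scale appears inverted: judging by the comparisons, a numerically smaller delay means a higher frequency, so max_delay is the numeric floor and min_delay the ceiling (an inference from the code, not a documented contract). The clamp logic in isolation:

#include <stdio.h>

/* Model of the delay-step logic above; max_delay <= min_delay
 * numerically, per the comparisons in the handler. */
static unsigned char step_delay(unsigned char cur, unsigned char max_delay,
                                unsigned char min_delay, int busier)
{
    unsigned char new_delay = cur;

    if (busier) {               /* busy_up > max_avg: speed up */
        if (cur != max_delay)
            new_delay = cur - 1;
        if (new_delay < max_delay)
            new_delay = max_delay;
    } else {                    /* busy_down < min_avg: slow down */
        if (cur != min_delay)
            new_delay = cur + 1;
        if (new_delay > min_delay)
            new_delay = min_delay;
    }
    return new_delay;
}

int main(void)
{
    printf("%u\n", step_delay(5, 2, 9, 1));  /* -> 4, one step faster */
    return 0;
}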
@@ -331,6 +382,11 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev)
         queue_work(dev_priv->wq, &dev_priv->hotplug_work);
     }
 
+    if (de_iir & DE_PCU_EVENT) {
+        I915_WRITE(MEMINTRSTS, I915_READ(MEMINTRSTS));
+        i915_handle_rps_change(dev);
+    }
+
     /* should clear PCH hotplug event before clear CPU irq */
     I915_WRITE(SDEIIR, pch_iir);
     I915_WRITE(GTIIR, gt_iir);
@@ -376,6 +432,121 @@ static void i915_error_work_func(struct work_struct *work)
     }
 }
 
+static struct drm_i915_error_object *
+i915_error_object_create(struct drm_device *dev,
+                         struct drm_gem_object *src)
+{
+    struct drm_i915_error_object *dst;
+    struct drm_i915_gem_object *src_priv;
+    int page, page_count;
+
+    if (src == NULL)
+        return NULL;
+
+    src_priv = src->driver_private;
+    if (src_priv->pages == NULL)
+        return NULL;
+
+    page_count = src->size / PAGE_SIZE;
+
+    dst = kmalloc(sizeof(*dst) + page_count * sizeof (u32 *), GFP_ATOMIC);
+    if (dst == NULL)
+        return NULL;
+
+    for (page = 0; page < page_count; page++) {
+        void *s, *d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
+        if (d == NULL)
+            goto unwind;
+        s = kmap_atomic(src_priv->pages[page], KM_USER0);
+        memcpy(d, s, PAGE_SIZE);
+        kunmap_atomic(s, KM_USER0);
+        dst->pages[page] = d;
+    }
+    dst->page_count = page_count;
+    dst->gtt_offset = src_priv->gtt_offset;
+
+    return dst;
+
+unwind:
+    while (page--)
+        kfree(dst->pages[page]);
+    kfree(dst);
+    return NULL;
+}
+
+static void
+i915_error_object_free(struct drm_i915_error_object *obj)
+{
+    int page;
+
+    if (obj == NULL)
+        return;
+
+    for (page = 0; page < obj->page_count; page++)
+        kfree(obj->pages[page]);
+
+    kfree(obj);
+}
+
+static void
+i915_error_state_free(struct drm_device *dev,
+                      struct drm_i915_error_state *error)
+{
+    i915_error_object_free(error->batchbuffer[0]);
+    i915_error_object_free(error->batchbuffer[1]);
+    i915_error_object_free(error->ringbuffer);
+    kfree(error->active_bo);
+    kfree(error);
+}
+
+static u32
+i915_get_bbaddr(struct drm_device *dev, u32 *ring)
+{
+    u32 cmd;
+
+    if (IS_I830(dev) || IS_845G(dev))
+        cmd = MI_BATCH_BUFFER;
+    else if (IS_I965G(dev))
+        cmd = (MI_BATCH_BUFFER_START | (2 << 6) |
+               MI_BATCH_NON_SECURE_I965);
+    else
+        cmd = (MI_BATCH_BUFFER_START | (2 << 6));
+
+    return ring[0] == cmd ? ring[1] : 0;
+}
+
+static u32
+i915_ringbuffer_last_batch(struct drm_device *dev)
+{
+    struct drm_i915_private *dev_priv = dev->dev_private;
+    u32 head, bbaddr;
+    u32 *ring;
+
+    /* Locate the current position in the ringbuffer and walk back
+     * to find the most recently dispatched batch buffer.
+     */
+    bbaddr = 0;
+    head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
+    ring = (u32 *)(dev_priv->ring.virtual_start + head);
+
+    while (--ring >= (u32 *)dev_priv->ring.virtual_start) {
+        bbaddr = i915_get_bbaddr(dev, ring);
+        if (bbaddr)
+            break;
+    }
+
+    if (bbaddr == 0) {
+        ring = (u32 *)(dev_priv->ring.virtual_start + dev_priv->ring.Size);
+        while (--ring >= (u32 *)dev_priv->ring.virtual_start) {
+            bbaddr = i915_get_bbaddr(dev, ring);
+            if (bbaddr)
+                break;
+        }
+    }
+
+    return bbaddr;
+}
+
 /**
  * i915_capture_error_state - capture an error record for later analysis
  * @dev: drm device
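i915_ringbuffer_last_batch() above walks backwards from the ring HEAD until it finds a batch-start command; the dword after the opcode is the GTT address of the most recently dispatched batch. A flat-array sketch of the scan (the opcode value is illustrative, and the wrap-around rescan from the ring's end is reduced to a comment):

#include <stdint.h>
#include <stdio.h>

#define MI_BATCH_BUFFER_START_CMD 0x18800000u   /* illustrative value */

/* Walk back from 'head' (an index in dwords) to find the most recent
 * batch start, returning the address stored in the following dword. */
static uint32_t last_batch_addr(const uint32_t *ring, size_t head)
{
    size_t i = head;

    while (i-- > 0)
        if (ring[i] == MI_BATCH_BUFFER_START_CMD)
            return ring[i + 1];
    return 0;   /* the driver then rescans from the ring's end (wrap) */
}

int main(void)
{
    uint32_t ring[8] = { 0, MI_BATCH_BUFFER_START_CMD, 0x1000, 0, 0, 0, 0, 0 };

    printf("0x%x\n", last_batch_addr(ring, 6));  /* -> 0x1000 */
    return 0;
}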
@@ -388,19 +559,26 @@ static void i915_error_work_func(struct work_struct *work)
 static void i915_capture_error_state(struct drm_device *dev)
 {
     struct drm_i915_private *dev_priv = dev->dev_private;
+    struct drm_i915_gem_object *obj_priv;
     struct drm_i915_error_state *error;
+    struct drm_gem_object *batchbuffer[2];
     unsigned long flags;
+    u32 bbaddr;
+    int count;
 
     spin_lock_irqsave(&dev_priv->error_lock, flags);
-    if (dev_priv->first_error)
-        goto out;
+    error = dev_priv->first_error;
+    spin_unlock_irqrestore(&dev_priv->error_lock, flags);
+    if (error)
+        return;
 
     error = kmalloc(sizeof(*error), GFP_ATOMIC);
     if (!error) {
-        DRM_DEBUG_DRIVER("out ot memory, not capturing error state\n");
-        goto out;
+        DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
+        return;
     }
 
+    error->seqno = i915_get_gem_seqno(dev);
     error->eir = I915_READ(EIR);
     error->pgtbl_er = I915_READ(PGTBL_ER);
     error->pipeastat = I915_READ(PIPEASTAT);
@@ -411,6 +589,7 @@ static void i915_capture_error_state(struct drm_device *dev)
         error->ipehr = I915_READ(IPEHR);
         error->instdone = I915_READ(INSTDONE);
         error->acthd = I915_READ(ACTHD);
+        error->bbaddr = 0;
     } else {
         error->ipeir = I915_READ(IPEIR_I965);
         error->ipehr = I915_READ(IPEHR_I965);
@@ -418,14 +597,101 @@ static void i915_capture_error_state(struct drm_device *dev)
         error->instps = I915_READ(INSTPS);
         error->instdone1 = I915_READ(INSTDONE1);
         error->acthd = I915_READ(ACTHD_I965);
+        error->bbaddr = I915_READ64(BB_ADDR);
     }
 
-    do_gettimeofday(&error->time);
+    bbaddr = i915_ringbuffer_last_batch(dev);
+
+    /* Grab the current batchbuffer, most likely to have crashed. */
+    batchbuffer[0] = NULL;
+    batchbuffer[1] = NULL;
+    count = 0;
+    list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) {
+        struct drm_gem_object *obj = obj_priv->obj;
+
+        if (batchbuffer[0] == NULL &&
+            bbaddr >= obj_priv->gtt_offset &&
+            bbaddr < obj_priv->gtt_offset + obj->size)
+            batchbuffer[0] = obj;
+
+        if (batchbuffer[1] == NULL &&
+            error->acthd >= obj_priv->gtt_offset &&
+            error->acthd < obj_priv->gtt_offset + obj->size &&
+            batchbuffer[0] != obj)
+            batchbuffer[1] = obj;
+
+        count++;
+    }
 
-    dev_priv->first_error = error;
+    /* We need to copy these to an anonymous buffer as the simplest
+     * method to avoid being overwritten by userspace.
+     */
+    error->batchbuffer[0] = i915_error_object_create(dev, batchbuffer[0]);
+    error->batchbuffer[1] = i915_error_object_create(dev, batchbuffer[1]);
+
+    /* Record the ringbuffer */
+    error->ringbuffer = i915_error_object_create(dev, dev_priv->ring.ring_obj);
+
+    /* Record buffers on the active list. */
+    error->active_bo = NULL;
+    error->active_bo_count = 0;
+
+    if (count)
+        error->active_bo = kmalloc(sizeof(*error->active_bo)*count,
+                                   GFP_ATOMIC);
+
+    if (error->active_bo) {
+        int i = 0;
+        list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) {
+            struct drm_gem_object *obj = obj_priv->obj;
+
+            error->active_bo[i].size = obj->size;
+            error->active_bo[i].name = obj->name;
+            error->active_bo[i].seqno = obj_priv->last_rendering_seqno;
+            error->active_bo[i].gtt_offset = obj_priv->gtt_offset;
+            error->active_bo[i].read_domains = obj->read_domains;
+            error->active_bo[i].write_domain = obj->write_domain;
+            error->active_bo[i].fence_reg = obj_priv->fence_reg;
+            error->active_bo[i].pinned = 0;
+            if (obj_priv->pin_count > 0)
+                error->active_bo[i].pinned = 1;
+            if (obj_priv->user_pin_count > 0)
+                error->active_bo[i].pinned = -1;
+            error->active_bo[i].tiling = obj_priv->tiling_mode;
+            error->active_bo[i].dirty = obj_priv->dirty;
+            error->active_bo[i].purgeable = obj_priv->madv != I915_MADV_WILLNEED;
+
+            if (++i == count)
+                break;
+        }
+        error->active_bo_count = i;
+    }
+
+    do_gettimeofday(&error->time);
 
-out:
+    spin_lock_irqsave(&dev_priv->error_lock, flags);
+    if (dev_priv->first_error == NULL) {
+        dev_priv->first_error = error;
+        error = NULL;
+    }
     spin_unlock_irqrestore(&dev_priv->error_lock, flags);
+
+    if (error)
+        i915_error_state_free(dev, error);
+}
+
+void i915_destroy_error_state(struct drm_device *dev)
+{
+    struct drm_i915_private *dev_priv = dev->dev_private;
+    struct drm_i915_error_state *error;
+
+    spin_lock(&dev_priv->error_lock);
+    error = dev_priv->first_error;
+    dev_priv->first_error = NULL;
+    spin_unlock(&dev_priv->error_lock);
+
+    if (error)
+        i915_error_state_free(dev, error);
 }
 
 /**
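The capture path now uses optimistic publication: peek at first_error under the lock, build the (potentially slow, GFP_ATOMIC) snapshot with the lock dropped, then re-take the lock and install the record only if nobody beat us to it, freeing the losing copy. The same pattern with pthreads and a dummy record type:

#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t error_lock = PTHREAD_MUTEX_INITIALIZER;
static struct error_state { int dummy; } *first_error;

/* Peek under the lock, build with the lock dropped, publish only if we
 * are still first; the losing record is freed, never leaked. */
static void capture_error(void)
{
    struct error_state *error;

    pthread_mutex_lock(&error_lock);
    error = first_error;
    pthread_mutex_unlock(&error_lock);
    if (error)
        return;         /* someone already captured a record */

    error = malloc(sizeof(*error));
    if (!error)
        return;
    /* ... fill in registers, snapshot buffers ... */

    pthread_mutex_lock(&error_lock);
    if (first_error == NULL) {
        first_error = error;
        error = NULL;
    }
    pthread_mutex_unlock(&error_lock);
    free(error);        /* no-op if we won the race */
}

int main(void)
{
    capture_error();
    return 0;
}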
@@ -576,7 +842,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
 
     atomic_inc(&dev_priv->irq_received);
 
-    if (IS_IRONLAKE(dev))
+    if (HAS_PCH_SPLIT(dev))
         return ironlake_irq_handler(dev);
 
     iir = I915_READ(IIR);
@@ -737,7 +1003,7 @@ void i915_user_irq_get(struct drm_device *dev)
 
     spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
     if (dev->irq_enabled && (++dev_priv->user_irq_refcount == 1)) {
-        if (IS_IRONLAKE(dev))
+        if (HAS_PCH_SPLIT(dev))
             ironlake_enable_graphics_irq(dev_priv, GT_USER_INTERRUPT);
         else
             i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
@@ -753,7 +1019,7 @@ void i915_user_irq_put(struct drm_device *dev)
     spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
     BUG_ON(dev->irq_enabled && dev_priv->user_irq_refcount <= 0);
     if (dev->irq_enabled && (--dev_priv->user_irq_refcount == 0)) {
-        if (IS_IRONLAKE(dev))
+        if (HAS_PCH_SPLIT(dev))
             ironlake_disable_graphics_irq(dev_priv, GT_USER_INTERRUPT);
         else
             i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
@@ -861,7 +1127,7 @@ int i915_enable_vblank(struct drm_device *dev, int pipe)
         return -EINVAL;
 
     spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
-    if (IS_IRONLAKE(dev))
+    if (HAS_PCH_SPLIT(dev))
         ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
                                     DE_PIPEA_VBLANK: DE_PIPEB_VBLANK);
     else if (IS_I965G(dev))
@@ -883,7 +1149,7 @@ void i915_disable_vblank(struct drm_device *dev, int pipe)
     unsigned long irqflags;
 
     spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
-    if (IS_IRONLAKE(dev))
+    if (HAS_PCH_SPLIT(dev))
         ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
                                      DE_PIPEA_VBLANK: DE_PIPEB_VBLANK);
     else
@@ -897,7 +1163,7 @@ void i915_enable_interrupt (struct drm_device *dev)
 {
     struct drm_i915_private *dev_priv = dev->dev_private;
 
-    if (!IS_IRONLAKE(dev))
+    if (!HAS_PCH_SPLIT(dev))
         opregion_enable_asle(dev);
     dev_priv->irq_enabled = 1;
 }
@@ -973,7 +1239,11 @@ void i915_hangcheck_elapsed(unsigned long data)
     struct drm_device *dev = (struct drm_device *)data;
     drm_i915_private_t *dev_priv = dev->dev_private;
     uint32_t acthd;
 
+    /* No reset support on this chip yet. */
+    if (IS_GEN6(dev))
+        return;
+
     if (!IS_I965G(dev))
         acthd = I915_READ(ACTHD);
     else
@@ -1064,6 +1334,13 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
     I915_WRITE(SDEIER, dev_priv->pch_irq_enable_reg);
     (void) I915_READ(SDEIER);
 
+    if (IS_IRONLAKE_M(dev)) {
+        /* Clear & enable PCU event interrupts */
+        I915_WRITE(DEIIR, DE_PCU_EVENT);
+        I915_WRITE(DEIER, I915_READ(DEIER) | DE_PCU_EVENT);
+        ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
+    }
+
     return 0;
 }
 
@@ -1076,7 +1353,7 @@ void i915_driver_irq_preinstall(struct drm_device * dev)
     INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
     INIT_WORK(&dev_priv->error_work, i915_error_work_func);
 
-    if (IS_IRONLAKE(dev)) {
+    if (HAS_PCH_SPLIT(dev)) {
         ironlake_irq_preinstall(dev);
         return;
     }
@@ -1108,7 +1385,7 @@ int i915_driver_irq_postinstall(struct drm_device *dev)
 
     dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
 
-    if (IS_IRONLAKE(dev))
+    if (HAS_PCH_SPLIT(dev))
         return ironlake_irq_postinstall(dev);
 
     /* Unmask the interrupts that we always want on. */
@@ -1196,7 +1473,7 @@ void i915_driver_irq_uninstall(struct drm_device * dev)
 
     dev_priv->vblank_pipe = 0;
 
-    if (IS_IRONLAKE(dev)) {
+    if (HAS_PCH_SPLIT(dev)) {
         ironlake_irq_uninstall(dev);
         return;
     }
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index ab1bd2d3d3b6..3d59862c7ccd 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -53,6 +53,25 @@
 #define INTEL_GMCH_GMS_STOLEN_224M (0xc << 4)
 #define INTEL_GMCH_GMS_STOLEN_352M (0xd << 4)
 
+#define SNB_GMCH_CTRL 0x50
+#define SNB_GMCH_GMS_STOLEN_MASK 0xF8
+#define SNB_GMCH_GMS_STOLEN_32M (1 << 3)
+#define SNB_GMCH_GMS_STOLEN_64M (2 << 3)
+#define SNB_GMCH_GMS_STOLEN_96M (3 << 3)
+#define SNB_GMCH_GMS_STOLEN_128M (4 << 3)
+#define SNB_GMCH_GMS_STOLEN_160M (5 << 3)
+#define SNB_GMCH_GMS_STOLEN_192M (6 << 3)
+#define SNB_GMCH_GMS_STOLEN_224M (7 << 3)
+#define SNB_GMCH_GMS_STOLEN_256M (8 << 3)
+#define SNB_GMCH_GMS_STOLEN_288M (9 << 3)
+#define SNB_GMCH_GMS_STOLEN_320M (0xa << 3)
+#define SNB_GMCH_GMS_STOLEN_352M (0xb << 3)
+#define SNB_GMCH_GMS_STOLEN_384M (0xc << 3)
+#define SNB_GMCH_GMS_STOLEN_416M (0xd << 3)
+#define SNB_GMCH_GMS_STOLEN_448M (0xe << 3)
+#define SNB_GMCH_GMS_STOLEN_480M (0xf << 3)
+#define SNB_GMCH_GMS_STOLEN_512M (0x10 << 3)
+
 /* PCI config space */
 
 #define HPLLCC 0xc0 /* 855 only */
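The SNB_GMCH_GMS_STOLEN_* table is a linear encoding: the field behind SNB_GMCH_GMS_STOLEN_MASK (bits 7:3) counts stolen graphics memory in 32MB steps, so 1 means 32M and 0x10 means 512M. Decoding it is one shift-and-multiply; a standalone sketch with an invented helper name:

#include <stdio.h>

#define SNB_GMCH_GMS_SHIFT 3
#define SNB_GMCH_GMS_FIELD 0x1f     /* the field under the 0xF8 mask */

/* Each step of the 5-bit field at bit 3 is another 32MB of stolen
 * memory, matching the SNB_GMCH_GMS_STOLEN_* table above. */
static unsigned int snb_stolen_mb(unsigned int gmch_ctrl)
{
    return ((gmch_ctrl >> SNB_GMCH_GMS_SHIFT) & SNB_GMCH_GMS_FIELD) * 32;
}

int main(void)
{
    printf("%uMB %uMB\n", snb_stolen_mb(4 << 3), snb_stolen_mb(0x10 << 3));
    return 0;   /* prints 128MB 512MB */
}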
@@ -61,6 +80,7 @@
 #define GC_CLOCK_100_200 (1 << 0)
 #define GC_CLOCK_100_133 (2 << 0)
 #define GC_CLOCK_166_250 (3 << 0)
+#define GCFGC2 0xda
 #define GCFGC 0xf0 /* 915+ only */
 #define GC_LOW_FREQUENCY_ENABLE (1 << 7)
 #define GC_DISPLAY_CLOCK_190_200_MHZ (0 << 4)
@@ -234,6 +254,9 @@
 #define I965_FENCE_REG_VALID (1<<0)
 #define I965_FENCE_MAX_PITCH_VAL 0x0400
 
+#define FENCE_REG_SANDYBRIDGE_0 0x100000
+#define SANDYBRIDGE_FENCE_PITCH_SHIFT 32
+
 /*
  * Instruction and interrupt control regs
  */
@@ -265,6 +288,7 @@
 #define INSTDONE1 0x0207c /* 965+ only */
 #define ACTHD_I965 0x02074
 #define HWS_PGA 0x02080
+#define HWS_PGA_GEN6 0x04080
 #define HWS_ADDRESS_MASK 0xfffff000
 #define HWS_START_ADDRESS_SHIFT 4
 #define PWRCTXA 0x2088 /* 965GM+ only */
@@ -282,7 +306,7 @@
 #define I915_PIPE_CONTROL_NOTIFY_INTERRUPT (1<<18)
 #define I915_DISPLAY_PORT_INTERRUPT (1<<17)
 #define I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT (1<<15)
-#define I915_GMCH_THERMAL_SENSOR_EVENT_INTERRUPT (1<<14)
+#define I915_GMCH_THERMAL_SENSOR_EVENT_INTERRUPT (1<<14) /* p-state */
 #define I915_HWB_OOM_INTERRUPT (1<<13)
 #define I915_SYNC_STATUS_INTERRUPT (1<<12)
 #define I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT (1<<11)
@@ -306,11 +330,14 @@
 #define I915_ERROR_MEMORY_REFRESH (1<<1)
 #define I915_ERROR_INSTRUCTION (1<<0)
 #define INSTPM 0x020c0
+#define INSTPM_SELF_EN (1<<12) /* 915GM only */
 #define ACTHD 0x020c8
 #define FW_BLC 0x020d8
 #define FW_BLC2 0x020dc
 #define FW_BLC_SELF 0x020e0 /* 915+ only */
-#define FW_BLC_SELF_EN (1<<15)
+#define FW_BLC_SELF_EN_MASK (1<<31)
+#define FW_BLC_SELF_FIFO_MASK (1<<16) /* 945 only */
+#define FW_BLC_SELF_EN (1<<15) /* 945 only */
 #define MM_BURST_LENGTH 0x00700000
 #define MM_FIFO_WATERMARK 0x0001F000
 #define LM_BURST_LENGTH 0x00000700
@@ -324,6 +351,7 @@
 #define CM0_COLOR_EVICT_DISABLE (1<<3)
 #define CM0_DEPTH_WRITE_DISABLE (1<<1)
 #define CM0_RC_OP_FLUSH_DISABLE (1<<0)
+#define BB_ADDR 0x02140 /* 8 bytes */
 #define GFX_FLSH_CNTL 0x02170 /* 915+ only */
 
 
@@ -784,10 +812,144 @@
 #define CLKCFG_MEM_800 (3 << 4)
 #define CLKCFG_MEM_MASK (7 << 4)
 
-/** GM965 GM45 render standby register */
-#define MCHBAR_RENDER_STANDBY 0x111B8
+#define CRSTANDVID 0x11100
+#define PXVFREQ_BASE 0x11110 /* P[0-15]VIDFREQ (0x1114c) (Ironlake) */
+#define PXVFREQ_PX_MASK 0x7f000000
+#define PXVFREQ_PX_SHIFT 24
+#define VIDFREQ_BASE 0x11110
+#define VIDFREQ1 0x11110 /* VIDFREQ1-4 (0x1111c) (Cantiga) */
+#define VIDFREQ2 0x11114
+#define VIDFREQ3 0x11118
+#define VIDFREQ4 0x1111c
+#define VIDFREQ_P0_MASK 0x1f000000
+#define VIDFREQ_P0_SHIFT 24
+#define VIDFREQ_P0_CSCLK_MASK 0x00f00000
+#define VIDFREQ_P0_CSCLK_SHIFT 20
+#define VIDFREQ_P0_CRCLK_MASK 0x000f0000
829#define VIDFREQ_P0_CRCLK_SHIFT 16
830#define VIDFREQ_P1_MASK 0x00001f00
831#define VIDFREQ_P1_SHIFT 8
832#define VIDFREQ_P1_CSCLK_MASK 0x000000f0
833#define VIDFREQ_P1_CSCLK_SHIFT 4
834#define VIDFREQ_P1_CRCLK_MASK 0x0000000f
835#define INTTOEXT_BASE_ILK 0x11300
836#define INTTOEXT_BASE 0x11120 /* INTTOEXT1-8 (0x1113c) */
837#define INTTOEXT_MAP3_SHIFT 24
838#define INTTOEXT_MAP3_MASK (0x1f << INTTOEXT_MAP3_SHIFT)
839#define INTTOEXT_MAP2_SHIFT 16
840#define INTTOEXT_MAP2_MASK (0x1f << INTTOEXT_MAP2_SHIFT)
841#define INTTOEXT_MAP1_SHIFT 8
842#define INTTOEXT_MAP1_MASK (0x1f << INTTOEXT_MAP1_SHIFT)
843#define INTTOEXT_MAP0_SHIFT 0
844#define INTTOEXT_MAP0_MASK (0x1f << INTTOEXT_MAP0_SHIFT)
845#define MEMSWCTL 0x11170 /* Ironlake only */
846#define MEMCTL_CMD_MASK 0xe000
847#define MEMCTL_CMD_SHIFT 13
848#define MEMCTL_CMD_RCLK_OFF 0
849#define MEMCTL_CMD_RCLK_ON 1
850#define MEMCTL_CMD_CHFREQ 2
851#define MEMCTL_CMD_CHVID 3
852#define MEMCTL_CMD_VMMOFF 4
853#define MEMCTL_CMD_VMMON 5
854#define MEMCTL_CMD_STS (1<<12) /* write 1 triggers command, clears
855 when command complete */
856#define MEMCTL_FREQ_MASK 0x0f00 /* jitter, from 0-15 */
857#define MEMCTL_FREQ_SHIFT 8
858#define MEMCTL_SFCAVM (1<<7)
859#define MEMCTL_TGT_VID_MASK 0x007f
860#define MEMIHYST 0x1117c
861#define MEMINTREN 0x11180 /* 16 bits */
862#define MEMINT_RSEXIT_EN (1<<8)
863#define MEMINT_CX_SUPR_EN (1<<7)
864#define MEMINT_CONT_BUSY_EN (1<<6)
865#define MEMINT_AVG_BUSY_EN (1<<5)
866#define MEMINT_EVAL_CHG_EN (1<<4)
867#define MEMINT_MON_IDLE_EN (1<<3)
868#define MEMINT_UP_EVAL_EN (1<<2)
869#define MEMINT_DOWN_EVAL_EN (1<<1)
870#define MEMINT_SW_CMD_EN (1<<0)
871#define MEMINTRSTR 0x11182 /* 16 bits */
872#define MEM_RSEXIT_MASK 0xc000
873#define MEM_RSEXIT_SHIFT 14
874#define MEM_CONT_BUSY_MASK 0x3000
875#define MEM_CONT_BUSY_SHIFT 12
876#define MEM_AVG_BUSY_MASK 0x0c00
877#define MEM_AVG_BUSY_SHIFT 10
878#define MEM_EVAL_CHG_MASK 0x0300
879#define MEM_EVAL_BUSY_SHIFT 8
880#define MEM_MON_IDLE_MASK 0x00c0
881#define MEM_MON_IDLE_SHIFT 6
882#define MEM_UP_EVAL_MASK 0x0030
883#define MEM_UP_EVAL_SHIFT 4
884#define MEM_DOWN_EVAL_MASK 0x000c
885#define MEM_DOWN_EVAL_SHIFT 2
886#define MEM_SW_CMD_MASK 0x0003
887#define MEM_INT_STEER_GFX 0
888#define MEM_INT_STEER_CMR 1
889#define MEM_INT_STEER_SMI 2
890#define MEM_INT_STEER_SCI 3
891#define MEMINTRSTS 0x11184
892#define MEMINT_RSEXIT (1<<7)
893#define MEMINT_CONT_BUSY (1<<6)
894#define MEMINT_AVG_BUSY (1<<5)
895#define MEMINT_EVAL_CHG (1<<4)
896#define MEMINT_MON_IDLE (1<<3)
897#define MEMINT_UP_EVAL (1<<2)
898#define MEMINT_DOWN_EVAL (1<<1)
899#define MEMINT_SW_CMD (1<<0)
900#define MEMMODECTL 0x11190
901#define MEMMODE_BOOST_EN (1<<31)
902#define MEMMODE_BOOST_FREQ_MASK 0x0f000000 /* jitter for boost, 0-15 */
903#define MEMMODE_BOOST_FREQ_SHIFT 24
904#define MEMMODE_IDLE_MODE_MASK 0x00030000
905#define MEMMODE_IDLE_MODE_SHIFT 16
906#define MEMMODE_IDLE_MODE_EVAL 0
907#define MEMMODE_IDLE_MODE_CONT 1
908#define MEMMODE_HWIDLE_EN (1<<15)
909#define MEMMODE_SWMODE_EN (1<<14)
910#define MEMMODE_RCLK_GATE (1<<13)
911#define MEMMODE_HW_UPDATE (1<<12)
912#define MEMMODE_FSTART_MASK 0x00000f00 /* starting jitter, 0-15 */
913#define MEMMODE_FSTART_SHIFT 8
914#define MEMMODE_FMAX_MASK 0x000000f0 /* max jitter, 0-15 */
915#define MEMMODE_FMAX_SHIFT 4
916#define MEMMODE_FMIN_MASK 0x0000000f /* min jitter, 0-15 */
917#define RCBMAXAVG 0x1119c
918#define MEMSWCTL2 0x1119e /* Cantiga only */
919#define SWMEMCMD_RENDER_OFF (0 << 13)
920#define SWMEMCMD_RENDER_ON (1 << 13)
921#define SWMEMCMD_SWFREQ (2 << 13)
922#define SWMEMCMD_TARVID (3 << 13)
923#define SWMEMCMD_VRM_OFF (4 << 13)
924#define SWMEMCMD_VRM_ON (5 << 13)
925#define CMDSTS (1<<12)
926#define SFCAVM (1<<11)
927#define SWFREQ_MASK 0x0380 /* P0-7 */
928#define SWFREQ_SHIFT 7
929#define TARVID_MASK 0x001f
930#define MEMSTAT_CTG 0x111a0
931#define RCBMINAVG 0x111a0
932#define RCUPEI 0x111b0
933#define RCDNEI 0x111b4
934#define MCHBAR_RENDER_STANDBY 0x111b8
789#define RCX_SW_EXIT (1<<23) 935#define RCX_SW_EXIT (1<<23)
790#define RSX_STATUS_MASK 0x00700000 936#define RSX_STATUS_MASK 0x00700000
937#define VIDCTL 0x111c0
938#define VIDSTS 0x111c8
939#define VIDSTART 0x111cc /* 8 bits */
940#define MEMSTAT_ILK 0x111f8
941#define MEMSTAT_VID_MASK 0x7f00
942#define MEMSTAT_VID_SHIFT 8
943#define MEMSTAT_PSTATE_MASK 0x00f8
944#define MEMSTAT_PSTATE_SHIFT 3
945#define MEMSTAT_MON_ACTV (1<<2)
946#define MEMSTAT_SRC_CTL_MASK 0x0003
947#define MEMSTAT_SRC_CTL_CORE 0
948#define MEMSTAT_SRC_CTL_TRB 1
949#define MEMSTAT_SRC_CTL_THM 2
950#define MEMSTAT_SRC_CTL_STDBY 3
951#define RCPREVBSYTUPAVG 0x113b8
952#define RCPREVBSYTDNAVG 0x113bc
791#define PEG_BAND_GAP_DATA 0x14d68 953#define PEG_BAND_GAP_DATA 0x14d68
792 954
793/* 955/*
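
The new SNB_GMCH_* definitions encode the Sandybridge stolen-memory size in bits 7:3 of the GMCH control word at config offset 0x50, in 32MB steps from 32M (1) to 512M (0x10). A standalone sketch of the decode these values imply; the helper name and probe value are illustrative:

	#include <stdint.h>
	#include <stdio.h>

	#define SNB_GMCH_GMS_STOLEN_MASK 0xF8	/* bits 7:3 */

	/* Sketch: the 5-bit field counts stolen memory in 32MB units. */
	static unsigned int snb_stolen_mb(uint16_t gmch_ctrl)
	{
		return ((gmch_ctrl & SNB_GMCH_GMS_STOLEN_MASK) >> 3) * 32;
	}

	int main(void)
	{
		uint16_t ctrl = 4 << 3;	/* SNB_GMCH_GMS_STOLEN_128M */
		printf("stolen: %uMB\n", snb_stolen_mb(ctrl));	/* prints 128 */
		return 0;
	}
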
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index a3b90c9561dc..ac0d1a73ac22 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -682,6 +682,8 @@ void i915_restore_display(struct drm_device *dev)
682 I915_WRITE(PCH_PP_OFF_DELAYS, dev_priv->savePP_OFF_DELAYS); 682 I915_WRITE(PCH_PP_OFF_DELAYS, dev_priv->savePP_OFF_DELAYS);
683 I915_WRITE(PCH_PP_DIVISOR, dev_priv->savePP_DIVISOR); 683 I915_WRITE(PCH_PP_DIVISOR, dev_priv->savePP_DIVISOR);
684 I915_WRITE(PCH_PP_CONTROL, dev_priv->savePP_CONTROL); 684 I915_WRITE(PCH_PP_CONTROL, dev_priv->savePP_CONTROL);
685 I915_WRITE(MCHBAR_RENDER_STANDBY,
686 dev_priv->saveMCHBAR_RENDER_STANDBY);
685 } else { 687 } else {
686 I915_WRITE(PFIT_PGM_RATIOS, dev_priv->savePFIT_PGM_RATIOS); 688 I915_WRITE(PFIT_PGM_RATIOS, dev_priv->savePFIT_PGM_RATIOS);
687 I915_WRITE(BLC_PWM_CTL, dev_priv->saveBLC_PWM_CTL); 689 I915_WRITE(BLC_PWM_CTL, dev_priv->saveBLC_PWM_CTL);
@@ -745,11 +747,16 @@ int i915_save_state(struct drm_device *dev)
745 dev_priv->saveGTIMR = I915_READ(GTIMR); 747 dev_priv->saveGTIMR = I915_READ(GTIMR);
746 dev_priv->saveFDI_RXA_IMR = I915_READ(FDI_RXA_IMR); 748 dev_priv->saveFDI_RXA_IMR = I915_READ(FDI_RXA_IMR);
747 dev_priv->saveFDI_RXB_IMR = I915_READ(FDI_RXB_IMR); 749 dev_priv->saveFDI_RXB_IMR = I915_READ(FDI_RXB_IMR);
750 dev_priv->saveMCHBAR_RENDER_STANDBY =
751 I915_READ(MCHBAR_RENDER_STANDBY);
748 } else { 752 } else {
749 dev_priv->saveIER = I915_READ(IER); 753 dev_priv->saveIER = I915_READ(IER);
750 dev_priv->saveIMR = I915_READ(IMR); 754 dev_priv->saveIMR = I915_READ(IMR);
751 } 755 }
752 756
757 if (IS_IRONLAKE_M(dev))
758 ironlake_disable_drps(dev);
759
753 /* Cache mode state */ 760 /* Cache mode state */
754 dev_priv->saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0); 761 dev_priv->saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);
755 762
@@ -820,6 +827,9 @@ int i915_restore_state(struct drm_device *dev)
820 /* Clock gating state */ 827 /* Clock gating state */
821 intel_init_clock_gating(dev); 828 intel_init_clock_gating(dev);
822 829
830 if (IS_IRONLAKE_M(dev))
831 ironlake_enable_drps(dev);
832
823 /* Cache mode state */ 833 /* Cache mode state */
824 I915_WRITE (CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000); 834 I915_WRITE (CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000);
825 835
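
The suspend hunks bracket DRPS around the saved state: MCHBAR_RENDER_STANDBY is snapshotted with the other Ironlake-only registers, DRPS is shut down before the GPU sleeps, and resume re-enables it only after clock gating has been reinitialized. A condensed sketch of that ordering, assuming the structure of the surrounding save/restore functions rather than reproducing them:

	/* Sketch of the ordering these hunks establish, not the full
	 * i915_save_state()/i915_restore_state() bodies. */
	static void pm_save_sketch(struct drm_device *dev)
	{
		struct drm_i915_private *dev_priv = dev->dev_private;

		dev_priv->saveMCHBAR_RENDER_STANDBY =
			I915_READ(MCHBAR_RENDER_STANDBY);
		if (IS_IRONLAKE_M(dev))
			ironlake_disable_drps(dev);	/* quiesce before sleep */
	}

	static void pm_restore_sketch(struct drm_device *dev)
	{
		struct drm_i915_private *dev_priv = dev->dev_private;

		I915_WRITE(MCHBAR_RENDER_STANDBY,
			   dev_priv->saveMCHBAR_RENDER_STANDBY);
		intel_init_clock_gating(dev);
		if (IS_IRONLAKE_M(dev))
			ironlake_enable_drps(dev);	/* after gating re-init */
	}
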
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 15fbc1b5a83e..70c9d4ba7042 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -247,6 +247,7 @@ static void
247parse_general_features(struct drm_i915_private *dev_priv, 247parse_general_features(struct drm_i915_private *dev_priv,
248 struct bdb_header *bdb) 248 struct bdb_header *bdb)
249{ 249{
250 struct drm_device *dev = dev_priv->dev;
250 struct bdb_general_features *general; 251 struct bdb_general_features *general;
251 252
252 /* Set sensible defaults in case we can't find the general block */ 253 /* Set sensible defaults in case we can't find the general block */
@@ -263,7 +264,7 @@ parse_general_features(struct drm_i915_private *dev_priv,
263 if (IS_I85X(dev_priv->dev)) 264 if (IS_I85X(dev_priv->dev))
264 dev_priv->lvds_ssc_freq = 265 dev_priv->lvds_ssc_freq =
265 general->ssc_freq ? 66 : 48; 266 general->ssc_freq ? 66 : 48;
266 else if (IS_IRONLAKE(dev_priv->dev)) 267 else if (IS_IRONLAKE(dev_priv->dev) || IS_GEN6(dev))
267 dev_priv->lvds_ssc_freq = 268 dev_priv->lvds_ssc_freq =
268 general->ssc_freq ? 100 : 120; 269 general->ssc_freq ? 100 : 120;
269 else 270 else
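
parse_general_features() now picks the SSC reference frequency for Gen6 the same way as for Ironlake: 100MHz with spread-spectrum, 120MHz without. Restated as a sketch (the final fallback pair is the pre-existing default path and is assumed, not shown in this hunk):

	/* Sketch of the selection; values in MHz. */
	static int lvds_ssc_freq_mhz(struct drm_device *dev, bool ssc_bit)
	{
		if (IS_I85X(dev))
			return ssc_bit ? 66 : 48;
		if (IS_IRONLAKE(dev) || IS_GEN6(dev))
			return ssc_bit ? 100 : 120;
		return ssc_bit ? 100 : 96;	/* assumed default path */
	}
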
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 79dd4026586f..fccf07470c8f 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -39,7 +39,7 @@ static void intel_crt_dpms(struct drm_encoder *encoder, int mode)
39 struct drm_i915_private *dev_priv = dev->dev_private; 39 struct drm_i915_private *dev_priv = dev->dev_private;
40 u32 temp, reg; 40 u32 temp, reg;
41 41
42 if (IS_IRONLAKE(dev)) 42 if (HAS_PCH_SPLIT(dev))
43 reg = PCH_ADPA; 43 reg = PCH_ADPA;
44 else 44 else
45 reg = ADPA; 45 reg = ADPA;
@@ -113,7 +113,7 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
113 else 113 else
114 dpll_md_reg = DPLL_B_MD; 114 dpll_md_reg = DPLL_B_MD;
115 115
116 if (IS_IRONLAKE(dev)) 116 if (HAS_PCH_SPLIT(dev))
117 adpa_reg = PCH_ADPA; 117 adpa_reg = PCH_ADPA;
118 else 118 else
119 adpa_reg = ADPA; 119 adpa_reg = ADPA;
@@ -122,7 +122,7 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
122 * Disable separate mode multiplier used when cloning SDVO to CRT 122 * Disable separate mode multiplier used when cloning SDVO to CRT
123 * XXX this needs to be adjusted when we really are cloning 123 * XXX this needs to be adjusted when we really are cloning
124 */ 124 */
125 if (IS_I965G(dev) && !IS_IRONLAKE(dev)) { 125 if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev)) {
126 dpll_md = I915_READ(dpll_md_reg); 126 dpll_md = I915_READ(dpll_md_reg);
127 I915_WRITE(dpll_md_reg, 127 I915_WRITE(dpll_md_reg,
128 dpll_md & ~DPLL_MD_UDI_MULTIPLIER_MASK); 128 dpll_md & ~DPLL_MD_UDI_MULTIPLIER_MASK);
@@ -136,11 +136,11 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
136 136
137 if (intel_crtc->pipe == 0) { 137 if (intel_crtc->pipe == 0) {
138 adpa |= ADPA_PIPE_A_SELECT; 138 adpa |= ADPA_PIPE_A_SELECT;
139 if (!IS_IRONLAKE(dev)) 139 if (!HAS_PCH_SPLIT(dev))
140 I915_WRITE(BCLRPAT_A, 0); 140 I915_WRITE(BCLRPAT_A, 0);
141 } else { 141 } else {
142 adpa |= ADPA_PIPE_B_SELECT; 142 adpa |= ADPA_PIPE_B_SELECT;
143 if (!IS_IRONLAKE(dev)) 143 if (!HAS_PCH_SPLIT(dev))
144 I915_WRITE(BCLRPAT_B, 0); 144 I915_WRITE(BCLRPAT_B, 0);
145 } 145 }
146 146
@@ -202,7 +202,7 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
202 u32 hotplug_en; 202 u32 hotplug_en;
203 int i, tries = 0; 203 int i, tries = 0;
204 204
205 if (IS_IRONLAKE(dev)) 205 if (HAS_PCH_SPLIT(dev))
206 return intel_ironlake_crt_detect_hotplug(connector); 206 return intel_ironlake_crt_detect_hotplug(connector);
207 207
208 /* 208 /*
@@ -524,7 +524,7 @@ void intel_crt_init(struct drm_device *dev)
524 &intel_output->enc); 524 &intel_output->enc);
525 525
526 /* Set up the DDC bus. */ 526 /* Set up the DDC bus. */
527 if (IS_IRONLAKE(dev)) 527 if (HAS_PCH_SPLIT(dev))
528 i2c_reg = PCH_GPIOA; 528 i2c_reg = PCH_GPIOA;
529 else { 529 else {
530 i2c_reg = GPIOA; 530 i2c_reg = GPIOA;
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index b27202d23ebc..9cd6de5f9906 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -232,7 +232,7 @@ struct intel_limit {
232#define G4X_P2_DISPLAY_PORT_FAST 10 232#define G4X_P2_DISPLAY_PORT_FAST 10
233#define G4X_P2_DISPLAY_PORT_LIMIT 0 233#define G4X_P2_DISPLAY_PORT_LIMIT 0
234 234
235/* Ironlake */ 235/* Ironlake / Sandybridge */
236/* as we calculate clock using (register_value + 2) for 236/* as we calculate clock using (register_value + 2) for
237 N/M1/M2, so here the range value for them is (actual_value-2). 237 N/M1/M2, so here the range value for them is (actual_value-2).
238 */ 238 */
@@ -690,7 +690,7 @@ static const intel_limit_t *intel_limit(struct drm_crtc *crtc)
690 struct drm_device *dev = crtc->dev; 690 struct drm_device *dev = crtc->dev;
691 const intel_limit_t *limit; 691 const intel_limit_t *limit;
692 692
693 if (IS_IRONLAKE(dev)) 693 if (HAS_PCH_SPLIT(dev))
694 limit = intel_ironlake_limit(crtc); 694 limit = intel_ironlake_limit(crtc);
695 else if (IS_G4X(dev)) { 695 else if (IS_G4X(dev)) {
696 limit = intel_g4x_limit(crtc); 696 limit = intel_g4x_limit(crtc);
@@ -886,7 +886,7 @@ intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
886 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { 886 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
887 int lvds_reg; 887 int lvds_reg;
888 888
889 if (IS_IRONLAKE(dev)) 889 if (HAS_PCH_SPLIT(dev))
890 lvds_reg = PCH_LVDS; 890 lvds_reg = PCH_LVDS;
891 else 891 else
892 lvds_reg = LVDS; 892 lvds_reg = LVDS;
@@ -1188,25 +1188,30 @@ static void intel_update_fbc(struct drm_crtc *crtc,
1188 if (intel_fb->obj->size > dev_priv->cfb_size) { 1188 if (intel_fb->obj->size > dev_priv->cfb_size) {
1189 DRM_DEBUG_KMS("framebuffer too large, disabling " 1189 DRM_DEBUG_KMS("framebuffer too large, disabling "
1190 "compression\n"); 1190 "compression\n");
1191 dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
1191 goto out_disable; 1192 goto out_disable;
1192 } 1193 }
1193 if ((mode->flags & DRM_MODE_FLAG_INTERLACE) || 1194 if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
1194 (mode->flags & DRM_MODE_FLAG_DBLSCAN)) { 1195 (mode->flags & DRM_MODE_FLAG_DBLSCAN)) {
1195 DRM_DEBUG_KMS("mode incompatible with compression, " 1196 DRM_DEBUG_KMS("mode incompatible with compression, "
1196 "disabling\n"); 1197 "disabling\n");
1198 dev_priv->no_fbc_reason = FBC_UNSUPPORTED_MODE;
1197 goto out_disable; 1199 goto out_disable;
1198 } 1200 }
1199 if ((mode->hdisplay > 2048) || 1201 if ((mode->hdisplay > 2048) ||
1200 (mode->vdisplay > 1536)) { 1202 (mode->vdisplay > 1536)) {
1201 DRM_DEBUG_KMS("mode too large for compression, disabling\n"); 1203 DRM_DEBUG_KMS("mode too large for compression, disabling\n");
1204 dev_priv->no_fbc_reason = FBC_MODE_TOO_LARGE;
1202 goto out_disable; 1205 goto out_disable;
1203 } 1206 }
1204 if ((IS_I915GM(dev) || IS_I945GM(dev)) && plane != 0) { 1207 if ((IS_I915GM(dev) || IS_I945GM(dev)) && plane != 0) {
1205 DRM_DEBUG_KMS("plane not 0, disabling compression\n"); 1208 DRM_DEBUG_KMS("plane not 0, disabling compression\n");
1209 dev_priv->no_fbc_reason = FBC_BAD_PLANE;
1206 goto out_disable; 1210 goto out_disable;
1207 } 1211 }
1208 if (obj_priv->tiling_mode != I915_TILING_X) { 1212 if (obj_priv->tiling_mode != I915_TILING_X) {
1209 DRM_DEBUG_KMS("framebuffer not tiled, disabling compression\n"); 1213 DRM_DEBUG_KMS("framebuffer not tiled, disabling compression\n");
1214 dev_priv->no_fbc_reason = FBC_NOT_TILED;
1210 goto out_disable; 1215 goto out_disable;
1211 } 1216 }
1212 1217
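
Each early-out in intel_update_fbc() above now records why frame-buffer compression was rejected before jumping to out_disable, so the failure mode can be reported later instead of being silently discarded. The codes themselves live in i915_drv.h; a sketch of their likely shape, with meanings taken from the debug messages in the hunk (the enum framing is an assumption):

	enum no_fbc_reason_sketch {
		FBC_STOLEN_TOO_SMALL,	/* fb larger than compressed buffer */
		FBC_UNSUPPORTED_MODE,	/* interlaced or double-scanned */
		FBC_MODE_TOO_LARGE,	/* beyond 2048x1536 */
		FBC_BAD_PLANE,		/* 915GM/945GM compress plane A only */
		FBC_NOT_TILED,		/* FBC needs X-tiled surfaces */
	};
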
@@ -1366,7 +1371,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
1366 dspcntr &= ~DISPPLANE_TILED; 1371 dspcntr &= ~DISPPLANE_TILED;
1367 } 1372 }
1368 1373
1369 if (IS_IRONLAKE(dev)) 1374 if (HAS_PCH_SPLIT(dev))
1370 /* must disable */ 1375 /* must disable */
1371 dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE; 1376 dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
1372 1377
@@ -1427,7 +1432,7 @@ static void i915_disable_vga (struct drm_device *dev)
1427 u8 sr1; 1432 u8 sr1;
1428 u32 vga_reg; 1433 u32 vga_reg;
1429 1434
1430 if (IS_IRONLAKE(dev)) 1435 if (HAS_PCH_SPLIT(dev))
1431 vga_reg = CPU_VGACNTRL; 1436 vga_reg = CPU_VGACNTRL;
1432 else 1437 else
1433 vga_reg = VGACNTRL; 1438 vga_reg = VGACNTRL;
@@ -2111,7 +2116,7 @@ static bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
2111 struct drm_display_mode *adjusted_mode) 2116 struct drm_display_mode *adjusted_mode)
2112{ 2117{
2113 struct drm_device *dev = crtc->dev; 2118 struct drm_device *dev = crtc->dev;
2114 if (IS_IRONLAKE(dev)) { 2119 if (HAS_PCH_SPLIT(dev)) {
2115 /* FDI link clock is fixed at 2.7G */ 2120 /* FDI link clock is fixed at 2.7G */
2116 if (mode->clock * 3 > 27000 * 4) 2121 if (mode->clock * 3 > 27000 * 4)
2117 return MODE_CLOCK_HIGH; 2122 return MODE_CLOCK_HIGH;
@@ -2757,11 +2762,22 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock,
2757 srwm = total_size - sr_entries; 2762 srwm = total_size - sr_entries;
2758 if (srwm < 0) 2763 if (srwm < 0)
2759 srwm = 1; 2764 srwm = 1;
2760 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN | (srwm & 0x3f)); 2765
2766 if (IS_I945G(dev) || IS_I945GM(dev))
2767 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
2768 else if (IS_I915GM(dev)) {
2769 /* 915M has a smaller SRWM field */
2770 I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
2771 I915_WRITE(INSTPM, I915_READ(INSTPM) | INSTPM_SELF_EN);
2772 }
2761 } else { 2773 } else {
2762 /* Turn off self refresh if both pipes are enabled */ 2774 /* Turn off self refresh if both pipes are enabled */
2763 I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF) 2775 if (IS_I945G(dev) || IS_I945GM(dev)) {
2764 & ~FW_BLC_SELF_EN); 2776 I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
2777 & ~FW_BLC_SELF_EN);
2778 } else if (IS_I915GM(dev)) {
2779 I915_WRITE(INSTPM, I915_READ(INSTPM) & ~INSTPM_SELF_EN);
2780 }
2765 } 2781 }
2766 2782
2767 DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n", 2783 DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
@@ -2967,7 +2983,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
2967 refclk / 1000); 2983 refclk / 1000);
2968 } else if (IS_I9XX(dev)) { 2984 } else if (IS_I9XX(dev)) {
2969 refclk = 96000; 2985 refclk = 96000;
2970 if (IS_IRONLAKE(dev)) 2986 if (HAS_PCH_SPLIT(dev))
2971 refclk = 120000; /* 120Mhz refclk */ 2987 refclk = 120000; /* 120Mhz refclk */
2972 } else { 2988 } else {
2973 refclk = 48000; 2989 refclk = 48000;
@@ -3025,7 +3041,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3025 } 3041 }
3026 3042
3027 /* FDI link */ 3043 /* FDI link */
3028 if (IS_IRONLAKE(dev)) { 3044 if (HAS_PCH_SPLIT(dev)) {
3029 int lane, link_bw, bpp; 3045 int lane, link_bw, bpp;
3030 /* eDP doesn't require FDI link, so just set DP M/N 3046 /* eDP doesn't require FDI link, so just set DP M/N
3031 according to current link config */ 3047 according to current link config */
@@ -3102,7 +3118,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3102 * PCH B stepping, previous chipset stepping should be 3118 * PCH B stepping, previous chipset stepping should be
3103 * ignoring this setting. 3119 * ignoring this setting.
3104 */ 3120 */
3105 if (IS_IRONLAKE(dev)) { 3121 if (HAS_PCH_SPLIT(dev)) {
3106 temp = I915_READ(PCH_DREF_CONTROL); 3122 temp = I915_READ(PCH_DREF_CONTROL);
3107 /* Always enable nonspread source */ 3123 /* Always enable nonspread source */
3108 temp &= ~DREF_NONSPREAD_SOURCE_MASK; 3124 temp &= ~DREF_NONSPREAD_SOURCE_MASK;
@@ -3149,7 +3165,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3149 reduced_clock.m2; 3165 reduced_clock.m2;
3150 } 3166 }
3151 3167
3152 if (!IS_IRONLAKE(dev)) 3168 if (!HAS_PCH_SPLIT(dev))
3153 dpll = DPLL_VGA_MODE_DIS; 3169 dpll = DPLL_VGA_MODE_DIS;
3154 3170
3155 if (IS_I9XX(dev)) { 3171 if (IS_I9XX(dev)) {
@@ -3162,7 +3178,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3162 sdvo_pixel_multiply = adjusted_mode->clock / mode->clock; 3178 sdvo_pixel_multiply = adjusted_mode->clock / mode->clock;
3163 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) 3179 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
3164 dpll |= (sdvo_pixel_multiply - 1) << SDVO_MULTIPLIER_SHIFT_HIRES; 3180 dpll |= (sdvo_pixel_multiply - 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
3165 else if (IS_IRONLAKE(dev)) 3181 else if (HAS_PCH_SPLIT(dev))
3166 dpll |= (sdvo_pixel_multiply - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT; 3182 dpll |= (sdvo_pixel_multiply - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
3167 } 3183 }
3168 if (is_dp) 3184 if (is_dp)
@@ -3174,7 +3190,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3174 else { 3190 else {
3175 dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; 3191 dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
3176 /* also FPA1 */ 3192 /* also FPA1 */
3177 if (IS_IRONLAKE(dev)) 3193 if (HAS_PCH_SPLIT(dev))
3178 dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT; 3194 dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
3179 if (IS_G4X(dev) && has_reduced_clock) 3195 if (IS_G4X(dev) && has_reduced_clock)
3180 dpll |= (1 << (reduced_clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT; 3196 dpll |= (1 << (reduced_clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
@@ -3193,7 +3209,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3193 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14; 3209 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
3194 break; 3210 break;
3195 } 3211 }
3196 if (IS_I965G(dev) && !IS_IRONLAKE(dev)) 3212 if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev))
3197 dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT); 3213 dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
3198 } else { 3214 } else {
3199 if (is_lvds) { 3215 if (is_lvds) {
@@ -3227,7 +3243,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3227 3243
3228 /* Ironlake's plane is forced to pipe, bit 24 is to 3244 /* Ironlake's plane is forced to pipe, bit 24 is to
3229 enable color space conversion */ 3245 enable color space conversion */
3230 if (!IS_IRONLAKE(dev)) { 3246 if (!HAS_PCH_SPLIT(dev)) {
3231 if (pipe == 0) 3247 if (pipe == 0)
3232 dspcntr &= ~DISPPLANE_SEL_PIPE_MASK; 3248 dspcntr &= ~DISPPLANE_SEL_PIPE_MASK;
3233 else 3249 else
@@ -3254,14 +3270,14 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3254 3270
3255 3271
3256 /* Disable the panel fitter if it was on our pipe */ 3272 /* Disable the panel fitter if it was on our pipe */
3257 if (!IS_IRONLAKE(dev) && intel_panel_fitter_pipe(dev) == pipe) 3273 if (!HAS_PCH_SPLIT(dev) && intel_panel_fitter_pipe(dev) == pipe)
3258 I915_WRITE(PFIT_CONTROL, 0); 3274 I915_WRITE(PFIT_CONTROL, 0);
3259 3275
3260 DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B'); 3276 DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
3261 drm_mode_debug_printmodeline(mode); 3277 drm_mode_debug_printmodeline(mode);
3262 3278
3263 /* assign to Ironlake registers */ 3279 /* assign to Ironlake registers */
3264 if (IS_IRONLAKE(dev)) { 3280 if (HAS_PCH_SPLIT(dev)) {
3265 fp_reg = pch_fp_reg; 3281 fp_reg = pch_fp_reg;
3266 dpll_reg = pch_dpll_reg; 3282 dpll_reg = pch_dpll_reg;
3267 } 3283 }
@@ -3282,7 +3298,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3282 if (is_lvds) { 3298 if (is_lvds) {
3283 u32 lvds; 3299 u32 lvds;
3284 3300
3285 if (IS_IRONLAKE(dev)) 3301 if (HAS_PCH_SPLIT(dev))
3286 lvds_reg = PCH_LVDS; 3302 lvds_reg = PCH_LVDS;
3287 3303
3288 lvds = I915_READ(lvds_reg); 3304 lvds = I915_READ(lvds_reg);
@@ -3304,12 +3320,12 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3304 /* set the dithering flag */ 3320 /* set the dithering flag */
3305 if (IS_I965G(dev)) { 3321 if (IS_I965G(dev)) {
3306 if (dev_priv->lvds_dither) { 3322 if (dev_priv->lvds_dither) {
3307 if (IS_IRONLAKE(dev)) 3323 if (HAS_PCH_SPLIT(dev))
3308 pipeconf |= PIPE_ENABLE_DITHER; 3324 pipeconf |= PIPE_ENABLE_DITHER;
3309 else 3325 else
3310 lvds |= LVDS_ENABLE_DITHER; 3326 lvds |= LVDS_ENABLE_DITHER;
3311 } else { 3327 } else {
3312 if (IS_IRONLAKE(dev)) 3328 if (HAS_PCH_SPLIT(dev))
3313 pipeconf &= ~PIPE_ENABLE_DITHER; 3329 pipeconf &= ~PIPE_ENABLE_DITHER;
3314 else 3330 else
3315 lvds &= ~LVDS_ENABLE_DITHER; 3331 lvds &= ~LVDS_ENABLE_DITHER;
@@ -3328,7 +3344,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3328 /* Wait for the clocks to stabilize. */ 3344 /* Wait for the clocks to stabilize. */
3329 udelay(150); 3345 udelay(150);
3330 3346
3331 if (IS_I965G(dev) && !IS_IRONLAKE(dev)) { 3347 if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev)) {
3332 if (is_sdvo) { 3348 if (is_sdvo) {
3333 sdvo_pixel_multiply = adjusted_mode->clock / mode->clock; 3349 sdvo_pixel_multiply = adjusted_mode->clock / mode->clock;
3334 I915_WRITE(dpll_md_reg, (0 << DPLL_MD_UDI_DIVIDER_SHIFT) | 3350 I915_WRITE(dpll_md_reg, (0 << DPLL_MD_UDI_DIVIDER_SHIFT) |
@@ -3375,14 +3391,14 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3375 /* pipesrc and dspsize control the size that is scaled from, which should 3391 /* pipesrc and dspsize control the size that is scaled from, which should
3376 * always be the user's requested size. 3392 * always be the user's requested size.
3377 */ 3393 */
3378 if (!IS_IRONLAKE(dev)) { 3394 if (!HAS_PCH_SPLIT(dev)) {
3379 I915_WRITE(dspsize_reg, ((mode->vdisplay - 1) << 16) | 3395 I915_WRITE(dspsize_reg, ((mode->vdisplay - 1) << 16) |
3380 (mode->hdisplay - 1)); 3396 (mode->hdisplay - 1));
3381 I915_WRITE(dsppos_reg, 0); 3397 I915_WRITE(dsppos_reg, 0);
3382 } 3398 }
3383 I915_WRITE(pipesrc_reg, ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1)); 3399 I915_WRITE(pipesrc_reg, ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
3384 3400
3385 if (IS_IRONLAKE(dev)) { 3401 if (HAS_PCH_SPLIT(dev)) {
3386 I915_WRITE(data_m1_reg, TU_SIZE(m_n.tu) | m_n.gmch_m); 3402 I915_WRITE(data_m1_reg, TU_SIZE(m_n.tu) | m_n.gmch_m);
3387 I915_WRITE(data_n1_reg, TU_SIZE(m_n.tu) | m_n.gmch_n); 3403 I915_WRITE(data_n1_reg, TU_SIZE(m_n.tu) | m_n.gmch_n);
3388 I915_WRITE(link_m1_reg, m_n.link_m); 3404 I915_WRITE(link_m1_reg, m_n.link_m);
@@ -3438,7 +3454,7 @@ void intel_crtc_load_lut(struct drm_crtc *crtc)
3438 return; 3454 return;
3439 3455
3440 /* use legacy palette for Ironlake */ 3456 /* use legacy palette for Ironlake */
3441 if (IS_IRONLAKE(dev)) 3457 if (HAS_PCH_SPLIT(dev))
3442 palreg = (intel_crtc->pipe == 0) ? LGC_PALETTE_A : 3458 palreg = (intel_crtc->pipe == 0) ? LGC_PALETTE_A :
3443 LGC_PALETTE_B; 3459 LGC_PALETTE_B;
3444 3460
@@ -3553,11 +3569,10 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
3553 intel_crtc->cursor_bo = bo; 3569 intel_crtc->cursor_bo = bo;
3554 3570
3555 return 0; 3571 return 0;
3556fail:
3557 mutex_lock(&dev->struct_mutex);
3558fail_locked: 3572fail_locked:
3559 drm_gem_object_unreference(bo);
3560 mutex_unlock(&dev->struct_mutex); 3573 mutex_unlock(&dev->struct_mutex);
3574fail:
3575 drm_gem_object_unreference_unlocked(bo);
3561 return ret; 3576 return ret;
3562} 3577}
3563 3578
@@ -3922,7 +3937,7 @@ static void intel_increase_pllclock(struct drm_crtc *crtc, bool schedule)
3922 int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B; 3937 int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
3923 int dpll = I915_READ(dpll_reg); 3938 int dpll = I915_READ(dpll_reg);
3924 3939
3925 if (IS_IRONLAKE(dev)) 3940 if (HAS_PCH_SPLIT(dev))
3926 return; 3941 return;
3927 3942
3928 if (!dev_priv->lvds_downclock_avail) 3943 if (!dev_priv->lvds_downclock_avail)
@@ -3961,7 +3976,7 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc)
3961 int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B; 3976 int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
3962 int dpll = I915_READ(dpll_reg); 3977 int dpll = I915_READ(dpll_reg);
3963 3978
3964 if (IS_IRONLAKE(dev)) 3979 if (HAS_PCH_SPLIT(dev))
3965 return; 3980 return;
3966 3981
3967 if (!dev_priv->lvds_downclock_avail) 3982 if (!dev_priv->lvds_downclock_avail)
@@ -4011,6 +4026,11 @@ static void intel_idle_update(struct work_struct *work)
4011 4026
4012 mutex_lock(&dev->struct_mutex); 4027 mutex_lock(&dev->struct_mutex);
4013 4028
4029 if (IS_I945G(dev) || IS_I945GM(dev)) {
4030 DRM_DEBUG_DRIVER("enable memory self refresh on 945\n");
4031 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN);
4032 }
4033
4014 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 4034 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
4015 /* Skip inactive CRTCs */ 4035 /* Skip inactive CRTCs */
4016 if (!crtc->fb) 4036 if (!crtc->fb)
@@ -4044,9 +4064,17 @@ void intel_mark_busy(struct drm_device *dev, struct drm_gem_object *obj)
4044 if (!drm_core_check_feature(dev, DRIVER_MODESET)) 4064 if (!drm_core_check_feature(dev, DRIVER_MODESET))
4045 return; 4065 return;
4046 4066
4047 if (!dev_priv->busy) 4067 if (!dev_priv->busy) {
4068 if (IS_I945G(dev) || IS_I945GM(dev)) {
4069 u32 fw_blc_self;
4070
4071 DRM_DEBUG_DRIVER("disable memory self refresh on 945\n");
4072 fw_blc_self = I915_READ(FW_BLC_SELF);
4073 fw_blc_self &= ~FW_BLC_SELF_EN;
4074 I915_WRITE(FW_BLC_SELF, fw_blc_self | FW_BLC_SELF_EN_MASK);
4075 }
4048 dev_priv->busy = true; 4076 dev_priv->busy = true;
4049 else 4077 } else
4050 mod_timer(&dev_priv->idle_timer, jiffies + 4078 mod_timer(&dev_priv->idle_timer, jiffies +
4051 msecs_to_jiffies(GPU_IDLE_TIMEOUT)); 4079 msecs_to_jiffies(GPU_IDLE_TIMEOUT));
4052 4080
@@ -4058,6 +4086,14 @@ void intel_mark_busy(struct drm_device *dev, struct drm_gem_object *obj)
4058 intel_fb = to_intel_framebuffer(crtc->fb); 4086 intel_fb = to_intel_framebuffer(crtc->fb);
4059 if (intel_fb->obj == obj) { 4087 if (intel_fb->obj == obj) {
4060 if (!intel_crtc->busy) { 4088 if (!intel_crtc->busy) {
4089 if (IS_I945G(dev) || IS_I945GM(dev)) {
4090 u32 fw_blc_self;
4091
4092 DRM_DEBUG_DRIVER("disable memory self refresh on 945\n");
4093 fw_blc_self = I915_READ(FW_BLC_SELF);
4094 fw_blc_self &= ~FW_BLC_SELF_EN;
4095 I915_WRITE(FW_BLC_SELF, fw_blc_self | FW_BLC_SELF_EN_MASK);
4096 }
4061 /* Non-busy -> busy, upclock */ 4097 /* Non-busy -> busy, upclock */
4062 intel_increase_pllclock(crtc, true); 4098 intel_increase_pllclock(crtc, true);
4063 intel_crtc->busy = true; 4099 intel_crtc->busy = true;
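
On 945, FW_BLC_SELF follows a masked-write convention: bit 31 (FW_BLC_SELF_EN_MASK) appears to act as a write-enable for the self-refresh bit, so both the idle path above (enable) and the two busy paths (disable) set it alongside the value they want. A sketch of the toggle; the helper name is illustrative and the write-enable reading is an inference from these hunks, not stated in them:

	/* Sketch: on 945, writes that change FW_BLC_SELF_EN seem to need
	 * the bit-31 write-enable set or they are ignored. */
	static void i945_self_refresh_sketch(struct drm_i915_private *dev_priv,
					     bool enable)
	{
		u32 fw_blc_self = I915_READ(FW_BLC_SELF);

		if (enable)
			fw_blc_self |= FW_BLC_SELF_EN;
		else
			fw_blc_self &= ~FW_BLC_SELF_EN;
		I915_WRITE(FW_BLC_SELF, fw_blc_self | FW_BLC_SELF_EN_MASK);
	}
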
@@ -4382,7 +4418,7 @@ static void intel_setup_outputs(struct drm_device *dev)
4382 if (IS_MOBILE(dev) && !IS_I830(dev)) 4418 if (IS_MOBILE(dev) && !IS_I830(dev))
4383 intel_lvds_init(dev); 4419 intel_lvds_init(dev);
4384 4420
4385 if (IS_IRONLAKE(dev)) { 4421 if (HAS_PCH_SPLIT(dev)) {
4386 int found; 4422 int found;
4387 4423
4388 if (IS_MOBILE(dev) && (I915_READ(DP_A) & DP_DETECTED)) 4424 if (IS_MOBILE(dev) && (I915_READ(DP_A) & DP_DETECTED))
@@ -4451,7 +4487,7 @@ static void intel_setup_outputs(struct drm_device *dev)
4451 DRM_DEBUG_KMS("probing DP_D\n"); 4487 DRM_DEBUG_KMS("probing DP_D\n");
4452 intel_dp_init(dev, DP_D); 4488 intel_dp_init(dev, DP_D);
4453 } 4489 }
4454 } else if (IS_I8XX(dev)) 4490 } else if (IS_GEN2(dev))
4455 intel_dvo_init(dev); 4491 intel_dvo_init(dev);
4456 4492
4457 if (SUPPORTS_TV(dev)) 4493 if (SUPPORTS_TV(dev))
@@ -4476,9 +4512,7 @@ static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
4476 intelfb_remove(dev, fb); 4512 intelfb_remove(dev, fb);
4477 4513
4478 drm_framebuffer_cleanup(fb); 4514 drm_framebuffer_cleanup(fb);
4479 mutex_lock(&dev->struct_mutex); 4515 drm_gem_object_unreference_unlocked(intel_fb->obj);
4480 drm_gem_object_unreference(intel_fb->obj);
4481 mutex_unlock(&dev->struct_mutex);
4482 4516
4483 kfree(intel_fb); 4517 kfree(intel_fb);
4484} 4518}
@@ -4541,9 +4575,7 @@ intel_user_framebuffer_create(struct drm_device *dev,
4541 4575
4542 ret = intel_framebuffer_create(dev, mode_cmd, &fb, obj); 4576 ret = intel_framebuffer_create(dev, mode_cmd, &fb, obj);
4543 if (ret) { 4577 if (ret) {
4544 mutex_lock(&dev->struct_mutex); 4578 drm_gem_object_unreference_unlocked(obj);
4545 drm_gem_object_unreference(obj);
4546 mutex_unlock(&dev->struct_mutex);
4547 return NULL; 4579 return NULL;
4548 } 4580 }
4549 4581
@@ -4591,6 +4623,91 @@ err_unref:
4591 return NULL; 4623 return NULL;
4592} 4624}
4593 4625
4626void ironlake_enable_drps(struct drm_device *dev)
4627{
4628 struct drm_i915_private *dev_priv = dev->dev_private;
4629 u32 rgvmodectl = I915_READ(MEMMODECTL), rgvswctl;
4630 u8 fmax, fmin, fstart, vstart;
4631 int i = 0;
4632
4633 /* 100ms RC evaluation intervals */
4634 I915_WRITE(RCUPEI, 100000);
4635 I915_WRITE(RCDNEI, 100000);
4636
4637 /* Set max/min thresholds to 90ms and 80ms respectively */
4638 I915_WRITE(RCBMAXAVG, 90000);
4639 I915_WRITE(RCBMINAVG, 80000);
4640
4641 I915_WRITE(MEMIHYST, 1);
4642
4643 /* Set up min, max, and cur for interrupt handling */
4644 fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
4645 fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
4646 fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
4647 MEMMODE_FSTART_SHIFT;
4648 vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >>
4649 PXVFREQ_PX_SHIFT;
4650
4651 dev_priv->max_delay = fstart; /* can't go to fmax w/o IPS */
4652 dev_priv->min_delay = fmin;
4653 dev_priv->cur_delay = fstart;
4654
4655 I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);
4656
4657 /*
4658 * Interrupts will be enabled in ironlake_irq_postinstall
4659 */
4660
4661 I915_WRITE(VIDSTART, vstart);
4662 POSTING_READ(VIDSTART);
4663
4664 rgvmodectl |= MEMMODE_SWMODE_EN;
4665 I915_WRITE(MEMMODECTL, rgvmodectl);
4666
4667 while (I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) {
4668 if (i++ > 100) {
4669 DRM_ERROR("stuck trying to change perf mode\n");
4670 break;
4671 }
4672 msleep(1);
4673 }
4674 msleep(1);
4675
4676 rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
4677 (fstart << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
4678 I915_WRITE(MEMSWCTL, rgvswctl);
4679 POSTING_READ(MEMSWCTL);
4680
4681 rgvswctl |= MEMCTL_CMD_STS;
4682 I915_WRITE(MEMSWCTL, rgvswctl);
4683}
4684
4685void ironlake_disable_drps(struct drm_device *dev)
4686{
4687 struct drm_i915_private *dev_priv = dev->dev_private;
4688 u32 rgvswctl;
4689 u8 fstart;
4690
4691 /* Ack interrupts, disable EFC interrupt */
4692 I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN);
4693 I915_WRITE(MEMINTRSTS, MEMINT_EVAL_CHG);
4694 I915_WRITE(DEIER, I915_READ(DEIER) & ~DE_PCU_EVENT);
4695 I915_WRITE(DEIIR, DE_PCU_EVENT);
4696 I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);
4697
4698 /* Go back to the starting frequency */
4699 fstart = (I915_READ(MEMMODECTL) & MEMMODE_FSTART_MASK) >>
4700 MEMMODE_FSTART_SHIFT;
4701 rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
4702 (fstart << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
4703 I915_WRITE(MEMSWCTL, rgvswctl);
4704 msleep(1);
4705 rgvswctl |= MEMCTL_CMD_STS;
4706 I915_WRITE(MEMSWCTL, rgvswctl);
4707 msleep(1);
4708
4709}
4710
4594void intel_init_clock_gating(struct drm_device *dev) 4711void intel_init_clock_gating(struct drm_device *dev)
4595{ 4712{
4596 struct drm_i915_private *dev_priv = dev->dev_private; 4713 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -4599,7 +4716,7 @@ void intel_init_clock_gating(struct drm_device *dev)
4599 * Disable clock gating reported to work incorrectly according to the 4716 * Disable clock gating reported to work incorrectly according to the
4600 * specs, but enable as much else as we can. 4717 * specs, but enable as much else as we can.
4601 */ 4718 */
4602 if (IS_IRONLAKE(dev)) { 4719 if (HAS_PCH_SPLIT(dev)) {
4603 return; 4720 return;
4604 } else if (IS_G4X(dev)) { 4721 } else if (IS_G4X(dev)) {
4605 uint32_t dspclk_gate; 4722 uint32_t dspclk_gate;
@@ -4672,7 +4789,7 @@ static void intel_init_display(struct drm_device *dev)
4672 struct drm_i915_private *dev_priv = dev->dev_private; 4789 struct drm_i915_private *dev_priv = dev->dev_private;
4673 4790
4674 /* We always want a DPMS function */ 4791 /* We always want a DPMS function */
4675 if (IS_IRONLAKE(dev)) 4792 if (HAS_PCH_SPLIT(dev))
4676 dev_priv->display.dpms = ironlake_crtc_dpms; 4793 dev_priv->display.dpms = ironlake_crtc_dpms;
4677 else 4794 else
4678 dev_priv->display.dpms = i9xx_crtc_dpms; 4795 dev_priv->display.dpms = i9xx_crtc_dpms;
@@ -4715,7 +4832,7 @@ static void intel_init_display(struct drm_device *dev)
4715 i830_get_display_clock_speed; 4832 i830_get_display_clock_speed;
4716 4833
4717 /* For FIFO watermark updates */ 4834 /* For FIFO watermark updates */
4718 if (IS_IRONLAKE(dev)) 4835 if (HAS_PCH_SPLIT(dev))
4719 dev_priv->display.update_wm = NULL; 4836 dev_priv->display.update_wm = NULL;
4720 else if (IS_G4X(dev)) 4837 else if (IS_G4X(dev))
4721 dev_priv->display.update_wm = g4x_update_wm; 4838 dev_priv->display.update_wm = g4x_update_wm;
@@ -4774,11 +4891,6 @@ void intel_modeset_init(struct drm_device *dev)
4774 DRM_DEBUG_KMS("%d display pipe%s available.\n", 4891 DRM_DEBUG_KMS("%d display pipe%s available.\n",
4775 num_pipe, num_pipe > 1 ? "s" : ""); 4892 num_pipe, num_pipe > 1 ? "s" : "");
4776 4893
4777 if (IS_I85X(dev))
4778 pci_read_config_word(dev->pdev, HPLLCC, &dev_priv->orig_clock);
4779 else if (IS_I9XX(dev) || IS_G4X(dev))
4780 pci_read_config_word(dev->pdev, GCFGC, &dev_priv->orig_clock);
4781
4782 for (i = 0; i < num_pipe; i++) { 4894 for (i = 0; i < num_pipe; i++) {
4783 intel_crtc_init(dev, i); 4895 intel_crtc_init(dev, i);
4784 } 4896 }
@@ -4787,6 +4899,9 @@ void intel_modeset_init(struct drm_device *dev)
4787 4899
4788 intel_init_clock_gating(dev); 4900 intel_init_clock_gating(dev);
4789 4901
4902 if (IS_IRONLAKE_M(dev))
4903 ironlake_enable_drps(dev);
4904
4790 INIT_WORK(&dev_priv->idle_work, intel_idle_update); 4905 INIT_WORK(&dev_priv->idle_work, intel_idle_update);
4791 setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer, 4906 setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer,
4792 (unsigned long)dev); 4907 (unsigned long)dev);
@@ -4834,6 +4949,9 @@ void intel_modeset_cleanup(struct drm_device *dev)
4834 drm_gem_object_unreference(dev_priv->pwrctx); 4949 drm_gem_object_unreference(dev_priv->pwrctx);
4835 } 4950 }
4836 4951
4952 if (IS_IRONLAKE_M(dev))
4953 ironlake_disable_drps(dev);
4954
4837 mutex_unlock(&dev->struct_mutex); 4955 mutex_unlock(&dev->struct_mutex);
4838 4956
4839 drm_mode_config_cleanup(dev); 4957 drm_mode_config_cleanup(dev);
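
The new ironlake_enable_drps() drives frequency changes through a software handshake on MEMSWCTL: poll MEMCTL_CMD_STS until the previous command retires, program the command and target jitter, then write the register again with the status bit set to fire it; hardware clears the bit on completion. That pattern, pulled out as a sketch (helper name illustrative):

	/* Sketch of the MEMSWCTL submit/poll handshake used by the new
	 * ironlake_enable_drps()/ironlake_disable_drps() above. */
	static bool memswctl_submit_sketch(struct drm_i915_private *dev_priv,
					   u8 cmd, u8 freq)
	{
		u32 rgvswctl;
		int tries = 100;

		while ((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) && tries--)
			msleep(1);	/* prior command still in flight */
		if (tries < 0)
			return false;	/* stuck changing perf mode */

		rgvswctl = (cmd << MEMCTL_CMD_SHIFT) |
			   (freq << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
		I915_WRITE(MEMSWCTL, rgvswctl);
		POSTING_READ(MEMSWCTL);
		I915_WRITE(MEMSWCTL, rgvswctl | MEMCTL_CMD_STS);	/* go */
		return true;
	}
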
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 439506cefc14..3ef3a0d0edd0 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -231,7 +231,7 @@ intel_dp_aux_ch(struct intel_output *intel_output,
231 */ 231 */
232 if (IS_eDP(intel_output)) 232 if (IS_eDP(intel_output))
233 aux_clock_divider = 225; /* eDP input clock at 450Mhz */ 233 aux_clock_divider = 225; /* eDP input clock at 450Mhz */
234 else if (IS_IRONLAKE(dev)) 234 else if (HAS_PCH_SPLIT(dev))
235 aux_clock_divider = 62; /* IRL input clock fixed at 125Mhz */ 235 aux_clock_divider = 62; /* IRL input clock fixed at 125Mhz */
236 else 236 else
237 aux_clock_divider = intel_hrawclk(dev) / 2; 237 aux_clock_divider = intel_hrawclk(dev) / 2;
@@ -584,7 +584,7 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
584 intel_dp_compute_m_n(3, lane_count, 584 intel_dp_compute_m_n(3, lane_count,
585 mode->clock, adjusted_mode->clock, &m_n); 585 mode->clock, adjusted_mode->clock, &m_n);
586 586
587 if (IS_IRONLAKE(dev)) { 587 if (HAS_PCH_SPLIT(dev)) {
588 if (intel_crtc->pipe == 0) { 588 if (intel_crtc->pipe == 0) {
589 I915_WRITE(TRANSA_DATA_M1, 589 I915_WRITE(TRANSA_DATA_M1,
590 ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) | 590 ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) |
@@ -1176,7 +1176,7 @@ intel_dp_detect(struct drm_connector *connector)
1176 1176
1177 dp_priv->has_audio = false; 1177 dp_priv->has_audio = false;
1178 1178
1179 if (IS_IRONLAKE(dev)) 1179 if (HAS_PCH_SPLIT(dev))
1180 return ironlake_dp_detect(connector); 1180 return ironlake_dp_detect(connector);
1181 1181
1182 temp = I915_READ(PORT_HOTPLUG_EN); 1182 temp = I915_READ(PORT_HOTPLUG_EN);
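
The AUX channel divider above is simply the source clock feeding the AUX block divided down toward the nominal 2MHz AUX bit clock: a fixed 450MHz for eDP (divider 225), the fixed 125MHz PCH rawclk (divider 62), or half the raw core display clock otherwise. Restated standalone (the sample rawclk value is illustrative):

	#include <stdbool.h>
	#include <stdio.h>

	/* Sketch: divider = source clock (MHz) / 2, targeting ~2MHz. */
	static int aux_clock_divider(bool is_edp, bool has_pch_split,
				     int hrawclk_mhz)
	{
		if (is_edp)
			return 225;	/* 450MHz eDP input clock */
		if (has_pch_split)
			return 62;	/* 125MHz PCH rawclk */
		return hrawclk_mhz / 2;
	}

	int main(void)
	{
		printf("%d\n", aux_clock_divider(false, false, 200)); /* 100 */
		return 0;
	}
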
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index a51573da1ff6..3a467ca57857 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -209,6 +209,8 @@ extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
209extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green, 209extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
210 u16 *blue, int regno); 210 u16 *blue, int regno);
211extern void intel_init_clock_gating(struct drm_device *dev); 211extern void intel_init_clock_gating(struct drm_device *dev);
212extern void ironlake_enable_drps(struct drm_device *dev);
213extern void ironlake_disable_drps(struct drm_device *dev);
212 214
213extern int intel_framebuffer_create(struct drm_device *dev, 215extern int intel_framebuffer_create(struct drm_device *dev,
214 struct drm_mode_fb_cmd *mode_cmd, 216 struct drm_mode_fb_cmd *mode_cmd,
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index aaabbcbe5905..8cd791dc5b29 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -35,6 +35,7 @@
35#include <linux/delay.h> 35#include <linux/delay.h>
36#include <linux/fb.h> 36#include <linux/fb.h>
37#include <linux/init.h> 37#include <linux/init.h>
38#include <linux/vga_switcheroo.h>
38 39
39#include "drmP.h" 40#include "drmP.h"
40#include "drm.h" 41#include "drm.h"
@@ -235,6 +236,7 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width,
235 obj_priv->gtt_offset, fbo); 236 obj_priv->gtt_offset, fbo);
236 237
237 mutex_unlock(&dev->struct_mutex); 238 mutex_unlock(&dev->struct_mutex);
239 vga_switcheroo_client_fb_set(dev->pdev, info);
238 return 0; 240 return 0;
239 241
240out_unpin: 242out_unpin:
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 0e268deed761..a30f8bfc1985 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -82,7 +82,7 @@ static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode)
82 /* HW workaround, need to toggle enable bit off and on for 12bpc, but 82 /* HW workaround, need to toggle enable bit off and on for 12bpc, but
83 * we do this anyway which shows more stable in testing. 83 * we do this anyway which shows more stable in testing.
84 */ 84 */
85 if (IS_IRONLAKE(dev)) { 85 if (HAS_PCH_SPLIT(dev)) {
86 I915_WRITE(hdmi_priv->sdvox_reg, temp & ~SDVO_ENABLE); 86 I915_WRITE(hdmi_priv->sdvox_reg, temp & ~SDVO_ENABLE);
87 POSTING_READ(hdmi_priv->sdvox_reg); 87 POSTING_READ(hdmi_priv->sdvox_reg);
88 } 88 }
@@ -99,7 +99,7 @@ static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode)
99 /* HW workaround, need to write this twice for issue that may result 99 /* HW workaround, need to write this twice for issue that may result
100 * in first write getting masked. 100 * in first write getting masked.
101 */ 101 */
102 if (IS_IRONLAKE(dev)) { 102 if (HAS_PCH_SPLIT(dev)) {
103 I915_WRITE(hdmi_priv->sdvox_reg, temp); 103 I915_WRITE(hdmi_priv->sdvox_reg, temp);
104 POSTING_READ(hdmi_priv->sdvox_reg); 104 POSTING_READ(hdmi_priv->sdvox_reg);
105 } 105 }
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index 8673c735b8ab..fcc753ca5d94 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -128,7 +128,7 @@ intel_i2c_reset_gmbus(struct drm_device *dev)
128{ 128{
129 struct drm_i915_private *dev_priv = dev->dev_private; 129 struct drm_i915_private *dev_priv = dev->dev_private;
130 130
131 if (IS_IRONLAKE(dev)) { 131 if (HAS_PCH_SPLIT(dev)) {
132 I915_WRITE(PCH_GMBUS0, 0); 132 I915_WRITE(PCH_GMBUS0, 0);
133 } else { 133 } else {
134 I915_WRITE(GMBUS0, 0); 134 I915_WRITE(GMBUS0, 0);
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index c2e8a45780d5..14e516fdc2dd 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -56,7 +56,7 @@ static void intel_lvds_set_backlight(struct drm_device *dev, int level)
56 struct drm_i915_private *dev_priv = dev->dev_private; 56 struct drm_i915_private *dev_priv = dev->dev_private;
57 u32 blc_pwm_ctl, reg; 57 u32 blc_pwm_ctl, reg;
58 58
59 if (IS_IRONLAKE(dev)) 59 if (HAS_PCH_SPLIT(dev))
60 reg = BLC_PWM_CPU_CTL; 60 reg = BLC_PWM_CPU_CTL;
61 else 61 else
62 reg = BLC_PWM_CTL; 62 reg = BLC_PWM_CTL;
@@ -74,7 +74,7 @@ static u32 intel_lvds_get_max_backlight(struct drm_device *dev)
74 struct drm_i915_private *dev_priv = dev->dev_private; 74 struct drm_i915_private *dev_priv = dev->dev_private;
75 u32 reg; 75 u32 reg;
76 76
77 if (IS_IRONLAKE(dev)) 77 if (HAS_PCH_SPLIT(dev))
78 reg = BLC_PWM_PCH_CTL2; 78 reg = BLC_PWM_PCH_CTL2;
79 else 79 else
80 reg = BLC_PWM_CTL; 80 reg = BLC_PWM_CTL;
@@ -89,17 +89,22 @@ static u32 intel_lvds_get_max_backlight(struct drm_device *dev)
89static void intel_lvds_set_power(struct drm_device *dev, bool on) 89static void intel_lvds_set_power(struct drm_device *dev, bool on)
90{ 90{
91 struct drm_i915_private *dev_priv = dev->dev_private; 91 struct drm_i915_private *dev_priv = dev->dev_private;
92 u32 pp_status, ctl_reg, status_reg; 92 u32 pp_status, ctl_reg, status_reg, lvds_reg;
93 93
94 if (IS_IRONLAKE(dev)) { 94 if (HAS_PCH_SPLIT(dev)) {
95 ctl_reg = PCH_PP_CONTROL; 95 ctl_reg = PCH_PP_CONTROL;
96 status_reg = PCH_PP_STATUS; 96 status_reg = PCH_PP_STATUS;
97 lvds_reg = PCH_LVDS;
97 } else { 98 } else {
98 ctl_reg = PP_CONTROL; 99 ctl_reg = PP_CONTROL;
99 status_reg = PP_STATUS; 100 status_reg = PP_STATUS;
101 lvds_reg = LVDS;
100 } 102 }
101 103
102 if (on) { 104 if (on) {
105 I915_WRITE(lvds_reg, I915_READ(lvds_reg) | LVDS_PORT_EN);
106 POSTING_READ(lvds_reg);
107
103 I915_WRITE(ctl_reg, I915_READ(ctl_reg) | 108 I915_WRITE(ctl_reg, I915_READ(ctl_reg) |
104 POWER_TARGET_ON); 109 POWER_TARGET_ON);
105 do { 110 do {
@@ -115,6 +120,9 @@ static void intel_lvds_set_power(struct drm_device *dev, bool on)
115 do { 120 do {
116 pp_status = I915_READ(status_reg); 121 pp_status = I915_READ(status_reg);
117 } while (pp_status & PP_ON); 122 } while (pp_status & PP_ON);
123
124 I915_WRITE(lvds_reg, I915_READ(lvds_reg) & ~LVDS_PORT_EN);
125 POSTING_READ(lvds_reg);
118 } 126 }
119} 127}
120 128
@@ -137,7 +145,7 @@ static void intel_lvds_save(struct drm_connector *connector)
137 u32 pp_on_reg, pp_off_reg, pp_ctl_reg, pp_div_reg; 145 u32 pp_on_reg, pp_off_reg, pp_ctl_reg, pp_div_reg;
138 u32 pwm_ctl_reg; 146 u32 pwm_ctl_reg;
139 147
140 if (IS_IRONLAKE(dev)) { 148 if (HAS_PCH_SPLIT(dev)) {
141 pp_on_reg = PCH_PP_ON_DELAYS; 149 pp_on_reg = PCH_PP_ON_DELAYS;
142 pp_off_reg = PCH_PP_OFF_DELAYS; 150 pp_off_reg = PCH_PP_OFF_DELAYS;
143 pp_ctl_reg = PCH_PP_CONTROL; 151 pp_ctl_reg = PCH_PP_CONTROL;
@@ -174,7 +182,7 @@ static void intel_lvds_restore(struct drm_connector *connector)
174 u32 pp_on_reg, pp_off_reg, pp_ctl_reg, pp_div_reg; 182 u32 pp_on_reg, pp_off_reg, pp_ctl_reg, pp_div_reg;
175 u32 pwm_ctl_reg; 183 u32 pwm_ctl_reg;
176 184
177 if (IS_IRONLAKE(dev)) { 185 if (HAS_PCH_SPLIT(dev)) {
178 pp_on_reg = PCH_PP_ON_DELAYS; 186 pp_on_reg = PCH_PP_ON_DELAYS;
179 pp_off_reg = PCH_PP_OFF_DELAYS; 187 pp_off_reg = PCH_PP_OFF_DELAYS;
180 pp_ctl_reg = PCH_PP_CONTROL; 188 pp_ctl_reg = PCH_PP_CONTROL;
@@ -297,7 +305,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
297 } 305 }
298 306
299 /* full screen scale for now */ 307 /* full screen scale for now */
300 if (IS_IRONLAKE(dev)) 308 if (HAS_PCH_SPLIT(dev))
301 goto out; 309 goto out;
302 310
303 /* 965+ wants fuzzy fitting */ 311 /* 965+ wants fuzzy fitting */
@@ -327,7 +335,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
327 * to register description and PRM. 335 * to register description and PRM.
328 * Change the value here to see the borders for debugging 336 * Change the value here to see the borders for debugging
329 */ 337 */
330 if (!IS_IRONLAKE(dev)) { 338 if (!HAS_PCH_SPLIT(dev)) {
331 I915_WRITE(BCLRPAT_A, 0); 339 I915_WRITE(BCLRPAT_A, 0);
332 I915_WRITE(BCLRPAT_B, 0); 340 I915_WRITE(BCLRPAT_B, 0);
333 } 341 }
@@ -548,7 +556,7 @@ static void intel_lvds_prepare(struct drm_encoder *encoder)
548 struct drm_i915_private *dev_priv = dev->dev_private; 556 struct drm_i915_private *dev_priv = dev->dev_private;
549 u32 reg; 557 u32 reg;
550 558
551 if (IS_IRONLAKE(dev)) 559 if (HAS_PCH_SPLIT(dev))
552 reg = BLC_PWM_CPU_CTL; 560 reg = BLC_PWM_CPU_CTL;
553 else 561 else
554 reg = BLC_PWM_CTL; 562 reg = BLC_PWM_CTL;
@@ -587,7 +595,7 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder,
587 * settings. 595 * settings.
588 */ 596 */
589 597
590 if (IS_IRONLAKE(dev)) 598 if (HAS_PCH_SPLIT(dev))
591 return; 599 return;
592 600
593 /* 601 /*
@@ -655,8 +663,15 @@ static const struct dmi_system_id bad_lid_status[] = {
655 */ 663 */
656static enum drm_connector_status intel_lvds_detect(struct drm_connector *connector) 664static enum drm_connector_status intel_lvds_detect(struct drm_connector *connector)
657{ 665{
666 struct drm_device *dev = connector->dev;
658 enum drm_connector_status status = connector_status_connected; 667 enum drm_connector_status status = connector_status_connected;
659 668
669 /* ACPI lid methods were generally unreliable in this generation, so
670 * don't even bother.
671 */
672 if (IS_GEN2(dev))
673 return connector_status_connected;
674
660 if (!dmi_check_system(bad_lid_status) && !acpi_lid_open()) 675 if (!dmi_check_system(bad_lid_status) && !acpi_lid_open())
661 status = connector_status_disconnected; 676 status = connector_status_disconnected;
662 677
@@ -1020,7 +1035,7 @@ void intel_lvds_init(struct drm_device *dev)
1020 return; 1035 return;
1021 } 1036 }
1022 1037
1023 if (IS_IRONLAKE(dev)) { 1038 if (HAS_PCH_SPLIT(dev)) {
1024 if ((I915_READ(PCH_LVDS) & LVDS_DETECTED) == 0) 1039 if ((I915_READ(PCH_LVDS) & LVDS_DETECTED) == 0)
1025 return; 1040 return;
1026 if (dev_priv->edp_support) { 1041 if (dev_priv->edp_support) {
@@ -1123,7 +1138,7 @@ void intel_lvds_init(struct drm_device *dev)
1123 */ 1138 */
1124 1139
1125 /* Ironlake: FIXME if still fail, not try pipe mode now */ 1140 /* Ironlake: FIXME if still fail, not try pipe mode now */
1126 if (IS_IRONLAKE(dev)) 1141 if (HAS_PCH_SPLIT(dev))
1127 goto failed; 1142 goto failed;
1128 1143
1129 lvds = I915_READ(LVDS); 1144 lvds = I915_READ(LVDS);
@@ -1144,7 +1159,7 @@ void intel_lvds_init(struct drm_device *dev)
1144 goto failed; 1159 goto failed;
1145 1160
1146out: 1161out:
1147 if (IS_IRONLAKE(dev)) { 1162 if (HAS_PCH_SPLIT(dev)) {
1148 u32 pwm; 1163 u32 pwm;
1149 /* make sure PWM is enabled */ 1164 /* make sure PWM is enabled */
1150 pwm = I915_READ(BLC_PWM_CPU_CTL2); 1165 pwm = I915_READ(BLC_PWM_CPU_CTL2);
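
The intel_lvds_set_power() hunks order panel power against the port enable: the LVDS port is turned on (with a posting read) before POWER_TARGET_ON is asserted, and turned off only after PP_STATUS shows the panel fully down, so the panel never power-sequences without an active port. A condensed sketch of the resulting order, using the PCH register names from the hunk (the PCH/non-PCH register selection is elided):

	/* Sketch of the power/port ordering the hunk enforces. */
	static void lvds_power_sketch(struct drm_i915_private *dev_priv, bool on)
	{
		if (on) {
			I915_WRITE(PCH_LVDS, I915_READ(PCH_LVDS) | LVDS_PORT_EN);
			POSTING_READ(PCH_LVDS);		/* port up first */
			I915_WRITE(PCH_PP_CONTROL,
				   I915_READ(PCH_PP_CONTROL) | POWER_TARGET_ON);
			while (!(I915_READ(PCH_PP_STATUS) & PP_ON))
				cpu_relax();		/* wait for panel on */
		} else {
			I915_WRITE(PCH_PP_CONTROL,
				   I915_READ(PCH_PP_CONTROL) & ~POWER_TARGET_ON);
			while (I915_READ(PCH_PP_STATUS) & PP_ON)
				cpu_relax();		/* wait for panel off */
			I915_WRITE(PCH_LVDS, I915_READ(PCH_LVDS) & ~LVDS_PORT_EN);
			POSTING_READ(PCH_LVDS);		/* port down last */
		}
	}
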
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 2639591c72e9..d355d1d527e7 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -172,7 +172,7 @@ struct overlay_registers {
172#define OFC_UPDATE 0x1 172#define OFC_UPDATE 0x1
173 173
174#define OVERLAY_NONPHYSICAL(dev) (IS_G33(dev) || IS_I965G(dev)) 174#define OVERLAY_NONPHYSICAL(dev) (IS_G33(dev) || IS_I965G(dev))
175#define OVERLAY_EXISTS(dev) (!IS_G4X(dev) && !IS_IRONLAKE(dev)) 175#define OVERLAY_EXISTS(dev) (!IS_G4X(dev) && !IS_IRONLAKE(dev) && !IS_GEN6(dev))
176 176
177 177
178static struct overlay_registers *intel_overlay_map_regs_atomic(struct intel_overlay *overlay) 178static struct overlay_registers *intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
@@ -199,16 +199,11 @@ static struct overlay_registers *intel_overlay_map_regs_atomic(struct intel_over
199 199
200static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay) 200static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay)
201{ 201{
202 struct drm_device *dev = overlay->dev;
203 drm_i915_private_t *dev_priv = dev->dev_private;
204
205 if (OVERLAY_NONPHYSICAL(overlay->dev)) 202 if (OVERLAY_NONPHYSICAL(overlay->dev))
206 io_mapping_unmap_atomic(overlay->virt_addr); 203 io_mapping_unmap_atomic(overlay->virt_addr);
207 204
208 overlay->virt_addr = NULL; 205 overlay->virt_addr = NULL;
209 206
210 I915_READ(OVADD); /* flush wc cashes */
211
212 return; 207 return;
213} 208}
214 209
@@ -225,9 +220,7 @@ static int intel_overlay_on(struct intel_overlay *overlay)
225 overlay->active = 1; 220 overlay->active = 1;
226 overlay->hw_wedged = NEEDS_WAIT_FOR_FLIP; 221 overlay->hw_wedged = NEEDS_WAIT_FOR_FLIP;
227 222
228 BEGIN_LP_RING(6); 223 BEGIN_LP_RING(4);
229 OUT_RING(MI_FLUSH);
230 OUT_RING(MI_NOOP);
231 OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_ON); 224 OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_ON);
232 OUT_RING(overlay->flip_addr | OFC_UPDATE); 225 OUT_RING(overlay->flip_addr | OFC_UPDATE);
233 OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); 226 OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
@@ -267,9 +260,7 @@ static void intel_overlay_continue(struct intel_overlay *overlay,
267 if (tmp & (1 << 17)) 260 if (tmp & (1 << 17))
268 DRM_DEBUG("overlay underrun, DOVSTA: %x\n", tmp); 261 DRM_DEBUG("overlay underrun, DOVSTA: %x\n", tmp);
269 262
270 BEGIN_LP_RING(4); 263 BEGIN_LP_RING(2);
271 OUT_RING(MI_FLUSH);
272 OUT_RING(MI_NOOP);
273 OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE); 264 OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
274 OUT_RING(flip_addr); 265 OUT_RING(flip_addr);
275 ADVANCE_LP_RING(); 266 ADVANCE_LP_RING();
@@ -338,9 +329,7 @@ static int intel_overlay_off(struct intel_overlay *overlay)
338 /* wait for overlay to go idle */ 329 /* wait for overlay to go idle */
339 overlay->hw_wedged = SWITCH_OFF_STAGE_1; 330 overlay->hw_wedged = SWITCH_OFF_STAGE_1;
340 331
341 BEGIN_LP_RING(6); 332 BEGIN_LP_RING(4);
342 OUT_RING(MI_FLUSH);
343 OUT_RING(MI_NOOP);
344 OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE); 333 OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
345 OUT_RING(flip_addr); 334 OUT_RING(flip_addr);
346 OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); 335 OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
@@ -358,9 +347,7 @@ static int intel_overlay_off(struct intel_overlay *overlay)
         /* turn overlay off */
         overlay->hw_wedged = SWITCH_OFF_STAGE_2;
 
-        BEGIN_LP_RING(6);
-        OUT_RING(MI_FLUSH);
-        OUT_RING(MI_NOOP);
+        BEGIN_LP_RING(4);
         OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
         OUT_RING(flip_addr);
         OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
@@ -435,9 +422,7 @@ int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay,
 
         overlay->hw_wedged = SWITCH_OFF_STAGE_2;
 
-        BEGIN_LP_RING(6);
-        OUT_RING(MI_FLUSH);
-        OUT_RING(MI_NOOP);
+        BEGIN_LP_RING(4);
         OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
         OUT_RING(flip_addr);
         OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
@@ -1179,7 +1164,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
 out_unlock:
         mutex_unlock(&dev->struct_mutex);
         mutex_unlock(&dev->mode_config.mutex);
-        drm_gem_object_unreference(new_bo);
+        drm_gem_object_unreference_unlocked(new_bo);
         kfree(params);
 
         return ret;
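
For reference: the BEGIN_LP_RING()/OUT_RING()/ADVANCE_LP_RING() macros used in the hunks above reserve space on the render ring and emit one dword per OUT_RING() call, so the argument to BEGIN_LP_RING() must match the number of dwords emitted; that is why dropping the MI_FLUSH/MI_NOOP pair shrinks each count by two. A minimal sketch of the pattern (illustrative only; example_overlay_on is a hypothetical helper, not part of this patch, and assumes dev and dev_priv are in scope as the ring macros of this era require):

static void example_overlay_on(struct drm_device *dev, u32 flip_addr)
{
        drm_i915_private_t *dev_priv = dev->dev_private;  /* used by the macros */

        BEGIN_LP_RING(4);                       /* reserve 4 dwords */
        OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_ON);
        OUT_RING(flip_addr | OFC_UPDATE);
        OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
        OUT_RING(MI_NOOP);                      /* pad to an even dword count */
        ADVANCE_LP_RING();
}
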
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 82678d30ab06..48daee5c9c63 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -35,6 +35,7 @@
 #include "i915_drm.h"
 #include "i915_drv.h"
 #include "intel_sdvo_regs.h"
+#include <linux/dmi.h>
 
 static char *tv_format_names[] = {
         "NTSC_M" , "NTSC_J" , "NTSC_443",
@@ -2283,6 +2284,25 @@ intel_sdvo_get_slave_addr(struct drm_device *dev, int output_device)
         return 0x72;
 }
 
+static int intel_sdvo_bad_tv_callback(const struct dmi_system_id *id)
+{
+        DRM_DEBUG_KMS("Ignoring bad SDVO TV connector for %s\n", id->ident);
+        return 1;
+}
+
+static struct dmi_system_id intel_sdvo_bad_tv[] = {
+        {
+                .callback = intel_sdvo_bad_tv_callback,
+                .ident = "IntelG45/ICH10R/DME1737",
+                .matches = {
+                        DMI_MATCH(DMI_SYS_VENDOR, "IBM CORPORATION"),
+                        DMI_MATCH(DMI_PRODUCT_NAME, "4800784"),
+                },
+        },
+
+        { }     /* terminating entry */
+};
+
 static bool
 intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags)
 {
@@ -2323,7 +2343,8 @@ intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags)
                         (1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
                         (1 << INTEL_ANALOG_CLONE_BIT);
                 }
-        } else if (flags & SDVO_OUTPUT_SVID0) {
+        } else if ((flags & SDVO_OUTPUT_SVID0) &&
+                   !dmi_check_system(intel_sdvo_bad_tv)) {
 
                 sdvo_priv->controlled_output = SDVO_OUTPUT_SVID0;
                 encoder->encoder_type = DRM_MODE_ENCODER_TVDAC;
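
For reference: the quirk above uses the kernel's DMI matching API. dmi_check_system() walks a table terminated by an empty entry, invokes the .callback of every entry whose .matches[] all hit, and returns the number of matching entries, so a non-zero return means the quirk applies. A minimal sketch of the pattern (illustrative only; all identifiers and match strings below are hypothetical, not part of this patch):

static int example_quirk_callback(const struct dmi_system_id *id)
{
        printk(KERN_INFO "applying quirk for %s\n", id->ident);
        return 1;       /* non-zero: stop walking the table */
}

static struct dmi_system_id example_quirk_table[] = {
        {
                .callback = example_quirk_callback,
                .ident = "Example machine",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "EXAMPLE VENDOR"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "EXAMPLE PRODUCT"),
                },
        },
        { }     /* terminating entry */
};

static bool example_output_blacklisted(void)
{
        /* returns the count of matching entries; non-zero means "skip it" */
        return dmi_check_system(example_quirk_table) != 0;
}
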
diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile
index 48c290b5da8c..32db806f3b5a 100644
--- a/drivers/gpu/drm/nouveau/Makefile
+++ b/drivers/gpu/drm/nouveau/Makefile
@@ -16,7 +16,7 @@ nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \
              nv04_fifo.o nv10_fifo.o nv40_fifo.o nv50_fifo.o \
              nv04_graph.o nv10_graph.o nv20_graph.o \
              nv40_graph.o nv50_graph.o \
-             nv40_grctx.o \
+             nv40_grctx.o nv50_grctx.o \
              nv04_instmem.o nv50_instmem.o \
              nv50_crtc.o nv50_dac.o nv50_sor.o \
              nv50_cursor.o nv50_display.o nv50_fbcon.o \
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index 48227e744753..0e0730a53137 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -11,6 +11,8 @@
 #include "nouveau_drm.h"
 #include "nv50_display.h"
 
+#include <linux/vga_switcheroo.h>
+
 #define NOUVEAU_DSM_SUPPORTED 0x00
 #define NOUVEAU_DSM_SUPPORTED_FUNCTIONS 0x00
 
@@ -28,31 +30,30 @@
 #define NOUVEAU_DSM_POWER_SPEED 0x01
 #define NOUVEAU_DSM_POWER_STAMINA 0x02
 
-static int nouveau_dsm(struct drm_device *dev, int func, int arg, int *result)
-{
-        static char muid[] = {
-                0xA0, 0xA0, 0x95, 0x9D, 0x60, 0x00, 0x48, 0x4D,
-                0xB3, 0x4D, 0x7E, 0x5F, 0xEA, 0x12, 0x9F, 0xD4,
-        };
+static struct nouveau_dsm_priv {
+        bool dsm_detected;
+        acpi_handle dhandle;
+        acpi_handle dsm_handle;
+} nouveau_dsm_priv;
+
+static const char nouveau_dsm_muid[] = {
+        0xA0, 0xA0, 0x95, 0x9D, 0x60, 0x00, 0x48, 0x4D,
+        0xB3, 0x4D, 0x7E, 0x5F, 0xEA, 0x12, 0x9F, 0xD4,
+};
 
-        struct pci_dev *pdev = dev->pdev;
-        struct acpi_handle *handle;
+static int nouveau_dsm(acpi_handle handle, int func, int arg, int *result)
+{
         struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
         struct acpi_object_list input;
         union acpi_object params[4];
         union acpi_object *obj;
         int err;
 
-        handle = DEVICE_ACPI_HANDLE(&pdev->dev);
-
-        if (!handle)
-                return -ENODEV;
-
         input.count = 4;
         input.pointer = params;
         params[0].type = ACPI_TYPE_BUFFER;
-        params[0].buffer.length = sizeof(muid);
-        params[0].buffer.pointer = (char *)muid;
+        params[0].buffer.length = sizeof(nouveau_dsm_muid);
+        params[0].buffer.pointer = (char *)nouveau_dsm_muid;
         params[1].type = ACPI_TYPE_INTEGER;
         params[1].integer.value = 0x00000102;
         params[2].type = ACPI_TYPE_INTEGER;
@@ -62,7 +63,7 @@ static int nouveau_dsm(struct drm_device *dev, int func, int arg, int *result)
 
         err = acpi_evaluate_object(handle, "_DSM", &input, &output);
         if (err) {
-                NV_INFO(dev, "failed to evaluate _DSM: %d\n", err);
+                printk(KERN_INFO "failed to evaluate _DSM: %d\n", err);
                 return err;
         }
 
@@ -86,40 +87,119 @@ static int nouveau_dsm(struct drm_device *dev, int func, int arg, int *result)
         return 0;
 }
 
-int nouveau_hybrid_setup(struct drm_device *dev)
+static int nouveau_dsm_switch_mux(acpi_handle handle, int mux_id)
 {
-        int result;
-
-        if (nouveau_dsm(dev, NOUVEAU_DSM_POWER, NOUVEAU_DSM_POWER_STATE,
-                        &result))
-                return -ENODEV;
-
-        NV_INFO(dev, "_DSM hardware status gave 0x%x\n", result);
-
-        if (result) { /* Ensure that the external GPU is enabled */
-                nouveau_dsm(dev, NOUVEAU_DSM_LED, NOUVEAU_DSM_LED_SPEED, NULL);
-                nouveau_dsm(dev, NOUVEAU_DSM_POWER, NOUVEAU_DSM_POWER_SPEED,
-                            NULL);
-        } else { /* Stamina mode - disable the external GPU */
-                nouveau_dsm(dev, NOUVEAU_DSM_LED, NOUVEAU_DSM_LED_STAMINA,
-                            NULL);
-                nouveau_dsm(dev, NOUVEAU_DSM_POWER, NOUVEAU_DSM_POWER_STAMINA,
-                            NULL);
-        }
+        return nouveau_dsm(handle, NOUVEAU_DSM_LED, mux_id, NULL);
+}
+
+static int nouveau_dsm_set_discrete_state(acpi_handle handle, enum vga_switcheroo_state state)
+{
+        int arg;
+        if (state == VGA_SWITCHEROO_ON)
+                arg = NOUVEAU_DSM_POWER_SPEED;
+        else
+                arg = NOUVEAU_DSM_POWER_STAMINA;
+        nouveau_dsm(handle, NOUVEAU_DSM_POWER, arg, NULL);
+        return 0;
+}
+
+static int nouveau_dsm_switchto(enum vga_switcheroo_client_id id)
+{
+        if (id == VGA_SWITCHEROO_IGD)
+                return nouveau_dsm_switch_mux(nouveau_dsm_priv.dsm_handle, NOUVEAU_DSM_LED_STAMINA);
+        else
+                return nouveau_dsm_switch_mux(nouveau_dsm_priv.dsm_handle, NOUVEAU_DSM_LED_SPEED);
+}
 
+static int nouveau_dsm_power_state(enum vga_switcheroo_client_id id,
+                                   enum vga_switcheroo_state state)
+{
+        if (id == VGA_SWITCHEROO_IGD)
+                return 0;
+
+        return nouveau_dsm_set_discrete_state(nouveau_dsm_priv.dsm_handle, state);
+}
+
+static int nouveau_dsm_init(void)
+{
         return 0;
 }
 
-bool nouveau_dsm_probe(struct drm_device *dev)
+static int nouveau_dsm_get_client_id(struct pci_dev *pdev)
 {
-        int support = 0;
+        if (nouveau_dsm_priv.dhandle == DEVICE_ACPI_HANDLE(&pdev->dev))
+                return VGA_SWITCHEROO_IGD;
+        else
+                return VGA_SWITCHEROO_DIS;
+}
+
+static struct vga_switcheroo_handler nouveau_dsm_handler = {
+        .switchto = nouveau_dsm_switchto,
+        .power_state = nouveau_dsm_power_state,
+        .init = nouveau_dsm_init,
+        .get_client_id = nouveau_dsm_get_client_id,
+};
 
-        if (nouveau_dsm(dev, NOUVEAU_DSM_SUPPORTED,
-                        NOUVEAU_DSM_SUPPORTED_FUNCTIONS, &support))
+static bool nouveau_dsm_pci_probe(struct pci_dev *pdev)
+{
+        acpi_handle dhandle, nvidia_handle;
+        acpi_status status;
+        int ret;
+        uint32_t result;
+
+        dhandle = DEVICE_ACPI_HANDLE(&pdev->dev);
+        if (!dhandle)
+                return false;
+        status = acpi_get_handle(dhandle, "_DSM", &nvidia_handle);
+        if (ACPI_FAILURE(status)) {
                 return false;
+        }
 
-        if (!support)
+        ret= nouveau_dsm(nvidia_handle, NOUVEAU_DSM_SUPPORTED,
+                         NOUVEAU_DSM_SUPPORTED_FUNCTIONS, &result);
+        if (ret < 0)
                 return false;
 
+        nouveau_dsm_priv.dhandle = dhandle;
+        nouveau_dsm_priv.dsm_handle = nvidia_handle;
         return true;
 }
+
+static bool nouveau_dsm_detect(void)
+{
+        char acpi_method_name[255] = { 0 };
+        struct acpi_buffer buffer = {sizeof(acpi_method_name), acpi_method_name};
+        struct pci_dev *pdev = NULL;
+        int has_dsm = 0;
+        int vga_count = 0;
+        while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
+                vga_count++;
+
+                has_dsm |= (nouveau_dsm_pci_probe(pdev) == true);
+        }
+
+        if (vga_count == 2 && has_dsm) {
+                acpi_get_name(nouveau_dsm_priv.dsm_handle, ACPI_FULL_PATHNAME, &buffer);
+                printk(KERN_INFO "VGA switcheroo: detected DSM switching method %s handle\n",
+                       acpi_method_name);
+                nouveau_dsm_priv.dsm_detected = true;
+                return true;
+        }
+        return false;
+}
+
+void nouveau_register_dsm_handler(void)
+{
+        bool r;
+
+        r = nouveau_dsm_detect();
+        if (!r)
+                return;
+
+        vga_switcheroo_register_handler(&nouveau_dsm_handler);
+}
+
+void nouveau_unregister_dsm_handler(void)
+{
+        vga_switcheroo_unregister_handler();
+}
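
For reference: nouveau_dsm() above follows the standard ACPI _DSM calling convention, where Arg0 is a 16-byte UUID identifying the interface, Arg1 a revision ID (0x00000102 here), Arg2 the function index, and Arg3 a function-specific argument. A minimal sketch of that convention (illustrative only; example_dsm_call is hypothetical and trims the return-object handling that the real nouveau_dsm() performs):

static acpi_status example_dsm_call(acpi_handle handle, int func, int arg)
{
        struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
        union acpi_object params[4];
        struct acpi_object_list input = { 4, params };
        acpi_status status;

        params[0].type = ACPI_TYPE_BUFFER;              /* Arg0: interface UUID */
        params[0].buffer.length = sizeof(nouveau_dsm_muid);
        params[0].buffer.pointer = (char *)nouveau_dsm_muid;
        params[1].type = ACPI_TYPE_INTEGER;             /* Arg1: revision ID */
        params[1].integer.value = 0x00000102;
        params[2].type = ACPI_TYPE_INTEGER;             /* Arg2: function index */
        params[2].integer.value = func;
        params[3].type = ACPI_TYPE_INTEGER;             /* Arg3: argument */
        params[3].integer.value = arg;

        status = acpi_evaluate_object(handle, "_DSM", &input, &output);
        kfree(output.pointer);                          /* result ignored here */
        return status;
}
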
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index 0e9cd1d49130..71247da17da5 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -311,11 +311,11 @@ valid_reg(struct nvbios *bios, uint32_t reg)
 
         /* C51 has misaligned regs on purpose. Marvellous */
         if (reg & 0x2 ||
-            (reg & 0x1 && dev_priv->VBIOS.pub.chip_version != 0x51))
+            (reg & 0x1 && dev_priv->vbios.chip_version != 0x51))
                 NV_ERROR(dev, "======= misaligned reg 0x%08X =======\n", reg);
 
         /* warn on C51 regs that haven't been verified accessible in tracing */
-        if (reg & 0x1 && dev_priv->VBIOS.pub.chip_version == 0x51 &&
+        if (reg & 0x1 && dev_priv->vbios.chip_version == 0x51 &&
             reg != 0x130d && reg != 0x1311 && reg != 0x60081d)
                 NV_WARN(dev, "=== C51 misaligned reg 0x%08X not verified ===\n",
                         reg);
@@ -420,7 +420,7 @@ bios_wr32(struct nvbios *bios, uint32_t reg, uint32_t data)
         LOG_OLD_VALUE(bios_rd32(bios, reg));
         BIOSLOG(bios, " Write: Reg: 0x%08X, Data: 0x%08X\n", reg, data);
 
-        if (dev_priv->VBIOS.execute) {
+        if (dev_priv->vbios.execute) {
                 still_alive();
                 nv_wr32(bios->dev, reg, data);
         }
@@ -647,7 +647,7 @@ nv50_pll_set(struct drm_device *dev, uint32_t reg, uint32_t clk)
         reg0 = (reg0 & 0xfff8ffff) | (pll.log2P << 16);
         reg1 = (reg1 & 0xffff0000) | (pll.N1 << 8) | pll.M1;
 
-        if (dev_priv->VBIOS.execute) {
+        if (dev_priv->vbios.execute) {
                 still_alive();
                 nv_wr32(dev, reg + 4, reg1);
                 nv_wr32(dev, reg + 0, reg0);
@@ -689,7 +689,7 @@ setPLL(struct nvbios *bios, uint32_t reg, uint32_t clk)
 static int dcb_entry_idx_from_crtchead(struct drm_device *dev)
 {
         struct drm_nouveau_private *dev_priv = dev->dev_private;
-        struct nvbios *bios = &dev_priv->VBIOS;
+        struct nvbios *bios = &dev_priv->vbios;
 
         /*
          * For the results of this function to be correct, CR44 must have been
@@ -700,7 +700,7 @@ static int dcb_entry_idx_from_crtchead(struct drm_device *dev)
 
         uint8_t dcb_entry = NVReadVgaCrtc5758(dev, bios->state.crtchead, 0);
 
-        if (dcb_entry > bios->bdcb.dcb.entries) {
+        if (dcb_entry > bios->dcb.entries) {
                 NV_ERROR(dev, "CR58 doesn't have a valid DCB entry currently "
                               "(%02X)\n", dcb_entry);
                 dcb_entry = 0x7f; /* unused / invalid marker */
@@ -713,25 +713,26 @@ static struct nouveau_i2c_chan *
 init_i2c_device_find(struct drm_device *dev, int i2c_index)
 {
         struct drm_nouveau_private *dev_priv = dev->dev_private;
-        struct bios_parsed_dcb *bdcb = &dev_priv->VBIOS.bdcb;
+        struct dcb_table *dcb = &dev_priv->vbios.dcb;
 
         if (i2c_index == 0xff) {
                 /* note: dcb_entry_idx_from_crtchead needs pre-script set-up */
                 int idx = dcb_entry_idx_from_crtchead(dev), shift = 0;
-                int default_indices = bdcb->i2c_default_indices;
+                int default_indices = dcb->i2c_default_indices;
 
-                if (idx != 0x7f && bdcb->dcb.entry[idx].i2c_upper_default)
+                if (idx != 0x7f && dcb->entry[idx].i2c_upper_default)
                         shift = 4;
 
                 i2c_index = (default_indices >> shift) & 0xf;
         }
         if (i2c_index == 0x80) /* g80+ */
-                i2c_index = bdcb->i2c_default_indices & 0xf;
+                i2c_index = dcb->i2c_default_indices & 0xf;
 
         return nouveau_i2c_find(dev, i2c_index);
 }
 
-static uint32_t get_tmds_index_reg(struct drm_device *dev, uint8_t mlv)
+static uint32_t
+get_tmds_index_reg(struct drm_device *dev, uint8_t mlv)
 {
736 /* 737 /*
737 * For mlv < 0x80, it is an index into a table of TMDS base addresses. 738 * For mlv < 0x80, it is an index into a table of TMDS base addresses.
@@ -744,6 +745,7 @@ static uint32_t get_tmds_index_reg(struct drm_device *dev, uint8_t mlv)
744 */ 745 */
745 746
746 struct drm_nouveau_private *dev_priv = dev->dev_private; 747 struct drm_nouveau_private *dev_priv = dev->dev_private;
748 struct nvbios *bios = &dev_priv->vbios;
747 const int pramdac_offset[13] = { 749 const int pramdac_offset[13] = {
748 0, 0, 0x8, 0, 0x2000, 0, 0, 0, 0x2008, 0, 0, 0, 0x2000 }; 750 0, 0, 0x8, 0, 0x2000, 0, 0, 0, 0x2008, 0, 0, 0, 0x2000 };
749 const uint32_t pramdac_table[4] = { 751 const uint32_t pramdac_table[4] = {
@@ -756,13 +758,12 @@ static uint32_t get_tmds_index_reg(struct drm_device *dev, uint8_t mlv)
756 dcb_entry = dcb_entry_idx_from_crtchead(dev); 758 dcb_entry = dcb_entry_idx_from_crtchead(dev);
757 if (dcb_entry == 0x7f) 759 if (dcb_entry == 0x7f)
758 return 0; 760 return 0;
759 dacoffset = pramdac_offset[ 761 dacoffset = pramdac_offset[bios->dcb.entry[dcb_entry].or];
760 dev_priv->VBIOS.bdcb.dcb.entry[dcb_entry].or];
761 if (mlv == 0x81) 762 if (mlv == 0x81)
762 dacoffset ^= 8; 763 dacoffset ^= 8;
763 return 0x6808b0 + dacoffset; 764 return 0x6808b0 + dacoffset;
764 } else { 765 } else {
765 if (mlv > ARRAY_SIZE(pramdac_table)) { 766 if (mlv >= ARRAY_SIZE(pramdac_table)) {
766 NV_ERROR(dev, "Magic Lookup Value too big (%02X)\n", 767 NV_ERROR(dev, "Magic Lookup Value too big (%02X)\n",
767 mlv); 768 mlv);
768 return 0; 769 return 0;
@@ -2574,19 +2575,19 @@ init_gpio(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
 
         const uint32_t nv50_gpio_reg[4] = { 0xe104, 0xe108, 0xe280, 0xe284 };
         const uint32_t nv50_gpio_ctl[2] = { 0xe100, 0xe28c };
-        const uint8_t *gpio_table = &bios->data[bios->bdcb.gpio_table_ptr];
+        const uint8_t *gpio_table = &bios->data[bios->dcb.gpio_table_ptr];
         const uint8_t *gpio_entry;
         int i;
 
         if (!iexec->execute)
                 return 1;
 
-        if (bios->bdcb.version != 0x40) {
+        if (bios->dcb.version != 0x40) {
                 NV_ERROR(bios->dev, "DCB table not version 4.0\n");
                 return 0;
         }
 
-        if (!bios->bdcb.gpio_table_ptr) {
+        if (!bios->dcb.gpio_table_ptr) {
                 NV_WARN(bios->dev, "Invalid pointer to INIT_8E table\n");
                 return 0;
         }
@@ -3123,7 +3124,7 @@ run_digital_op_script(struct drm_device *dev, uint16_t scriptptr,
                         struct dcb_entry *dcbent, int head, bool dl)
 {
         struct drm_nouveau_private *dev_priv = dev->dev_private;
-        struct nvbios *bios = &dev_priv->VBIOS;
+        struct nvbios *bios = &dev_priv->vbios;
         struct init_exec iexec = {true, false};
 
         NV_TRACE(dev, "0x%04X: Parsing digital output script table\n",
@@ -3140,7 +3141,7 @@ run_digital_op_script(struct drm_device *dev, uint16_t scriptptr,
 static int call_lvds_manufacturer_script(struct drm_device *dev, struct dcb_entry *dcbent, int head, enum LVDS_script script)
 {
         struct drm_nouveau_private *dev_priv = dev->dev_private;
-        struct nvbios *bios = &dev_priv->VBIOS;
+        struct nvbios *bios = &dev_priv->vbios;
         uint8_t sub = bios->data[bios->fp.xlated_entry + script] + (bios->fp.link_c_increment && dcbent->or & OUTPUT_C ? 1 : 0);
         uint16_t scriptofs = ROM16(bios->data[bios->init_script_tbls_ptr + sub * 2]);
 
@@ -3194,7 +3195,7 @@ static int run_lvds_table(struct drm_device *dev, struct dcb_entry *dcbent, int
          * of a list of pxclks and script pointers.
          */
         struct drm_nouveau_private *dev_priv = dev->dev_private;
-        struct nvbios *bios = &dev_priv->VBIOS;
+        struct nvbios *bios = &dev_priv->vbios;
         unsigned int outputset = (dcbent->or == 4) ? 1 : 0;
         uint16_t scriptptr = 0, clktable;
         uint8_t clktableptr = 0;
@@ -3261,7 +3262,7 @@ int call_lvds_script(struct drm_device *dev, struct dcb_entry *dcbent, int head,
          */
 
         struct drm_nouveau_private *dev_priv = dev->dev_private;
-        struct nvbios *bios = &dev_priv->VBIOS;
+        struct nvbios *bios = &dev_priv->vbios;
         uint8_t lvds_ver = bios->data[bios->fp.lvdsmanufacturerpointer];
         uint32_t sel_clk_binding, sel_clk;
         int ret;
@@ -3395,7 +3396,7 @@ static int parse_fp_mode_table(struct drm_device *dev, struct nvbios *bios)
 #ifndef __powerpc__
                 NV_ERROR(dev, "Pointer to flat panel table invalid\n");
 #endif
-                bios->pub.digital_min_front_porch = 0x4b;
+                bios->digital_min_front_porch = 0x4b;
                 return 0;
         }
 
@@ -3428,7 +3429,7 @@ static int parse_fp_mode_table(struct drm_device *dev, struct nvbios *bios)
                  * fptable[4] is the minimum
                  * RAMDAC_FP_HCRTC -> RAMDAC_FP_HSYNC_START gap
                  */
-                bios->pub.digital_min_front_porch = fptable[4];
+                bios->digital_min_front_porch = fptable[4];
                 ofs = -7;
                 break;
         default:
@@ -3467,7 +3468,7 @@ static int parse_fp_mode_table(struct drm_device *dev, struct nvbios *bios)
 
         /* nv4x cards need both a strap value and fpindex of 0xf to use DDC */
         if (lth.lvds_ver > 0x10)
-                bios->pub.fp_no_ddc = fpstrapping != 0xf || fpindex != 0xf;
+                bios->fp_no_ddc = fpstrapping != 0xf || fpindex != 0xf;
 
         /*
          * If either the strap or xlated fpindex value are 0xf there is no
@@ -3491,7 +3492,7 @@ static int parse_fp_mode_table(struct drm_device *dev, struct nvbios *bios)
 bool nouveau_bios_fp_mode(struct drm_device *dev, struct drm_display_mode *mode)
 {
         struct drm_nouveau_private *dev_priv = dev->dev_private;
-        struct nvbios *bios = &dev_priv->VBIOS;
+        struct nvbios *bios = &dev_priv->vbios;
         uint8_t *mode_entry = &bios->data[bios->fp.mode_ptr];
 
         if (!mode) /* just checking whether we can produce a mode */
@@ -3562,11 +3563,11 @@ int nouveau_bios_parse_lvds_table(struct drm_device *dev, int pxclk, bool *dl, b
          * until later, when this function should be called with non-zero pxclk
          */
         struct drm_nouveau_private *dev_priv = dev->dev_private;
-        struct nvbios *bios = &dev_priv->VBIOS;
+        struct nvbios *bios = &dev_priv->vbios;
         int fpstrapping = get_fp_strap(dev, bios), lvdsmanufacturerindex = 0;
         struct lvdstableheader lth;
         uint16_t lvdsofs;
-        int ret, chip_version = bios->pub.chip_version;
+        int ret, chip_version = bios->chip_version;
 
         ret = parse_lvds_manufacturer_table_header(dev, bios, &lth);
         if (ret)
@@ -3682,7 +3683,7 @@ bios_output_config_match(struct drm_device *dev, struct dcb_entry *dcbent,
                           uint16_t record, int record_len, int record_nr)
 {
         struct drm_nouveau_private *dev_priv = dev->dev_private;
-        struct nvbios *bios = &dev_priv->VBIOS;
+        struct nvbios *bios = &dev_priv->vbios;
         uint32_t entry;
         uint16_t table;
         int i, v;
@@ -3716,7 +3717,7 @@ nouveau_bios_dp_table(struct drm_device *dev, struct dcb_entry *dcbent,
                       int *length)
 {
         struct drm_nouveau_private *dev_priv = dev->dev_private;
-        struct nvbios *bios = &dev_priv->VBIOS;
+        struct nvbios *bios = &dev_priv->vbios;
         uint8_t *table;
 
         if (!bios->display.dp_table_ptr) {
@@ -3725,7 +3726,7 @@ nouveau_bios_dp_table(struct drm_device *dev, struct dcb_entry *dcbent,
         }
         table = &bios->data[bios->display.dp_table_ptr];
 
-        if (table[0] != 0x21) {
+        if (table[0] != 0x20 && table[0] != 0x21) {
                 NV_ERROR(dev, "DisplayPort table version 0x%02x unknown\n",
                          table[0]);
                 return NULL;
@@ -3765,7 +3766,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
          */
 
         struct drm_nouveau_private *dev_priv = dev->dev_private;
-        struct nvbios *bios = &dev_priv->VBIOS;
+        struct nvbios *bios = &dev_priv->vbios;
         uint8_t *table = &bios->data[bios->display.script_table_ptr];
         uint8_t *otable = NULL;
         uint16_t script;
@@ -3918,8 +3919,8 @@ int run_tmds_table(struct drm_device *dev, struct dcb_entry *dcbent, int head, i
          */
 
         struct drm_nouveau_private *dev_priv = dev->dev_private;
-        struct nvbios *bios = &dev_priv->VBIOS;
-        int cv = bios->pub.chip_version;
+        struct nvbios *bios = &dev_priv->vbios;
+        int cv = bios->chip_version;
         uint16_t clktable = 0, scriptptr;
         uint32_t sel_clk_binding, sel_clk;
 
@@ -3978,8 +3979,8 @@ int get_pll_limits(struct drm_device *dev, uint32_t limit_match, struct pll_lims
          */
 
         struct drm_nouveau_private *dev_priv = dev->dev_private;
-        struct nvbios *bios = &dev_priv->VBIOS;
-        int cv = bios->pub.chip_version, pllindex = 0;
+        struct nvbios *bios = &dev_priv->vbios;
+        int cv = bios->chip_version, pllindex = 0;
         uint8_t pll_lim_ver = 0, headerlen = 0, recordlen = 0, entries = 0;
         uint32_t crystal_strap_mask, crystal_straps;
 
@@ -4332,7 +4333,7 @@ static void parse_bios_version(struct drm_device *dev, struct nvbios *bios, uint
          */
 
         bios->major_version = bios->data[offset + 3];
-        bios->pub.chip_version = bios->data[offset + 2];
+        bios->chip_version = bios->data[offset + 2];
         NV_TRACE(dev, "Bios version %02x.%02x.%02x.%02x\n",
                  bios->data[offset + 3], bios->data[offset + 2],
                  bios->data[offset + 1], bios->data[offset]);
@@ -4402,7 +4403,7 @@ static int parse_bit_A_tbl_entry(struct drm_device *dev, struct nvbios *bios, st
         }
 
         /* First entry is normal dac, 2nd tv-out perhaps? */
-        bios->pub.dactestval = ROM32(bios->data[load_table_ptr + headerlen]) & 0x3ff;
+        bios->dactestval = ROM32(bios->data[load_table_ptr + headerlen]) & 0x3ff;
 
         return 0;
 }
@@ -4526,8 +4527,8 @@ static int parse_bit_i_tbl_entry(struct drm_device *dev, struct nvbios *bios, st
                 return -ENOSYS;
         }
 
-        bios->pub.dactestval = ROM32(bios->data[daccmpoffset + dacheaderlen]);
-        bios->pub.tvdactestval = ROM32(bios->data[daccmpoffset + dacheaderlen + 4]);
+        bios->dactestval = ROM32(bios->data[daccmpoffset + dacheaderlen]);
+        bios->tvdactestval = ROM32(bios->data[daccmpoffset + dacheaderlen + 4]);
 
         return 0;
 }
@@ -4796,11 +4797,11 @@ static int parse_bmp_structure(struct drm_device *dev, struct nvbios *bios, unsi
         uint16_t legacy_scripts_offset, legacy_i2c_offset;
 
         /* load needed defaults in case we can't parse this info */
-        bios->bdcb.dcb.i2c[0].write = NV_CIO_CRE_DDC_WR__INDEX;
-        bios->bdcb.dcb.i2c[0].read = NV_CIO_CRE_DDC_STATUS__INDEX;
-        bios->bdcb.dcb.i2c[1].write = NV_CIO_CRE_DDC0_WR__INDEX;
-        bios->bdcb.dcb.i2c[1].read = NV_CIO_CRE_DDC0_STATUS__INDEX;
-        bios->pub.digital_min_front_porch = 0x4b;
+        bios->dcb.i2c[0].write = NV_CIO_CRE_DDC_WR__INDEX;
+        bios->dcb.i2c[0].read = NV_CIO_CRE_DDC_STATUS__INDEX;
+        bios->dcb.i2c[1].write = NV_CIO_CRE_DDC0_WR__INDEX;
+        bios->dcb.i2c[1].read = NV_CIO_CRE_DDC0_STATUS__INDEX;
+        bios->digital_min_front_porch = 0x4b;
         bios->fmaxvco = 256000;
         bios->fminvco = 128000;
         bios->fp.duallink_transition_clk = 90000;
@@ -4907,10 +4908,10 @@ static int parse_bmp_structure(struct drm_device *dev, struct nvbios *bios, unsi
         bios->legacy.i2c_indices.crt = bios->data[legacy_i2c_offset];
         bios->legacy.i2c_indices.tv = bios->data[legacy_i2c_offset + 1];
         bios->legacy.i2c_indices.panel = bios->data[legacy_i2c_offset + 2];
-        bios->bdcb.dcb.i2c[0].write = bios->data[legacy_i2c_offset + 4];
-        bios->bdcb.dcb.i2c[0].read = bios->data[legacy_i2c_offset + 5];
-        bios->bdcb.dcb.i2c[1].write = bios->data[legacy_i2c_offset + 6];
-        bios->bdcb.dcb.i2c[1].read = bios->data[legacy_i2c_offset + 7];
+        bios->dcb.i2c[0].write = bios->data[legacy_i2c_offset + 4];
+        bios->dcb.i2c[0].read = bios->data[legacy_i2c_offset + 5];
+        bios->dcb.i2c[1].write = bios->data[legacy_i2c_offset + 6];
+        bios->dcb.i2c[1].read = bios->data[legacy_i2c_offset + 7];
 
         if (bmplength > 74) {
                 bios->fmaxvco = ROM32(bmp[67]);
@@ -4984,7 +4985,8 @@ read_dcb_i2c_entry(struct drm_device *dev, int dcb_version, uint8_t *i2ctable, i
                 else
                         NV_WARN(dev,
                                 "DCB I2C table has more entries than indexable "
-                                "(%d entries, max index 15)\n", i2ctable[2]);
+                                "(%d entries, max %d)\n", i2ctable[2],
+                                DCB_MAX_NUM_I2C_ENTRIES);
                 entry_len = i2ctable[3];
                 /* [4] is i2c_default_indices, read in parse_dcb_table() */
         }
@@ -5000,8 +5002,8 @@ read_dcb_i2c_entry(struct drm_device *dev, int dcb_version, uint8_t *i2ctable, i
 
         if (index == 0xf)
                 return 0;
-        if (index > i2c_entries) {
-                NV_ERROR(dev, "DCB I2C index too big (%d > %d)\n",
+        if (index >= i2c_entries) {
+                NV_ERROR(dev, "DCB I2C index too big (%d >= %d)\n",
                          index, i2ctable[2]);
                 return -ENOENT;
         }
@@ -5036,7 +5038,7 @@ read_dcb_i2c_entry(struct drm_device *dev, int dcb_version, uint8_t *i2ctable, i
 static struct dcb_gpio_entry *
 new_gpio_entry(struct nvbios *bios)
 {
-        struct parsed_dcb_gpio *gpio = &bios->bdcb.gpio;
+        struct dcb_gpio_table *gpio = &bios->dcb.gpio;
 
         return &gpio->entry[gpio->entries++];
 }
@@ -5045,14 +5047,14 @@ struct dcb_gpio_entry *
 nouveau_bios_gpio_entry(struct drm_device *dev, enum dcb_gpio_tag tag)
 {
         struct drm_nouveau_private *dev_priv = dev->dev_private;
-        struct nvbios *bios = &dev_priv->VBIOS;
+        struct nvbios *bios = &dev_priv->vbios;
         int i;
 
-        for (i = 0; i < bios->bdcb.gpio.entries; i++) {
-                if (bios->bdcb.gpio.entry[i].tag != tag)
+        for (i = 0; i < bios->dcb.gpio.entries; i++) {
+                if (bios->dcb.gpio.entry[i].tag != tag)
                         continue;
 
-                return &bios->bdcb.gpio.entry[i];
+                return &bios->dcb.gpio.entry[i];
         }
 
         return NULL;
@@ -5100,7 +5102,7 @@ static void
 parse_dcb_gpio_table(struct nvbios *bios)
 {
         struct drm_device *dev = bios->dev;
-        uint16_t gpio_table_ptr = bios->bdcb.gpio_table_ptr;
+        uint16_t gpio_table_ptr = bios->dcb.gpio_table_ptr;
         uint8_t *gpio_table = &bios->data[gpio_table_ptr];
         int header_len = gpio_table[1],
             entries = gpio_table[2],
@@ -5108,7 +5110,7 @@ parse_dcb_gpio_table(struct nvbios *bios)
         void (*parse_entry)(struct nvbios *, uint16_t) = NULL;
         int i;
 
-        if (bios->bdcb.version >= 0x40) {
+        if (bios->dcb.version >= 0x40) {
                 if (gpio_table_ptr && entry_len != 4) {
                         NV_WARN(dev, "Invalid DCB GPIO table entry length.\n");
                         return;
@@ -5116,7 +5118,7 @@ parse_dcb_gpio_table(struct nvbios *bios)
 
                 parse_entry = parse_dcb40_gpio_entry;
 
-        } else if (bios->bdcb.version >= 0x30) {
+        } else if (bios->dcb.version >= 0x30) {
                 if (gpio_table_ptr && entry_len != 2) {
                         NV_WARN(dev, "Invalid DCB GPIO table entry length.\n");
                         return;
@@ -5124,7 +5126,7 @@ parse_dcb_gpio_table(struct nvbios *bios)
 
                 parse_entry = parse_dcb30_gpio_entry;
 
-        } else if (bios->bdcb.version >= 0x22) {
+        } else if (bios->dcb.version >= 0x22) {
                 /*
                  * DCBs older than v3.0 don't really have a GPIO
                  * table, instead they keep some GPIO info at fixed
@@ -5158,30 +5160,67 @@ struct dcb_connector_table_entry *
 nouveau_bios_connector_entry(struct drm_device *dev, int index)
 {
         struct drm_nouveau_private *dev_priv = dev->dev_private;
-        struct nvbios *bios = &dev_priv->VBIOS;
+        struct nvbios *bios = &dev_priv->vbios;
         struct dcb_connector_table_entry *cte;
 
-        if (index >= bios->bdcb.connector.entries)
+        if (index >= bios->dcb.connector.entries)
                 return NULL;
 
-        cte = &bios->bdcb.connector.entry[index];
+        cte = &bios->dcb.connector.entry[index];
         if (cte->type == 0xff)
                 return NULL;
 
         return cte;
 }
 
+static enum dcb_connector_type
+divine_connector_type(struct nvbios *bios, int index)
+{
+        struct dcb_table *dcb = &bios->dcb;
+        unsigned encoders = 0, type = DCB_CONNECTOR_NONE;
+        int i;
+
+        for (i = 0; i < dcb->entries; i++) {
+                if (dcb->entry[i].connector == index)
+                        encoders |= (1 << dcb->entry[i].type);
+        }
+
+        if (encoders & (1 << OUTPUT_DP)) {
+                if (encoders & (1 << OUTPUT_TMDS))
+                        type = DCB_CONNECTOR_DP;
+                else
+                        type = DCB_CONNECTOR_eDP;
+        } else
+        if (encoders & (1 << OUTPUT_TMDS)) {
+                if (encoders & (1 << OUTPUT_ANALOG))
+                        type = DCB_CONNECTOR_DVI_I;
+                else
+                        type = DCB_CONNECTOR_DVI_D;
+        } else
+        if (encoders & (1 << OUTPUT_ANALOG)) {
+                type = DCB_CONNECTOR_VGA;
+        } else
+        if (encoders & (1 << OUTPUT_LVDS)) {
+                type = DCB_CONNECTOR_LVDS;
+        } else
+        if (encoders & (1 << OUTPUT_TV)) {
+                type = DCB_CONNECTOR_TV_0;
+        }
+
+        return type;
+}
+
 static void
 parse_dcb_connector_table(struct nvbios *bios)
 {
         struct drm_device *dev = bios->dev;
-        struct dcb_connector_table *ct = &bios->bdcb.connector;
+        struct dcb_connector_table *ct = &bios->dcb.connector;
         struct dcb_connector_table_entry *cte;
-        uint8_t *conntab = &bios->data[bios->bdcb.connector_table_ptr];
+        uint8_t *conntab = &bios->data[bios->dcb.connector_table_ptr];
         uint8_t *entry;
         int i;
 
-        if (!bios->bdcb.connector_table_ptr) {
+        if (!bios->dcb.connector_table_ptr) {
                 NV_DEBUG_KMS(dev, "No DCB connector table present\n");
                 return;
         }
@@ -5203,6 +5242,7 @@ parse_dcb_connector_table(struct nvbios *bios)
                 cte->entry = ROM16(entry[0]);
         else
                 cte->entry = ROM32(entry[0]);
+
         cte->type = (cte->entry & 0x000000ff) >> 0;
         cte->index = (cte->entry & 0x00000f00) >> 8;
         switch (cte->entry & 0x00033000) {
@@ -5228,10 +5268,33 @@ parse_dcb_connector_table(struct nvbios *bios)
 
                 NV_INFO(dev, " %d: 0x%08x: type 0x%02x idx %d tag 0x%02x\n",
                         i, cte->entry, cte->type, cte->index, cte->gpio_tag);
+
+                /* check for known types, fallback to guessing the type
+                 * from attached encoders if we hit an unknown.
+                 */
+                switch (cte->type) {
+                case DCB_CONNECTOR_VGA:
+                case DCB_CONNECTOR_TV_0:
+                case DCB_CONNECTOR_TV_1:
+                case DCB_CONNECTOR_TV_3:
+                case DCB_CONNECTOR_DVI_I:
+                case DCB_CONNECTOR_DVI_D:
+                case DCB_CONNECTOR_LVDS:
+                case DCB_CONNECTOR_DP:
+                case DCB_CONNECTOR_eDP:
+                case DCB_CONNECTOR_HDMI_0:
+                case DCB_CONNECTOR_HDMI_1:
+                        break;
+                default:
+                        cte->type = divine_connector_type(bios, cte->index);
+                        NV_WARN(dev, "unknown type, using 0x%02x", cte->type);
+                        break;
+                }
+
         }
 }
 
-static struct dcb_entry *new_dcb_entry(struct parsed_dcb *dcb)
+static struct dcb_entry *new_dcb_entry(struct dcb_table *dcb)
 {
         struct dcb_entry *entry = &dcb->entry[dcb->entries];
 
@@ -5241,7 +5304,7 @@ static struct dcb_entry *new_dcb_entry(struct parsed_dcb *dcb)
         return entry;
 }
 
-static void fabricate_vga_output(struct parsed_dcb *dcb, int i2c, int heads)
+static void fabricate_vga_output(struct dcb_table *dcb, int i2c, int heads)
 {
         struct dcb_entry *entry = new_dcb_entry(dcb);
 
@@ -5252,7 +5315,7 @@ static void fabricate_vga_output(struct parsed_dcb *dcb, int i2c, int heads)
         /* "or" mostly unused in early gen crt modesetting, 0 is fine */
 }
 
-static void fabricate_dvi_i_output(struct parsed_dcb *dcb, bool twoHeads)
+static void fabricate_dvi_i_output(struct dcb_table *dcb, bool twoHeads)
 {
         struct dcb_entry *entry = new_dcb_entry(dcb);
 
@@ -5279,7 +5342,7 @@ static void fabricate_dvi_i_output(struct parsed_dcb *dcb, bool twoHeads)
 #endif
 }
 
-static void fabricate_tv_output(struct parsed_dcb *dcb, bool twoHeads)
+static void fabricate_tv_output(struct dcb_table *dcb, bool twoHeads)
 {
         struct dcb_entry *entry = new_dcb_entry(dcb);
 
@@ -5290,13 +5353,13 @@ static void fabricate_tv_output(struct parsed_dcb *dcb, bool twoHeads)
 }
 
 static bool
-parse_dcb20_entry(struct drm_device *dev, struct bios_parsed_dcb *bdcb,
+parse_dcb20_entry(struct drm_device *dev, struct dcb_table *dcb,
                   uint32_t conn, uint32_t conf, struct dcb_entry *entry)
 {
         entry->type = conn & 0xf;
         entry->i2c_index = (conn >> 4) & 0xf;
         entry->heads = (conn >> 8) & 0xf;
-        if (bdcb->version >= 0x40)
+        if (dcb->version >= 0x40)
                 entry->connector = (conn >> 12) & 0xf;
         entry->bus = (conn >> 16) & 0xf;
         entry->location = (conn >> 20) & 0x3;
@@ -5314,7 +5377,7 @@ parse_dcb20_entry(struct drm_device *dev, struct bios_parsed_dcb *bdcb,
                  * Although the rest of a CRT conf dword is usually
                  * zeros, mac biosen have stuff there so we must mask
                  */
-                entry->crtconf.maxfreq = (bdcb->version < 0x30) ?
+                entry->crtconf.maxfreq = (dcb->version < 0x30) ?
                                          (conf & 0xffff) * 10 :
                                          (conf & 0xff) * 10000;
                 break;
@@ -5323,7 +5386,7 @@ parse_dcb20_entry(struct drm_device *dev, struct bios_parsed_dcb *bdcb,
                 uint32_t mask;
                 if (conf & 0x1)
                         entry->lvdsconf.use_straps_for_mode = true;
-                if (bdcb->version < 0x22) {
+                if (dcb->version < 0x22) {
                         mask = ~0xd;
                         /*
                          * The laptop in bug 14567 lies and claims to not use
@@ -5347,7 +5410,7 @@ parse_dcb20_entry(struct drm_device *dev, struct bios_parsed_dcb *bdcb,
                          * Until we even try to use these on G8x, it's
                          * useless reporting unknown bits. They all are.
                          */
-                        if (bdcb->version >= 0x40)
+                        if (dcb->version >= 0x40)
                                 break;
 
                         NV_ERROR(dev, "Unknown LVDS configuration bits, "
@@ -5357,7 +5420,7 @@ parse_dcb20_entry(struct drm_device *dev, struct bios_parsed_dcb *bdcb,
         }
         case OUTPUT_TV:
         {
-                if (bdcb->version >= 0x30)
+                if (dcb->version >= 0x30)
                         entry->tvconf.has_component_output = conf & (0x8 << 4);
                 else
                         entry->tvconf.has_component_output = false;
@@ -5384,8 +5447,10 @@ parse_dcb20_entry(struct drm_device *dev, struct bios_parsed_dcb *bdcb,
                 break;
         case 0xe:
                 /* weird g80 mobile type that "nv" treats as a terminator */
-                bdcb->dcb.entries--;
+                dcb->entries--;
                 return false;
+        default:
+                break;
         }
 
         /* unsure what DCB version introduces this, 3.0? */
@@ -5396,7 +5461,7 @@ parse_dcb20_entry(struct drm_device *dev, struct bios_parsed_dcb *bdcb,
 }
 
 static bool
-parse_dcb15_entry(struct drm_device *dev, struct parsed_dcb *dcb,
+parse_dcb15_entry(struct drm_device *dev, struct dcb_table *dcb,
                   uint32_t conn, uint32_t conf, struct dcb_entry *entry)
 {
         switch (conn & 0x0000000f) {
@@ -5462,27 +5527,27 @@ parse_dcb15_entry(struct drm_device *dev, struct parsed_dcb *dcb,
         return true;
 }
 
-static bool parse_dcb_entry(struct drm_device *dev, struct bios_parsed_dcb *bdcb,
+static bool parse_dcb_entry(struct drm_device *dev, struct dcb_table *dcb,
                             uint32_t conn, uint32_t conf)
 {
-        struct dcb_entry *entry = new_dcb_entry(&bdcb->dcb);
+        struct dcb_entry *entry = new_dcb_entry(dcb);
         bool ret;
 
-        if (bdcb->version >= 0x20)
-                ret = parse_dcb20_entry(dev, bdcb, conn, conf, entry);
+        if (dcb->version >= 0x20)
+                ret = parse_dcb20_entry(dev, dcb, conn, conf, entry);
         else
-                ret = parse_dcb15_entry(dev, &bdcb->dcb, conn, conf, entry);
+                ret = parse_dcb15_entry(dev, dcb, conn, conf, entry);
         if (!ret)
                 return ret;
 
-        read_dcb_i2c_entry(dev, bdcb->version, bdcb->i2c_table,
-                           entry->i2c_index, &bdcb->dcb.i2c[entry->i2c_index]);
+        read_dcb_i2c_entry(dev, dcb->version, dcb->i2c_table,
+                           entry->i2c_index, &dcb->i2c[entry->i2c_index]);
 
         return true;
 }
 
 static
-void merge_like_dcb_entries(struct drm_device *dev, struct parsed_dcb *dcb)
+void merge_like_dcb_entries(struct drm_device *dev, struct dcb_table *dcb)
 {
         /*
          * DCB v2.0 lists each output combination separately.
@@ -5534,8 +5599,7 @@ static int
 parse_dcb_table(struct drm_device *dev, struct nvbios *bios, bool twoHeads)
 {
         struct drm_nouveau_private *dev_priv = dev->dev_private;
-        struct bios_parsed_dcb *bdcb = &bios->bdcb;
-        struct parsed_dcb *dcb;
+        struct dcb_table *dcb = &bios->dcb;
         uint16_t dcbptr = 0, i2ctabptr = 0;
         uint8_t *dcbtable;
         uint8_t headerlen = 0x4, entries = DCB_MAX_NUM_ENTRIES;
@@ -5543,9 +5607,6 @@ parse_dcb_table(struct drm_device *dev, struct nvbios *bios, bool twoHeads)
         int recordlength = 8, confofs = 4;
         int i;
 
-        dcb = bios->pub.dcb = &bdcb->dcb;
-        dcb->entries = 0;
-
         /* get the offset from 0x36 */
         if (dev_priv->card_type > NV_04) {
                 dcbptr = ROM16(bios->data[0x36]);
@@ -5567,21 +5628,21 @@ parse_dcb_table(struct drm_device *dev, struct nvbios *bios, bool twoHeads)
         dcbtable = &bios->data[dcbptr];
 
         /* get DCB version */
-        bdcb->version = dcbtable[0];
+        dcb->version = dcbtable[0];
         NV_TRACE(dev, "Found Display Configuration Block version %d.%d\n",
-                 bdcb->version >> 4, bdcb->version & 0xf);
+                 dcb->version >> 4, dcb->version & 0xf);
 
-        if (bdcb->version >= 0x20) { /* NV17+ */
+        if (dcb->version >= 0x20) { /* NV17+ */
                 uint32_t sig;
 
-                if (bdcb->version >= 0x30) { /* NV40+ */
+                if (dcb->version >= 0x30) { /* NV40+ */
                         headerlen = dcbtable[1];
                         entries = dcbtable[2];
                         recordlength = dcbtable[3];
                         i2ctabptr = ROM16(dcbtable[4]);
                         sig = ROM32(dcbtable[6]);
-                        bdcb->gpio_table_ptr = ROM16(dcbtable[10]);
-                        bdcb->connector_table_ptr = ROM16(dcbtable[20]);
+                        dcb->gpio_table_ptr = ROM16(dcbtable[10]);
+                        dcb->connector_table_ptr = ROM16(dcbtable[20]);
                 } else {
                         i2ctabptr = ROM16(dcbtable[2]);
                         sig = ROM32(dcbtable[4]);
@@ -5593,7 +5654,7 @@ parse_dcb_table(struct drm_device *dev, struct nvbios *bios, bool twoHeads)
                                  "signature (%08X)\n", sig);
                         return -EINVAL;
                 }
-        } else if (bdcb->version >= 0x15) { /* some NV11 and NV20 */
+        } else if (dcb->version >= 0x15) { /* some NV11 and NV20 */
                 char sig[8] = { 0 };
 
                 strncpy(sig, (char *)&dcbtable[-7], 7);
@@ -5641,14 +5702,11 @@ parse_dcb_table(struct drm_device *dev, struct nvbios *bios, bool twoHeads)
         if (!i2ctabptr)
                 NV_WARN(dev, "No pointer to DCB I2C port table\n");
         else {
-                bdcb->i2c_table = &bios->data[i2ctabptr];
-                if (bdcb->version >= 0x30)
-                        bdcb->i2c_default_indices = bdcb->i2c_table[4];
+                dcb->i2c_table = &bios->data[i2ctabptr];
+                if (dcb->version >= 0x30)
+                        dcb->i2c_default_indices = dcb->i2c_table[4];
         }
 
-        parse_dcb_gpio_table(bios);
-        parse_dcb_connector_table(bios);
-
         if (entries > DCB_MAX_NUM_ENTRIES)
                 entries = DCB_MAX_NUM_ENTRIES;
 
@@ -5673,7 +5731,7 @@ parse_dcb_table(struct drm_device *dev, struct nvbios *bios, bool twoHeads)
                 NV_TRACEWARN(dev, "Raw DCB entry %d: %08x %08x\n",
                              dcb->entries, connection, config);
 
-                if (!parse_dcb_entry(dev, bdcb, connection, config))
+                if (!parse_dcb_entry(dev, dcb, connection, config))
                         break;
         }
 
5679 5737
@@ -5681,18 +5739,22 @@ parse_dcb_table(struct drm_device *dev, struct nvbios *bios, bool twoHeads)
          * apart for v2.1+ not being known for requiring merging, this
          * guarantees dcbent->index is the index of the entry in the rom image
          */
-        if (bdcb->version < 0x21)
+        if (dcb->version < 0x21)
                 merge_like_dcb_entries(dev, dcb);
 
-        return dcb->entries ? 0 : -ENXIO;
+        if (!dcb->entries)
+                return -ENXIO;
+
+        parse_dcb_gpio_table(bios);
+        parse_dcb_connector_table(bios);
+        return 0;
 }
 
 static void
 fixup_legacy_connector(struct nvbios *bios)
 {
-        struct bios_parsed_dcb *bdcb = &bios->bdcb;
-        struct parsed_dcb *dcb = &bdcb->dcb;
-        int high = 0, i;
+        struct dcb_table *dcb = &bios->dcb;
+        int i, i2c, i2c_conn[DCB_MAX_NUM_I2C_ENTRIES] = { };
 
         /*
          * DCB 3.0 also has the table in most cases, but there are some cards
@@ -5700,9 +5762,11 @@ fixup_legacy_connector(struct nvbios *bios)
          * indices are all 0. We don't need the connector indices on pre-G80
          * chips (yet?) so limit the use to DCB 4.0 and above.
          */
-        if (bdcb->version >= 0x40)
+        if (dcb->version >= 0x40)
                 return;
 
+        dcb->connector.entries = 0;
+
         /*
          * No known connector info before v3.0, so make it up. the rule here
          * is: anything on the same i2c bus is considered to be on the same
@@ -5710,37 +5774,38 @@ fixup_legacy_connector(struct nvbios *bios)
5710 * its own unique connector index. 5774 * its own unique connector index.
5711 */ 5775 */
5712 for (i = 0; i < dcb->entries; i++) { 5776 for (i = 0; i < dcb->entries; i++) {
5713 if (dcb->entry[i].i2c_index == 0xf)
5714 continue;
5715
5716 /* 5777 /*
5717 * Ignore the I2C index for on-chip TV-out, as there 5778 * Ignore the I2C index for on-chip TV-out, as there
5718 * are cards with bogus values (nv31m in bug 23212), 5779 * are cards with bogus values (nv31m in bug 23212),
5719 * and it's otherwise useless. 5780 * and it's otherwise useless.
5720 */ 5781 */
5721 if (dcb->entry[i].type == OUTPUT_TV && 5782 if (dcb->entry[i].type == OUTPUT_TV &&
5722 dcb->entry[i].location == DCB_LOC_ON_CHIP) { 5783 dcb->entry[i].location == DCB_LOC_ON_CHIP)
5723 dcb->entry[i].i2c_index = 0xf; 5784 dcb->entry[i].i2c_index = 0xf;
5785 i2c = dcb->entry[i].i2c_index;
5786
5787 if (i2c_conn[i2c]) {
5788 dcb->entry[i].connector = i2c_conn[i2c] - 1;
5724 continue; 5789 continue;
5725 } 5790 }
5726 5791
5727 dcb->entry[i].connector = dcb->entry[i].i2c_index; 5792 dcb->entry[i].connector = dcb->connector.entries++;
5728 if (dcb->entry[i].connector > high) 5793 if (i2c != 0xf)
5729 high = dcb->entry[i].connector; 5794 i2c_conn[i2c] = dcb->connector.entries;
5730 } 5795 }
5731 5796
5732 for (i = 0; i < dcb->entries; i++) { 5797 /* Fake the connector table as well as just connector indices */
5733 if (dcb->entry[i].i2c_index != 0xf) 5798 for (i = 0; i < dcb->connector.entries; i++) {
5734 continue; 5799 dcb->connector.entry[i].index = i;
5735 5800 dcb->connector.entry[i].type = divine_connector_type(bios, i);
5736 dcb->entry[i].connector = ++high; 5801 dcb->connector.entry[i].gpio_tag = 0xff;
5737 } 5802 }
5738} 5803}
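
The rewritten fixup_legacy_connector() replaces the old high-water-mark counter with a small bucketing map: i2c_conn[] records, per i2c bus, the already-assigned connector index plus one (zero meaning "not seen yet"), so outputs sharing a bus share a connector while bus 0xf (no i2c) always mints a fresh index; 0xf is simply never recorded, so its lookup never hits. A standalone sketch of the invariant, with assumed inputs rather than a real DCB:

    /* Sketch: bucket outputs by i2c bus the way the new fixup does. */
    int bus_of[5] = { 0, 1, 0, 0xf, 0xf };   /* assumed i2c indices   */
    int i2c_conn[16] = { 0 };                /* bus -> connector + 1  */
    int entries = 0, i;

    for (i = 0; i < 5; i++) {
        int i2c = bus_of[i], conn;

        if (i2c_conn[i2c]) {                 /* bus seen: reuse index */
            conn = i2c_conn[i2c] - 1;
        } else {
            conn = entries++;                /* mint a new connector  */
            if (i2c != 0xf)                  /* 0xf never recorded    */
                i2c_conn[i2c] = entries;
        }
        /* conn comes out as 0, 1, 0, 2, 3 for the inputs above */
    }

The synthesized connector table is then faked in the same pass, with divine_connector_type() guessing a type per index.
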
5739 5804
5740static void 5805static void
5741fixup_legacy_i2c(struct nvbios *bios) 5806fixup_legacy_i2c(struct nvbios *bios)
5742{ 5807{
5743 struct parsed_dcb *dcb = &bios->bdcb.dcb; 5808 struct dcb_table *dcb = &bios->dcb;
5744 int i; 5809 int i;
5745 5810
5746 for (i = 0; i < dcb->entries; i++) { 5811 for (i = 0; i < dcb->entries; i++) {
@@ -5826,7 +5891,7 @@ static int load_nv17_hw_sequencer_ucode(struct drm_device *dev,
5826uint8_t *nouveau_bios_embedded_edid(struct drm_device *dev) 5891uint8_t *nouveau_bios_embedded_edid(struct drm_device *dev)
5827{ 5892{
5828 struct drm_nouveau_private *dev_priv = dev->dev_private; 5893 struct drm_nouveau_private *dev_priv = dev->dev_private;
5829 struct nvbios *bios = &dev_priv->VBIOS; 5894 struct nvbios *bios = &dev_priv->vbios;
5830 const uint8_t edid_sig[] = { 5895 const uint8_t edid_sig[] = {
5831 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 }; 5896 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 };
5832 uint16_t offset = 0; 5897 uint16_t offset = 0;
@@ -5859,7 +5924,7 @@ nouveau_bios_run_init_table(struct drm_device *dev, uint16_t table,
5859 struct dcb_entry *dcbent) 5924 struct dcb_entry *dcbent)
5860{ 5925{
5861 struct drm_nouveau_private *dev_priv = dev->dev_private; 5926 struct drm_nouveau_private *dev_priv = dev->dev_private;
5862 struct nvbios *bios = &dev_priv->VBIOS; 5927 struct nvbios *bios = &dev_priv->vbios;
5863 struct init_exec iexec = { true, false }; 5928 struct init_exec iexec = { true, false };
5864 5929
5865 mutex_lock(&bios->lock); 5930 mutex_lock(&bios->lock);
@@ -5872,7 +5937,7 @@ nouveau_bios_run_init_table(struct drm_device *dev, uint16_t table,
5872static bool NVInitVBIOS(struct drm_device *dev) 5937static bool NVInitVBIOS(struct drm_device *dev)
5873{ 5938{
5874 struct drm_nouveau_private *dev_priv = dev->dev_private; 5939 struct drm_nouveau_private *dev_priv = dev->dev_private;
5875 struct nvbios *bios = &dev_priv->VBIOS; 5940 struct nvbios *bios = &dev_priv->vbios;
5876 5941
5877 memset(bios, 0, sizeof(struct nvbios)); 5942 memset(bios, 0, sizeof(struct nvbios));
5878 mutex_init(&bios->lock); 5943 mutex_init(&bios->lock);
@@ -5888,7 +5953,7 @@ static bool NVInitVBIOS(struct drm_device *dev)
5888static int nouveau_parse_vbios_struct(struct drm_device *dev) 5953static int nouveau_parse_vbios_struct(struct drm_device *dev)
5889{ 5954{
5890 struct drm_nouveau_private *dev_priv = dev->dev_private; 5955 struct drm_nouveau_private *dev_priv = dev->dev_private;
5891 struct nvbios *bios = &dev_priv->VBIOS; 5956 struct nvbios *bios = &dev_priv->vbios;
5892 const uint8_t bit_signature[] = { 0xff, 0xb8, 'B', 'I', 'T' }; 5957 const uint8_t bit_signature[] = { 0xff, 0xb8, 'B', 'I', 'T' };
5893 const uint8_t bmp_signature[] = { 0xff, 0x7f, 'N', 'V', 0x0 }; 5958 const uint8_t bmp_signature[] = { 0xff, 0x7f, 'N', 'V', 0x0 };
5894 int offset; 5959 int offset;
@@ -5915,7 +5980,7 @@ int
5915nouveau_run_vbios_init(struct drm_device *dev) 5980nouveau_run_vbios_init(struct drm_device *dev)
5916{ 5981{
5917 struct drm_nouveau_private *dev_priv = dev->dev_private; 5982 struct drm_nouveau_private *dev_priv = dev->dev_private;
5918 struct nvbios *bios = &dev_priv->VBIOS; 5983 struct nvbios *bios = &dev_priv->vbios;
5919 int i, ret = 0; 5984 int i, ret = 0;
5920 5985
5921 NVLockVgaCrtcs(dev, false); 5986 NVLockVgaCrtcs(dev, false);
@@ -5946,9 +6011,9 @@ nouveau_run_vbios_init(struct drm_device *dev)
5946 } 6011 }
5947 6012
5948 if (dev_priv->card_type >= NV_50) { 6013 if (dev_priv->card_type >= NV_50) {
5949 for (i = 0; i < bios->bdcb.dcb.entries; i++) { 6014 for (i = 0; i < bios->dcb.entries; i++) {
5950 nouveau_bios_run_display_table(dev, 6015 nouveau_bios_run_display_table(dev,
5951 &bios->bdcb.dcb.entry[i], 6016 &bios->dcb.entry[i],
5952 0, 0); 6017 0, 0);
5953 } 6018 }
5954 } 6019 }
@@ -5962,11 +6027,11 @@ static void
5962nouveau_bios_i2c_devices_takedown(struct drm_device *dev) 6027nouveau_bios_i2c_devices_takedown(struct drm_device *dev)
5963{ 6028{
5964 struct drm_nouveau_private *dev_priv = dev->dev_private; 6029 struct drm_nouveau_private *dev_priv = dev->dev_private;
5965 struct nvbios *bios = &dev_priv->VBIOS; 6030 struct nvbios *bios = &dev_priv->vbios;
5966 struct dcb_i2c_entry *entry; 6031 struct dcb_i2c_entry *entry;
5967 int i; 6032 int i;
5968 6033
5969 entry = &bios->bdcb.dcb.i2c[0]; 6034 entry = &bios->dcb.i2c[0];
5970 for (i = 0; i < DCB_MAX_NUM_I2C_ENTRIES; i++, entry++) 6035 for (i = 0; i < DCB_MAX_NUM_I2C_ENTRIES; i++, entry++)
5971 nouveau_i2c_fini(dev, entry); 6036 nouveau_i2c_fini(dev, entry);
5972} 6037}
@@ -5975,13 +6040,11 @@ int
5975nouveau_bios_init(struct drm_device *dev) 6040nouveau_bios_init(struct drm_device *dev)
5976{ 6041{
5977 struct drm_nouveau_private *dev_priv = dev->dev_private; 6042 struct drm_nouveau_private *dev_priv = dev->dev_private;
5978 struct nvbios *bios = &dev_priv->VBIOS; 6043 struct nvbios *bios = &dev_priv->vbios;
5979 uint32_t saved_nv_pextdev_boot_0; 6044 uint32_t saved_nv_pextdev_boot_0;
5980 bool was_locked; 6045 bool was_locked;
5981 int ret; 6046 int ret;
5982 6047
5983 dev_priv->vbios = &bios->pub;
5984
5985 if (!NVInitVBIOS(dev)) 6048 if (!NVInitVBIOS(dev))
5986 return -ENODEV; 6049 return -ENODEV;
5987 6050
@@ -6023,10 +6086,8 @@ nouveau_bios_init(struct drm_device *dev)
6023 bios_wr32(bios, NV_PEXTDEV_BOOT_0, saved_nv_pextdev_boot_0); 6086 bios_wr32(bios, NV_PEXTDEV_BOOT_0, saved_nv_pextdev_boot_0);
6024 6087
6025 ret = nouveau_run_vbios_init(dev); 6088 ret = nouveau_run_vbios_init(dev);
6026 if (ret) { 6089 if (ret)
6027 dev_priv->vbios = NULL;
6028 return ret; 6090 return ret;
6029 }
6030 6091
6031 /* feature_byte on BMP is poor, but init always sets CR4B */ 6092 /* feature_byte on BMP is poor, but init always sets CR4B */
6032 was_locked = NVLockVgaCrtcs(dev, false); 6093 was_locked = NVLockVgaCrtcs(dev, false);
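
The repeated &dev_priv->VBIOS to &dev_priv->vbios hunks through this file are the visible half of a larger cleanup: the nvbios structure is now embedded directly in drm_nouveau_private, and the separate dev_priv->vbios pointer into bios->pub, which had to be cleared again on init failure as the deleted lines show, is gone. In outline (before/after fragments, not one compilable unit):

    /* Before: two views of the same data, one of them nullable. */
    struct nvbios *bios = &dev_priv->VBIOS;
    dev_priv->vbios = &bios->pub;        /* NULLed on init failure */

    /* After: a single embedded structure, no pointer to keep in sync. */
    struct nvbios *bios = &dev_priv->vbios;
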
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.h b/drivers/gpu/drm/nouveau/nouveau_bios.h
index fd94bd6dc264..9f688aa9a655 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.h
@@ -34,9 +34,67 @@
34 34
35#define DCB_LOC_ON_CHIP 0 35#define DCB_LOC_ON_CHIP 0
36 36
37struct dcb_i2c_entry {
38 uint8_t port_type;
39 uint8_t read, write;
40 struct nouveau_i2c_chan *chan;
41};
42
43enum dcb_gpio_tag {
44 DCB_GPIO_TVDAC0 = 0xc,
45 DCB_GPIO_TVDAC1 = 0x2d,
46};
47
48struct dcb_gpio_entry {
49 enum dcb_gpio_tag tag;
50 int line;
51 bool invert;
52};
53
54struct dcb_gpio_table {
55 int entries;
56 struct dcb_gpio_entry entry[DCB_MAX_NUM_GPIO_ENTRIES];
57};
58
59enum dcb_connector_type {
60 DCB_CONNECTOR_VGA = 0x00,
61 DCB_CONNECTOR_TV_0 = 0x10,
62 DCB_CONNECTOR_TV_1 = 0x11,
63 DCB_CONNECTOR_TV_3 = 0x13,
64 DCB_CONNECTOR_DVI_I = 0x30,
65 DCB_CONNECTOR_DVI_D = 0x31,
66 DCB_CONNECTOR_LVDS = 0x40,
67 DCB_CONNECTOR_DP = 0x46,
68 DCB_CONNECTOR_eDP = 0x47,
69 DCB_CONNECTOR_HDMI_0 = 0x60,
70 DCB_CONNECTOR_HDMI_1 = 0x61,
71 DCB_CONNECTOR_NONE = 0xff
72};
73
74struct dcb_connector_table_entry {
75 uint32_t entry;
76 enum dcb_connector_type type;
77 uint8_t index;
78 uint8_t gpio_tag;
79};
80
81struct dcb_connector_table {
82 int entries;
83 struct dcb_connector_table_entry entry[DCB_MAX_NUM_CONNECTOR_ENTRIES];
84};
85
86enum dcb_type {
87 OUTPUT_ANALOG = 0,
88 OUTPUT_TV = 1,
89 OUTPUT_TMDS = 2,
90 OUTPUT_LVDS = 3,
91 OUTPUT_DP = 6,
92 OUTPUT_ANY = -1
93};
94
37struct dcb_entry { 95struct dcb_entry {
38 int index; /* may not be raw dcb index if merging has happened */ 96 int index; /* may not be raw dcb index if merging has happened */
39 uint8_t type; 97 enum dcb_type type;
40 uint8_t i2c_index; 98 uint8_t i2c_index;
41 uint8_t heads; 99 uint8_t heads;
42 uint8_t connector; 100 uint8_t connector;
@@ -71,69 +129,22 @@ struct dcb_entry {
71 bool i2c_upper_default; 129 bool i2c_upper_default;
72}; 130};
73 131
74struct dcb_i2c_entry { 132struct dcb_table {
75 uint8_t port_type; 133 uint8_t version;
76 uint8_t read, write;
77 struct nouveau_i2c_chan *chan;
78};
79 134
80struct parsed_dcb {
81 int entries; 135 int entries;
82 struct dcb_entry entry[DCB_MAX_NUM_ENTRIES]; 136 struct dcb_entry entry[DCB_MAX_NUM_ENTRIES];
83 struct dcb_i2c_entry i2c[DCB_MAX_NUM_I2C_ENTRIES];
84};
85
86enum dcb_gpio_tag {
87 DCB_GPIO_TVDAC0 = 0xc,
88 DCB_GPIO_TVDAC1 = 0x2d,
89};
90
91struct dcb_gpio_entry {
92 enum dcb_gpio_tag tag;
93 int line;
94 bool invert;
95};
96
97struct parsed_dcb_gpio {
98 int entries;
99 struct dcb_gpio_entry entry[DCB_MAX_NUM_GPIO_ENTRIES];
100};
101
102struct dcb_connector_table_entry {
103 uint32_t entry;
104 uint8_t type;
105 uint8_t index;
106 uint8_t gpio_tag;
107};
108
109struct dcb_connector_table {
110 int entries;
111 struct dcb_connector_table_entry entry[DCB_MAX_NUM_CONNECTOR_ENTRIES];
112};
113
114struct bios_parsed_dcb {
115 uint8_t version;
116
117 struct parsed_dcb dcb;
118 137
119 uint8_t *i2c_table; 138 uint8_t *i2c_table;
120 uint8_t i2c_default_indices; 139 uint8_t i2c_default_indices;
140 struct dcb_i2c_entry i2c[DCB_MAX_NUM_I2C_ENTRIES];
121 141
122 uint16_t gpio_table_ptr; 142 uint16_t gpio_table_ptr;
123 struct parsed_dcb_gpio gpio; 143 struct dcb_gpio_table gpio;
124 uint16_t connector_table_ptr; 144 uint16_t connector_table_ptr;
125 struct dcb_connector_table connector; 145 struct dcb_connector_table connector;
126}; 146};
127 147
128enum nouveau_encoder_type {
129 OUTPUT_ANALOG = 0,
130 OUTPUT_TV = 1,
131 OUTPUT_TMDS = 2,
132 OUTPUT_LVDS = 3,
133 OUTPUT_DP = 6,
134 OUTPUT_ANY = -1
135};
136
137enum nouveau_or { 148enum nouveau_or {
138 OUTPUT_A = (1 << 0), 149 OUTPUT_A = (1 << 0),
139 OUTPUT_B = (1 << 1), 150 OUTPUT_B = (1 << 1),
@@ -190,8 +201,8 @@ struct pll_lims {
190 int refclk; 201 int refclk;
191}; 202};
192 203
193struct nouveau_bios_info { 204struct nvbios {
194 struct parsed_dcb *dcb; 205 struct drm_device *dev;
195 206
196 uint8_t chip_version; 207 uint8_t chip_version;
197 208
@@ -199,11 +210,6 @@ struct nouveau_bios_info {
199 uint32_t tvdactestval; 210 uint32_t tvdactestval;
200 uint8_t digital_min_front_porch; 211 uint8_t digital_min_front_porch;
201 bool fp_no_ddc; 212 bool fp_no_ddc;
202};
203
204struct nvbios {
205 struct drm_device *dev;
206 struct nouveau_bios_info pub;
207 213
208 struct mutex lock; 214 struct mutex lock;
209 215
@@ -234,7 +240,7 @@ struct nvbios {
234 uint16_t some_script_ptr; /* BIT I + 14 */ 240 uint16_t some_script_ptr; /* BIT I + 14 */
235 uint16_t init96_tbl_ptr; /* BIT I + 16 */ 241 uint16_t init96_tbl_ptr; /* BIT I + 16 */
236 242
237 struct bios_parsed_dcb bdcb; 243 struct dcb_table dcb;
238 244
239 struct { 245 struct {
240 int crtchead; 246 int crtchead;
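
Net effect of the header shuffle above: parsed_dcb, parsed_dcb_gpio and bios_parsed_dcb collapse into dcb_table and dcb_gpio_table, and the encoder-type enum moves from nouveau_encoder_type to dcb_type, with dcb_entry.type now carrying the enum instead of a raw uint8_t. Every access path through the driver shortens by one level (fragments for comparison):

    /* old */ struct dcb_entry *e = &bios->bdcb.dcb.entry[i];
    /* new */ struct dcb_entry *e = &bios->dcb.entry[i];

    /* old */ if (bios->bdcb.version >= 0x40) { /* ... */ }
    /* new */ if (bios->dcb.version >= 0x40) { /* ... */ }
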
diff --git a/drivers/gpu/drm/nouveau/nouveau_calc.c b/drivers/gpu/drm/nouveau/nouveau_calc.c
index ee2b84504d05..88f9bc0941eb 100644
--- a/drivers/gpu/drm/nouveau/nouveau_calc.c
+++ b/drivers/gpu/drm/nouveau/nouveau_calc.c
@@ -274,7 +274,7 @@ getMNP_single(struct drm_device *dev, struct pll_lims *pll_lim, int clk,
274 * returns calculated clock 274 * returns calculated clock
275 */ 275 */
276 struct drm_nouveau_private *dev_priv = dev->dev_private; 276 struct drm_nouveau_private *dev_priv = dev->dev_private;
277 int cv = dev_priv->vbios->chip_version; 277 int cv = dev_priv->vbios.chip_version;
278 int minvco = pll_lim->vco1.minfreq, maxvco = pll_lim->vco1.maxfreq; 278 int minvco = pll_lim->vco1.minfreq, maxvco = pll_lim->vco1.maxfreq;
279 int minM = pll_lim->vco1.min_m, maxM = pll_lim->vco1.max_m; 279 int minM = pll_lim->vco1.min_m, maxM = pll_lim->vco1.max_m;
280 int minN = pll_lim->vco1.min_n, maxN = pll_lim->vco1.max_n; 280 int minN = pll_lim->vco1.min_n, maxN = pll_lim->vco1.max_n;
@@ -373,7 +373,7 @@ getMNP_double(struct drm_device *dev, struct pll_lims *pll_lim, int clk,
373 * returns calculated clock 373 * returns calculated clock
374 */ 374 */
375 struct drm_nouveau_private *dev_priv = dev->dev_private; 375 struct drm_nouveau_private *dev_priv = dev->dev_private;
376 int chip_version = dev_priv->vbios->chip_version; 376 int chip_version = dev_priv->vbios.chip_version;
377 int minvco1 = pll_lim->vco1.minfreq, maxvco1 = pll_lim->vco1.maxfreq; 377 int minvco1 = pll_lim->vco1.minfreq, maxvco1 = pll_lim->vco1.maxfreq;
378 int minvco2 = pll_lim->vco2.minfreq, maxvco2 = pll_lim->vco2.maxfreq; 378 int minvco2 = pll_lim->vco2.minfreq, maxvco2 = pll_lim->vco2.maxfreq;
379 int minU1 = pll_lim->vco1.min_inputfreq, minU2 = pll_lim->vco2.min_inputfreq; 379 int minU1 = pll_lim->vco1.min_inputfreq, minU2 = pll_lim->vco2.min_inputfreq;
diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c
index 2281f99da7fc..6dfb425cbae9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_channel.c
+++ b/drivers/gpu/drm/nouveau/nouveau_channel.c
@@ -35,22 +35,27 @@ nouveau_channel_pushbuf_ctxdma_init(struct nouveau_channel *chan)
35 struct drm_nouveau_private *dev_priv = dev->dev_private; 35 struct drm_nouveau_private *dev_priv = dev->dev_private;
36 struct nouveau_bo *pb = chan->pushbuf_bo; 36 struct nouveau_bo *pb = chan->pushbuf_bo;
37 struct nouveau_gpuobj *pushbuf = NULL; 37 struct nouveau_gpuobj *pushbuf = NULL;
38 uint32_t start = pb->bo.mem.mm_node->start << PAGE_SHIFT;
39 int ret; 38 int ret;
40 39
40 if (dev_priv->card_type >= NV_50) {
41 ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0,
42 dev_priv->vm_end, NV_DMA_ACCESS_RO,
43 NV_DMA_TARGET_AGP, &pushbuf);
44 chan->pushbuf_base = pb->bo.offset;
45 } else
41 if (pb->bo.mem.mem_type == TTM_PL_TT) { 46 if (pb->bo.mem.mem_type == TTM_PL_TT) {
42 ret = nouveau_gpuobj_gart_dma_new(chan, 0, 47 ret = nouveau_gpuobj_gart_dma_new(chan, 0,
43 dev_priv->gart_info.aper_size, 48 dev_priv->gart_info.aper_size,
44 NV_DMA_ACCESS_RO, &pushbuf, 49 NV_DMA_ACCESS_RO, &pushbuf,
45 NULL); 50 NULL);
46 chan->pushbuf_base = start; 51 chan->pushbuf_base = pb->bo.mem.mm_node->start << PAGE_SHIFT;
47 } else 52 } else
48 if (dev_priv->card_type != NV_04) { 53 if (dev_priv->card_type != NV_04) {
49 ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0, 54 ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0,
50 dev_priv->fb_available_size, 55 dev_priv->fb_available_size,
51 NV_DMA_ACCESS_RO, 56 NV_DMA_ACCESS_RO,
52 NV_DMA_TARGET_VIDMEM, &pushbuf); 57 NV_DMA_TARGET_VIDMEM, &pushbuf);
53 chan->pushbuf_base = start; 58 chan->pushbuf_base = pb->bo.mem.mm_node->start << PAGE_SHIFT;
54 } else { 59 } else {
55	 /* NV04 cmdbuf hack, from original ddx.. not sure of its	60	 /* NV04 cmdbuf hack, from original ddx.. not sure of its
56 * exact reason for existing :) PCI access to cmdbuf in 61 * exact reason for existing :) PCI access to cmdbuf in
@@ -61,7 +66,7 @@ nouveau_channel_pushbuf_ctxdma_init(struct nouveau_channel *chan)
61 dev_priv->fb_available_size, 66 dev_priv->fb_available_size,
62 NV_DMA_ACCESS_RO, 67 NV_DMA_ACCESS_RO,
63 NV_DMA_TARGET_PCI, &pushbuf); 68 NV_DMA_TARGET_PCI, &pushbuf);
64 chan->pushbuf_base = start; 69 chan->pushbuf_base = pb->bo.mem.mm_node->start << PAGE_SHIFT;
65 } 70 }
66 71
67 ret = nouveau_gpuobj_ref_add(dev, chan, 0, pushbuf, &chan->pushbuf); 72 ret = nouveau_gpuobj_ref_add(dev, chan, 0, pushbuf, &chan->pushbuf);
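
On NV50 the push buffer is reached through the channel's virtual address space, so the new first branch builds one DMA object spanning 0..vm_end and takes pushbuf_base from the bo's virtual offset; the pre-NV50 branches keep deriving the base from the bo's physical placement in the aperture. Reduced to the base computation:

    if (dev_priv->card_type >= NV_50)
        chan->pushbuf_base = pb->bo.offset;               /* GPU virtual address */
    else
        chan->pushbuf_base = pb->bo.mem.mm_node->start << PAGE_SHIFT; /* physical */
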
@@ -275,9 +280,18 @@ nouveau_channel_free(struct nouveau_channel *chan)
275 */ 280 */
276 nouveau_fence_fini(chan); 281 nouveau_fence_fini(chan);
277 282
278 /* Ensure the channel is no longer active on the GPU */ 283 /* This will prevent pfifo from switching channels. */
279 pfifo->reassign(dev, false); 284 pfifo->reassign(dev, false);
280 285
286 /* We want to give pgraph a chance to idle and get rid of all potential
287 * errors. We need to do this before the lock, otherwise the irq handler
288 * is unable to process them.
289 */
290 if (pgraph->channel(dev) == chan)
291 nouveau_wait_for_idle(dev);
292
293 spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
294
281 pgraph->fifo_access(dev, false); 295 pgraph->fifo_access(dev, false);
282 if (pgraph->channel(dev) == chan) 296 if (pgraph->channel(dev) == chan)
283 pgraph->unload_context(dev); 297 pgraph->unload_context(dev);
@@ -293,6 +307,8 @@ nouveau_channel_free(struct nouveau_channel *chan)
293 307
294 pfifo->reassign(dev, true); 308 pfifo->reassign(dev, true);
295 309
310 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
311
296 /* Release the channel's resources */ 312 /* Release the channel's resources */
297 nouveau_gpuobj_ref_del(dev, &chan->pushbuf); 313 nouveau_gpuobj_ref_del(dev, &chan->pushbuf);
298 if (chan->pushbuf_bo) { 314 if (chan->pushbuf_bo) {
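
The teardown ordering in this hunk is deliberate: PFIFO is told to stop switching channels first, then PGRAPH is allowed to idle while interrupts can still be serviced (the added comment explains why the wait cannot sit inside the lock), and only then is context_switch_lock taken with IRQs off for the actual context unload. Skeleton of the sequence:

    pfifo->reassign(dev, false);          /* no more channel switches    */
    if (pgraph->channel(dev) == chan)
        nouveau_wait_for_idle(dev);       /* IRQ handler can still run   */

    spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
    /* ... disable fifo access, unload PFIFO/PGRAPH contexts ... */
    pfifo->reassign(dev, true);
    spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
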
@@ -369,6 +385,14 @@ nouveau_ioctl_fifo_alloc(struct drm_device *dev, void *data,
369 return ret; 385 return ret;
370 init->channel = chan->id; 386 init->channel = chan->id;
371 387
388 if (chan->dma.ib_max)
389 init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM |
390 NOUVEAU_GEM_DOMAIN_GART;
391 else if (chan->pushbuf_bo->bo.mem.mem_type == TTM_PL_VRAM)
392 init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM;
393 else
394 init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_GART;
395
372 init->subchan[0].handle = NvM2MF; 396 init->subchan[0].handle = NvM2MF;
373 if (dev_priv->card_type < NV_50) 397 if (dev_priv->card_type < NV_50)
374 init->subchan[0].grclass = 0x0039; 398 init->subchan[0].grclass = 0x0039;
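
The new pushbuf_domains field tells userspace where the push buffer may legally reside, so it can pick matching domains when building submissions. With indirect buffers enabled (ib_max set) segments can come from either VRAM or GART; otherwise the answer is fixed by where the pushbuf bo actually landed. The decision tree, condensed (in_vram stands in for the mem_type check):

    if (chan->dma.ib_max)       /* NV50 IB mode: segments from anywhere */
        domains = NOUVEAU_GEM_DOMAIN_VRAM | NOUVEAU_GEM_DOMAIN_GART;
    else if (in_vram)           /* pushbuf bo was placed in VRAM        */
        domains = NOUVEAU_GEM_DOMAIN_VRAM;
    else
        domains = NOUVEAU_GEM_DOMAIN_GART;
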
@@ -408,7 +432,6 @@ nouveau_ioctl_fifo_free(struct drm_device *dev, void *data,
408 ***********************************/ 432 ***********************************/
409 433
410struct drm_ioctl_desc nouveau_ioctls[] = { 434struct drm_ioctl_desc nouveau_ioctls[] = {
411 DRM_IOCTL_DEF(DRM_NOUVEAU_CARD_INIT, nouveau_ioctl_card_init, DRM_AUTH),
412 DRM_IOCTL_DEF(DRM_NOUVEAU_GETPARAM, nouveau_ioctl_getparam, DRM_AUTH), 435 DRM_IOCTL_DEF(DRM_NOUVEAU_GETPARAM, nouveau_ioctl_getparam, DRM_AUTH),
413 DRM_IOCTL_DEF(DRM_NOUVEAU_SETPARAM, nouveau_ioctl_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 436 DRM_IOCTL_DEF(DRM_NOUVEAU_SETPARAM, nouveau_ioctl_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
414 DRM_IOCTL_DEF(DRM_NOUVEAU_CHANNEL_ALLOC, nouveau_ioctl_fifo_alloc, DRM_AUTH), 437 DRM_IOCTL_DEF(DRM_NOUVEAU_CHANNEL_ALLOC, nouveau_ioctl_fifo_alloc, DRM_AUTH),
@@ -418,13 +441,9 @@ struct drm_ioctl_desc nouveau_ioctls[] = {
418 DRM_IOCTL_DEF(DRM_NOUVEAU_GPUOBJ_FREE, nouveau_ioctl_gpuobj_free, DRM_AUTH), 441 DRM_IOCTL_DEF(DRM_NOUVEAU_GPUOBJ_FREE, nouveau_ioctl_gpuobj_free, DRM_AUTH),
419 DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_NEW, nouveau_gem_ioctl_new, DRM_AUTH), 442 DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_NEW, nouveau_gem_ioctl_new, DRM_AUTH),
420 DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_PUSHBUF, nouveau_gem_ioctl_pushbuf, DRM_AUTH), 443 DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_PUSHBUF, nouveau_gem_ioctl_pushbuf, DRM_AUTH),
421 DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_PUSHBUF_CALL, nouveau_gem_ioctl_pushbuf_call, DRM_AUTH),
422 DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_PIN, nouveau_gem_ioctl_pin, DRM_AUTH),
423 DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_UNPIN, nouveau_gem_ioctl_unpin, DRM_AUTH),
424 DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_CPU_PREP, nouveau_gem_ioctl_cpu_prep, DRM_AUTH), 444 DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_CPU_PREP, nouveau_gem_ioctl_cpu_prep, DRM_AUTH),
425 DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_CPU_FINI, nouveau_gem_ioctl_cpu_fini, DRM_AUTH), 445 DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_CPU_FINI, nouveau_gem_ioctl_cpu_fini, DRM_AUTH),
426 DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_AUTH), 446 DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_AUTH),
427 DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_PUSHBUF_CALL2, nouveau_gem_ioctl_pushbuf_call2, DRM_AUTH),
428}; 447};
429 448
430int nouveau_max_ioctl = DRM_ARRAY_SIZE(nouveau_ioctls); 449int nouveau_max_ioctl = DRM_ARRAY_SIZE(nouveau_ioctls);
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index d2f63353ea97..24327f468c4b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -218,7 +218,7 @@ nouveau_connector_set_encoder(struct drm_connector *connector,
218 connector->interlace_allowed = true; 218 connector->interlace_allowed = true;
219 } 219 }
220 220
221 if (connector->connector_type == DRM_MODE_CONNECTOR_DVII) { 221 if (nv_connector->dcb->type == DCB_CONNECTOR_DVI_I) {
222 drm_connector_property_set_value(connector, 222 drm_connector_property_set_value(connector,
223 dev->mode_config.dvi_i_subconnector_property, 223 dev->mode_config.dvi_i_subconnector_property,
224 nv_encoder->dcb->type == OUTPUT_TMDS ? 224 nv_encoder->dcb->type == OUTPUT_TMDS ?
@@ -236,15 +236,17 @@ nouveau_connector_detect(struct drm_connector *connector)
236 struct nouveau_i2c_chan *i2c; 236 struct nouveau_i2c_chan *i2c;
237 int type, flags; 237 int type, flags;
238 238
239 if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS) 239 if (nv_connector->dcb->type == DCB_CONNECTOR_LVDS)
240 nv_encoder = find_encoder_by_type(connector, OUTPUT_LVDS); 240 nv_encoder = find_encoder_by_type(connector, OUTPUT_LVDS);
241 if (nv_encoder && nv_connector->native_mode) { 241 if (nv_encoder && nv_connector->native_mode) {
242 unsigned status = connector_status_connected;
243
242#ifdef CONFIG_ACPI 244#ifdef CONFIG_ACPI
243 if (!nouveau_ignorelid && !acpi_lid_open()) 245 if (!nouveau_ignorelid && !acpi_lid_open())
244 return connector_status_disconnected; 246 status = connector_status_unknown;
245#endif 247#endif
246 nouveau_connector_set_encoder(connector, nv_encoder); 248 nouveau_connector_set_encoder(connector, nv_encoder);
247 return connector_status_connected; 249 return status;
248 } 250 }
249 251
250 /* Cleanup the previous EDID block. */ 252 /* Cleanup the previous EDID block. */
@@ -279,7 +281,7 @@ nouveau_connector_detect(struct drm_connector *connector)
279 * same i2c channel so the value returned from ddc_detect 281 * same i2c channel so the value returned from ddc_detect
280 * isn't necessarily correct. 282 * isn't necessarily correct.
281 */ 283 */
282 if (connector->connector_type == DRM_MODE_CONNECTOR_DVII) { 284 if (nv_connector->dcb->type == DCB_CONNECTOR_DVI_I) {
283 if (nv_connector->edid->input & DRM_EDID_INPUT_DIGITAL) 285 if (nv_connector->edid->input & DRM_EDID_INPUT_DIGITAL)
284 type = OUTPUT_TMDS; 286 type = OUTPUT_TMDS;
285 else 287 else
@@ -321,11 +323,11 @@ detect_analog:
321static void 323static void
322nouveau_connector_force(struct drm_connector *connector) 324nouveau_connector_force(struct drm_connector *connector)
323{ 325{
324 struct drm_device *dev = connector->dev; 326 struct nouveau_connector *nv_connector = nouveau_connector(connector);
325 struct nouveau_encoder *nv_encoder; 327 struct nouveau_encoder *nv_encoder;
326 int type; 328 int type;
327 329
328 if (connector->connector_type == DRM_MODE_CONNECTOR_DVII) { 330 if (nv_connector->dcb->type == DCB_CONNECTOR_DVI_I) {
329 if (connector->force == DRM_FORCE_ON_DIGITAL) 331 if (connector->force == DRM_FORCE_ON_DIGITAL)
330 type = OUTPUT_TMDS; 332 type = OUTPUT_TMDS;
331 else 333 else
@@ -335,7 +337,7 @@ nouveau_connector_force(struct drm_connector *connector)
335 337
336 nv_encoder = find_encoder_by_type(connector, type); 338 nv_encoder = find_encoder_by_type(connector, type);
337 if (!nv_encoder) { 339 if (!nv_encoder) {
338 NV_ERROR(dev, "can't find encoder to force %s on!\n", 340 NV_ERROR(connector->dev, "can't find encoder to force %s on!\n",
339 drm_get_connector_name(connector)); 341 drm_get_connector_name(connector));
340 connector->status = connector_status_disconnected; 342 connector->status = connector_status_disconnected;
341 return; 343 return;
@@ -369,7 +371,7 @@ nouveau_connector_set_property(struct drm_connector *connector,
369 } 371 }
370 372
371 /* LVDS always needs gpu scaling */ 373 /* LVDS always needs gpu scaling */
372 if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS && 374 if (nv_connector->dcb->type == DCB_CONNECTOR_LVDS &&
373 value == DRM_MODE_SCALE_NONE) 375 value == DRM_MODE_SCALE_NONE)
374 return -EINVAL; 376 return -EINVAL;
375 377
@@ -535,7 +537,7 @@ nouveau_connector_get_modes(struct drm_connector *connector)
535 /* If we're not LVDS, destroy the previous native mode, the attached 537 /* If we're not LVDS, destroy the previous native mode, the attached
536 * monitor could have changed. 538 * monitor could have changed.
537 */ 539 */
538 if (connector->connector_type != DRM_MODE_CONNECTOR_LVDS && 540 if (nv_connector->dcb->type != DCB_CONNECTOR_LVDS &&
539 nv_connector->native_mode) { 541 nv_connector->native_mode) {
540 drm_mode_destroy(dev, nv_connector->native_mode); 542 drm_mode_destroy(dev, nv_connector->native_mode);
541 nv_connector->native_mode = NULL; 543 nv_connector->native_mode = NULL;
@@ -563,7 +565,7 @@ nouveau_connector_get_modes(struct drm_connector *connector)
563 ret = get_slave_funcs(nv_encoder)-> 565 ret = get_slave_funcs(nv_encoder)->
564 get_modes(to_drm_encoder(nv_encoder), connector); 566 get_modes(to_drm_encoder(nv_encoder), connector);
565 567
566 if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS) 568 if (nv_encoder->dcb->type == OUTPUT_LVDS)
567 ret += nouveau_connector_scaler_modes_add(connector); 569 ret += nouveau_connector_scaler_modes_add(connector);
568 570
569 return ret; 571 return ret;
@@ -613,6 +615,9 @@ nouveau_connector_mode_valid(struct drm_connector *connector,
613 615
614 clock *= 3; 616 clock *= 3;
615 break; 617 break;
618 default:
619 BUG_ON(1);
620 return MODE_BAD;
616 } 621 }
617 622
618 if (clock < min_clock) 623 if (clock < min_clock)
@@ -680,7 +685,7 @@ nouveau_connector_create_lvds(struct drm_device *dev,
680 /* Firstly try getting EDID over DDC, if allowed and I2C channel 685 /* Firstly try getting EDID over DDC, if allowed and I2C channel
681 * is available. 686 * is available.
682 */ 687 */
683 if (!dev_priv->VBIOS.pub.fp_no_ddc && nv_encoder->dcb->i2c_index < 0xf) 688 if (!dev_priv->vbios.fp_no_ddc && nv_encoder->dcb->i2c_index < 0xf)
684 i2c = nouveau_i2c_find(dev, nv_encoder->dcb->i2c_index); 689 i2c = nouveau_i2c_find(dev, nv_encoder->dcb->i2c_index);
685 690
686 if (i2c) { 691 if (i2c) {
@@ -695,7 +700,7 @@ nouveau_connector_create_lvds(struct drm_device *dev,
695 */ 700 */
696 if (!nv_connector->edid && nouveau_bios_fp_mode(dev, &native) && 701 if (!nv_connector->edid && nouveau_bios_fp_mode(dev, &native) &&
697 (nv_encoder->dcb->lvdsconf.use_straps_for_mode || 702 (nv_encoder->dcb->lvdsconf.use_straps_for_mode ||
698 dev_priv->VBIOS.pub.fp_no_ddc)) { 703 dev_priv->vbios.fp_no_ddc)) {
699 nv_connector->native_mode = drm_mode_duplicate(dev, &native); 704 nv_connector->native_mode = drm_mode_duplicate(dev, &native);
700 goto out; 705 goto out;
701 } 706 }
@@ -704,7 +709,7 @@ nouveau_connector_create_lvds(struct drm_device *dev,
704	 * stored in them for the panel.	709	 * stored in them for the panel.
705 */ 710 */
706 if (!nv_connector->edid && !nv_connector->native_mode && 711 if (!nv_connector->edid && !nv_connector->native_mode &&
707 !dev_priv->VBIOS.pub.fp_no_ddc) { 712 !dev_priv->vbios.fp_no_ddc) {
708 struct edid *edid = 713 struct edid *edid =
709 (struct edid *)nouveau_bios_embedded_edid(dev); 714 (struct edid *)nouveau_bios_embedded_edid(dev);
710 if (edid) { 715 if (edid) {
@@ -739,46 +744,66 @@ out:
739} 744}
740 745
741int 746int
742nouveau_connector_create(struct drm_device *dev, int index, int type) 747nouveau_connector_create(struct drm_device *dev,
748 struct dcb_connector_table_entry *dcb)
743{ 749{
744 struct drm_nouveau_private *dev_priv = dev->dev_private; 750 struct drm_nouveau_private *dev_priv = dev->dev_private;
745 struct nouveau_connector *nv_connector = NULL; 751 struct nouveau_connector *nv_connector = NULL;
746 struct drm_connector *connector; 752 struct drm_connector *connector;
747 struct drm_encoder *encoder; 753 struct drm_encoder *encoder;
748 int ret; 754 int ret, type;
749 755
750 NV_DEBUG_KMS(dev, "\n"); 756 NV_DEBUG_KMS(dev, "\n");
751 757
752 nv_connector = kzalloc(sizeof(*nv_connector), GFP_KERNEL); 758 switch (dcb->type) {
753 if (!nv_connector) 759 case DCB_CONNECTOR_NONE:
754 return -ENOMEM; 760 return 0;
755 nv_connector->dcb = nouveau_bios_connector_entry(dev, index); 761 case DCB_CONNECTOR_VGA:
756 connector = &nv_connector->base;
757
758 switch (type) {
759 case DRM_MODE_CONNECTOR_VGA:
760 NV_INFO(dev, "Detected a VGA connector\n"); 762 NV_INFO(dev, "Detected a VGA connector\n");
763 type = DRM_MODE_CONNECTOR_VGA;
761 break; 764 break;
762 case DRM_MODE_CONNECTOR_DVID: 765 case DCB_CONNECTOR_TV_0:
763 NV_INFO(dev, "Detected a DVI-D connector\n"); 766 case DCB_CONNECTOR_TV_1:
767 case DCB_CONNECTOR_TV_3:
768 NV_INFO(dev, "Detected a TV connector\n");
769 type = DRM_MODE_CONNECTOR_TV;
764 break; 770 break;
765 case DRM_MODE_CONNECTOR_DVII: 771 case DCB_CONNECTOR_DVI_I:
766 NV_INFO(dev, "Detected a DVI-I connector\n"); 772 NV_INFO(dev, "Detected a DVI-I connector\n");
773 type = DRM_MODE_CONNECTOR_DVII;
767 break; 774 break;
768 case DRM_MODE_CONNECTOR_LVDS: 775 case DCB_CONNECTOR_DVI_D:
769 NV_INFO(dev, "Detected a LVDS connector\n"); 776 NV_INFO(dev, "Detected a DVI-D connector\n");
777 type = DRM_MODE_CONNECTOR_DVID;
770 break; 778 break;
771 case DRM_MODE_CONNECTOR_TV: 779 case DCB_CONNECTOR_HDMI_0:
772 NV_INFO(dev, "Detected a TV connector\n"); 780 case DCB_CONNECTOR_HDMI_1:
781 NV_INFO(dev, "Detected a HDMI connector\n");
782 type = DRM_MODE_CONNECTOR_HDMIA;
783 break;
784 case DCB_CONNECTOR_LVDS:
785 NV_INFO(dev, "Detected a LVDS connector\n");
786 type = DRM_MODE_CONNECTOR_LVDS;
773 break; 787 break;
774 case DRM_MODE_CONNECTOR_DisplayPort: 788 case DCB_CONNECTOR_DP:
775 NV_INFO(dev, "Detected a DisplayPort connector\n"); 789 NV_INFO(dev, "Detected a DisplayPort connector\n");
790 type = DRM_MODE_CONNECTOR_DisplayPort;
776 break; 791 break;
777 default: 792 case DCB_CONNECTOR_eDP:
778 NV_ERROR(dev, "Unknown connector, this is not good.\n"); 793 NV_INFO(dev, "Detected an eDP connector\n");
794 type = DRM_MODE_CONNECTOR_eDP;
779 break; 795 break;
796 default:
797 NV_ERROR(dev, "unknown connector type: 0x%02x!!\n", dcb->type);
798 return -EINVAL;
780 } 799 }
781 800
801 nv_connector = kzalloc(sizeof(*nv_connector), GFP_KERNEL);
802 if (!nv_connector)
803 return -ENOMEM;
804 nv_connector->dcb = dcb;
805 connector = &nv_connector->base;
806
782 /* defaults, will get overridden in detect() */ 807 /* defaults, will get overridden in detect() */
783 connector->interlace_allowed = false; 808 connector->interlace_allowed = false;
784 connector->doublescan_allowed = false; 809 connector->doublescan_allowed = false;
@@ -786,55 +811,65 @@ nouveau_connector_create(struct drm_device *dev, int index, int type)
786 drm_connector_init(dev, connector, &nouveau_connector_funcs, type); 811 drm_connector_init(dev, connector, &nouveau_connector_funcs, type);
787 drm_connector_helper_add(connector, &nouveau_connector_helper_funcs); 812 drm_connector_helper_add(connector, &nouveau_connector_helper_funcs);
788 813
814 /* attach encoders */
815 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
816 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
817
818 if (nv_encoder->dcb->connector != dcb->index)
819 continue;
820
821 if (get_slave_funcs(nv_encoder))
822 get_slave_funcs(nv_encoder)->create_resources(encoder, connector);
823
824 drm_mode_connector_attach_encoder(connector, encoder);
825 }
826
827 if (!connector->encoder_ids[0]) {
828 NV_WARN(dev, " no encoders, ignoring\n");
829 drm_connector_cleanup(connector);
830 kfree(connector);
831 return 0;
832 }
833
789 /* Init DVI-I specific properties */ 834 /* Init DVI-I specific properties */
790 if (type == DRM_MODE_CONNECTOR_DVII) { 835 if (dcb->type == DCB_CONNECTOR_DVI_I) {
791 drm_mode_create_dvi_i_properties(dev); 836 drm_mode_create_dvi_i_properties(dev);
792 drm_connector_attach_property(connector, dev->mode_config.dvi_i_subconnector_property, 0); 837 drm_connector_attach_property(connector, dev->mode_config.dvi_i_subconnector_property, 0);
793 drm_connector_attach_property(connector, dev->mode_config.dvi_i_select_subconnector_property, 0); 838 drm_connector_attach_property(connector, dev->mode_config.dvi_i_select_subconnector_property, 0);
794 } 839 }
795 840
796 if (type != DRM_MODE_CONNECTOR_LVDS) 841 if (dcb->type != DCB_CONNECTOR_LVDS)
797 nv_connector->use_dithering = false; 842 nv_connector->use_dithering = false;
798 843
799 if (type == DRM_MODE_CONNECTOR_DVID || 844 switch (dcb->type) {
800 type == DRM_MODE_CONNECTOR_DVII || 845 case DCB_CONNECTOR_VGA:
801 type == DRM_MODE_CONNECTOR_LVDS || 846 if (dev_priv->card_type >= NV_50) {
802 type == DRM_MODE_CONNECTOR_DisplayPort) {
803 nv_connector->scaling_mode = DRM_MODE_SCALE_FULLSCREEN;
804
805 drm_connector_attach_property(connector, dev->mode_config.scaling_mode_property,
806 nv_connector->scaling_mode);
807 drm_connector_attach_property(connector, dev->mode_config.dithering_mode_property,
808 nv_connector->use_dithering ? DRM_MODE_DITHERING_ON
809 : DRM_MODE_DITHERING_OFF);
810
811 } else {
812 nv_connector->scaling_mode = DRM_MODE_SCALE_NONE;
813
814 if (type == DRM_MODE_CONNECTOR_VGA &&
815 dev_priv->card_type >= NV_50) {
816 drm_connector_attach_property(connector, 847 drm_connector_attach_property(connector,
817 dev->mode_config.scaling_mode_property, 848 dev->mode_config.scaling_mode_property,
818 nv_connector->scaling_mode); 849 nv_connector->scaling_mode);
819 } 850 }
820 } 851 /* fall-through */
821 852 case DCB_CONNECTOR_TV_0:
822 /* attach encoders */ 853 case DCB_CONNECTOR_TV_1:
823 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { 854 case DCB_CONNECTOR_TV_3:
824 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); 855 nv_connector->scaling_mode = DRM_MODE_SCALE_NONE;
825 856 break;
826 if (nv_encoder->dcb->connector != index) 857 default:
827 continue; 858 nv_connector->scaling_mode = DRM_MODE_SCALE_FULLSCREEN;
828
829 if (get_slave_funcs(nv_encoder))
830 get_slave_funcs(nv_encoder)->create_resources(encoder, connector);
831 859
832 drm_mode_connector_attach_encoder(connector, encoder); 860 drm_connector_attach_property(connector,
861 dev->mode_config.scaling_mode_property,
862 nv_connector->scaling_mode);
863 drm_connector_attach_property(connector,
864 dev->mode_config.dithering_mode_property,
865 nv_connector->use_dithering ?
866 DRM_MODE_DITHERING_ON : DRM_MODE_DITHERING_OFF);
867 break;
833 } 868 }
834 869
835 drm_sysfs_connector_add(connector); 870 drm_sysfs_connector_add(connector);
836 871
837 if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS) { 872 if (dcb->type == DCB_CONNECTOR_LVDS) {
838 ret = nouveau_connector_create_lvds(dev, connector); 873 ret = nouveau_connector_create_lvds(dev, connector);
839 if (ret) { 874 if (ret) {
840 connector->funcs->destroy(connector); 875 connector->funcs->destroy(connector);
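
nouveau_connector_create() now takes the DCB connector-table entry instead of a pre-translated (index, DRM type) pair: the DCB type both selects the DRM connector type and drives the property setup later in the function. Encoders are also attached before any properties are created, so a connector that no DCB output routes to can be dropped before it has side effects:

    /* DCB tables may describe connectors no encoder uses; that is not
     * an error, the connector is simply never exposed. */
    if (!connector->encoder_ids[0]) {
        drm_connector_cleanup(connector);
        kfree(connector);
        return 0;
    }
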
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.h b/drivers/gpu/drm/nouveau/nouveau_connector.h
index 728b8090e5ff..4ef38abc2d9c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.h
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.h
@@ -49,6 +49,7 @@ static inline struct nouveau_connector *nouveau_connector(
49 return container_of(con, struct nouveau_connector, base); 49 return container_of(con, struct nouveau_connector, base);
50} 50}
51 51
52int nouveau_connector_create(struct drm_device *dev, int i2c_index, int type); 52int nouveau_connector_create(struct drm_device *,
53 struct dcb_connector_table_entry *);
53 54
54#endif /* __NOUVEAU_CONNECTOR_H__ */ 55#endif /* __NOUVEAU_CONNECTOR_H__ */
diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.c b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
index d79db3698f16..8ff9ef5d4b47 100644
--- a/drivers/gpu/drm/nouveau/nouveau_debugfs.c
+++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
@@ -47,12 +47,23 @@ nouveau_debugfs_channel_info(struct seq_file *m, void *data)
47 seq_printf(m, " cur: 0x%08x\n", chan->dma.cur << 2); 47 seq_printf(m, " cur: 0x%08x\n", chan->dma.cur << 2);
48 seq_printf(m, " put: 0x%08x\n", chan->dma.put << 2); 48 seq_printf(m, " put: 0x%08x\n", chan->dma.put << 2);
49 seq_printf(m, " free: 0x%08x\n", chan->dma.free << 2); 49 seq_printf(m, " free: 0x%08x\n", chan->dma.free << 2);
50 if (chan->dma.ib_max) {
51 seq_printf(m, " ib max: 0x%08x\n", chan->dma.ib_max);
52 seq_printf(m, " ib put: 0x%08x\n", chan->dma.ib_put);
53 seq_printf(m, " ib free: 0x%08x\n", chan->dma.ib_free);
54 }
50 55
51 seq_printf(m, "gpu fifo state:\n"); 56 seq_printf(m, "gpu fifo state:\n");
52 seq_printf(m, " get: 0x%08x\n", 57 seq_printf(m, " get: 0x%08x\n",
53 nvchan_rd32(chan, chan->user_get)); 58 nvchan_rd32(chan, chan->user_get));
54 seq_printf(m, " put: 0x%08x\n", 59 seq_printf(m, " put: 0x%08x\n",
55 nvchan_rd32(chan, chan->user_put)); 60 nvchan_rd32(chan, chan->user_put));
61 if (chan->dma.ib_max) {
62 seq_printf(m, " ib get: 0x%08x\n",
63 nvchan_rd32(chan, 0x88));
64 seq_printf(m, " ib put: 0x%08x\n",
65 nvchan_rd32(chan, 0x8c));
66 }
56 67
57 seq_printf(m, "last fence : %d\n", chan->fence.sequence); 68 seq_printf(m, "last fence : %d\n", chan->fence.sequence);
58 seq_printf(m, "last signalled: %d\n", chan->fence.sequence_ack); 69 seq_printf(m, "last signalled: %d\n", chan->fence.sequence_ack);
@@ -133,9 +144,22 @@ nouveau_debugfs_memory_info(struct seq_file *m, void *data)
133 return 0; 144 return 0;
134} 145}
135 146
147static int
148nouveau_debugfs_vbios_image(struct seq_file *m, void *data)
149{
150 struct drm_info_node *node = (struct drm_info_node *) m->private;
151 struct drm_nouveau_private *dev_priv = node->minor->dev->dev_private;
152 int i;
153
154 for (i = 0; i < dev_priv->vbios.length; i++)
155 seq_printf(m, "%c", dev_priv->vbios.data[i]);
156 return 0;
157}
158
136static struct drm_info_list nouveau_debugfs_list[] = { 159static struct drm_info_list nouveau_debugfs_list[] = {
137 { "chipset", nouveau_debugfs_chipset_info, 0, NULL }, 160 { "chipset", nouveau_debugfs_chipset_info, 0, NULL },
138 { "memory", nouveau_debugfs_memory_info, 0, NULL }, 161 { "memory", nouveau_debugfs_memory_info, 0, NULL },
162 { "vbios.rom", nouveau_debugfs_vbios_image, 0, NULL },
139}; 163};
140#define NOUVEAU_DEBUGFS_ENTRIES ARRAY_SIZE(nouveau_debugfs_list) 164#define NOUVEAU_DEBUGFS_ENTRIES ARRAY_SIZE(nouveau_debugfs_list)
141 165
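
A side note on the new vbios.rom file: emitting the ROM one seq_printf(m, "%c", ...) per byte works, but for a multi-kilobyte image seq_write() hands the whole blob to the seq_file layer in one call. An equivalent, untested sketch:

    static int
    nouveau_debugfs_vbios_image(struct seq_file *m, void *data)
    {
        struct drm_info_node *node = m->private;
        struct drm_nouveau_private *dev_priv = node->minor->dev->dev_private;

        /* dump the raw VBIOS image in a single write */
        seq_write(m, dev_priv->vbios.data, dev_priv->vbios.length);
        return 0;
    }
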
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index dfc94391d71e..cf1c5c0a0abe 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -39,11 +39,8 @@ nouveau_user_framebuffer_destroy(struct drm_framebuffer *drm_fb)
39 if (drm_fb->fbdev) 39 if (drm_fb->fbdev)
40 nouveau_fbcon_remove(dev, drm_fb); 40 nouveau_fbcon_remove(dev, drm_fb);
41 41
42 if (fb->nvbo) { 42 if (fb->nvbo)
43 mutex_lock(&dev->struct_mutex); 43 drm_gem_object_unreference_unlocked(fb->nvbo->gem);
44 drm_gem_object_unreference(fb->nvbo->gem);
45 mutex_unlock(&dev->struct_mutex);
46 }
47 44
48 drm_framebuffer_cleanup(drm_fb); 45 drm_framebuffer_cleanup(drm_fb);
49 kfree(fb); 46 kfree(fb);
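
Same conversion recurs in nouveau_fbcon.c and nouveau_gem.c below: the open-coded struct_mutex dance around drm_gem_object_unreference() becomes one call to the _unlocked helper, which arranges the locking itself when the reference actually drops to zero. Pattern for pattern:

    /* Old pattern: caller wraps the put in struct_mutex. */
    mutex_lock(&dev->struct_mutex);
    drm_gem_object_unreference(fb->nvbo->gem);
    mutex_unlock(&dev->struct_mutex);

    /* New pattern: the helper handles struct_mutex internally. */
    drm_gem_object_unreference_unlocked(fb->nvbo->gem);
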
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c
index 50d9e67745af..c8482a108a78 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.c
@@ -32,7 +32,22 @@
32void 32void
33nouveau_dma_pre_init(struct nouveau_channel *chan) 33nouveau_dma_pre_init(struct nouveau_channel *chan)
34{ 34{
35 chan->dma.max = (chan->pushbuf_bo->bo.mem.size >> 2) - 2; 35 struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
36 struct nouveau_bo *pushbuf = chan->pushbuf_bo;
37
38 if (dev_priv->card_type == NV_50) {
39 const int ib_size = pushbuf->bo.mem.size / 2;
40
41 chan->dma.ib_base = (pushbuf->bo.mem.size - ib_size) >> 2;
42 chan->dma.ib_max = (ib_size / 8) - 1;
43 chan->dma.ib_put = 0;
44 chan->dma.ib_free = chan->dma.ib_max - chan->dma.ib_put;
45
46 chan->dma.max = (pushbuf->bo.mem.size - ib_size) >> 2;
47 } else {
48 chan->dma.max = (pushbuf->bo.mem.size >> 2) - 2;
49 }
50
36 chan->dma.put = 0; 51 chan->dma.put = 0;
37 chan->dma.cur = chan->dma.put; 52 chan->dma.cur = chan->dma.put;
38 chan->dma.free = chan->dma.max - chan->dma.cur; 53 chan->dma.free = chan->dma.max - chan->dma.cur;
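
On NV_50 the push buffer is split in half by the hunk above: the top half becomes the indirect-buffer (IB) ring, whose slots are two 32-bit words (8 bytes) each, and only the bottom half still holds command words. Worked through for an assumed 64 KiB pushbuf bo:

    const int size    = 64 * 1024;              /* assumed bo size            */
    const int ib_size = size / 2;               /* 32768 bytes of IB ring     */
    const int ib_base = (size - ib_size) >> 2;  /* word index 8192            */
    const int ib_max  = (ib_size / 8) - 1;      /* 4095: 4096 slots of 8 B    */
    const int max     = (size - ib_size) >> 2;  /* 8192 command words remain  */

The put pointer later wraps with "& ib_max", which relies on the slot count being a power of two; that holds for any power-of-two bo size.
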
@@ -162,12 +177,101 @@ READ_GET(struct nouveau_channel *chan, uint32_t *prev_get, uint32_t *timeout)
162 return (val - chan->pushbuf_base) >> 2; 177 return (val - chan->pushbuf_base) >> 2;
163} 178}
164 179
180void
181nv50_dma_push(struct nouveau_channel *chan, struct nouveau_bo *bo,
182 int delta, int length)
183{
184 struct nouveau_bo *pb = chan->pushbuf_bo;
185 uint64_t offset = bo->bo.offset + delta;
186 int ip = (chan->dma.ib_put * 2) + chan->dma.ib_base;
187
188 BUG_ON(chan->dma.ib_free < 1);
189 nouveau_bo_wr32(pb, ip++, lower_32_bits(offset));
190 nouveau_bo_wr32(pb, ip++, upper_32_bits(offset) | length << 8);
191
192 chan->dma.ib_put = (chan->dma.ib_put + 1) & chan->dma.ib_max;
193 nvchan_wr32(chan, 0x8c, chan->dma.ib_put);
194 chan->dma.ib_free--;
195}
196
197static int
198nv50_dma_push_wait(struct nouveau_channel *chan, int count)
199{
200 uint32_t cnt = 0, prev_get = 0;
201
202 while (chan->dma.ib_free < count) {
203 uint32_t get = nvchan_rd32(chan, 0x88);
204 if (get != prev_get) {
205 prev_get = get;
206 cnt = 0;
207 }
208
209 if ((++cnt & 0xff) == 0) {
210 DRM_UDELAY(1);
211 if (cnt > 100000)
212 return -EBUSY;
213 }
214
215 chan->dma.ib_free = get - chan->dma.ib_put;
216 if (chan->dma.ib_free <= 0)
217 chan->dma.ib_free += chan->dma.ib_max + 1;
218 }
219
220 return 0;
221}
222
223static int
224nv50_dma_wait(struct nouveau_channel *chan, int slots, int count)
225{
226 uint32_t cnt = 0, prev_get = 0;
227 int ret;
228
229 ret = nv50_dma_push_wait(chan, slots + 1);
230 if (unlikely(ret))
231 return ret;
232
233 while (chan->dma.free < count) {
234 int get = READ_GET(chan, &prev_get, &cnt);
235 if (unlikely(get < 0)) {
236 if (get == -EINVAL)
237 continue;
238
239 return get;
240 }
241
242 if (get <= chan->dma.cur) {
243 chan->dma.free = chan->dma.max - chan->dma.cur;
244 if (chan->dma.free >= count)
245 break;
246
247 FIRE_RING(chan);
248 do {
249 get = READ_GET(chan, &prev_get, &cnt);
250 if (unlikely(get < 0)) {
251 if (get == -EINVAL)
252 continue;
253 return get;
254 }
255 } while (get == 0);
256 chan->dma.cur = 0;
257 chan->dma.put = 0;
258 }
259
260 chan->dma.free = get - chan->dma.cur - 1;
261 }
262
263 return 0;
264}
265
165int 266int
166nouveau_dma_wait(struct nouveau_channel *chan, int size) 267nouveau_dma_wait(struct nouveau_channel *chan, int slots, int size)
167{ 268{
168 uint32_t prev_get = 0, cnt = 0; 269 uint32_t prev_get = 0, cnt = 0;
169 int get; 270 int get;
170 271
272 if (chan->dma.ib_max)
273 return nv50_dma_wait(chan, slots, size);
274
171 while (chan->dma.free < size) { 275 while (chan->dma.free < size) {
172 get = READ_GET(chan, &prev_get, &cnt); 276 get = READ_GET(chan, &prev_get, &cnt);
173 if (unlikely(get == -EBUSY)) 277 if (unlikely(get == -EBUSY))
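
nv50_dma_push() above writes one two-word IB slot per submission: the low word is the segment's GPU virtual address bits 31:0, and the high word packs the upper address bits together with the segment length shifted left by 8. The ring's GET/PUT indices live in the channel control area at offsets 0x88/0x8c, which is also what nv50_dma_push_wait() polls. The slot encoding, isolated:

    uint64_t offset = bo->bo.offset + delta;        /* GPU VA of the segment  */
    uint32_t lo = lower_32_bits(offset);
    uint32_t hi = upper_32_bits(offset) | (length << 8);
    /* slot = { lo, hi }; put advances modulo ib_max + 1 (power of two) */
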
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.h b/drivers/gpu/drm/nouveau/nouveau_dma.h
index dabfd655f93e..8b05c15866d5 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.h
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.h
@@ -31,6 +31,9 @@
31#define NOUVEAU_DMA_DEBUG 0 31#define NOUVEAU_DMA_DEBUG 0
32#endif 32#endif
33 33
34void nv50_dma_push(struct nouveau_channel *, struct nouveau_bo *,
35 int delta, int length);
36
34/* 37/*
35 * There's a hw race condition where you can't jump to your PUT offset, 38 * There's a hw race condition where you can't jump to your PUT offset,
36 * to avoid this we jump to offset + SKIPS and fill the difference with 39 * to avoid this we jump to offset + SKIPS and fill the difference with
@@ -96,13 +99,11 @@ enum {
96static __must_check inline int 99static __must_check inline int
97RING_SPACE(struct nouveau_channel *chan, int size) 100RING_SPACE(struct nouveau_channel *chan, int size)
98{ 101{
99 if (chan->dma.free < size) { 102 int ret;
100 int ret;
101 103
102 ret = nouveau_dma_wait(chan, size); 104 ret = nouveau_dma_wait(chan, 1, size);
103 if (ret) 105 if (ret)
104 return ret; 106 return ret;
105 }
106 107
107 chan->dma.free -= size; 108 chan->dma.free -= size;
108 return 0; 109 return 0;
@@ -146,7 +147,13 @@ FIRE_RING(struct nouveau_channel *chan)
146 return; 147 return;
147 chan->accel_done = true; 148 chan->accel_done = true;
148 149
149 WRITE_PUT(chan->dma.cur); 150 if (chan->dma.ib_max) {
151 nv50_dma_push(chan, chan->pushbuf_bo, chan->dma.put << 2,
152 (chan->dma.cur - chan->dma.put) << 2);
153 } else {
154 WRITE_PUT(chan->dma.cur);
155 }
156
150 chan->dma.put = chan->dma.cur; 157 chan->dma.put = chan->dma.cur;
151} 158}
152 159
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c
index da3b93b84502..30cc09e8a709 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.c
@@ -75,11 +75,11 @@ MODULE_PARM_DESC(ignorelid, "Ignore ACPI lid status");
75int nouveau_ignorelid = 0; 75int nouveau_ignorelid = 0;
76module_param_named(ignorelid, nouveau_ignorelid, int, 0400); 76module_param_named(ignorelid, nouveau_ignorelid, int, 0400);
77 77
78MODULE_PARM_DESC(noagp, "Disable all acceleration"); 78MODULE_PARM_DESC(noaccel, "Disable all acceleration");
79int nouveau_noaccel = 0; 79int nouveau_noaccel = 0;
80module_param_named(noaccel, nouveau_noaccel, int, 0400); 80module_param_named(noaccel, nouveau_noaccel, int, 0400);
81 81
82MODULE_PARM_DESC(noagp, "Disable fbcon acceleration"); 82MODULE_PARM_DESC(nofbaccel, "Disable fbcon acceleration");
83int nouveau_nofbaccel = 0; 83int nouveau_nofbaccel = 0;
84module_param_named(nofbaccel, nouveau_nofbaccel, int, 0400); 84module_param_named(nofbaccel, nouveau_nofbaccel, int, 0400);
85 85
@@ -135,7 +135,7 @@ nouveau_pci_remove(struct pci_dev *pdev)
135 drm_put_dev(dev); 135 drm_put_dev(dev);
136} 136}
137 137
138static int 138int
139nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state) 139nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state)
140{ 140{
141 struct drm_device *dev = pci_get_drvdata(pdev); 141 struct drm_device *dev = pci_get_drvdata(pdev);
@@ -233,7 +233,7 @@ out_abort:
233 return ret; 233 return ret;
234} 234}
235 235
236static int 236int
237nouveau_pci_resume(struct pci_dev *pdev) 237nouveau_pci_resume(struct pci_dev *pdev)
238{ 238{
239 struct drm_device *dev = pci_get_drvdata(pdev); 239 struct drm_device *dev = pci_get_drvdata(pdev);
@@ -402,8 +402,10 @@ static int __init nouveau_init(void)
402 nouveau_modeset = 1; 402 nouveau_modeset = 1;
403 } 403 }
404 404
405 if (nouveau_modeset == 1) 405 if (nouveau_modeset == 1) {
406 driver.driver_features |= DRIVER_MODESET; 406 driver.driver_features |= DRIVER_MODESET;
407 nouveau_register_dsm_handler();
408 }
407 409
408 return drm_init(&driver); 410 return drm_init(&driver);
409} 411}
@@ -411,6 +413,7 @@ static int __init nouveau_init(void)
411static void __exit nouveau_exit(void) 413static void __exit nouveau_exit(void)
412{ 414{
413 drm_exit(&driver); 415 drm_exit(&driver);
416 nouveau_unregister_dsm_handler();
414} 417}
415 418
416module_init(nouveau_init); 419module_init(nouveau_init);
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 1c15ef37b71c..5f8d987af363 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -34,7 +34,7 @@
34 34
35#define DRIVER_MAJOR 0 35#define DRIVER_MAJOR 0
36#define DRIVER_MINOR 0 36#define DRIVER_MINOR 0
37#define DRIVER_PATCHLEVEL 15 37#define DRIVER_PATCHLEVEL 16
38 38
39#define NOUVEAU_FAMILY 0x0000FFFF 39#define NOUVEAU_FAMILY 0x0000FFFF
40#define NOUVEAU_FLAGS 0xFFFF0000 40#define NOUVEAU_FLAGS 0xFFFF0000
@@ -83,6 +83,7 @@ struct nouveau_bo {
83 struct drm_file *reserved_by; 83 struct drm_file *reserved_by;
84 struct list_head entry; 84 struct list_head entry;
85 int pbbo_index; 85 int pbbo_index;
86 bool validate_mapped;
86 87
87 struct nouveau_channel *channel; 88 struct nouveau_channel *channel;
88 89
@@ -239,6 +240,11 @@ struct nouveau_channel {
239 int cur; 240 int cur;
240 int put; 241 int put;
241 /* access via pushbuf_bo */ 242 /* access via pushbuf_bo */
243
244 int ib_base;
245 int ib_max;
246 int ib_free;
247 int ib_put;
242 } dma; 248 } dma;
243 249
244 uint32_t sw_subchannel[8]; 250 uint32_t sw_subchannel[8];
@@ -533,6 +539,9 @@ struct drm_nouveau_private {
533 struct nouveau_engine engine; 539 struct nouveau_engine engine;
534 struct nouveau_channel *channel; 540 struct nouveau_channel *channel;
535 541
542 /* For PFIFO and PGRAPH. */
543 spinlock_t context_switch_lock;
544
536 /* RAMIN configuration, RAMFC, RAMHT and RAMRO offsets */ 545 /* RAMIN configuration, RAMFC, RAMHT and RAMRO offsets */
537 struct nouveau_gpuobj *ramht; 546 struct nouveau_gpuobj *ramht;
538 uint32_t ramin_rsvd_vram; 547 uint32_t ramin_rsvd_vram;
@@ -596,8 +605,7 @@ struct drm_nouveau_private {
596 605
597 struct list_head gpuobj_list; 606 struct list_head gpuobj_list;
598 607
599 struct nvbios VBIOS; 608 struct nvbios vbios;
600 struct nouveau_bios_info *vbios;
601 609
602 struct nv04_mode_state mode_reg; 610 struct nv04_mode_state mode_reg;
603 struct nv04_mode_state saved_reg; 611 struct nv04_mode_state saved_reg;
@@ -614,7 +622,6 @@ struct drm_nouveau_private {
614 } susres; 622 } susres;
615 623
616 struct backlight_device *backlight; 624 struct backlight_device *backlight;
617 bool acpi_dsm;
618 625
619 struct nouveau_channel *evo; 626 struct nouveau_channel *evo;
620 627
@@ -682,6 +689,9 @@ extern int nouveau_ignorelid;
682extern int nouveau_nofbaccel; 689extern int nouveau_nofbaccel;
683extern int nouveau_noaccel; 690extern int nouveau_noaccel;
684 691
692extern int nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state);
693extern int nouveau_pci_resume(struct pci_dev *pdev);
694
685/* nouveau_state.c */ 695/* nouveau_state.c */
686extern void nouveau_preclose(struct drm_device *dev, struct drm_file *); 696extern void nouveau_preclose(struct drm_device *dev, struct drm_file *);
687extern int nouveau_load(struct drm_device *, unsigned long flags); 697extern int nouveau_load(struct drm_device *, unsigned long flags);
@@ -696,12 +706,6 @@ extern bool nouveau_wait_until(struct drm_device *, uint64_t timeout,
696 uint32_t reg, uint32_t mask, uint32_t val); 706 uint32_t reg, uint32_t mask, uint32_t val);
697extern bool nouveau_wait_for_idle(struct drm_device *); 707extern bool nouveau_wait_for_idle(struct drm_device *);
698extern int nouveau_card_init(struct drm_device *); 708extern int nouveau_card_init(struct drm_device *);
699extern int nouveau_ioctl_card_init(struct drm_device *, void *data,
700 struct drm_file *);
701extern int nouveau_ioctl_suspend(struct drm_device *, void *data,
702 struct drm_file *);
703extern int nouveau_ioctl_resume(struct drm_device *, void *data,
704 struct drm_file *);
705 709
706/* nouveau_mem.c */ 710/* nouveau_mem.c */
707extern int nouveau_mem_init_heap(struct mem_block **, uint64_t start, 711extern int nouveau_mem_init_heap(struct mem_block **, uint64_t start,
@@ -845,21 +849,15 @@ nouveau_debugfs_channel_fini(struct nouveau_channel *chan)
845/* nouveau_dma.c */ 849/* nouveau_dma.c */
846extern void nouveau_dma_pre_init(struct nouveau_channel *); 850extern void nouveau_dma_pre_init(struct nouveau_channel *);
847extern int nouveau_dma_init(struct nouveau_channel *); 851extern int nouveau_dma_init(struct nouveau_channel *);
848extern int nouveau_dma_wait(struct nouveau_channel *, int size); 852extern int nouveau_dma_wait(struct nouveau_channel *, int slots, int size);
849 853
850/* nouveau_acpi.c */ 854/* nouveau_acpi.c */
851#ifdef CONFIG_ACPI 855#if defined(CONFIG_ACPI)
852extern int nouveau_hybrid_setup(struct drm_device *dev); 856void nouveau_register_dsm_handler(void);
853extern bool nouveau_dsm_probe(struct drm_device *dev); 857void nouveau_unregister_dsm_handler(void);
854#else 858#else
855static inline int nouveau_hybrid_setup(struct drm_device *dev) 859static inline void nouveau_register_dsm_handler(void) {}
856{ 860static inline void nouveau_unregister_dsm_handler(void) {}
857 return 0;
858}
859static inline bool nouveau_dsm_probe(struct drm_device *dev)
860{
861 return false;
862}
863#endif 861#endif
864 862
865/* nouveau_backlight.c */ 863/* nouveau_backlight.c */
@@ -1027,6 +1025,7 @@ extern void nv50_graph_destroy_context(struct nouveau_channel *);
1027extern int nv50_graph_load_context(struct nouveau_channel *); 1025extern int nv50_graph_load_context(struct nouveau_channel *);
1028extern int nv50_graph_unload_context(struct drm_device *); 1026extern int nv50_graph_unload_context(struct drm_device *);
1029extern void nv50_graph_context_switch(struct drm_device *); 1027extern void nv50_graph_context_switch(struct drm_device *);
1028extern int nv50_grctx_init(struct nouveau_grctx *);
1030 1029
1031/* nouveau_grctx.c */ 1030/* nouveau_grctx.c */
1032extern int nouveau_grctx_prog_load(struct drm_device *); 1031extern int nouveau_grctx_prog_load(struct drm_device *);
@@ -1152,16 +1151,6 @@ extern int nouveau_gem_ioctl_new(struct drm_device *, void *,
1152 struct drm_file *); 1151 struct drm_file *);
1153extern int nouveau_gem_ioctl_pushbuf(struct drm_device *, void *, 1152extern int nouveau_gem_ioctl_pushbuf(struct drm_device *, void *,
1154 struct drm_file *); 1153 struct drm_file *);
1155extern int nouveau_gem_ioctl_pushbuf_call(struct drm_device *, void *,
1156 struct drm_file *);
1157extern int nouveau_gem_ioctl_pushbuf_call2(struct drm_device *, void *,
1158 struct drm_file *);
1159extern int nouveau_gem_ioctl_pin(struct drm_device *, void *,
1160 struct drm_file *);
1161extern int nouveau_gem_ioctl_unpin(struct drm_device *, void *,
1162 struct drm_file *);
1163extern int nouveau_gem_ioctl_tile(struct drm_device *, void *,
1164 struct drm_file *);
1165extern int nouveau_gem_ioctl_cpu_prep(struct drm_device *, void *, 1154extern int nouveau_gem_ioctl_cpu_prep(struct drm_device *, void *,
1166 struct drm_file *); 1155 struct drm_file *);
1167extern int nouveau_gem_ioctl_cpu_fini(struct drm_device *, void *, 1156extern int nouveau_gem_ioctl_cpu_fini(struct drm_device *, void *,
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index ea879a2efef3..68cedd9194fe 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -36,6 +36,7 @@
36#include <linux/fb.h> 36#include <linux/fb.h>
37#include <linux/init.h> 37#include <linux/init.h>
38#include <linux/screen_info.h> 38#include <linux/screen_info.h>
39#include <linux/vga_switcheroo.h>
39 40
40#include "drmP.h" 41#include "drmP.h"
41#include "drm.h" 42#include "drm.h"
@@ -370,6 +371,7 @@ nouveau_fbcon_create(struct drm_device *dev, uint32_t fb_width,
370 nvbo->bo.offset, nvbo); 371 nvbo->bo.offset, nvbo);
371 372
372 mutex_unlock(&dev->struct_mutex); 373 mutex_unlock(&dev->struct_mutex);
374 vga_switcheroo_client_fb_set(dev->pdev, info);
373 return 0; 375 return 0;
374 376
375out_unref: 377out_unref:
@@ -401,10 +403,8 @@ nouveau_fbcon_remove(struct drm_device *dev, struct drm_framebuffer *fb)
401 403
402 unregister_framebuffer(info); 404 unregister_framebuffer(info);
403 nouveau_bo_unmap(nouveau_fb->nvbo); 405 nouveau_bo_unmap(nouveau_fb->nvbo);
404 mutex_lock(&dev->struct_mutex); 406 drm_gem_object_unreference_unlocked(nouveau_fb->nvbo->gem);
405 drm_gem_object_unreference(nouveau_fb->nvbo->gem);
406 nouveau_fb->nvbo = NULL; 407 nouveau_fb->nvbo = NULL;
407 mutex_unlock(&dev->struct_mutex);
408 if (par) 408 if (par)
409 drm_fb_helper_free(&par->helper); 409 drm_fb_helper_free(&par->helper);
410 framebuffer_release(info); 410 framebuffer_release(info);
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 70cc30803e3b..0d22f66f1c79 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -167,12 +167,10 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
167 167
168 ret = drm_gem_handle_create(file_priv, nvbo->gem, &req->info.handle); 168 ret = drm_gem_handle_create(file_priv, nvbo->gem, &req->info.handle);
169out: 169out:
170 mutex_lock(&dev->struct_mutex); 170 drm_gem_object_handle_unreference_unlocked(nvbo->gem);
171 drm_gem_object_handle_unreference(nvbo->gem);
172 mutex_unlock(&dev->struct_mutex);
173 171
174 if (ret) 172 if (ret)
175 drm_gem_object_unreference(nvbo->gem); 173 drm_gem_object_unreference_unlocked(nvbo->gem);
176 return ret; 174 return ret;
177} 175}
178 176
@@ -243,6 +241,11 @@ validate_fini_list(struct list_head *list, struct nouveau_fence *fence)
 		nouveau_fence_unref((void *)&prev_fence);
 	}
 
+	if (unlikely(nvbo->validate_mapped)) {
+		ttm_bo_kunmap(&nvbo->kmap);
+		nvbo->validate_mapped = false;
+	}
+
 	list_del(&nvbo->entry);
 	nvbo->reserved_by = NULL;
 	ttm_bo_unreserve(&nvbo->bo);
@@ -302,11 +305,14 @@ retry:
 			if (ret == -EAGAIN)
 				ret = ttm_bo_wait_unreserved(&nvbo->bo, false);
 			drm_gem_object_unreference(gem);
-			if (ret)
+			if (ret) {
+				NV_ERROR(dev, "fail reserve\n");
 				return ret;
+			}
 			goto retry;
 		}
 
+		b->user_priv = (uint64_t)(unsigned long)nvbo;
 		nvbo->reserved_by = file_priv;
 		nvbo->pbbo_index = i;
 		if ((b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
@@ -336,8 +342,10 @@ retry:
 		}
 
 		ret = ttm_bo_wait_cpu(&nvbo->bo, false);
-		if (ret)
+		if (ret) {
+			NV_ERROR(dev, "fail wait_cpu\n");
 			return ret;
+		}
 		goto retry;
 	}
 }
@@ -351,6 +359,7 @@ validate_list(struct nouveau_channel *chan, struct list_head *list,
 {
 	struct drm_nouveau_gem_pushbuf_bo __user *upbbo =
				(void __force __user *)(uintptr_t)user_pbbo_ptr;
+	struct drm_device *dev = chan->dev;
 	struct nouveau_bo *nvbo;
 	int ret, relocs = 0;
 
@@ -362,39 +371,46 @@ validate_list(struct nouveau_channel *chan, struct list_head *list,
 			spin_lock(&nvbo->bo.lock);
 			ret = ttm_bo_wait(&nvbo->bo, false, false, false);
 			spin_unlock(&nvbo->bo.lock);
-			if (unlikely(ret))
+			if (unlikely(ret)) {
+				NV_ERROR(dev, "fail wait other chan\n");
 				return ret;
+			}
 		}
 
 		ret = nouveau_gem_set_domain(nvbo->gem, b->read_domains,
					     b->write_domains,
					     b->valid_domains);
-		if (unlikely(ret))
+		if (unlikely(ret)) {
+			NV_ERROR(dev, "fail set_domain\n");
 			return ret;
+		}
 
 		nvbo->channel = chan;
 		ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement,
				      false, false);
 		nvbo->channel = NULL;
-		if (unlikely(ret))
+		if (unlikely(ret)) {
+			NV_ERROR(dev, "fail ttm_validate\n");
 			return ret;
+		}
 
-		if (nvbo->bo.offset == b->presumed_offset &&
+		if (nvbo->bo.offset == b->presumed.offset &&
 		    ((nvbo->bo.mem.mem_type == TTM_PL_VRAM &&
-		      b->presumed_domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
+		      b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
 		     (nvbo->bo.mem.mem_type == TTM_PL_TT &&
-		      b->presumed_domain & NOUVEAU_GEM_DOMAIN_GART)))
+		      b->presumed.domain & NOUVEAU_GEM_DOMAIN_GART)))
 			continue;
 
 		if (nvbo->bo.mem.mem_type == TTM_PL_TT)
-			b->presumed_domain = NOUVEAU_GEM_DOMAIN_GART;
+			b->presumed.domain = NOUVEAU_GEM_DOMAIN_GART;
 		else
-			b->presumed_domain = NOUVEAU_GEM_DOMAIN_VRAM;
-		b->presumed_offset = nvbo->bo.offset;
-		b->presumed_ok = 0;
+			b->presumed.domain = NOUVEAU_GEM_DOMAIN_VRAM;
+		b->presumed.offset = nvbo->bo.offset;
+		b->presumed.valid = 0;
 		relocs++;
 
-		if (DRM_COPY_TO_USER(&upbbo[nvbo->pbbo_index], b, sizeof(*b)))
+		if (DRM_COPY_TO_USER(&upbbo[nvbo->pbbo_index].presumed,
+				     &b->presumed, sizeof(b->presumed)))
 			return -EFAULT;
 	}
 
@@ -408,6 +424,7 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
			     uint64_t user_buffers, int nr_buffers,
			     struct validate_op *op, int *apply_relocs)
 {
+	struct drm_device *dev = chan->dev;
 	int ret, relocs = 0;
 
 	INIT_LIST_HEAD(&op->vram_list);
@@ -418,11 +435,14 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
 		return 0;
 
 	ret = validate_init(chan, file_priv, pbbo, nr_buffers, op);
-	if (unlikely(ret))
+	if (unlikely(ret)) {
+		NV_ERROR(dev, "validate_init\n");
 		return ret;
+	}
 
 	ret = validate_list(chan, &op->vram_list, pbbo, user_buffers);
 	if (unlikely(ret < 0)) {
+		NV_ERROR(dev, "validate vram_list\n");
 		validate_fini(op, NULL);
 		return ret;
 	}
@@ -430,6 +450,7 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
 
 	ret = validate_list(chan, &op->gart_list, pbbo, user_buffers);
 	if (unlikely(ret < 0)) {
+		NV_ERROR(dev, "validate gart_list\n");
 		validate_fini(op, NULL);
 		return ret;
 	}
@@ -437,6 +458,7 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
 
 	ret = validate_list(chan, &op->both_list, pbbo, user_buffers);
 	if (unlikely(ret < 0)) {
+		NV_ERROR(dev, "validate both_list\n");
 		validate_fini(op, NULL);
 		return ret;
 	}
@@ -465,59 +487,82 @@ u_memcpya(uint64_t user, unsigned nmemb, unsigned size)
 }
 
 static int
-nouveau_gem_pushbuf_reloc_apply(struct nouveau_channel *chan, int nr_bo,
-				struct drm_nouveau_gem_pushbuf_bo *bo,
-				unsigned nr_relocs, uint64_t ptr_relocs,
-				unsigned nr_dwords, unsigned first_dword,
-				uint32_t *pushbuf, bool is_iomem)
+nouveau_gem_pushbuf_reloc_apply(struct drm_device *dev,
+				struct drm_nouveau_gem_pushbuf *req,
+				struct drm_nouveau_gem_pushbuf_bo *bo)
 {
 	struct drm_nouveau_gem_pushbuf_reloc *reloc = NULL;
-	struct drm_device *dev = chan->dev;
 	int ret = 0;
 	unsigned i;
 
-	reloc = u_memcpya(ptr_relocs, nr_relocs, sizeof(*reloc));
+	reloc = u_memcpya(req->relocs, req->nr_relocs, sizeof(*reloc));
 	if (IS_ERR(reloc))
 		return PTR_ERR(reloc);
 
-	for (i = 0; i < nr_relocs; i++) {
+	for (i = 0; i < req->nr_relocs; i++) {
 		struct drm_nouveau_gem_pushbuf_reloc *r = &reloc[i];
 		struct drm_nouveau_gem_pushbuf_bo *b;
+		struct nouveau_bo *nvbo;
 		uint32_t data;
 
-		if (r->bo_index >= nr_bo || r->reloc_index < first_dword ||
-		    r->reloc_index >= first_dword + nr_dwords) {
-			NV_ERROR(dev, "Bad relocation %d\n", i);
-			NV_ERROR(dev, "  bo: %d max %d\n", r->bo_index, nr_bo);
-			NV_ERROR(dev, "  id: %d max %d\n", r->reloc_index, nr_dwords);
+		if (unlikely(r->bo_index > req->nr_buffers)) {
+			NV_ERROR(dev, "reloc bo index invalid\n");
 			ret = -EINVAL;
 			break;
 		}
 
 		b = &bo[r->bo_index];
-		if (b->presumed_ok)
+		if (b->presumed.valid)
 			continue;
 
+		if (unlikely(r->reloc_bo_index > req->nr_buffers)) {
+			NV_ERROR(dev, "reloc container bo index invalid\n");
+			ret = -EINVAL;
+			break;
+		}
+		nvbo = (void *)(unsigned long)bo[r->reloc_bo_index].user_priv;
+
+		if (unlikely(r->reloc_bo_offset + 4 >
+			     nvbo->bo.mem.num_pages << PAGE_SHIFT)) {
+			NV_ERROR(dev, "reloc outside of bo\n");
+			ret = -EINVAL;
+			break;
+		}
+
+		if (!nvbo->kmap.virtual) {
+			ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages,
+					  &nvbo->kmap);
+			if (ret) {
+				NV_ERROR(dev, "failed kmap for reloc\n");
+				break;
+			}
+			nvbo->validate_mapped = true;
+		}
+
 		if (r->flags & NOUVEAU_GEM_RELOC_LOW)
-			data = b->presumed_offset + r->data;
+			data = b->presumed.offset + r->data;
 		else
 		if (r->flags & NOUVEAU_GEM_RELOC_HIGH)
-			data = (b->presumed_offset + r->data) >> 32;
+			data = (b->presumed.offset + r->data) >> 32;
 		else
 			data = r->data;
 
 		if (r->flags & NOUVEAU_GEM_RELOC_OR) {
-			if (b->presumed_domain == NOUVEAU_GEM_DOMAIN_GART)
+			if (b->presumed.domain == NOUVEAU_GEM_DOMAIN_GART)
 				data |= r->tor;
 			else
 				data |= r->vor;
 		}
 
-		if (is_iomem)
-			iowrite32_native(data, (void __force __iomem *)
-						&pushbuf[r->reloc_index]);
-		else
-			pushbuf[r->reloc_index] = data;
+		spin_lock(&nvbo->bo.lock);
+		ret = ttm_bo_wait(&nvbo->bo, false, false, false);
+		spin_unlock(&nvbo->bo.lock);
+		if (ret) {
+			NV_ERROR(dev, "reloc wait_idle failed: %d\n", ret);
+			break;
+		}
+
+		nouveau_bo_wr32(nvbo, r->reloc_bo_offset >> 2, data);
 	}
 
 	kfree(reloc);
@@ -528,127 +573,50 @@ int
 nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
 {
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct drm_nouveau_gem_pushbuf *req = data;
-	struct drm_nouveau_gem_pushbuf_bo *bo = NULL;
+	struct drm_nouveau_gem_pushbuf_push *push;
+	struct drm_nouveau_gem_pushbuf_bo *bo;
 	struct nouveau_channel *chan;
 	struct validate_op op;
-	struct nouveau_fence* fence = 0;
-	uint32_t *pushbuf = NULL;
-	int ret = 0, do_reloc = 0, i;
+	struct nouveau_fence *fence = 0;
+	int i, j, ret = 0, do_reloc = 0;
 
 	NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
 	NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(req->channel, file_priv, chan);
 
-	if (req->nr_dwords >= chan->dma.max ||
-	    req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS ||
-	    req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS) {
-		NV_ERROR(dev, "Pushbuf config exceeds limits:\n");
-		NV_ERROR(dev, "  dwords : %d max %d\n", req->nr_dwords,
-			 chan->dma.max - 1);
-		NV_ERROR(dev, "  buffers: %d max %d\n", req->nr_buffers,
-			 NOUVEAU_GEM_MAX_BUFFERS);
-		NV_ERROR(dev, "  relocs : %d max %d\n", req->nr_relocs,
-			 NOUVEAU_GEM_MAX_RELOCS);
-		return -EINVAL;
-	}
-
-	pushbuf = u_memcpya(req->dwords, req->nr_dwords, sizeof(uint32_t));
-	if (IS_ERR(pushbuf))
-		return PTR_ERR(pushbuf);
-
-	bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo));
-	if (IS_ERR(bo)) {
-		kfree(pushbuf);
-		return PTR_ERR(bo);
-	}
-
-	mutex_lock(&dev->struct_mutex);
-
-	/* Validate buffer list */
-	ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo, req->buffers,
-					   req->nr_buffers, &op, &do_reloc);
-	if (ret)
-		goto out;
-
-	/* Apply any relocations that are required */
-	if (do_reloc) {
-		ret = nouveau_gem_pushbuf_reloc_apply(chan, req->nr_buffers,
-						      bo, req->nr_relocs,
-						      req->relocs,
-						      req->nr_dwords, 0,
-						      pushbuf, false);
-		if (ret)
-			goto out;
-	}
-
-	/* Emit push buffer to the hw
-	 */
-	ret = RING_SPACE(chan, req->nr_dwords);
-	if (ret)
-		goto out;
-
-	OUT_RINGp(chan, pushbuf, req->nr_dwords);
+	req->vram_available = dev_priv->fb_aper_free;
+	req->gart_available = dev_priv->gart_info.aper_free;
+	if (unlikely(req->nr_push == 0))
+		goto out_next;
 
-	ret = nouveau_fence_new(chan, &fence, true);
-	if (ret) {
-		NV_ERROR(dev, "error fencing pushbuf: %d\n", ret);
-		WIND_RING(chan);
-		goto out;
+	if (unlikely(req->nr_push > NOUVEAU_GEM_MAX_PUSH)) {
+		NV_ERROR(dev, "pushbuf push count exceeds limit: %d max %d\n",
+			 req->nr_push, NOUVEAU_GEM_MAX_PUSH);
+		return -EINVAL;
 	}
 
-	if (nouveau_gem_pushbuf_sync(chan)) {
-		ret = nouveau_fence_wait(fence, NULL, false, false);
-		if (ret) {
-			for (i = 0; i < req->nr_dwords; i++)
-				NV_ERROR(dev, "0x%08x\n", pushbuf[i]);
-			NV_ERROR(dev, "^^ above push buffer is fail :(\n");
-		}
+	if (unlikely(req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS)) {
+		NV_ERROR(dev, "pushbuf bo count exceeds limit: %d max %d\n",
+			 req->nr_buffers, NOUVEAU_GEM_MAX_BUFFERS);
+		return -EINVAL;
 	}
 
-out:
-	validate_fini(&op, fence);
-	nouveau_fence_unref((void**)&fence);
-	mutex_unlock(&dev->struct_mutex);
-	kfree(pushbuf);
-	kfree(bo);
-	return ret;
-}
-
-#define PUSHBUF_CAL (dev_priv->card_type >= NV_20)
-
-int
-nouveau_gem_ioctl_pushbuf_call(struct drm_device *dev, void *data,
-			       struct drm_file *file_priv)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct drm_nouveau_gem_pushbuf_call *req = data;
-	struct drm_nouveau_gem_pushbuf_bo *bo = NULL;
-	struct nouveau_channel *chan;
-	struct drm_gem_object *gem;
-	struct nouveau_bo *pbbo;
-	struct validate_op op;
-	struct nouveau_fence* fence = 0;
-	int i, ret = 0, do_reloc = 0;
-
-	NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
-	NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(req->channel, file_priv, chan);
-
-	if (unlikely(req->handle == 0))
-		goto out_next;
-
-	if (req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS ||
-	    req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS) {
-		NV_ERROR(dev, "Pushbuf config exceeds limits:\n");
-		NV_ERROR(dev, "  buffers: %d max %d\n", req->nr_buffers,
-			 NOUVEAU_GEM_MAX_BUFFERS);
-		NV_ERROR(dev, "  relocs : %d max %d\n", req->nr_relocs,
-			 NOUVEAU_GEM_MAX_RELOCS);
+	if (unlikely(req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS)) {
+		NV_ERROR(dev, "pushbuf reloc count exceeds limit: %d max %d\n",
+			 req->nr_relocs, NOUVEAU_GEM_MAX_RELOCS);
 		return -EINVAL;
 	}
 
+	push = u_memcpya(req->push, req->nr_push, sizeof(*push));
+	if (IS_ERR(push))
+		return PTR_ERR(push);
+
 	bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo));
-	if (IS_ERR(bo))
+	if (IS_ERR(bo)) {
+		kfree(push);
 		return PTR_ERR(bo);
+	}
 
 	mutex_lock(&dev->struct_mutex);
 
@@ -660,122 +628,84 @@ nouveau_gem_ioctl_pushbuf_call(struct drm_device *dev, void *data,
 		goto out;
 	}
 
-	/* Validate DMA push buffer */
-	gem = drm_gem_object_lookup(dev, file_priv, req->handle);
-	if (!gem) {
-		NV_ERROR(dev, "Unknown pb handle 0x%08x\n", req->handle);
-		ret = -EINVAL;
-		goto out;
-	}
-	pbbo = nouveau_gem_object(gem);
-
-	if ((req->offset & 3) || req->nr_dwords < 2 ||
-	    (unsigned long)req->offset > (unsigned long)pbbo->bo.mem.size ||
-	    (unsigned long)req->nr_dwords >
-	     ((unsigned long)(pbbo->bo.mem.size - req->offset ) >> 2)) {
-		NV_ERROR(dev, "pb call misaligned or out of bounds: "
-			      "%d + %d * 4 > %ld\n",
-			 req->offset, req->nr_dwords, pbbo->bo.mem.size);
-		ret = -EINVAL;
-		drm_gem_object_unreference(gem);
-		goto out;
-	}
-
-	ret = ttm_bo_reserve(&pbbo->bo, false, false, true,
-			     chan->fence.sequence);
-	if (ret) {
-		NV_ERROR(dev, "resv pb: %d\n", ret);
-		drm_gem_object_unreference(gem);
-		goto out;
-	}
-
-	nouveau_bo_placement_set(pbbo, 1 << chan->pushbuf_bo->bo.mem.mem_type);
-	ret = ttm_bo_validate(&pbbo->bo, &pbbo->placement, false, false);
-	if (ret) {
-		NV_ERROR(dev, "validate pb: %d\n", ret);
-		ttm_bo_unreserve(&pbbo->bo);
-		drm_gem_object_unreference(gem);
-		goto out;
-	}
-
-	list_add_tail(&pbbo->entry, &op.both_list);
-
-	/* If presumed return address doesn't match, we need to map the
-	 * push buffer and fix it..
-	 */
-	if (!PUSHBUF_CAL) {
-		uint32_t retaddy;
-
-		if (chan->dma.free < 4 + NOUVEAU_DMA_SKIPS) {
-			ret = nouveau_dma_wait(chan, 4 + NOUVEAU_DMA_SKIPS);
-			if (ret) {
-				NV_ERROR(dev, "jmp_space: %d\n", ret);
-				goto out;
-			}
-		}
-
-		retaddy  = chan->pushbuf_base + ((chan->dma.cur + 2) << 2);
-		retaddy |= 0x20000000;
-		if (retaddy != req->suffix0) {
-			req->suffix0 = retaddy;
-			do_reloc = 1;
-		}
-	}
-
 	/* Apply any relocations that are required */
 	if (do_reloc) {
-		void *pbvirt;
-		bool is_iomem;
-		ret = ttm_bo_kmap(&pbbo->bo, 0, pbbo->bo.mem.num_pages,
-				  &pbbo->kmap);
+		ret = nouveau_gem_pushbuf_reloc_apply(dev, req, bo);
 		if (ret) {
-			NV_ERROR(dev, "kmap pb: %d\n", ret);
+			NV_ERROR(dev, "reloc apply: %d\n", ret);
 			goto out;
 		}
+	}
 
-		pbvirt = ttm_kmap_obj_virtual(&pbbo->kmap, &is_iomem);
-		ret = nouveau_gem_pushbuf_reloc_apply(chan, req->nr_buffers, bo,
-						      req->nr_relocs,
-						      req->relocs,
-						      req->nr_dwords,
-						      req->offset / 4,
-						      pbvirt, is_iomem);
-
-		if (!PUSHBUF_CAL) {
-			nouveau_bo_wr32(pbbo,
-					req->offset / 4 + req->nr_dwords - 2,
-					req->suffix0);
-		}
-
-		ttm_bo_kunmap(&pbbo->kmap);
+	if (chan->dma.ib_max) {
+		ret = nouveau_dma_wait(chan, req->nr_push + 1, 6);
 		if (ret) {
-			NV_ERROR(dev, "reloc apply: %d\n", ret);
+			NV_INFO(dev, "nv50cal_space: %d\n", ret);
 			goto out;
 		}
-	}
 
-	if (PUSHBUF_CAL) {
-		ret = RING_SPACE(chan, 2);
+		for (i = 0; i < req->nr_push; i++) {
+			struct nouveau_bo *nvbo = (void *)(unsigned long)
+				bo[push[i].bo_index].user_priv;
+
+			nv50_dma_push(chan, nvbo, push[i].offset,
+				      push[i].length);
+		}
+	} else
+	if (dev_priv->card_type >= NV_20) {
+		ret = RING_SPACE(chan, req->nr_push * 2);
 		if (ret) {
 			NV_ERROR(dev, "cal_space: %d\n", ret);
 			goto out;
 		}
-		OUT_RING(chan, ((pbbo->bo.mem.mm_node->start << PAGE_SHIFT) +
-				  req->offset) | 2);
-		OUT_RING(chan, 0);
+
+		for (i = 0; i < req->nr_push; i++) {
+			struct nouveau_bo *nvbo = (void *)(unsigned long)
+				bo[push[i].bo_index].user_priv;
+			struct drm_mm_node *mem = nvbo->bo.mem.mm_node;
+
+			OUT_RING(chan, ((mem->start << PAGE_SHIFT) +
+					push[i].offset) | 2);
+			OUT_RING(chan, 0);
+		}
 	} else {
-		ret = RING_SPACE(chan, 2 + NOUVEAU_DMA_SKIPS);
+		ret = RING_SPACE(chan, req->nr_push * (2 + NOUVEAU_DMA_SKIPS));
 		if (ret) {
 			NV_ERROR(dev, "jmp_space: %d\n", ret);
 			goto out;
 		}
-		OUT_RING(chan, ((pbbo->bo.mem.mm_node->start << PAGE_SHIFT) +
-				  req->offset) | 0x20000000);
-		OUT_RING(chan, 0);
 
-		/* Space the jumps apart with NOPs. */
-		for (i = 0; i < NOUVEAU_DMA_SKIPS; i++)
+		for (i = 0; i < req->nr_push; i++) {
+			struct nouveau_bo *nvbo = (void *)(unsigned long)
+				bo[push[i].bo_index].user_priv;
+			struct drm_mm_node *mem = nvbo->bo.mem.mm_node;
+			uint32_t cmd;
+
+			cmd = chan->pushbuf_base + ((chan->dma.cur + 2) << 2);
+			cmd |= 0x20000000;
+			if (unlikely(cmd != req->suffix0)) {
+				if (!nvbo->kmap.virtual) {
+					ret = ttm_bo_kmap(&nvbo->bo, 0,
+							  nvbo->bo.mem.
+							  num_pages,
+							  &nvbo->kmap);
+					if (ret) {
+						WIND_RING(chan);
+						goto out;
+					}
+					nvbo->validate_mapped = true;
+				}
+
+				nouveau_bo_wr32(nvbo, (push[i].offset +
+						push[i].length - 8) / 4, cmd);
+			}
+
+			OUT_RING(chan, ((mem->start << PAGE_SHIFT) +
+					push[i].offset) | 0x20000000);
 			OUT_RING(chan, 0);
+			for (j = 0; j < NOUVEAU_DMA_SKIPS; j++)
+				OUT_RING(chan, 0);
+		}
 	}
 
 	ret = nouveau_fence_new(chan, &fence, true);
@@ -790,9 +720,14 @@ out:
 	nouveau_fence_unref((void**)&fence);
 	mutex_unlock(&dev->struct_mutex);
 	kfree(bo);
+	kfree(push);
 
 out_next:
-	if (PUSHBUF_CAL) {
+	if (chan->dma.ib_max) {
+		req->suffix0 = 0x00000000;
+		req->suffix1 = 0x00000000;
+	} else
+	if (dev_priv->card_type >= NV_20) {
 		req->suffix0 = 0x00020000;
 		req->suffix1 = 0x00000000;
 	} else {
@@ -804,19 +739,6 @@ out_next:
 	return ret;
 }
 
-int
-nouveau_gem_ioctl_pushbuf_call2(struct drm_device *dev, void *data,
-				struct drm_file *file_priv)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct drm_nouveau_gem_pushbuf_call *req = data;
-
-	req->vram_available = dev_priv->fb_aper_free;
-	req->gart_available = dev_priv->gart_info.aper_free;
-
-	return nouveau_gem_ioctl_pushbuf_call(dev, data, file_priv);
-}
-
 static inline uint32_t
 domain_to_ttm(struct nouveau_bo *nvbo, uint32_t domain)
 {
@@ -831,74 +753,6 @@ domain_to_ttm(struct nouveau_bo *nvbo, uint32_t domain)
 }
 
 int
-nouveau_gem_ioctl_pin(struct drm_device *dev, void *data,
-		      struct drm_file *file_priv)
-{
-	struct drm_nouveau_gem_pin *req = data;
-	struct drm_gem_object *gem;
-	struct nouveau_bo *nvbo;
-	int ret = 0;
-
-	NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
-
-	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
-		NV_ERROR(dev, "pin only allowed without kernel modesetting\n");
-		return -EINVAL;
-	}
-
-	if (!DRM_SUSER(DRM_CURPROC))
-		return -EPERM;
-
-	gem = drm_gem_object_lookup(dev, file_priv, req->handle);
-	if (!gem)
-		return -EINVAL;
-	nvbo = nouveau_gem_object(gem);
-
-	ret = nouveau_bo_pin(nvbo, domain_to_ttm(nvbo, req->domain));
-	if (ret)
-		goto out;
-
-	req->offset = nvbo->bo.offset;
-	if (nvbo->bo.mem.mem_type == TTM_PL_TT)
-		req->domain = NOUVEAU_GEM_DOMAIN_GART;
-	else
-		req->domain = NOUVEAU_GEM_DOMAIN_VRAM;
-
-out:
-	mutex_lock(&dev->struct_mutex);
-	drm_gem_object_unreference(gem);
-	mutex_unlock(&dev->struct_mutex);
-
-	return ret;
-}
-
-int
-nouveau_gem_ioctl_unpin(struct drm_device *dev, void *data,
-			struct drm_file *file_priv)
-{
-	struct drm_nouveau_gem_pin *req = data;
-	struct drm_gem_object *gem;
-	int ret;
-
-	NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
-
-	if (drm_core_check_feature(dev, DRIVER_MODESET))
-		return -EINVAL;
-
-	gem = drm_gem_object_lookup(dev, file_priv, req->handle);
-	if (!gem)
-		return -EINVAL;
-
-	ret = nouveau_bo_unpin(nouveau_gem_object(gem));
-
-	mutex_lock(&dev->struct_mutex);
-	drm_gem_object_unreference(gem);
-	mutex_unlock(&dev->struct_mutex);
-
-	return ret;
-}
-
-int
 nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
 {
@@ -935,9 +789,7 @@ nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
 	}
 
 out:
-	mutex_lock(&dev->struct_mutex);
-	drm_gem_object_unreference(gem);
-	mutex_unlock(&dev->struct_mutex);
+	drm_gem_object_unreference_unlocked(gem);
 	return ret;
 }
 
@@ -965,9 +817,7 @@ nouveau_gem_ioctl_cpu_fini(struct drm_device *dev, void *data,
 	ret = 0;
 
 out:
-	mutex_lock(&dev->struct_mutex);
-	drm_gem_object_unreference(gem);
-	mutex_unlock(&dev->struct_mutex);
+	drm_gem_object_unreference_unlocked(gem);
 	return ret;
 }
 
@@ -986,9 +836,7 @@ nouveau_gem_ioctl_info(struct drm_device *dev, void *data,
 		return -EINVAL;
 
 	ret = nouveau_gem_info(gem, req);
-	mutex_lock(&dev->struct_mutex);
-	drm_gem_object_unreference(gem);
-	mutex_unlock(&dev->struct_mutex);
+	drm_gem_object_unreference_unlocked(gem);
 	return ret;
 }
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_hw.c b/drivers/gpu/drm/nouveau/nouveau_hw.c
index dc46792a5c96..7855b35effc3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_hw.c
+++ b/drivers/gpu/drm/nouveau/nouveau_hw.c
@@ -160,7 +160,7 @@ static void
 setPLL_single(struct drm_device *dev, uint32_t reg, struct nouveau_pll_vals *pv)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	int chip_version = dev_priv->vbios->chip_version;
+	int chip_version = dev_priv->vbios.chip_version;
 	uint32_t oldpll = NVReadRAMDAC(dev, 0, reg);
 	int oldN = (oldpll >> 8) & 0xff, oldM = oldpll & 0xff;
 	uint32_t pll = (oldpll & 0xfff80000) | pv->log2P << 16 | pv->NM1;
@@ -216,7 +216,7 @@ setPLL_double_highregs(struct drm_device *dev, uint32_t reg1,
			 struct nouveau_pll_vals *pv)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	int chip_version = dev_priv->vbios->chip_version;
+	int chip_version = dev_priv->vbios.chip_version;
 	bool nv3035 = chip_version == 0x30 || chip_version == 0x35;
 	uint32_t reg2 = reg1 + ((reg1 == NV_RAMDAC_VPLL2) ? 0x5c : 0x70);
 	uint32_t oldpll1 = NVReadRAMDAC(dev, 0, reg1);
@@ -374,7 +374,7 @@ nouveau_hw_setpll(struct drm_device *dev, uint32_t reg1,
		  struct nouveau_pll_vals *pv)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	int cv = dev_priv->vbios->chip_version;
+	int cv = dev_priv->vbios.chip_version;
 
 	if (cv == 0x30 || cv == 0x31 || cv == 0x35 || cv == 0x36 ||
 	    cv >= 0x40) {
diff --git a/drivers/gpu/drm/nouveau/nouveau_i2c.c b/drivers/gpu/drm/nouveau/nouveau_i2c.c
index 70e994d28122..88583e7bf651 100644
--- a/drivers/gpu/drm/nouveau/nouveau_i2c.c
+++ b/drivers/gpu/drm/nouveau/nouveau_i2c.c
@@ -254,16 +254,16 @@ struct nouveau_i2c_chan *
 nouveau_i2c_find(struct drm_device *dev, int index)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nvbios *bios = &dev_priv->VBIOS;
+	struct nvbios *bios = &dev_priv->vbios;
 
-	if (index > DCB_MAX_NUM_I2C_ENTRIES)
+	if (index >= DCB_MAX_NUM_I2C_ENTRIES)
 		return NULL;
 
-	if (!bios->bdcb.dcb.i2c[index].chan) {
-		if (nouveau_i2c_init(dev, &bios->bdcb.dcb.i2c[index], index))
+	if (!bios->dcb.i2c[index].chan) {
+		if (nouveau_i2c_init(dev, &bios->dcb.i2c[index], index))
 			return NULL;
 	}
 
-	return bios->bdcb.dcb.i2c[index].chan;
+	return bios->dcb.i2c[index].chan;
 }
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_irq.c b/drivers/gpu/drm/nouveau/nouveau_irq.c
index 447f9f69d6b1..95220ddebb45 100644
--- a/drivers/gpu/drm/nouveau/nouveau_irq.c
+++ b/drivers/gpu/drm/nouveau/nouveau_irq.c
@@ -691,11 +691,14 @@ nouveau_irq_handler(DRM_IRQ_ARGS)
 	struct drm_device *dev = (struct drm_device *)arg;
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	uint32_t status, fbdev_flags = 0;
+	unsigned long flags;
 
 	status = nv_rd32(dev, NV03_PMC_INTR_0);
 	if (!status)
 		return IRQ_NONE;
 
+	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+
 	if (dev_priv->fbdev_info) {
 		fbdev_flags = dev_priv->fbdev_info->flags;
 		dev_priv->fbdev_info->flags |= FBINFO_HWACCEL_DISABLED;
@@ -733,5 +736,7 @@ nouveau_irq_handler(DRM_IRQ_ARGS)
 	if (dev_priv->fbdev_info)
 		dev_priv->fbdev_info->flags = fbdev_flags;
 
+	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
+
 	return IRQ_HANDLED;
 }
diff --git a/drivers/gpu/drm/nouveau/nouveau_notifier.c b/drivers/gpu/drm/nouveau/nouveau_notifier.c
index d99dc087f9b1..9537f3e30115 100644
--- a/drivers/gpu/drm/nouveau/nouveau_notifier.c
+++ b/drivers/gpu/drm/nouveau/nouveau_notifier.c
@@ -61,11 +61,8 @@ nouveau_notifier_init_channel(struct nouveau_channel *chan)
 
 	chan->notifier_bo = ntfy;
 out_err:
-	if (ret) {
-		mutex_lock(&dev->struct_mutex);
-		drm_gem_object_unreference(ntfy->gem);
-		mutex_unlock(&dev->struct_mutex);
-	}
+	if (ret)
+		drm_gem_object_unreference_unlocked(ntfy->gem);
 
 	return ret;
 }
@@ -81,8 +78,8 @@ nouveau_notifier_takedown_channel(struct nouveau_channel *chan)
 	nouveau_bo_unmap(chan->notifier_bo);
 	mutex_lock(&dev->struct_mutex);
 	nouveau_bo_unpin(chan->notifier_bo);
-	drm_gem_object_unreference(chan->notifier_bo->gem);
 	mutex_unlock(&dev->struct_mutex);
+	drm_gem_object_unreference_unlocked(chan->notifier_bo->gem);
 	nouveau_mem_takedown(&chan->notifier_heap);
 }
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
index a4851af5b05e..eb8f084d5f53 100644
--- a/drivers/gpu/drm/nouveau/nouveau_state.c
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -29,6 +29,7 @@
 #include "drm_sarea.h"
 #include "drm_crtc_helper.h"
 #include <linux/vgaarb.h>
+#include <linux/vga_switcheroo.h>
 
 #include "nouveau_drv.h"
 #include "nouveau_drm.h"
@@ -371,6 +372,30 @@ out_err:
 	return ret;
 }
 
+static void nouveau_switcheroo_set_state(struct pci_dev *pdev,
+					 enum vga_switcheroo_state state)
+{
+	pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
+	if (state == VGA_SWITCHEROO_ON) {
+		printk(KERN_ERR "VGA switcheroo: switched nouveau on\n");
+		nouveau_pci_resume(pdev);
+	} else {
+		printk(KERN_ERR "VGA switcheroo: switched nouveau off\n");
+		nouveau_pci_suspend(pdev, pmm);
+	}
+}
+
+static bool nouveau_switcheroo_can_switch(struct pci_dev *pdev)
+{
+	struct drm_device *dev = pci_get_drvdata(pdev);
+	bool can_switch;
+
+	spin_lock(&dev->count_lock);
+	can_switch = (dev->open_count == 0);
+	spin_unlock(&dev->count_lock);
+	return can_switch;
+}
+
 int
 nouveau_card_init(struct drm_device *dev)
 {
@@ -384,6 +409,8 @@ nouveau_card_init(struct drm_device *dev)
 		return 0;
 
 	vga_client_register(dev->pdev, dev, NULL, nouveau_vga_set_decode);
+	vga_switcheroo_register_client(dev->pdev, nouveau_switcheroo_set_state,
+				       nouveau_switcheroo_can_switch);
 
 	/* Initialise internal driver API hooks */
 	ret = nouveau_init_engine_ptrs(dev);
@@ -391,6 +418,7 @@ nouveau_card_init(struct drm_device *dev)
 		goto out;
 	engine = &dev_priv->engine;
 	dev_priv->init_state = NOUVEAU_CARD_INIT_FAILED;
+	spin_lock_init(&dev_priv->context_switch_lock);
 
 	/* Parse BIOS tables / Run init tables if card not POSTed */
 	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
@@ -617,11 +645,6 @@ int nouveau_load(struct drm_device *dev, unsigned long flags)
 	NV_DEBUG(dev, "vendor: 0x%X device: 0x%X class: 0x%X\n",
 		 dev->pci_vendor, dev->pci_device, dev->pdev->class);
 
-	dev_priv->acpi_dsm = nouveau_dsm_probe(dev);
-
-	if (dev_priv->acpi_dsm)
-		nouveau_hybrid_setup(dev);
-
 	dev_priv->wq = create_workqueue("nouveau");
 	if (!dev_priv->wq)
 		return -EINVAL;
@@ -776,13 +799,6 @@ int nouveau_unload(struct drm_device *dev)
 	return 0;
 }
 
-int
-nouveau_ioctl_card_init(struct drm_device *dev, void *data,
-			struct drm_file *file_priv)
-{
-	return nouveau_card_init(dev);
-}
-
 int nouveau_ioctl_getparam(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
 {
diff --git a/drivers/gpu/drm/nouveau/nv04_crtc.c b/drivers/gpu/drm/nouveau/nv04_crtc.c
index d2f143ed97c1..a1d1ebb073d9 100644
--- a/drivers/gpu/drm/nouveau/nv04_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv04_crtc.c
@@ -926,9 +926,7 @@ nv04_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
 	nv_crtc->cursor.set_offset(nv_crtc, nv_crtc->cursor.offset);
 	nv_crtc->cursor.show(nv_crtc, true);
 out:
-	mutex_lock(&dev->struct_mutex);
-	drm_gem_object_unreference(gem);
-	mutex_unlock(&dev->struct_mutex);
+	drm_gem_object_unreference_unlocked(gem);
 	return ret;
 }
 
diff --git a/drivers/gpu/drm/nouveau/nv04_dac.c b/drivers/gpu/drm/nouveau/nv04_dac.c
index 1d73b15d70da..1cb19e3acb55 100644
--- a/drivers/gpu/drm/nouveau/nv04_dac.c
+++ b/drivers/gpu/drm/nouveau/nv04_dac.c
@@ -230,13 +230,13 @@ uint32_t nv17_dac_sample_load(struct drm_encoder *encoder)
 	if (dcb->type == OUTPUT_TV) {
 		testval = RGB_TEST_DATA(0xa0, 0xa0, 0xa0);
 
-		if (dev_priv->vbios->tvdactestval)
-			testval = dev_priv->vbios->tvdactestval;
+		if (dev_priv->vbios.tvdactestval)
+			testval = dev_priv->vbios.tvdactestval;
 	} else {
 		testval = RGB_TEST_DATA(0x140, 0x140, 0x140); /* 0x94050140 */
 
-		if (dev_priv->vbios->dactestval)
-			testval = dev_priv->vbios->dactestval;
+		if (dev_priv->vbios.dactestval)
+			testval = dev_priv->vbios.dactestval;
 	}
 
 	saved_rtest_ctrl = NVReadRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset);
diff --git a/drivers/gpu/drm/nouveau/nv04_dfp.c b/drivers/gpu/drm/nouveau/nv04_dfp.c
index 483f875bdb6a..41634d4752fe 100644
--- a/drivers/gpu/drm/nouveau/nv04_dfp.c
+++ b/drivers/gpu/drm/nouveau/nv04_dfp.c
@@ -269,10 +269,10 @@ static void nv04_dfp_mode_set(struct drm_encoder *encoder,
 	regp->fp_horiz_regs[FP_TOTAL] = output_mode->htotal - 1;
 	if (!nv_gf4_disp_arch(dev) ||
 	    (output_mode->hsync_start - output_mode->hdisplay) >=
-					dev_priv->vbios->digital_min_front_porch)
+					dev_priv->vbios.digital_min_front_porch)
 		regp->fp_horiz_regs[FP_CRTC] = output_mode->hdisplay;
 	else
-		regp->fp_horiz_regs[FP_CRTC] = output_mode->hsync_start - dev_priv->vbios->digital_min_front_porch - 1;
+		regp->fp_horiz_regs[FP_CRTC] = output_mode->hsync_start - dev_priv->vbios.digital_min_front_porch - 1;
 	regp->fp_horiz_regs[FP_SYNC_START] = output_mode->hsync_start - 1;
 	regp->fp_horiz_regs[FP_SYNC_END] = output_mode->hsync_end - 1;
 	regp->fp_horiz_regs[FP_VALID_START] = output_mode->hskew;
diff --git a/drivers/gpu/drm/nouveau/nv04_display.c b/drivers/gpu/drm/nouveau/nv04_display.c
index ef77215fa5b9..c7898b4f6dfb 100644
--- a/drivers/gpu/drm/nouveau/nv04_display.c
+++ b/drivers/gpu/drm/nouveau/nv04_display.c
@@ -93,10 +93,9 @@ int
 nv04_display_create(struct drm_device *dev)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct parsed_dcb *dcb = dev_priv->vbios->dcb;
+	struct dcb_table *dcb = &dev_priv->vbios.dcb;
 	struct drm_encoder *encoder;
 	struct drm_crtc *crtc;
-	uint16_t connector[16] = { 0 };
 	int i, ret;
 
 	NV_DEBUG_KMS(dev, "\n");
@@ -154,52 +153,10 @@ nv04_display_create(struct drm_device *dev)
 
 		if (ret)
 			continue;
-
-		connector[dcbent->connector] |= (1 << dcbent->type);
 	}
 
-	for (i = 0; i < dcb->entries; i++) {
-		struct dcb_entry *dcbent = &dcb->entry[i];
-		uint16_t encoders;
-		int type;
-
-		encoders = connector[dcbent->connector];
-		if (!(encoders & (1 << dcbent->type)))
-			continue;
-		connector[dcbent->connector] = 0;
-
-		switch (dcbent->type) {
-		case OUTPUT_ANALOG:
-			if (!MULTIPLE_ENCODERS(encoders))
-				type = DRM_MODE_CONNECTOR_VGA;
-			else
-				type = DRM_MODE_CONNECTOR_DVII;
-			break;
-		case OUTPUT_TMDS:
-			if (!MULTIPLE_ENCODERS(encoders))
-				type = DRM_MODE_CONNECTOR_DVID;
-			else
-				type = DRM_MODE_CONNECTOR_DVII;
-			break;
-		case OUTPUT_LVDS:
-			type = DRM_MODE_CONNECTOR_LVDS;
-#if 0
-			/* don't create i2c adapter when lvds ddc not allowed */
-			if (dcbent->lvdsconf.use_straps_for_mode ||
-			    dev_priv->vbios->fp_no_ddc)
-				i2c_index = 0xf;
-#endif
-			break;
-		case OUTPUT_TV:
-			type = DRM_MODE_CONNECTOR_TV;
-			break;
-		default:
-			type = DRM_MODE_CONNECTOR_Unknown;
-			continue;
-		}
-
-		nouveau_connector_create(dev, dcbent->connector, type);
-	}
+	for (i = 0; i < dcb->connector.entries; i++)
+		nouveau_connector_create(dev, &dcb->connector.entry[i]);
 
 	/* Save previous state */
 	NVLockVgaCrtcs(dev, false);
diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c
index fd01caabd5c3..3da90c2c4e63 100644
--- a/drivers/gpu/drm/nouveau/nv04_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c
@@ -118,7 +118,7 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
 		return;
 	}
 
-	width = (image->width + 31) & ~31;
+	width = ALIGN(image->width, 32);
 	dsize = (width * image->height) >> 5;
 
 	if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
diff --git a/drivers/gpu/drm/nouveau/nv04_fifo.c b/drivers/gpu/drm/nouveau/nv04_fifo.c
index f31347b8c9b0..66fe55983b6e 100644
--- a/drivers/gpu/drm/nouveau/nv04_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv04_fifo.c
@@ -117,6 +117,7 @@ nv04_fifo_create_context(struct nouveau_channel *chan)
 {
 	struct drm_device *dev = chan->dev;
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	unsigned long flags;
 	int ret;
 
 	ret = nouveau_gpuobj_new_fake(dev, NV04_RAMFC(chan->id), ~0,
@@ -127,6 +128,8 @@ nv04_fifo_create_context(struct nouveau_channel *chan)
 	if (ret)
 		return ret;
 
+	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+
 	/* Setup initial state */
 	dev_priv->engine.instmem.prepare_access(dev, true);
 	RAMFC_WR(DMA_PUT, chan->pushbuf_base);
@@ -144,6 +147,8 @@ nv04_fifo_create_context(struct nouveau_channel *chan)
 	/* enable the fifo dma operation */
 	nv_wr32(dev, NV04_PFIFO_MODE,
 		nv_rd32(dev, NV04_PFIFO_MODE) | (1 << chan->id));
+
+	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/nouveau/nv04_tv.c b/drivers/gpu/drm/nouveau/nv04_tv.c
index 9c63099e9c42..c4e3404337d4 100644
--- a/drivers/gpu/drm/nouveau/nv04_tv.c
+++ b/drivers/gpu/drm/nouveau/nv04_tv.c
@@ -262,7 +262,7 @@ int nv04_tv_create(struct drm_device *dev, struct dcb_entry *entry)
 	nv_encoder->or = ffs(entry->or) - 1;
 
 	/* Run the slave-specific initialization */
-	adap = &dev_priv->vbios->dcb->i2c[i2c_index].chan->adapter;
+	adap = &dev_priv->vbios.dcb.i2c[i2c_index].chan->adapter;
 
 	was_locked = NVLockVgaCrtcs(dev, false);
 
diff --git a/drivers/gpu/drm/nouveau/nv17_tv.c b/drivers/gpu/drm/nouveau/nv17_tv.c
index 21ac6e49b6ee..74c880374fb9 100644
--- a/drivers/gpu/drm/nouveau/nv17_tv.c
+++ b/drivers/gpu/drm/nouveau/nv17_tv.c
@@ -45,8 +45,8 @@ static uint32_t nv42_tv_sample_load(struct drm_encoder *encoder)
 
 #define RGB_TEST_DATA(r, g, b) (r << 0 | g << 10 | b << 20)
 	testval = RGB_TEST_DATA(0x82, 0xeb, 0x82);
-	if (dev_priv->vbios->tvdactestval)
-		testval = dev_priv->vbios->tvdactestval;
+	if (dev_priv->vbios.tvdactestval)
+		testval = dev_priv->vbios.tvdactestval;
 
 	dacclk = NVReadRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset);
 	head = (dacclk & 0x100) >> 8;
@@ -367,7 +367,7 @@ static void nv17_tv_prepare(struct drm_encoder *encoder)
		    !enc->crtc &&
 		    nv04_dfp_get_bound_head(dev, dcb) == head) {
 			nv04_dfp_bind_head(dev, dcb, head ^ 1,
-					   dev_priv->VBIOS.fp.dual_link);
+					   dev_priv->vbios.fp.dual_link);
 		}
 	}
 
diff --git a/drivers/gpu/drm/nouveau/nv40_fifo.c b/drivers/gpu/drm/nouveau/nv40_fifo.c
index b4f19ccb8b41..6b2ef4a9fce1 100644
--- a/drivers/gpu/drm/nouveau/nv40_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv40_fifo.c
@@ -37,6 +37,7 @@ nv40_fifo_create_context(struct nouveau_channel *chan)
 	struct drm_device *dev = chan->dev;
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	uint32_t fc = NV40_RAMFC(chan->id);
+	unsigned long flags;
 	int ret;
 
 	ret = nouveau_gpuobj_new_fake(dev, NV40_RAMFC(chan->id), ~0,
@@ -45,6 +46,8 @@ nv40_fifo_create_context(struct nouveau_channel *chan)
 	if (ret)
 		return ret;
 
+	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+
 	dev_priv->engine.instmem.prepare_access(dev, true);
 	nv_wi32(dev, fc + 0, chan->pushbuf_base);
 	nv_wi32(dev, fc + 4, chan->pushbuf_base);
@@ -63,6 +66,8 @@ nv40_fifo_create_context(struct nouveau_channel *chan)
 	/* enable the fifo dma operation */
 	nv_wr32(dev, NV04_PFIFO_MODE,
 		nv_rd32(dev, NV04_PFIFO_MODE) | (1 << chan->id));
+
+	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c
index d1a651e3400c..cfabeb974a56 100644
--- a/drivers/gpu/drm/nouveau/nv50_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv50_crtc.c
@@ -358,9 +358,7 @@ nv50_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
 	nv_crtc->cursor.show(nv_crtc, true);
 
 out:
-	mutex_lock(&dev->struct_mutex);
-	drm_gem_object_unreference(gem);
-	mutex_unlock(&dev->struct_mutex);
+	drm_gem_object_unreference_unlocked(gem);
 	return ret;
 }
 
diff --git a/drivers/gpu/drm/nouveau/nv50_dac.c b/drivers/gpu/drm/nouveau/nv50_dac.c
index f08f042a8e10..1fd9537beff6 100644
--- a/drivers/gpu/drm/nouveau/nv50_dac.c
+++ b/drivers/gpu/drm/nouveau/nv50_dac.c
@@ -79,8 +79,8 @@ nv50_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
 	}
 
 	/* Use bios provided value if possible. */
-	if (dev_priv->vbios->dactestval) {
-		load_pattern = dev_priv->vbios->dactestval;
+	if (dev_priv->vbios.dactestval) {
+		load_pattern = dev_priv->vbios.dactestval;
 		NV_DEBUG_KMS(dev, "Using bios provided load_pattern of %d\n",
 			  load_pattern);
 	} else {
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 90f0bf59fbcd..61a89f2dc553 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -370,9 +370,7 @@ nv50_display_init(struct drm_device *dev)
 		struct nouveau_connector *conn = nouveau_connector(connector);
 		struct dcb_gpio_entry *gpio;
 
-		if (connector->connector_type != DRM_MODE_CONNECTOR_DVII &&
-		    connector->connector_type != DRM_MODE_CONNECTOR_DVID &&
-		    connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
+		if (conn->dcb->gpio_tag == 0xff)
 			continue;
 
 		gpio = nouveau_bios_gpio_entry(dev, conn->dcb->gpio_tag);
@@ -465,8 +463,7 @@ static int nv50_display_disable(struct drm_device *dev)
 int nv50_display_create(struct drm_device *dev)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct parsed_dcb *dcb = dev_priv->vbios->dcb;
-	uint32_t connector[16] = {};
+	struct dcb_table *dcb = &dev_priv->vbios.dcb;
 	int ret, i;
 
 	NV_DEBUG_KMS(dev, "\n");
@@ -522,44 +519,13 @@ int nv50_display_create(struct drm_device *dev)
			NV_WARN(dev, "DCB encoder %d unknown\n", entry->type);
 			continue;
 		}
-
-		connector[entry->connector] |= (1 << entry->type);
 	}
 
-	/* It appears that DCB 3.0+ VBIOS has a connector table, however,
-	 * I'm not 100% certain how to decode it correctly yet so just
-	 * look at what encoders are present on each connector index and
-	 * attempt to derive the connector type from that.
-	 */
-	for (i = 0 ; i < dcb->entries; i++) {
-		struct dcb_entry *entry = &dcb->entry[i];
-		uint16_t encoders;
-		int type;
-
-		encoders = connector[entry->connector];
-		if (!(encoders & (1 << entry->type)))
+	for (i = 0 ; i < dcb->connector.entries; i++) {
+		if (i != 0 && dcb->connector.entry[i].index ==
+			      dcb->connector.entry[i - 1].index)
 			continue;
-		connector[entry->connector] = 0;
-
-		if (encoders & (1 << OUTPUT_DP)) {
-			type = DRM_MODE_CONNECTOR_DisplayPort;
-		} else if (encoders & (1 << OUTPUT_TMDS)) {
-			if (encoders & (1 << OUTPUT_ANALOG))
-				type = DRM_MODE_CONNECTOR_DVII;
-			else
-				type = DRM_MODE_CONNECTOR_DVID;
-		} else if (encoders & (1 << OUTPUT_ANALOG)) {
-			type = DRM_MODE_CONNECTOR_VGA;
-		} else if (encoders & (1 << OUTPUT_LVDS)) {
-			type = DRM_MODE_CONNECTOR_LVDS;
-		} else {
-			type = DRM_MODE_CONNECTOR_Unknown;
-		}
-
-		if (type == DRM_MODE_CONNECTOR_Unknown)
-			continue;
-
-		nouveau_connector_create(dev, entry->connector, type);
+		nouveau_connector_create(dev, &dcb->connector.entry[i]);
 	}
 
 	ret = nv50_display_init(dev);
@@ -667,8 +633,8 @@ nv50_display_irq_head(struct drm_device *dev, int *phead,
 		return -1;
 	}
 
-	for (i = 0; i < dev_priv->vbios->dcb->entries; i++) {
-		struct dcb_entry *dcbent = &dev_priv->vbios->dcb->entry[i];
+	for (i = 0; i < dev_priv->vbios.dcb.entries; i++) {
+		struct dcb_entry *dcbent = &dev_priv->vbios.dcb.entry[i];
 
 		if (dcbent->type != type)
 			continue;
@@ -692,7 +658,7 @@ nv50_display_script_select(struct drm_device *dev, struct dcb_entry *dcbent,
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_connector *nv_connector = NULL;
 	struct drm_encoder *encoder;
-	struct nvbios *bios = &dev_priv->VBIOS;
+	struct nvbios *bios = &dev_priv->vbios;
 	uint32_t mc, script = 0, or;
 
 	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
@@ -710,7 +676,7 @@ nv50_display_script_select(struct drm_device *dev, struct dcb_entry *dcbent,
 	switch (dcbent->type) {
 	case OUTPUT_LVDS:
 		script = (mc >> 8) & 0xf;
-		if (bios->pub.fp_no_ddc) {
+		if (bios->fp_no_ddc) {
 			if (bios->fp.dual_link)
 				script |= 0x0100;
 			if (bios->fp.if_is_24bit)
diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c
index 0f57cdf7ccb2..993c7126fbde 100644
--- a/drivers/gpu/drm/nouveau/nv50_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c
@@ -109,7 +109,7 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
 		return;
 	}
 
-	width = (image->width + 31) & ~31;
+	width = ALIGN(image->width, 32);
 	dwords = (width * image->height) >> 5;
 
 	BEGIN_RING(chan, NvSub2D, 0x0814, 2);
diff --git a/drivers/gpu/drm/nouveau/nv50_fifo.c b/drivers/gpu/drm/nouveau/nv50_fifo.c
index 204a79ff10f4..e20c0e2474f3 100644
--- a/drivers/gpu/drm/nouveau/nv50_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv50_fifo.c
@@ -243,6 +243,7 @@ nv50_fifo_create_context(struct nouveau_channel *chan)
 	struct drm_device *dev = chan->dev;
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_gpuobj *ramfc = NULL;
+	unsigned long flags;
 	int ret;
 
 	NV_DEBUG(dev, "ch%d\n", chan->id);
@@ -278,19 +279,21 @@ nv50_fifo_create_context(struct nouveau_channel *chan)
 		return ret;
 	}
 
+	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+
 	dev_priv->engine.instmem.prepare_access(dev, true);
 
-	nv_wo32(dev, ramfc, 0x08/4, chan->pushbuf_base);
-	nv_wo32(dev, ramfc, 0x10/4, chan->pushbuf_base);
 	nv_wo32(dev, ramfc, 0x48/4, chan->pushbuf->instance >> 4);
 	nv_wo32(dev, ramfc, 0x80/4, (0xc << 24) | (chan->ramht->instance >> 4));
-	nv_wo32(dev, ramfc, 0x3c/4, 0x00086078);
 	nv_wo32(dev, ramfc, 0x44/4, 0x2101ffff);
 	nv_wo32(dev, ramfc, 0x60/4, 0x7fffffff);
 	nv_wo32(dev, ramfc, 0x40/4, 0x00000000);
 	nv_wo32(dev, ramfc, 0x7c/4, 0x30000001);
 	nv_wo32(dev, ramfc, 0x78/4, 0x00000000);
-	nv_wo32(dev, ramfc, 0x4c/4, 0xffffffff);
+	nv_wo32(dev, ramfc, 0x3c/4, 0x403f6078);
+	nv_wo32(dev, ramfc, 0x50/4, chan->pushbuf_base +
+				    chan->dma.ib_base * 4);
+	nv_wo32(dev, ramfc, 0x54/4, drm_order(chan->dma.ib_max + 1) << 16);
 
 	if (!IS_G80) {
 		nv_wo32(dev, chan->ramin->gpuobj, 0, chan->id);
@@ -306,10 +309,12 @@ nv50_fifo_create_context(struct nouveau_channel *chan)
 	ret = nv50_fifo_channel_enable(dev, chan->id, false);
 	if (ret) {
 		NV_ERROR(dev, "error enabling ch%d: %d\n", chan->id, ret);
+		spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
 		nouveau_gpuobj_ref_del(dev, &chan->ramfc);
 		return ret;
 	}
 
+	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c
index 6d504801b514..857a09671a39 100644
--- a/drivers/gpu/drm/nouveau/nv50_graph.c
+++ b/drivers/gpu/drm/nouveau/nv50_graph.c
@@ -28,30 +28,7 @@
 #include "drm.h"
 #include "nouveau_drv.h"
 
-MODULE_FIRMWARE("nouveau/nv50.ctxprog");
-MODULE_FIRMWARE("nouveau/nv50.ctxvals");
-MODULE_FIRMWARE("nouveau/nv84.ctxprog");
-MODULE_FIRMWARE("nouveau/nv84.ctxvals");
-MODULE_FIRMWARE("nouveau/nv86.ctxprog");
-MODULE_FIRMWARE("nouveau/nv86.ctxvals");
-MODULE_FIRMWARE("nouveau/nv92.ctxprog");
-MODULE_FIRMWARE("nouveau/nv92.ctxvals");
-MODULE_FIRMWARE("nouveau/nv94.ctxprog");
-MODULE_FIRMWARE("nouveau/nv94.ctxvals");
-MODULE_FIRMWARE("nouveau/nv96.ctxprog");
-MODULE_FIRMWARE("nouveau/nv96.ctxvals");
-MODULE_FIRMWARE("nouveau/nv98.ctxprog");
-MODULE_FIRMWARE("nouveau/nv98.ctxvals");
-MODULE_FIRMWARE("nouveau/nva0.ctxprog");
-MODULE_FIRMWARE("nouveau/nva0.ctxvals");
-MODULE_FIRMWARE("nouveau/nva5.ctxprog");
-MODULE_FIRMWARE("nouveau/nva5.ctxvals");
-MODULE_FIRMWARE("nouveau/nva8.ctxprog");
-MODULE_FIRMWARE("nouveau/nva8.ctxvals");
-MODULE_FIRMWARE("nouveau/nvaa.ctxprog");
-MODULE_FIRMWARE("nouveau/nvaa.ctxvals");
-MODULE_FIRMWARE("nouveau/nvac.ctxprog");
-MODULE_FIRMWARE("nouveau/nvac.ctxvals");
+#include "nouveau_grctx.h"
 
 #define IS_G80 ((dev_priv->chipset & 0xf0) == 0x50)
 
@@ -111,9 +88,34 @@ nv50_graph_init_ctxctl(struct drm_device *dev)
 
 	NV_DEBUG(dev, "\n");
 
-	nouveau_grctx_prog_load(dev);
-	if (!dev_priv->engine.graph.ctxprog)
-		dev_priv->engine.graph.accel_blocked = true;
+	if (nouveau_ctxfw) {
+		nouveau_grctx_prog_load(dev);
+		dev_priv->engine.graph.grctx_size = 0x70000;
+	}
+	if (!dev_priv->engine.graph.ctxprog) {
+		struct nouveau_grctx ctx = {};
+		uint32_t *cp = kmalloc(512 * 4, GFP_KERNEL);
+		int i;
+		if (!cp) {
+			NV_ERROR(dev, "Couldn't alloc ctxprog! Disabling acceleration.\n");
+			dev_priv->engine.graph.accel_blocked = true;
+			return 0;
+		}
+		ctx.dev = dev;
+		ctx.mode = NOUVEAU_GRCTX_PROG;
+		ctx.data = cp;
+		ctx.ctxprog_max = 512;
+		if (!nv50_grctx_init(&ctx)) {
+			dev_priv->engine.graph.grctx_size = ctx.ctxvals_pos * 4;
+
+			nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0);
+			for (i = 0; i < ctx.ctxprog_len; i++)
+				nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_DATA, cp[i]);
+		} else {
+			dev_priv->engine.graph.accel_blocked = true;
+		}
+		kfree(cp);
+	}
 
 	nv_wr32(dev, 0x400320, 4);
 	nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, 0);
@@ -193,13 +195,13 @@ nv50_graph_create_context(struct nouveau_channel *chan)
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_gpuobj *ramin = chan->ramin->gpuobj;
 	struct nouveau_gpuobj *ctx;
-	uint32_t grctx_size = 0x70000;
+	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
 	int hdr, ret;
 
 	NV_DEBUG(dev, "ch%d\n", chan->id);
 
-	ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, grctx_size, 0x1000,
-				     NVOBJ_FLAG_ZERO_ALLOC |
+	ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, pgraph->grctx_size,
+				     0x1000, NVOBJ_FLAG_ZERO_ALLOC |
 				     NVOBJ_FLAG_ZERO_FREE, &chan->ramin_grctx);
 	if (ret)
 		return ret;
@@ -209,7 +211,7 @@ nv50_graph_create_context(struct nouveau_channel *chan)
 	dev_priv->engine.instmem.prepare_access(dev, true);
 	nv_wo32(dev, ramin, (hdr + 0x00)/4, 0x00190002);
 	nv_wo32(dev, ramin, (hdr + 0x04)/4, chan->ramin_grctx->instance +
-			   grctx_size - 1);
+			   pgraph->grctx_size - 1);
 	nv_wo32(dev, ramin, (hdr + 0x08)/4, chan->ramin_grctx->instance);
 	nv_wo32(dev, ramin, (hdr + 0x0c)/4, 0);
 	nv_wo32(dev, ramin, (hdr + 0x10)/4, 0);
@@ -217,7 +219,15 @@ nv50_graph_create_context(struct nouveau_channel *chan)
 	dev_priv->engine.instmem.finish_access(dev);
 
 	dev_priv->engine.instmem.prepare_access(dev, true);
-	nouveau_grctx_vals_load(dev, ctx);
+	if (!pgraph->ctxprog) {
+		struct nouveau_grctx ctx = {};
+		ctx.dev = chan->dev;
+		ctx.mode = NOUVEAU_GRCTX_VALS;
+		ctx.data = chan->ramin_grctx->gpuobj;
+		nv50_grctx_init(&ctx);
+	} else {
+		nouveau_grctx_vals_load(dev, ctx);
+	}
 	nv_wo32(dev, ctx, 0x00000/4, chan->ramin->instance >> 12);
 	if ((dev_priv->chipset & 0xf0) == 0xa0)
 		nv_wo32(dev, ctx, 0x00004/4, 0x00000000);
diff --git a/drivers/gpu/drm/nouveau/nv50_grctx.c b/drivers/gpu/drm/nouveau/nv50_grctx.c
new file mode 100644
index 000000000000..d105fcd42ca0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv50_grctx.c
@@ -0,0 +1,2367 @@
1/*
2 * Copyright 2009 Marcin Kościelnicki
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22
23#define CP_FLAG_CLEAR 0
24#define CP_FLAG_SET 1
25#define CP_FLAG_SWAP_DIRECTION ((0 * 32) + 0)
26#define CP_FLAG_SWAP_DIRECTION_LOAD 0
27#define CP_FLAG_SWAP_DIRECTION_SAVE 1
28#define CP_FLAG_UNK01 ((0 * 32) + 1)
29#define CP_FLAG_UNK01_CLEAR 0
30#define CP_FLAG_UNK01_SET 1
31#define CP_FLAG_UNK03 ((0 * 32) + 3)
32#define CP_FLAG_UNK03_CLEAR 0
33#define CP_FLAG_UNK03_SET 1
34#define CP_FLAG_USER_SAVE ((0 * 32) + 5)
35#define CP_FLAG_USER_SAVE_NOT_PENDING 0
36#define CP_FLAG_USER_SAVE_PENDING 1
37#define CP_FLAG_USER_LOAD ((0 * 32) + 6)
38#define CP_FLAG_USER_LOAD_NOT_PENDING 0
39#define CP_FLAG_USER_LOAD_PENDING 1
40#define CP_FLAG_UNK0B ((0 * 32) + 0xb)
41#define CP_FLAG_UNK0B_CLEAR 0
42#define CP_FLAG_UNK0B_SET 1
43#define CP_FLAG_UNK1D ((0 * 32) + 0x1d)
44#define CP_FLAG_UNK1D_CLEAR 0
45#define CP_FLAG_UNK1D_SET 1
46#define CP_FLAG_UNK20 ((1 * 32) + 0)
47#define CP_FLAG_UNK20_CLEAR 0
48#define CP_FLAG_UNK20_SET 1
49#define CP_FLAG_STATUS ((2 * 32) + 0)
50#define CP_FLAG_STATUS_BUSY 0
51#define CP_FLAG_STATUS_IDLE 1
52#define CP_FLAG_AUTO_SAVE ((2 * 32) + 4)
53#define CP_FLAG_AUTO_SAVE_NOT_PENDING 0
54#define CP_FLAG_AUTO_SAVE_PENDING 1
55#define CP_FLAG_AUTO_LOAD ((2 * 32) + 5)
56#define CP_FLAG_AUTO_LOAD_NOT_PENDING 0
57#define CP_FLAG_AUTO_LOAD_PENDING 1
58#define CP_FLAG_XFER ((2 * 32) + 11)
59#define CP_FLAG_XFER_IDLE 0
60#define CP_FLAG_XFER_BUSY 1
61#define CP_FLAG_NEWCTX ((2 * 32) + 12)
62#define CP_FLAG_NEWCTX_BUSY 0
63#define CP_FLAG_NEWCTX_DONE 1
64#define CP_FLAG_ALWAYS ((2 * 32) + 13)
65#define CP_FLAG_ALWAYS_FALSE 0
66#define CP_FLAG_ALWAYS_TRUE 1
67
68#define CP_CTX 0x00100000
69#define CP_CTX_COUNT 0x000f0000
70#define CP_CTX_COUNT_SHIFT 16
71#define CP_CTX_REG 0x00003fff
72#define CP_LOAD_SR 0x00200000
73#define CP_LOAD_SR_VALUE 0x000fffff
74#define CP_BRA 0x00400000
75#define CP_BRA_IP 0x0001ff00
76#define CP_BRA_IP_SHIFT 8
77#define CP_BRA_IF_CLEAR 0x00000080
78#define CP_BRA_FLAG 0x0000007f
79#define CP_WAIT 0x00500000
80#define CP_WAIT_SET 0x00000080
81#define CP_WAIT_FLAG 0x0000007f
82#define CP_SET 0x00700000
83#define CP_SET_1 0x00000080
84#define CP_SET_FLAG 0x0000007f
85#define CP_NEWCTX 0x00600004
86#define CP_NEXT_TO_SWAP 0x00600005
87#define CP_SET_CONTEXT_POINTER 0x00600006
88#define CP_SET_XFER_POINTER 0x00600007
89#define CP_ENABLE 0x00600009
90#define CP_END 0x0060000c
91#define CP_NEXT_TO_CURRENT 0x0060000d
92#define CP_DISABLE1 0x0090ffff
93#define CP_DISABLE2 0x0091ffff
94#define CP_XFER_1 0x008000ff
95#define CP_XFER_2 0x008800ff
96#define CP_SEEK_1 0x00c000ff
97#define CP_SEEK_2 0x00c800ff
98
99#include "drmP.h"
100#include "nouveau_drv.h"
101#include "nouveau_grctx.h"
102
103/*
104 * This code deals with PGRAPH contexts on NV50 family cards. Like NV40, it's
105 * the GPU itself that does context-switching, but it needs a special
106 * microcode to do it. And it's the driver's task to supply this microcode,
107 * further known as ctxprog, as well as the initial context values, known
108 * as ctxvals.
109 *
110 * Without ctxprog, you cannot switch contexts. Not even in software, since
111 * the majority of context [xfer strands] isn't accessible directly. You're
112 * stuck with a single channel, and you also suffer all the problems resulting
113 * from missing ctxvals, since you cannot load them.
114 *
115 * Without ctxvals, you're stuck with PGRAPH's default context. It's enough to
116 * run 2d operations, but trying to utilise 3d or CUDA will just lock you up,
117 * since you don't have... some sort of needed setup.
118 *
119 * Nouveau will just disable acceleration if not given ctxprog + ctxvals, since
120 * it's too much hassle to handle no-ctxprog as a special case.
121 */
122
123/*
124 * How ctxprogs work.
125 *
126 * The ctxprog is written in its own kind of microcode, with a very small and
127 * crappy set of available commands. You upload it to a small [512 insns]
128 * area of memory on PGRAPH, and it'll be run when PFIFO wants PGRAPH to
129 * switch channels, or when the driver explicitly requests it. Stuff visible
130 * to ctxprog consists of: PGRAPH MMIO registers, PGRAPH context strands,
131 * the per-channel context save area in VRAM [known as ctxvals or grctx],
132 * 4 flags registers, a scratch register, two grctx pointers, plus many
133 * random poorly-understood details.
134 *
135 * When ctxprog runs, it's supposed to check what operations are asked of it,
136 * save old context if requested, optionally reset PGRAPH and switch to the
137 * new channel, and load the new context. Context consists of three major
138 * parts: subset of MMIO registers and two "xfer areas".
139 */
140
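/*
 * A minimal worked illustration of the opcode layout above (a sketch only;
 * the cp_* emission helpers actually used below live in nouveau_grctx.h,
 * and the helper name here is made up): a CP_CTX opcode carries the
 * save/restore count in bits 16-19 and the target register, as a word
 * offset into PGRAPH's MMIO space, in bits 0-13.
 */
static inline uint32_t
example_cp_ctx_word(uint32_t reg, uint32_t count)
{
	/* e.g. example_cp_ctx_word(0x400808, 7) == 0x00170202 */
	return CP_CTX | (count << CP_CTX_COUNT_SHIFT) |
	       (((reg - 0x400000) >> 2) & CP_CTX_REG);
}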
141/* TODO:
142 * - document unimplemented bits compared to nvidia
143 * - NVAx: make a TP subroutine, use it.
144 * - use 0x4008fc instead of 0x1540?
145 */
146
147enum cp_label {
148 cp_check_load = 1,
149 cp_setup_auto_load,
150 cp_setup_load,
151 cp_setup_save,
152 cp_swap_state,
153 cp_prepare_exit,
154 cp_exit,
155};
156
157static void nv50_graph_construct_mmio(struct nouveau_grctx *ctx);
158static void nv50_graph_construct_xfer1(struct nouveau_grctx *ctx);
159static void nv50_graph_construct_xfer2(struct nouveau_grctx *ctx);
160
161/* Main function: construct the ctxprog skeleton, call the other functions. */
162
163int
164nv50_grctx_init(struct nouveau_grctx *ctx)
165{
166 struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
167
168 switch (dev_priv->chipset) {
169 case 0x50:
170 case 0x84:
171 case 0x86:
172 case 0x92:
173 case 0x94:
174 case 0x96:
175 case 0x98:
176 case 0xa0:
177 case 0xa5:
178 case 0xa8:
179 case 0xaa:
180 case 0xac:
181 break;
182 default:
183 NV_ERROR(ctx->dev, "I don't know how to make a ctxprog for "
184 "your NV%x card.\n", dev_priv->chipset);
185 NV_ERROR(ctx->dev, "Disabling acceleration. Please contact "
186 "the devs.\n");
187 return -ENOSYS;
188 }
189 /* decide whether we're loading/unloading the context */
190 cp_bra (ctx, AUTO_SAVE, PENDING, cp_setup_save);
191 cp_bra (ctx, USER_SAVE, PENDING, cp_setup_save);
192
193 cp_name(ctx, cp_check_load);
194 cp_bra (ctx, AUTO_LOAD, PENDING, cp_setup_auto_load);
195 cp_bra (ctx, USER_LOAD, PENDING, cp_setup_load);
196 cp_bra (ctx, ALWAYS, TRUE, cp_exit);
197
198 /* setup for context load */
199 cp_name(ctx, cp_setup_auto_load);
200 cp_out (ctx, CP_DISABLE1);
201 cp_out (ctx, CP_DISABLE2);
202 cp_out (ctx, CP_ENABLE);
203 cp_out (ctx, CP_NEXT_TO_SWAP);
204 cp_set (ctx, UNK01, SET);
205 cp_name(ctx, cp_setup_load);
206 cp_out (ctx, CP_NEWCTX);
207 cp_wait(ctx, NEWCTX, BUSY);
208 cp_set (ctx, UNK1D, CLEAR);
209 cp_set (ctx, SWAP_DIRECTION, LOAD);
210 cp_bra (ctx, UNK0B, SET, cp_prepare_exit);
211 cp_bra (ctx, ALWAYS, TRUE, cp_swap_state);
212
213 /* setup for context save */
214 cp_name(ctx, cp_setup_save);
215 cp_set (ctx, UNK1D, SET);
216 cp_wait(ctx, STATUS, BUSY);
217 cp_set (ctx, UNK01, SET);
218 cp_set (ctx, SWAP_DIRECTION, SAVE);
219
220 /* general PGRAPH state */
221 cp_name(ctx, cp_swap_state);
222 cp_set (ctx, UNK03, SET);
223 cp_pos (ctx, 0x00004/4);
224 cp_ctx (ctx, 0x400828, 1); /* needed; otherwise, flickering happens. */
225 cp_pos (ctx, 0x00100/4);
226 nv50_graph_construct_mmio(ctx);
227 nv50_graph_construct_xfer1(ctx);
228 nv50_graph_construct_xfer2(ctx);
229
230 cp_bra (ctx, SWAP_DIRECTION, SAVE, cp_check_load);
231
232 cp_set (ctx, UNK20, SET);
233 cp_set (ctx, SWAP_DIRECTION, SAVE); /* no idea why this is needed, but fixes at least one lockup. */
234 cp_lsr (ctx, ctx->ctxvals_base);
235 cp_out (ctx, CP_SET_XFER_POINTER);
236 cp_lsr (ctx, 4);
237 cp_out (ctx, CP_SEEK_1);
238 cp_out (ctx, CP_XFER_1);
239 cp_wait(ctx, XFER, BUSY);
240
241 /* pre-exit state updates */
242 cp_name(ctx, cp_prepare_exit);
243 cp_set (ctx, UNK01, CLEAR);
244 cp_set (ctx, UNK03, CLEAR);
245 cp_set (ctx, UNK1D, CLEAR);
246
247 cp_bra (ctx, USER_SAVE, PENDING, cp_exit);
248 cp_out (ctx, CP_NEXT_TO_CURRENT);
249
250 cp_name(ctx, cp_exit);
251 cp_set (ctx, USER_SAVE, NOT_PENDING);
252 cp_set (ctx, USER_LOAD, NOT_PENDING);
253 cp_out (ctx, CP_END);
254 ctx->ctxvals_pos += 0x400; /* padding... no idea why you need it */
255
256 return 0;
257}
258
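/*
 * How the generator above is driven (a sketch mirroring the nv50_graph.c
 * hunks earlier in this patch; the function name is illustrative): in
 * NOUVEAU_GRCTX_PROG mode ctx.data points at a uint32_t buffer and the
 * cp_* calls emit ctxprog microcode words into it, while in
 * NOUVEAU_GRCTX_VALS mode ctx.data points at the channel's grctx gpuobj
 * and the gr_def()/xf_emit() walk writes initial context values instead.
 */
static int
example_build_ctxprog(struct drm_device *dev, uint32_t *cp)
{
	struct nouveau_grctx ctx = {};

	ctx.dev = dev;
	ctx.mode = NOUVEAU_GRCTX_PROG;
	ctx.data = cp;			/* 512-word buffer, as in nv50_graph.c */
	ctx.ctxprog_max = 512;
	return nv50_grctx_init(&ctx);	/* ctx.ctxprog_len words were emitted */
}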
259/*
260 * Constructs MMIO part of ctxprog and ctxvals. Just a matter of knowing which
261 * registers to save/restore and the default values for them.
262 */
263
264static void
265nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
266{
267 struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
268 int i, j;
269 int offset, base;
270 uint32_t units = nv_rd32 (ctx->dev, 0x1540);
271
272 /* 0800 */
273 cp_ctx(ctx, 0x400808, 7);
274 gr_def(ctx, 0x400814, 0x00000030);
275 cp_ctx(ctx, 0x400834, 0x32);
276 if (dev_priv->chipset == 0x50) {
277 gr_def(ctx, 0x400834, 0xff400040);
278 gr_def(ctx, 0x400838, 0xfff00080);
279 gr_def(ctx, 0x40083c, 0xfff70090);
280 gr_def(ctx, 0x400840, 0xffe806a8);
281 }
282 gr_def(ctx, 0x400844, 0x00000002);
283 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
284 gr_def(ctx, 0x400894, 0x00001000);
285 gr_def(ctx, 0x4008e8, 0x00000003);
286 gr_def(ctx, 0x4008ec, 0x00001000);
287 if (dev_priv->chipset == 0x50)
288 cp_ctx(ctx, 0x400908, 0xb);
289 else if (dev_priv->chipset < 0xa0)
290 cp_ctx(ctx, 0x400908, 0xc);
291 else
292 cp_ctx(ctx, 0x400908, 0xe);
293
294 if (dev_priv->chipset >= 0xa0)
295 cp_ctx(ctx, 0x400b00, 0x1);
296 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) {
297 cp_ctx(ctx, 0x400b10, 0x1);
298 gr_def(ctx, 0x400b10, 0x0001629d);
299 cp_ctx(ctx, 0x400b20, 0x1);
300 gr_def(ctx, 0x400b20, 0x0001629d);
301 }
302
303 /* 0C00 */
304 cp_ctx(ctx, 0x400c08, 0x2);
305 gr_def(ctx, 0x400c08, 0x0000fe0c);
306
307 /* 1000 */
308 if (dev_priv->chipset < 0xa0) {
309 cp_ctx(ctx, 0x401008, 0x4);
310 gr_def(ctx, 0x401014, 0x00001000);
311 } else if (dev_priv->chipset == 0xa0 || dev_priv->chipset >= 0xaa) {
312 cp_ctx(ctx, 0x401008, 0x5);
313 gr_def(ctx, 0x401018, 0x00001000);
314 } else {
315 cp_ctx(ctx, 0x401008, 0x5);
316 gr_def(ctx, 0x401018, 0x00004000);
317 }
318
319 /* 1400 */
320 cp_ctx(ctx, 0x401400, 0x8);
321 cp_ctx(ctx, 0x401424, 0x3);
322 if (dev_priv->chipset == 0x50)
323 gr_def(ctx, 0x40142c, 0x0001fd87);
324 else
325 gr_def(ctx, 0x40142c, 0x00000187);
326 cp_ctx(ctx, 0x401540, 0x5);
327 gr_def(ctx, 0x401550, 0x00001018);
328
329 /* 1800 */
330 cp_ctx(ctx, 0x401814, 0x1);
331 gr_def(ctx, 0x401814, 0x000000ff);
332 if (dev_priv->chipset == 0x50) {
333 cp_ctx(ctx, 0x40181c, 0xe);
334 gr_def(ctx, 0x401850, 0x00000004);
335 } else if (dev_priv->chipset < 0xa0) {
336 cp_ctx(ctx, 0x40181c, 0xf);
337 gr_def(ctx, 0x401854, 0x00000004);
338 } else {
339 cp_ctx(ctx, 0x40181c, 0x13);
340 gr_def(ctx, 0x401864, 0x00000004);
341 }
342
343 /* 1C00 */
344 cp_ctx(ctx, 0x401c00, 0x1);
345 switch (dev_priv->chipset) {
346 case 0x50:
347 gr_def(ctx, 0x401c00, 0x0001005f);
348 break;
349 case 0x84:
350 case 0x86:
351 case 0x94:
352 gr_def(ctx, 0x401c00, 0x044d00df);
353 break;
354 case 0x92:
355 case 0x96:
356 case 0x98:
357 case 0xa0:
358 case 0xaa:
359 case 0xac:
360 gr_def(ctx, 0x401c00, 0x042500df);
361 break;
362 case 0xa5:
363 case 0xa8:
364 gr_def(ctx, 0x401c00, 0x142500df);
365 break;
366 }
367
368 /* 2400 */
369 cp_ctx(ctx, 0x402400, 0x1);
370 if (dev_priv->chipset == 0x50)
371 cp_ctx(ctx, 0x402408, 0x1);
372 else
373 cp_ctx(ctx, 0x402408, 0x2);
374 gr_def(ctx, 0x402408, 0x00000600);
375
376 /* 2800 */
377 cp_ctx(ctx, 0x402800, 0x1);
378 if (dev_priv->chipset == 0x50)
379 gr_def(ctx, 0x402800, 0x00000006);
380
381 /* 2C00 */
382 cp_ctx(ctx, 0x402c08, 0x6);
383 if (dev_priv->chipset != 0x50)
384 gr_def(ctx, 0x402c14, 0x01000000);
385 gr_def(ctx, 0x402c18, 0x000000ff);
386 if (dev_priv->chipset == 0x50)
387 cp_ctx(ctx, 0x402ca0, 0x1);
388 else
389 cp_ctx(ctx, 0x402ca0, 0x2);
390 if (dev_priv->chipset < 0xa0)
391 gr_def(ctx, 0x402ca0, 0x00000400);
392 else if (dev_priv->chipset == 0xa0 || dev_priv->chipset >= 0xaa)
393 gr_def(ctx, 0x402ca0, 0x00000800);
394 else
395 gr_def(ctx, 0x402ca0, 0x00000400);
396 cp_ctx(ctx, 0x402cac, 0x4);
397
398 /* 3000 */
399 cp_ctx(ctx, 0x403004, 0x1);
400 gr_def(ctx, 0x403004, 0x00000001);
401
402 /* 3404 */
403 if (dev_priv->chipset >= 0xa0) {
404 cp_ctx(ctx, 0x403404, 0x1);
405 gr_def(ctx, 0x403404, 0x00000001);
406 }
407
408 /* 5000 */
409 cp_ctx(ctx, 0x405000, 0x1);
410 switch (dev_priv->chipset) {
411 case 0x50:
412 gr_def(ctx, 0x405000, 0x00300080);
413 break;
414 case 0x84:
415 case 0xa0:
416 case 0xa5:
417 case 0xa8:
418 case 0xaa:
419 case 0xac:
420 gr_def(ctx, 0x405000, 0x000e0080);
421 break;
422 case 0x86:
423 case 0x92:
424 case 0x94:
425 case 0x96:
426 case 0x98:
427 gr_def(ctx, 0x405000, 0x00000080);
428 break;
429 }
430 cp_ctx(ctx, 0x405014, 0x1);
431 gr_def(ctx, 0x405014, 0x00000004);
432 cp_ctx(ctx, 0x40501c, 0x1);
433 cp_ctx(ctx, 0x405024, 0x1);
434 cp_ctx(ctx, 0x40502c, 0x1);
435
436 /* 5400 or maybe 4800 */
437 if (dev_priv->chipset == 0x50) {
438 offset = 0x405400;
439 cp_ctx(ctx, 0x405400, 0xea);
440 } else if (dev_priv->chipset < 0x94) {
441 offset = 0x405400;
442 cp_ctx(ctx, 0x405400, 0xcb);
443 } else if (dev_priv->chipset < 0xa0) {
444 offset = 0x405400;
445 cp_ctx(ctx, 0x405400, 0xcc);
446 } else if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) {
447 offset = 0x404800;
448 cp_ctx(ctx, 0x404800, 0xda);
449 } else {
450 offset = 0x405400;
451 cp_ctx(ctx, 0x405400, 0xd4);
452 }
453 gr_def(ctx, offset + 0x0c, 0x00000002);
454 gr_def(ctx, offset + 0x10, 0x00000001);
455 if (dev_priv->chipset >= 0x94)
456 offset += 4;
457 gr_def(ctx, offset + 0x1c, 0x00000001);
458 gr_def(ctx, offset + 0x20, 0x00000100);
459 gr_def(ctx, offset + 0x38, 0x00000002);
460 gr_def(ctx, offset + 0x3c, 0x00000001);
461 gr_def(ctx, offset + 0x40, 0x00000001);
462 gr_def(ctx, offset + 0x50, 0x00000001);
463 gr_def(ctx, offset + 0x54, 0x003fffff);
464 gr_def(ctx, offset + 0x58, 0x00001fff);
465 gr_def(ctx, offset + 0x60, 0x00000001);
466 gr_def(ctx, offset + 0x64, 0x00000001);
467 gr_def(ctx, offset + 0x6c, 0x00000001);
468 gr_def(ctx, offset + 0x70, 0x00000001);
469 gr_def(ctx, offset + 0x74, 0x00000001);
470 gr_def(ctx, offset + 0x78, 0x00000004);
471 gr_def(ctx, offset + 0x7c, 0x00000001);
472 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
473 offset += 4;
474 gr_def(ctx, offset + 0x80, 0x00000001);
475 gr_def(ctx, offset + 0x84, 0x00000001);
476 gr_def(ctx, offset + 0x88, 0x00000007);
477 gr_def(ctx, offset + 0x8c, 0x00000001);
478 gr_def(ctx, offset + 0x90, 0x00000007);
479 gr_def(ctx, offset + 0x94, 0x00000001);
480 gr_def(ctx, offset + 0x98, 0x00000001);
481 gr_def(ctx, offset + 0x9c, 0x00000001);
482 if (dev_priv->chipset == 0x50) {
483 gr_def(ctx, offset + 0xb0, 0x00000001);
484 gr_def(ctx, offset + 0xb4, 0x00000001);
485 gr_def(ctx, offset + 0xbc, 0x00000001);
486 gr_def(ctx, offset + 0xc0, 0x0000000a);
487 gr_def(ctx, offset + 0xd0, 0x00000040);
488 gr_def(ctx, offset + 0xd8, 0x00000002);
489 gr_def(ctx, offset + 0xdc, 0x00000100);
490 gr_def(ctx, offset + 0xe0, 0x00000001);
491 gr_def(ctx, offset + 0xe4, 0x00000100);
492 gr_def(ctx, offset + 0x100, 0x00000001);
493 gr_def(ctx, offset + 0x124, 0x00000004);
494 gr_def(ctx, offset + 0x13c, 0x00000001);
495 gr_def(ctx, offset + 0x140, 0x00000100);
496 gr_def(ctx, offset + 0x148, 0x00000001);
497 gr_def(ctx, offset + 0x154, 0x00000100);
498 gr_def(ctx, offset + 0x158, 0x00000001);
499 gr_def(ctx, offset + 0x15c, 0x00000100);
500 gr_def(ctx, offset + 0x164, 0x00000001);
501 gr_def(ctx, offset + 0x170, 0x00000100);
502 gr_def(ctx, offset + 0x174, 0x00000001);
503 gr_def(ctx, offset + 0x17c, 0x00000001);
504 gr_def(ctx, offset + 0x188, 0x00000002);
505 gr_def(ctx, offset + 0x190, 0x00000001);
506 gr_def(ctx, offset + 0x198, 0x00000001);
507 gr_def(ctx, offset + 0x1ac, 0x00000003);
508 offset += 0xd0;
509 } else {
510 gr_def(ctx, offset + 0xb0, 0x00000001);
511 gr_def(ctx, offset + 0xb4, 0x00000100);
512 gr_def(ctx, offset + 0xbc, 0x00000001);
513 gr_def(ctx, offset + 0xc8, 0x00000100);
514 gr_def(ctx, offset + 0xcc, 0x00000001);
515 gr_def(ctx, offset + 0xd0, 0x00000100);
516 gr_def(ctx, offset + 0xd8, 0x00000001);
517 gr_def(ctx, offset + 0xe4, 0x00000100);
518 }
519 gr_def(ctx, offset + 0xf8, 0x00000004);
520 gr_def(ctx, offset + 0xfc, 0x00000070);
521 gr_def(ctx, offset + 0x100, 0x00000080);
522 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
523 offset += 4;
524 gr_def(ctx, offset + 0x114, 0x0000000c);
525 if (dev_priv->chipset == 0x50)
526 offset -= 4;
527 gr_def(ctx, offset + 0x11c, 0x00000008);
528 gr_def(ctx, offset + 0x120, 0x00000014);
529 if (dev_priv->chipset == 0x50) {
530 gr_def(ctx, offset + 0x124, 0x00000026);
531 offset -= 0x18;
532 } else {
533 gr_def(ctx, offset + 0x128, 0x00000029);
534 gr_def(ctx, offset + 0x12c, 0x00000027);
535 gr_def(ctx, offset + 0x130, 0x00000026);
536 gr_def(ctx, offset + 0x134, 0x00000008);
537 gr_def(ctx, offset + 0x138, 0x00000004);
538 gr_def(ctx, offset + 0x13c, 0x00000027);
539 }
540 gr_def(ctx, offset + 0x148, 0x00000001);
541 gr_def(ctx, offset + 0x14c, 0x00000002);
542 gr_def(ctx, offset + 0x150, 0x00000003);
543 gr_def(ctx, offset + 0x154, 0x00000004);
544 gr_def(ctx, offset + 0x158, 0x00000005);
545 gr_def(ctx, offset + 0x15c, 0x00000006);
546 gr_def(ctx, offset + 0x160, 0x00000007);
547 gr_def(ctx, offset + 0x164, 0x00000001);
548 gr_def(ctx, offset + 0x1a8, 0x000000cf);
549 if (dev_priv->chipset == 0x50)
550 offset -= 4;
551 gr_def(ctx, offset + 0x1d8, 0x00000080);
552 gr_def(ctx, offset + 0x1dc, 0x00000004);
553 gr_def(ctx, offset + 0x1e0, 0x00000004);
554 if (dev_priv->chipset == 0x50)
555 offset -= 4;
556 else
557 gr_def(ctx, offset + 0x1e4, 0x00000003);
558 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) {
559 gr_def(ctx, offset + 0x1ec, 0x00000003);
560 offset += 8;
561 }
562 gr_def(ctx, offset + 0x1e8, 0x00000001);
563 if (dev_priv->chipset == 0x50)
564 offset -= 4;
565 gr_def(ctx, offset + 0x1f4, 0x00000012);
566 gr_def(ctx, offset + 0x1f8, 0x00000010);
567 gr_def(ctx, offset + 0x1fc, 0x0000000c);
568 gr_def(ctx, offset + 0x200, 0x00000001);
569 gr_def(ctx, offset + 0x210, 0x00000004);
570 gr_def(ctx, offset + 0x214, 0x00000002);
571 gr_def(ctx, offset + 0x218, 0x00000004);
572 if (dev_priv->chipset >= 0xa0)
573 offset += 4;
574 gr_def(ctx, offset + 0x224, 0x003fffff);
575 gr_def(ctx, offset + 0x228, 0x00001fff);
576 if (dev_priv->chipset == 0x50)
577 offset -= 0x20;
578 else if (dev_priv->chipset >= 0xa0) {
579 gr_def(ctx, offset + 0x250, 0x00000001);
580 gr_def(ctx, offset + 0x254, 0x00000001);
581 gr_def(ctx, offset + 0x258, 0x00000002);
582 offset += 0x10;
583 }
584 gr_def(ctx, offset + 0x250, 0x00000004);
585 gr_def(ctx, offset + 0x254, 0x00000014);
586 gr_def(ctx, offset + 0x258, 0x00000001);
587 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
588 offset += 4;
589 gr_def(ctx, offset + 0x264, 0x00000002);
590 if (dev_priv->chipset >= 0xa0)
591 offset += 8;
592 gr_def(ctx, offset + 0x270, 0x00000001);
593 gr_def(ctx, offset + 0x278, 0x00000002);
594 gr_def(ctx, offset + 0x27c, 0x00001000);
595 if (dev_priv->chipset == 0x50)
596 offset -= 0xc;
597 else {
598 gr_def(ctx, offset + 0x280, 0x00000e00);
599 gr_def(ctx, offset + 0x284, 0x00001000);
600 gr_def(ctx, offset + 0x288, 0x00001e00);
601 }
602 gr_def(ctx, offset + 0x290, 0x00000001);
603 gr_def(ctx, offset + 0x294, 0x00000001);
604 gr_def(ctx, offset + 0x298, 0x00000001);
605 gr_def(ctx, offset + 0x29c, 0x00000001);
606 gr_def(ctx, offset + 0x2a0, 0x00000001);
607 gr_def(ctx, offset + 0x2b0, 0x00000200);
608 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) {
609 gr_def(ctx, offset + 0x2b4, 0x00000200);
610 offset += 4;
611 }
612 if (dev_priv->chipset < 0xa0) {
613 gr_def(ctx, offset + 0x2b8, 0x00000001);
614 gr_def(ctx, offset + 0x2bc, 0x00000070);
615 gr_def(ctx, offset + 0x2c0, 0x00000080);
616 gr_def(ctx, offset + 0x2cc, 0x00000001);
617 gr_def(ctx, offset + 0x2d0, 0x00000070);
618 gr_def(ctx, offset + 0x2d4, 0x00000080);
619 } else {
620 gr_def(ctx, offset + 0x2b8, 0x00000001);
621 gr_def(ctx, offset + 0x2bc, 0x000000f0);
622 gr_def(ctx, offset + 0x2c0, 0x000000ff);
623 gr_def(ctx, offset + 0x2cc, 0x00000001);
624 gr_def(ctx, offset + 0x2d0, 0x000000f0);
625 gr_def(ctx, offset + 0x2d4, 0x000000ff);
626 gr_def(ctx, offset + 0x2dc, 0x00000009);
627 offset += 4;
628 }
629 gr_def(ctx, offset + 0x2e4, 0x00000001);
630 gr_def(ctx, offset + 0x2e8, 0x000000cf);
631 gr_def(ctx, offset + 0x2f0, 0x00000001);
632 gr_def(ctx, offset + 0x300, 0x000000cf);
633 gr_def(ctx, offset + 0x308, 0x00000002);
634 gr_def(ctx, offset + 0x310, 0x00000001);
635 gr_def(ctx, offset + 0x318, 0x00000001);
636 gr_def(ctx, offset + 0x320, 0x000000cf);
637 gr_def(ctx, offset + 0x324, 0x000000cf);
638 gr_def(ctx, offset + 0x328, 0x00000001);
639
640 /* 6000? */
641 if (dev_priv->chipset == 0x50)
642 cp_ctx(ctx, 0x4063e0, 0x1);
643
644 /* 6800 */
645 if (dev_priv->chipset < 0x90) {
646 cp_ctx(ctx, 0x406814, 0x2b);
647 gr_def(ctx, 0x406818, 0x00000f80);
648 gr_def(ctx, 0x406860, 0x007f0080);
649 gr_def(ctx, 0x40689c, 0x007f0080);
650 } else {
651 cp_ctx(ctx, 0x406814, 0x4);
652 if (dev_priv->chipset == 0x98)
653 gr_def(ctx, 0x406818, 0x00000f80);
654 else
655 gr_def(ctx, 0x406818, 0x00001f80);
656 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
657 gr_def(ctx, 0x40681c, 0x00000030);
658 cp_ctx(ctx, 0x406830, 0x3);
659 }
660
661 /* 7000: per-ROP group state */
662 for (i = 0; i < 8; i++) {
663 if (units & (1<<(i+16))) {
664 cp_ctx(ctx, 0x407000 + (i<<8), 3);
665 if (dev_priv->chipset == 0x50)
666 gr_def(ctx, 0x407000 + (i<<8), 0x1b74f820);
667 else if (dev_priv->chipset != 0xa5)
668 gr_def(ctx, 0x407000 + (i<<8), 0x3b74f821);
669 else
670 gr_def(ctx, 0x407000 + (i<<8), 0x7b74f821);
671 gr_def(ctx, 0x407004 + (i<<8), 0x89058001);
672
673 if (dev_priv->chipset == 0x50) {
674 cp_ctx(ctx, 0x407010 + (i<<8), 1);
675 } else if (dev_priv->chipset < 0xa0) {
676 cp_ctx(ctx, 0x407010 + (i<<8), 2);
677 gr_def(ctx, 0x407010 + (i<<8), 0x00001000);
678 gr_def(ctx, 0x407014 + (i<<8), 0x0000001f);
679 } else {
680 cp_ctx(ctx, 0x407010 + (i<<8), 3);
681 gr_def(ctx, 0x407010 + (i<<8), 0x00001000);
682 if (dev_priv->chipset != 0xa5)
683 gr_def(ctx, 0x407014 + (i<<8), 0x000000ff);
684 else
685 gr_def(ctx, 0x407014 + (i<<8), 0x000001ff);
686 }
687
688 cp_ctx(ctx, 0x407080 + (i<<8), 4);
689 if (dev_priv->chipset != 0xa5)
690 gr_def(ctx, 0x407080 + (i<<8), 0x027c10fa);
691 else
692 gr_def(ctx, 0x407080 + (i<<8), 0x827c10fa);
693 if (dev_priv->chipset == 0x50)
694 gr_def(ctx, 0x407084 + (i<<8), 0x000000c0);
695 else
696 gr_def(ctx, 0x407084 + (i<<8), 0x400000c0);
697 gr_def(ctx, 0x407088 + (i<<8), 0xb7892080);
698
699 if (dev_priv->chipset < 0xa0)
700 cp_ctx(ctx, 0x407094 + (i<<8), 1);
701 else if (dev_priv->chipset <= 0xa0 || dev_priv->chipset >= 0xaa)
702 cp_ctx(ctx, 0x407094 + (i<<8), 3);
703 else {
704 cp_ctx(ctx, 0x407094 + (i<<8), 4);
705 gr_def(ctx, 0x4070a0 + (i<<8), 1);
706 }
707 }
708 }
709
710 cp_ctx(ctx, 0x407c00, 0x3);
711 if (dev_priv->chipset < 0x90)
712 gr_def(ctx, 0x407c00, 0x00010040);
713 else if (dev_priv->chipset < 0xa0)
714 gr_def(ctx, 0x407c00, 0x00390040);
715 else
716 gr_def(ctx, 0x407c00, 0x003d0040);
717 gr_def(ctx, 0x407c08, 0x00000022);
718 if (dev_priv->chipset >= 0xa0) {
719 cp_ctx(ctx, 0x407c10, 0x3);
720 cp_ctx(ctx, 0x407c20, 0x1);
721 cp_ctx(ctx, 0x407c2c, 0x1);
722 }
723
724 if (dev_priv->chipset < 0xa0) {
725 cp_ctx(ctx, 0x407d00, 0x9);
726 } else {
727 cp_ctx(ctx, 0x407d00, 0x15);
728 }
729 if (dev_priv->chipset == 0x98)
730 gr_def(ctx, 0x407d08, 0x00380040);
731 else {
732 if (dev_priv->chipset < 0x90)
733 gr_def(ctx, 0x407d08, 0x00010040);
734 else if (dev_priv->chipset < 0xa0)
735 gr_def(ctx, 0x407d08, 0x00390040);
736 else
737 gr_def(ctx, 0x407d08, 0x003d0040);
738 gr_def(ctx, 0x407d0c, 0x00000022);
739 }
740
741 /* 8000+: per-TP state */
742 for (i = 0; i < 10; i++) {
743 if (units & (1<<i)) {
744 if (dev_priv->chipset < 0xa0)
745 base = 0x408000 + (i<<12);
746 else
747 base = 0x408000 + (i<<11);
748 if (dev_priv->chipset < 0xa0)
749 offset = base + 0xc00;
750 else
751 offset = base + 0x80;
752 cp_ctx(ctx, offset + 0x00, 1);
753 gr_def(ctx, offset + 0x00, 0x0000ff0a);
754 cp_ctx(ctx, offset + 0x08, 1);
755
756 /* per-MP state */
757 for (j = 0; j < (dev_priv->chipset < 0xa0 ? 2 : 4); j++) {
758 if (!(units & (1 << (j+24)))) continue;
759 if (dev_priv->chipset < 0xa0)
760 offset = base + 0x200 + (j<<7);
761 else
762 offset = base + 0x100 + (j<<7);
763 cp_ctx(ctx, offset, 0x20);
764 gr_def(ctx, offset + 0x00, 0x01800000);
765 gr_def(ctx, offset + 0x04, 0x00160000);
766 gr_def(ctx, offset + 0x08, 0x01800000);
767 gr_def(ctx, offset + 0x18, 0x0003ffff);
768 switch (dev_priv->chipset) {
769 case 0x50:
770 gr_def(ctx, offset + 0x1c, 0x00080000);
771 break;
772 case 0x84:
773 gr_def(ctx, offset + 0x1c, 0x00880000);
774 break;
775 case 0x86:
776 gr_def(ctx, offset + 0x1c, 0x008c0000);
777 break;
778 case 0x92:
779 case 0x96:
780 case 0x98:
781 gr_def(ctx, offset + 0x1c, 0x118c0000);
782 break;
783 case 0x94:
784 gr_def(ctx, offset + 0x1c, 0x10880000);
785 break;
786 case 0xa0:
787 case 0xa5:
788 gr_def(ctx, offset + 0x1c, 0x310c0000);
789 break;
790 case 0xa8:
791 case 0xaa:
792 case 0xac:
793 gr_def(ctx, offset + 0x1c, 0x300c0000);
794 break;
795 }
796 gr_def(ctx, offset + 0x40, 0x00010401);
797 if (dev_priv->chipset == 0x50)
798 gr_def(ctx, offset + 0x48, 0x00000040);
799 else
800 gr_def(ctx, offset + 0x48, 0x00000078);
801 gr_def(ctx, offset + 0x50, 0x000000bf);
802 gr_def(ctx, offset + 0x58, 0x00001210);
803 if (dev_priv->chipset == 0x50)
804 gr_def(ctx, offset + 0x5c, 0x00000080);
805 else
806 gr_def(ctx, offset + 0x5c, 0x08000080);
807 if (dev_priv->chipset >= 0xa0)
808 gr_def(ctx, offset + 0x68, 0x0000003e);
809 }
810
811 if (dev_priv->chipset < 0xa0)
812 cp_ctx(ctx, base + 0x300, 0x4);
813 else
814 cp_ctx(ctx, base + 0x300, 0x5);
815 if (dev_priv->chipset == 0x50)
816 gr_def(ctx, base + 0x304, 0x00007070);
817 else if (dev_priv->chipset < 0xa0)
818 gr_def(ctx, base + 0x304, 0x00027070);
819 else if (dev_priv->chipset <= 0xa0 || dev_priv->chipset >= 0xaa)
820 gr_def(ctx, base + 0x304, 0x01127070);
821 else
822 gr_def(ctx, base + 0x304, 0x05127070);
823
824 if (dev_priv->chipset < 0xa0)
825 cp_ctx(ctx, base + 0x318, 1);
826 else
827 cp_ctx(ctx, base + 0x320, 1);
828 if (dev_priv->chipset == 0x50)
829 gr_def(ctx, base + 0x318, 0x0003ffff);
830 else if (dev_priv->chipset < 0xa0)
831 gr_def(ctx, base + 0x318, 0x03ffffff);
832 else
833 gr_def(ctx, base + 0x320, 0x07ffffff);
834
835 if (dev_priv->chipset < 0xa0)
836 cp_ctx(ctx, base + 0x324, 5);
837 else
838 cp_ctx(ctx, base + 0x328, 4);
839
840 if (dev_priv->chipset < 0xa0) {
841 cp_ctx(ctx, base + 0x340, 9);
842 offset = base + 0x340;
843 } else if (dev_priv->chipset <= 0xa0 || dev_priv->chipset >= 0xaa) {
844 cp_ctx(ctx, base + 0x33c, 0xb);
845 offset = base + 0x344;
846 } else {
847 cp_ctx(ctx, base + 0x33c, 0xd);
848 offset = base + 0x344;
849 }
850 gr_def(ctx, offset + 0x0, 0x00120407);
851 gr_def(ctx, offset + 0x4, 0x05091507);
852 if (dev_priv->chipset == 0x84)
853 gr_def(ctx, offset + 0x8, 0x05100202);
854 else
855 gr_def(ctx, offset + 0x8, 0x05010202);
856 gr_def(ctx, offset + 0xc, 0x00030201);
857
858 cp_ctx(ctx, base + 0x400, 2);
859 gr_def(ctx, base + 0x404, 0x00000040);
860 cp_ctx(ctx, base + 0x40c, 2);
861 gr_def(ctx, base + 0x40c, 0x0d0c0b0a);
862 gr_def(ctx, base + 0x410, 0x00141210);
863
864 if (dev_priv->chipset < 0xa0)
865 offset = base + 0x800;
866 else
867 offset = base + 0x500;
868 cp_ctx(ctx, offset, 6);
869 gr_def(ctx, offset + 0x0, 0x000001f0);
870 gr_def(ctx, offset + 0x4, 0x00000001);
871 gr_def(ctx, offset + 0x8, 0x00000003);
872 if (dev_priv->chipset == 0x50 || dev_priv->chipset >= 0xaa)
873 gr_def(ctx, offset + 0xc, 0x00008000);
874 gr_def(ctx, offset + 0x14, 0x00039e00);
875 cp_ctx(ctx, offset + 0x1c, 2);
876 if (dev_priv->chipset == 0x50)
877 gr_def(ctx, offset + 0x1c, 0x00000040);
878 else
879 gr_def(ctx, offset + 0x1c, 0x00000100);
880 gr_def(ctx, offset + 0x20, 0x00003800);
881
882 if (dev_priv->chipset >= 0xa0) {
883 cp_ctx(ctx, base + 0x54c, 2);
884 if (dev_priv->chipset <= 0xa0 || dev_priv->chipset >= 0xaa)
885 gr_def(ctx, base + 0x54c, 0x003fe006);
886 else
887 gr_def(ctx, base + 0x54c, 0x003fe007);
888 gr_def(ctx, base + 0x550, 0x003fe000);
889 }
890
891 if (dev_priv->chipset < 0xa0)
892 offset = base + 0xa00;
893 else
894 offset = base + 0x680;
895 cp_ctx(ctx, offset, 1);
896 gr_def(ctx, offset, 0x00404040);
897
898 if (dev_priv->chipset < 0xa0)
899 offset = base + 0xe00;
900 else
901 offset = base + 0x700;
902 cp_ctx(ctx, offset, 2);
903 if (dev_priv->chipset < 0xa0)
904 gr_def(ctx, offset, 0x0077f005);
905 else if (dev_priv->chipset == 0xa5)
906 gr_def(ctx, offset, 0x6cf7f007);
907 else if (dev_priv->chipset == 0xa8)
908 gr_def(ctx, offset, 0x6cfff007);
909 else if (dev_priv->chipset == 0xac)
910 gr_def(ctx, offset, 0x0cfff007);
911 else
912 gr_def(ctx, offset, 0x0cf7f007);
913 if (dev_priv->chipset == 0x50)
914 gr_def(ctx, offset + 0x4, 0x00007fff);
915 else if (dev_priv->chipset < 0xa0)
916 gr_def(ctx, offset + 0x4, 0x003f7fff);
917 else
918 gr_def(ctx, offset + 0x4, 0x02bf7fff);
919 cp_ctx(ctx, offset + 0x2c, 1);
920 if (dev_priv->chipset == 0x50) {
921 cp_ctx(ctx, offset + 0x50, 9);
922 gr_def(ctx, offset + 0x54, 0x000003ff);
923 gr_def(ctx, offset + 0x58, 0x00000003);
924 gr_def(ctx, offset + 0x5c, 0x00000003);
925 gr_def(ctx, offset + 0x60, 0x000001ff);
926 gr_def(ctx, offset + 0x64, 0x0000001f);
927 gr_def(ctx, offset + 0x68, 0x0000000f);
928 gr_def(ctx, offset + 0x6c, 0x0000000f);
929 } else if (dev_priv->chipset < 0xa0) {
930 cp_ctx(ctx, offset + 0x50, 1);
931 cp_ctx(ctx, offset + 0x70, 1);
932 } else {
933 cp_ctx(ctx, offset + 0x50, 1);
934 cp_ctx(ctx, offset + 0x60, 5);
935 }
936 }
937 }
938}
939
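/*
 * The cp_ctx()/gr_def() pattern used throughout the function above, restated
 * as a minimal chunk (a hedged reading; the helpers themselves live in
 * nouveau_grctx.h): cp_ctx() declares a run of MMIO words as part of the
 * context, and gr_def() records a non-zero initial ctxvals value for one
 * register within the most recent run.  Registers with no gr_def() stay 0,
 * since the grctx object is allocated with NVOBJ_FLAG_ZERO_ALLOC.
 */
static void
example_mmio_chunk(struct nouveau_grctx *ctx)
{
	cp_ctx(ctx, 0x400c08, 0x2);		/* save/restore 0x400c08..0x400c0c */
	gr_def(ctx, 0x400c08, 0x0000fe0c);	/* initial value for 0x400c08 only */
}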
940/*
941 * xfer areas. These are a pain.
942 *
943 * There are 2 xfer areas: the first one is big and contains all sorts of
944 * stuff, the second is small and contains some per-TP context.
945 *
946 * Each area is split into 8 "strands". The areas, when saved to grctx,
947 * are made of 8-word blocks. Each block contains a single word from
948 * each strand. The strands are independent of each other, their
949 * addresses are unrelated to each other, and data in them is closely
950 * packed together. The strand layout varies a bit between cards: here
951 * and there, a single word is thrown out in the middle and the whole
952 * strand is offset by a bit from the corresponding one on another chipset.
953 * For this reason, addresses of stuff in strands are almost useless.
954 * Knowing the sequence of stuff and the size of gaps between them is much more
955 * useful, and that's how we build the strands in our generator.
956 *
957 * NVA0 takes this mess to a whole new level by cutting the old strands
958 * into a few dozen pieces [known as genes], rearranging them randomly,
959 * and putting them back together to make new strands. Hopefully these
960 * genes correspond more or less directly to the same PGRAPH subunits
961 * as in the 400040 register.
962 *
963 * The most common value in the default context is 0, and when the genes
964 * are separated by 0's, gene boundaries are quite speculative...
965 * some of them can be clearly deduced, others can be guessed, and yet
966 * others won't be resolved without figuring out the real meaning of
967 * given ctxval. For the same reason, the ending point of each strand
968 * is unknown, except for strand 0, which is the longest strand and
969 * whose end corresponds to the end of the whole xfer.
970 *
971 * An unsolved mystery is the seek instruction: it takes an argument
972 * in bits 8-18, and that argument is clearly the place in strands to
973 * seek to... but the offsets don't seem to correspond to offsets as
974 * seen in grctx. Perhaps there's another, real, not randomly-changing
975 * addressing in strands, and the xfer insn just happens to skip over
976 * the unused bits? NV10-NV30 PIPE comes to mind...
977 *
978 * As far as I know, there's no way to access the xfer areas directly
979 * without the help of ctxprog.
980 */
981
982static inline void
983xf_emit(struct nouveau_grctx *ctx, int num, uint32_t val) {
984 int i;
985 if (val && ctx->mode == NOUVEAU_GRCTX_VALS)
986 for (i = 0; i < num; i++)
987 nv_wo32(ctx->dev, ctx->data, ctx->ctxvals_pos + (i << 3), val);
988 ctx->ctxvals_pos += num << 3;
989}
990
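/*
 * A small sketch of the interleaving described above (the helper name is
 * illustrative): the save area is a sequence of 8-word blocks carrying one
 * word from each of the 8 strands, so word `w` of strand `s` lives at
 * 32-bit word index base + w * 8 + s.  That is why each strand constructor
 * starts at ctx->ctxvals_pos = offset + s, and why xf_emit() above advances
 * ctxvals_pos by (num << 3).
 */
static inline int
example_strand_word_index(int base, int strand, int word)
{
	return base + (word << 3) + strand;
}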
991/* Gene declarations... */
992
993static void nv50_graph_construct_gene_m2mf(struct nouveau_grctx *ctx);
994static void nv50_graph_construct_gene_unk1(struct nouveau_grctx *ctx);
995static void nv50_graph_construct_gene_unk2(struct nouveau_grctx *ctx);
996static void nv50_graph_construct_gene_unk3(struct nouveau_grctx *ctx);
997static void nv50_graph_construct_gene_unk4(struct nouveau_grctx *ctx);
998static void nv50_graph_construct_gene_unk5(struct nouveau_grctx *ctx);
999static void nv50_graph_construct_gene_unk6(struct nouveau_grctx *ctx);
1000static void nv50_graph_construct_gene_unk7(struct nouveau_grctx *ctx);
1001static void nv50_graph_construct_gene_unk8(struct nouveau_grctx *ctx);
1002static void nv50_graph_construct_gene_unk9(struct nouveau_grctx *ctx);
1003static void nv50_graph_construct_gene_unk10(struct nouveau_grctx *ctx);
1004static void nv50_graph_construct_gene_ropc(struct nouveau_grctx *ctx);
1005static void nv50_graph_construct_xfer_tp(struct nouveau_grctx *ctx);
1006
1007static void
1008nv50_graph_construct_xfer1(struct nouveau_grctx *ctx)
1009{
1010 struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
1011 int i;
1012 int offset;
1013 int size = 0;
1014 uint32_t units = nv_rd32 (ctx->dev, 0x1540);
1015
1016 offset = (ctx->ctxvals_pos+0x3f)&~0x3f;
1017 ctx->ctxvals_base = offset;
1018
1019 if (dev_priv->chipset < 0xa0) {
1020 /* Strand 0 */
1021 ctx->ctxvals_pos = offset;
1022 switch (dev_priv->chipset) {
1023 case 0x50:
1024 xf_emit(ctx, 0x99, 0);
1025 break;
1026 case 0x84:
1027 case 0x86:
1028 xf_emit(ctx, 0x384, 0);
1029 break;
1030 case 0x92:
1031 case 0x94:
1032 case 0x96:
1033 case 0x98:
1034 xf_emit(ctx, 0x380, 0);
1035 break;
1036 }
1037 nv50_graph_construct_gene_m2mf (ctx);
1038 switch (dev_priv->chipset) {
1039 case 0x50:
1040 case 0x84:
1041 case 0x86:
1042 case 0x98:
1043 xf_emit(ctx, 0x4c4, 0);
1044 break;
1045 case 0x92:
1046 case 0x94:
1047 case 0x96:
1048 xf_emit(ctx, 0x984, 0);
1049 break;
1050 }
1051 nv50_graph_construct_gene_unk5(ctx);
1052 if (dev_priv->chipset == 0x50)
1053 xf_emit(ctx, 0xa, 0);
1054 else
1055 xf_emit(ctx, 0xb, 0);
1056 nv50_graph_construct_gene_unk4(ctx);
1057 nv50_graph_construct_gene_unk3(ctx);
1058 if ((ctx->ctxvals_pos-offset)/8 > size)
1059 size = (ctx->ctxvals_pos-offset)/8;
1060
1061 /* Strand 1 */
1062 ctx->ctxvals_pos = offset + 0x1;
1063 nv50_graph_construct_gene_unk6(ctx);
1064 nv50_graph_construct_gene_unk7(ctx);
1065 nv50_graph_construct_gene_unk8(ctx);
1066 switch (dev_priv->chipset) {
1067 case 0x50:
1068 case 0x92:
1069 xf_emit(ctx, 0xfb, 0);
1070 break;
1071 case 0x84:
1072 xf_emit(ctx, 0xd3, 0);
1073 break;
1074 case 0x94:
1075 case 0x96:
1076 xf_emit(ctx, 0xab, 0);
1077 break;
1078 case 0x86:
1079 case 0x98:
1080 xf_emit(ctx, 0x6b, 0);
1081 break;
1082 }
1083 xf_emit(ctx, 2, 0x4e3bfdf);
1084 xf_emit(ctx, 4, 0);
1085 xf_emit(ctx, 1, 0x0fac6881);
1086 xf_emit(ctx, 0xb, 0);
1087 xf_emit(ctx, 2, 0x4e3bfdf);
1088 if ((ctx->ctxvals_pos-offset)/8 > size)
1089 size = (ctx->ctxvals_pos-offset)/8;
1090
1091 /* Strand 2 */
1092 ctx->ctxvals_pos = offset + 0x2;
1093 switch (dev_priv->chipset) {
1094 case 0x50:
1095 case 0x92:
1096 xf_emit(ctx, 0xa80, 0);
1097 break;
1098 case 0x84:
1099 xf_emit(ctx, 0xa7e, 0);
1100 break;
1101 case 0x94:
1102 case 0x96:
1103 xf_emit(ctx, 0xa7c, 0);
1104 break;
1105 case 0x86:
1106 case 0x98:
1107 xf_emit(ctx, 0xa7a, 0);
1108 break;
1109 }
1110 xf_emit(ctx, 1, 0x3fffff);
1111 xf_emit(ctx, 2, 0);
1112 xf_emit(ctx, 1, 0x1fff);
1113 xf_emit(ctx, 0xe, 0);
1114 nv50_graph_construct_gene_unk9(ctx);
1115 nv50_graph_construct_gene_unk2(ctx);
1116 nv50_graph_construct_gene_unk1(ctx);
1117 nv50_graph_construct_gene_unk10(ctx);
1118 if ((ctx->ctxvals_pos-offset)/8 > size)
1119 size = (ctx->ctxvals_pos-offset)/8;
1120
1121 /* Strand 3: per-ROP group state */
1122 ctx->ctxvals_pos = offset + 3;
1123 for (i = 0; i < 6; i++)
1124 if (units & (1 << (i + 16)))
1125 nv50_graph_construct_gene_ropc(ctx);
1126 if ((ctx->ctxvals_pos-offset)/8 > size)
1127 size = (ctx->ctxvals_pos-offset)/8;
1128
1129 /* Strands 4-7: per-TP state */
1130 for (i = 0; i < 4; i++) {
1131 ctx->ctxvals_pos = offset + 4 + i;
1132 if (units & (1 << (2 * i)))
1133 nv50_graph_construct_xfer_tp(ctx);
1134 if (units & (1 << (2 * i + 1)))
1135 nv50_graph_construct_xfer_tp(ctx);
1136 if ((ctx->ctxvals_pos-offset)/8 > size)
1137 size = (ctx->ctxvals_pos-offset)/8;
1138 }
1139 } else {
1140 /* Strand 0 */
1141 ctx->ctxvals_pos = offset;
1142 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
1143 xf_emit(ctx, 0x385, 0);
1144 else
1145 xf_emit(ctx, 0x384, 0);
1146 nv50_graph_construct_gene_m2mf(ctx);
1147 xf_emit(ctx, 0x950, 0);
1148 nv50_graph_construct_gene_unk10(ctx);
1149 xf_emit(ctx, 1, 0x0fac6881);
1150 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) {
1151 xf_emit(ctx, 1, 1);
1152 xf_emit(ctx, 3, 0);
1153 }
1154 nv50_graph_construct_gene_unk8(ctx);
1155 if (dev_priv->chipset == 0xa0)
1156 xf_emit(ctx, 0x189, 0);
1157 else if (dev_priv->chipset < 0xa8)
1158 xf_emit(ctx, 0x99, 0);
1159 else if (dev_priv->chipset == 0xaa)
1160 xf_emit(ctx, 0x65, 0);
1161 else
1162 xf_emit(ctx, 0x6d, 0);
1163 nv50_graph_construct_gene_unk9(ctx);
1164 if ((ctx->ctxvals_pos-offset)/8 > size)
1165 size = (ctx->ctxvals_pos-offset)/8;
1166
1167 /* Strand 1 */
1168 ctx->ctxvals_pos = offset + 1;
1169 nv50_graph_construct_gene_unk1(ctx);
1170 if ((ctx->ctxvals_pos-offset)/8 > size)
1171 size = (ctx->ctxvals_pos-offset)/8;
1172
1173 /* Strand 2 */
1174 ctx->ctxvals_pos = offset + 2;
1175 if (dev_priv->chipset == 0xa0) {
1176 nv50_graph_construct_gene_unk2(ctx);
1177 }
1178 xf_emit(ctx, 0x36, 0);
1179 nv50_graph_construct_gene_unk5(ctx);
1180 if ((ctx->ctxvals_pos-offset)/8 > size)
1181 size = (ctx->ctxvals_pos-offset)/8;
1182
1183 /* Strand 3 */
1184 ctx->ctxvals_pos = offset + 3;
1185 xf_emit(ctx, 1, 0);
1186 xf_emit(ctx, 1, 1);
1187 nv50_graph_construct_gene_unk6(ctx);
1188 if ((ctx->ctxvals_pos-offset)/8 > size)
1189 size = (ctx->ctxvals_pos-offset)/8;
1190
1191 /* Strand 4 */
1192 ctx->ctxvals_pos = offset + 4;
1193 if (dev_priv->chipset == 0xa0)
1194 xf_emit(ctx, 0xa80, 0);
1195 else
1196 xf_emit(ctx, 0xa7a, 0);
1197 xf_emit(ctx, 1, 0x3fffff);
1198 xf_emit(ctx, 2, 0);
1199 xf_emit(ctx, 1, 0x1fff);
1200 if ((ctx->ctxvals_pos-offset)/8 > size)
1201 size = (ctx->ctxvals_pos-offset)/8;
1202
1203 /* Strand 5 */
1204 ctx->ctxvals_pos = offset + 5;
1205 xf_emit(ctx, 1, 0);
1206 xf_emit(ctx, 1, 0x0fac6881);
1207 xf_emit(ctx, 0xb, 0);
1208 xf_emit(ctx, 2, 0x4e3bfdf);
1209 xf_emit(ctx, 3, 0);
1210 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
1211 xf_emit(ctx, 1, 0x11);
1212 xf_emit(ctx, 1, 0);
1213 xf_emit(ctx, 2, 0x4e3bfdf);
1214 xf_emit(ctx, 2, 0);
1215 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
1216 xf_emit(ctx, 1, 0x11);
1217 xf_emit(ctx, 1, 0);
1218 for (i = 0; i < 8; i++)
1219 if (units & (1<<(i+16)))
1220 nv50_graph_construct_gene_ropc(ctx);
1221 if ((ctx->ctxvals_pos-offset)/8 > size)
1222 size = (ctx->ctxvals_pos-offset)/8;
1223
1224 /* Strand 6 */
1225 ctx->ctxvals_pos = offset + 6;
1226 nv50_graph_construct_gene_unk3(ctx);
1227 xf_emit(ctx, 0xb, 0);
1228 nv50_graph_construct_gene_unk4(ctx);
1229 nv50_graph_construct_gene_unk7(ctx);
1230 if (units & (1 << 0))
1231 nv50_graph_construct_xfer_tp(ctx);
1232 if (units & (1 << 1))
1233 nv50_graph_construct_xfer_tp(ctx);
1234 if (units & (1 << 2))
1235 nv50_graph_construct_xfer_tp(ctx);
1236 if (units & (1 << 3))
1237 nv50_graph_construct_xfer_tp(ctx);
1238 if ((ctx->ctxvals_pos-offset)/8 > size)
1239 size = (ctx->ctxvals_pos-offset)/8;
1240
1241 /* Strand 7 */
1242 ctx->ctxvals_pos = offset + 7;
1243 if (dev_priv->chipset == 0xa0) {
1244 if (units & (1 << 4))
1245 nv50_graph_construct_xfer_tp(ctx);
1246 if (units & (1 << 5))
1247 nv50_graph_construct_xfer_tp(ctx);
1248 if (units & (1 << 6))
1249 nv50_graph_construct_xfer_tp(ctx);
1250 if (units & (1 << 7))
1251 nv50_graph_construct_xfer_tp(ctx);
1252 if (units & (1 << 8))
1253 nv50_graph_construct_xfer_tp(ctx);
1254 if (units & (1 << 9))
1255 nv50_graph_construct_xfer_tp(ctx);
1256 } else {
1257 nv50_graph_construct_gene_unk2(ctx);
1258 }
1259 if ((ctx->ctxvals_pos-offset)/8 > size)
1260 size = (ctx->ctxvals_pos-offset)/8;
1261 }
1262
1263 ctx->ctxvals_pos = offset + size * 8;
1264 ctx->ctxvals_pos = (ctx->ctxvals_pos+0x3f)&~0x3f;
1265 cp_lsr (ctx, offset);
1266 cp_out (ctx, CP_SET_XFER_POINTER);
1267 cp_lsr (ctx, size);
1268 cp_out (ctx, CP_SEEK_1);
1269 cp_out (ctx, CP_XFER_1);
1270 cp_wait(ctx, XFER, BUSY);
1271}
1272
1273/*
1274 * non-trivial demagiced parts of ctx init go here
1275 */
1276
1277static void
1278nv50_graph_construct_gene_m2mf(struct nouveau_grctx *ctx)
1279{
1280 /* m2mf state */
1281 xf_emit (ctx, 1, 0); /* DMA_NOTIFY instance >> 4 */
1282 xf_emit (ctx, 1, 0); /* DMA_BUFFER_IN instance >> 4 */
1283 xf_emit (ctx, 1, 0); /* DMA_BUFFER_OUT instance >> 4 */
1284 xf_emit (ctx, 1, 0); /* OFFSET_IN */
1285 xf_emit (ctx, 1, 0); /* OFFSET_OUT */
1286 xf_emit (ctx, 1, 0); /* PITCH_IN */
1287 xf_emit (ctx, 1, 0); /* PITCH_OUT */
1288 xf_emit (ctx, 1, 0); /* LINE_LENGTH */
1289 xf_emit (ctx, 1, 0); /* LINE_COUNT */
1290 xf_emit (ctx, 1, 0x21); /* FORMAT: bits 0-4 INPUT_INC, bits 5-9 OUTPUT_INC */
1291 xf_emit (ctx, 1, 1); /* LINEAR_IN */
1292 xf_emit (ctx, 1, 0x2); /* TILING_MODE_IN: bits 0-2 y tiling, bits 3-5 z tiling */
1293 xf_emit (ctx, 1, 0x100); /* TILING_PITCH_IN */
1294 xf_emit (ctx, 1, 0x100); /* TILING_HEIGHT_IN */
1295 xf_emit (ctx, 1, 1); /* TILING_DEPTH_IN */
1296 xf_emit (ctx, 1, 0); /* TILING_POSITION_IN_Z */
1297 xf_emit (ctx, 1, 0); /* TILING_POSITION_IN */
1298 xf_emit (ctx, 1, 1); /* LINEAR_OUT */
1299 xf_emit (ctx, 1, 0x2); /* TILING_MODE_OUT: bits 0-2 y tiling, bits 3-5 z tiling */
1300 xf_emit (ctx, 1, 0x100); /* TILING_PITCH_OUT */
1301 xf_emit (ctx, 1, 0x100); /* TILING_HEIGHT_OUT */
1302 xf_emit (ctx, 1, 1); /* TILING_DEPTH_OUT */
1303 xf_emit (ctx, 1, 0); /* TILING_POSITION_OUT_Z */
1304 xf_emit (ctx, 1, 0); /* TILING_POSITION_OUT */
1305 xf_emit (ctx, 1, 0); /* OFFSET_IN_HIGH */
1306 xf_emit (ctx, 1, 0); /* OFFSET_OUT_HIGH */
1307}
1308
1309static void
1310nv50_graph_construct_gene_unk1(struct nouveau_grctx *ctx)
1311{
1312 struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
1313 /* end of area 2 on pre-NVA0, area 1 on NVAx */
1314 xf_emit(ctx, 2, 4);
1315 xf_emit(ctx, 1, 0);
1316 xf_emit(ctx, 1, 0x80);
1317 xf_emit(ctx, 1, 4);
1318 xf_emit(ctx, 1, 0x80c14);
1319 xf_emit(ctx, 1, 0);
1320 if (dev_priv->chipset == 0x50)
1321 xf_emit(ctx, 1, 0x3ff);
1322 else
1323 xf_emit(ctx, 1, 0x7ff);
1324 switch (dev_priv->chipset) {
1325 case 0x50:
1326 case 0x86:
1327 case 0x98:
1328 case 0xaa:
1329 case 0xac:
1330 xf_emit(ctx, 0x542, 0);
1331 break;
1332 case 0x84:
1333 case 0x92:
1334 case 0x94:
1335 case 0x96:
1336 xf_emit(ctx, 0x942, 0);
1337 break;
1338 case 0xa0:
1339 xf_emit(ctx, 0x2042, 0);
1340 break;
1341 case 0xa5:
1342 case 0xa8:
1343 xf_emit(ctx, 0x842, 0);
1344 break;
1345 }
1346 xf_emit(ctx, 2, 4);
1347 xf_emit(ctx, 1, 0);
1348 xf_emit(ctx, 1, 0x80);
1349 xf_emit(ctx, 1, 4);
1350 xf_emit(ctx, 1, 1);
1351 xf_emit(ctx, 1, 0);
1352 xf_emit(ctx, 1, 0x27);
1353 xf_emit(ctx, 1, 0);
1354 xf_emit(ctx, 1, 0x26);
1355 xf_emit(ctx, 3, 0);
1356}
1357
1358static void
1359nv50_graph_construct_gene_unk10(struct nouveau_grctx *ctx)
1360{
1361 /* end of area 2 on pre-NVA0, area 1 on NVAx */
1362 xf_emit(ctx, 0x10, 0x04000000);
1363 xf_emit(ctx, 0x24, 0);
1364 xf_emit(ctx, 2, 0x04e3bfdf);
1365 xf_emit(ctx, 2, 0);
1366 xf_emit(ctx, 1, 0x1fe21);
1367}
1368
1369static void
1370nv50_graph_construct_gene_unk2(struct nouveau_grctx *ctx)
1371{
1372 struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
1373 /* middle of area 2 on pre-NVA0, beginning of area 2 on NVA0, area 7 on >NVA0 */
1374 if (dev_priv->chipset != 0x50) {
1375 xf_emit(ctx, 5, 0);
1376 xf_emit(ctx, 1, 0x80c14);
1377 xf_emit(ctx, 2, 0);
1378 xf_emit(ctx, 1, 0x804);
1379 xf_emit(ctx, 1, 0);
1380 xf_emit(ctx, 2, 4);
1381 xf_emit(ctx, 1, 0x8100c12);
1382 }
1383 xf_emit(ctx, 1, 0);
1384 xf_emit(ctx, 2, 4);
1385 xf_emit(ctx, 1, 0);
1386 xf_emit(ctx, 1, 0x10);
1387 if (dev_priv->chipset == 0x50)
1388 xf_emit(ctx, 3, 0);
1389 else
1390 xf_emit(ctx, 4, 0);
1391 xf_emit(ctx, 1, 0x804);
1392 xf_emit(ctx, 1, 1);
1393 xf_emit(ctx, 1, 0x1a);
1394 if (dev_priv->chipset != 0x50)
1395 xf_emit(ctx, 1, 0x7f);
1396 xf_emit(ctx, 1, 0);
1397 xf_emit(ctx, 1, 1);
1398 xf_emit(ctx, 1, 0x80c14);
1399 xf_emit(ctx, 1, 0);
1400 xf_emit(ctx, 1, 0x8100c12);
1401 xf_emit(ctx, 2, 4);
1402 xf_emit(ctx, 1, 0);
1403 xf_emit(ctx, 1, 0x10);
1404 xf_emit(ctx, 3, 0);
1405 xf_emit(ctx, 1, 1);
1406 xf_emit(ctx, 1, 0x8100c12);
1407 xf_emit(ctx, 6, 0);
1408 if (dev_priv->chipset == 0x50)
1409 xf_emit(ctx, 1, 0x3ff);
1410 else
1411 xf_emit(ctx, 1, 0x7ff);
1412 xf_emit(ctx, 1, 0x80c14);
1413 xf_emit(ctx, 0x38, 0);
1414 xf_emit(ctx, 1, 1);
1415 xf_emit(ctx, 2, 0);
1416 xf_emit(ctx, 1, 0x10);
1417 xf_emit(ctx, 0x38, 0);
1418 xf_emit(ctx, 2, 0x88);
1419 xf_emit(ctx, 2, 0);
1420 xf_emit(ctx, 1, 4);
1421 xf_emit(ctx, 0x16, 0);
1422 xf_emit(ctx, 1, 0x26);
1423 xf_emit(ctx, 2, 0);
1424 xf_emit(ctx, 1, 0x3f800000);
1425 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
1426 xf_emit(ctx, 4, 0);
1427 else
1428 xf_emit(ctx, 3, 0);
1429 xf_emit(ctx, 1, 0x1a);
1430 xf_emit(ctx, 1, 0x10);
1431 if (dev_priv->chipset != 0x50)
1432 xf_emit(ctx, 0x28, 0);
1433 else
1434 xf_emit(ctx, 0x25, 0);
1435 xf_emit(ctx, 1, 0x52);
1436 xf_emit(ctx, 1, 0);
1437 xf_emit(ctx, 1, 0x26);
1438 xf_emit(ctx, 1, 0);
1439 xf_emit(ctx, 2, 4);
1440 xf_emit(ctx, 1, 0);
1441 xf_emit(ctx, 1, 0x1a);
1442 xf_emit(ctx, 2, 0);
1443 xf_emit(ctx, 1, 0x00ffff00);
1444 xf_emit(ctx, 1, 0);
1445}
1446
1447static void
1448nv50_graph_construct_gene_unk3(struct nouveau_grctx *ctx)
1449{
1450 struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
1451 /* end of area 0 on pre-NVA0, beginning of area 6 on NVAx */
1452 xf_emit(ctx, 1, 0x3f);
1453 xf_emit(ctx, 0xa, 0);
1454 xf_emit(ctx, 1, 2);
1455 xf_emit(ctx, 2, 0x04000000);
1456 xf_emit(ctx, 8, 0);
1457 xf_emit(ctx, 1, 4);
1458 xf_emit(ctx, 3, 0);
1459 xf_emit(ctx, 1, 4);
1460 if (dev_priv->chipset == 0x50)
1461 xf_emit(ctx, 0x10, 0);
1462 else
1463 xf_emit(ctx, 0x11, 0);
1464 xf_emit(ctx, 1, 1);
1465 xf_emit(ctx, 1, 0x1001);
1466 xf_emit(ctx, 4, 0xffff);
1467 xf_emit(ctx, 0x20, 0);
1468 xf_emit(ctx, 0x10, 0x3f800000);
1469 xf_emit(ctx, 1, 0x10);
1470 if (dev_priv->chipset == 0x50)
1471 xf_emit(ctx, 1, 0);
1472 else
1473 xf_emit(ctx, 2, 0);
1474 xf_emit(ctx, 1, 3);
1475 xf_emit(ctx, 2, 0);
1476}
1477
1478static void
1479nv50_graph_construct_gene_unk4(struct nouveau_grctx *ctx)
1480{
1481 /* middle of area 0 on pre-NVA0, middle of area 6 on NVAx */
1482 xf_emit(ctx, 2, 0x04000000);
1483 xf_emit(ctx, 1, 0);
1484 xf_emit(ctx, 1, 0x80);
1485 xf_emit(ctx, 3, 0);
1486 xf_emit(ctx, 1, 0x80);
1487 xf_emit(ctx, 1, 0);
1488}
1489
1490static void
1491nv50_graph_construct_gene_unk5(struct nouveau_grctx *ctx)
1492{
1493 struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
1494 /* middle of area 0 on pre-NVA0 [after m2mf], end of area 2 on NVAx */
1495 xf_emit(ctx, 2, 4);
1496 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
1497 xf_emit(ctx, 0x1c4d, 0);
1498 else
1499 xf_emit(ctx, 0x1c4b, 0);
1500 xf_emit(ctx, 2, 4);
1501 xf_emit(ctx, 1, 0x8100c12);
1502 if (dev_priv->chipset != 0x50)
1503 xf_emit(ctx, 1, 3);
1504 xf_emit(ctx, 1, 0);
1505 xf_emit(ctx, 1, 0x8100c12);
1506 xf_emit(ctx, 1, 0);
1507 xf_emit(ctx, 1, 0x80c14);
1508 xf_emit(ctx, 1, 1);
1509 if (dev_priv->chipset >= 0xa0)
1510 xf_emit(ctx, 2, 4);
1511 xf_emit(ctx, 1, 0x80c14);
1512 xf_emit(ctx, 2, 0);
1513 xf_emit(ctx, 1, 0x8100c12);
1514 xf_emit(ctx, 1, 0x27);
1515 xf_emit(ctx, 2, 0);
1516 xf_emit(ctx, 1, 1);
1517 xf_emit(ctx, 0x3c1, 0);
1518 xf_emit(ctx, 1, 1);
1519 xf_emit(ctx, 0x16, 0);
1520 xf_emit(ctx, 1, 0x8100c12);
1521 xf_emit(ctx, 1, 0);
1522}
1523
1524static void
1525nv50_graph_construct_gene_unk6(struct nouveau_grctx *ctx)
1526{
1527 struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
1528 /* beginning of area 1 on pre-NVA0 [after m2mf], area 3 on NVAx */
1529 xf_emit(ctx, 4, 0);
1530 xf_emit(ctx, 1, 0xf);
1531 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
1532 xf_emit(ctx, 8, 0);
1533 else
1534 xf_emit(ctx, 4, 0);
1535 xf_emit(ctx, 1, 0x20);
1536 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
1537 xf_emit(ctx, 0x11, 0);
1538 else if (dev_priv->chipset >= 0xa0)
1539 xf_emit(ctx, 0xf, 0);
1540 else
1541 xf_emit(ctx, 0xe, 0);
1542 xf_emit(ctx, 1, 0x1a);
1543 xf_emit(ctx, 0xd, 0);
1544 xf_emit(ctx, 2, 4);
1545 xf_emit(ctx, 1, 0);
1546 xf_emit(ctx, 1, 4);
1547 xf_emit(ctx, 1, 8);
1548 xf_emit(ctx, 1, 0);
1549 if (dev_priv->chipset == 0x50)
1550 xf_emit(ctx, 1, 0x3ff);
1551 else
1552 xf_emit(ctx, 1, 0x7ff);
1553 if (dev_priv->chipset == 0xa8)
1554 xf_emit(ctx, 1, 0x1e00);
1555 xf_emit(ctx, 0xc, 0);
1556 xf_emit(ctx, 1, 0xf);
1557 if (dev_priv->chipset == 0x50)
1558 xf_emit(ctx, 0x125, 0);
1559 else if (dev_priv->chipset < 0xa0)
1560 xf_emit(ctx, 0x126, 0);
1561 else if (dev_priv->chipset == 0xa0 || dev_priv->chipset >= 0xaa)
1562 xf_emit(ctx, 0x124, 0);
1563 else
1564 xf_emit(ctx, 0x1f7, 0);
1565 xf_emit(ctx, 1, 0xf);
1566 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
1567 xf_emit(ctx, 3, 0);
1568 else
1569 xf_emit(ctx, 1, 0);
1570 xf_emit(ctx, 1, 1);
1571 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
1572 xf_emit(ctx, 0xa1, 0);
1573 else
1574 xf_emit(ctx, 0x5a, 0);
1575 xf_emit(ctx, 1, 0xf);
1576 if (dev_priv->chipset < 0xa0)
1577 xf_emit(ctx, 0x834, 0);
1578 else if (dev_priv->chipset == 0xa0)
1579 xf_emit(ctx, 0x1873, 0);
1580 else if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
1581 xf_emit(ctx, 0x8ba, 0);
1582 else
1583 xf_emit(ctx, 0x833, 0);
1584 xf_emit(ctx, 1, 0xf);
1585 xf_emit(ctx, 0xf, 0);
1586}
1587
1588static void
1589nv50_graph_construct_gene_unk7(struct nouveau_grctx *ctx)
1590{
1591 struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
1592 /* middle of area 1 on pre-NVA0 [after m2mf], middle of area 6 on NVAx */
1593 xf_emit(ctx, 2, 0);
1594 if (dev_priv->chipset == 0x50)
1595 xf_emit(ctx, 2, 1);
1596 else
1597 xf_emit(ctx, 2, 0);
1598 xf_emit(ctx, 1, 0);
1599 xf_emit(ctx, 1, 1);
1600 xf_emit(ctx, 2, 0x100);
1601 xf_emit(ctx, 1, 0x11);
1602 xf_emit(ctx, 1, 0);
1603 xf_emit(ctx, 1, 8);
1604 xf_emit(ctx, 5, 0);
1605 xf_emit(ctx, 1, 1);
1606 xf_emit(ctx, 1, 0);
1607 xf_emit(ctx, 3, 1);
1608 xf_emit(ctx, 1, 0xcf);
1609 xf_emit(ctx, 1, 2);
1610 xf_emit(ctx, 6, 0);
1611 xf_emit(ctx, 1, 1);
1612 xf_emit(ctx, 1, 0);
1613 xf_emit(ctx, 3, 1);
1614 xf_emit(ctx, 4, 0);
1615 xf_emit(ctx, 1, 4);
1616 xf_emit(ctx, 1, 0);
1617 xf_emit(ctx, 1, 1);
1618 xf_emit(ctx, 1, 0x15);
1619 xf_emit(ctx, 3, 0);
1620 xf_emit(ctx, 1, 0x4444480);
1621 xf_emit(ctx, 0x37, 0);
1622}
1623
1624static void
1625nv50_graph_construct_gene_unk8(struct nouveau_grctx *ctx)
1626{
1627 /* middle of area 1 on pre-NVA0 [after m2mf], middle of area 0 on NVAx */
1628 xf_emit(ctx, 4, 0);
1629 xf_emit(ctx, 1, 0x8100c12);
1630 xf_emit(ctx, 4, 0);
1631 xf_emit(ctx, 1, 0x100);
1632 xf_emit(ctx, 2, 0);
1633 xf_emit(ctx, 1, 0x10001);
1634 xf_emit(ctx, 1, 0);
1635 xf_emit(ctx, 1, 0x10001);
1636 xf_emit(ctx, 1, 1);
1637 xf_emit(ctx, 1, 0x10001);
1638 xf_emit(ctx, 1, 1);
1639 xf_emit(ctx, 1, 4);
1640 xf_emit(ctx, 1, 2);
1641}
1642
1643static void
1644nv50_graph_construct_gene_unk9(struct nouveau_grctx *ctx)
1645{
1646 struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
1647 /* middle of area 2 on pre-NVA0 [after m2mf], end of area 0 on NVAx */
1648 xf_emit(ctx, 1, 0x3f800000);
1649 xf_emit(ctx, 6, 0);
1650 xf_emit(ctx, 1, 4);
1651 xf_emit(ctx, 1, 0x1a);
1652 xf_emit(ctx, 2, 0);
1653 xf_emit(ctx, 1, 1);
1654 xf_emit(ctx, 0x12, 0);
1655 xf_emit(ctx, 1, 0x00ffff00);
1656 xf_emit(ctx, 6, 0);
1657 xf_emit(ctx, 1, 0xf);
1658 xf_emit(ctx, 7, 0);
1659 xf_emit(ctx, 1, 0x0fac6881);
1660 xf_emit(ctx, 1, 0x11);
1661 xf_emit(ctx, 0xf, 0);
1662 xf_emit(ctx, 1, 4);
1663 xf_emit(ctx, 2, 0);
1664 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
1665 xf_emit(ctx, 1, 3);
1666 else if (dev_priv->chipset >= 0xa0)
1667 xf_emit(ctx, 1, 1);
1668 xf_emit(ctx, 2, 0);
1669 xf_emit(ctx, 1, 2);
1670 xf_emit(ctx, 2, 0x04000000);
1671 xf_emit(ctx, 3, 0);
1672 xf_emit(ctx, 1, 5);
1673 xf_emit(ctx, 1, 0x52);
1674 if (dev_priv->chipset == 0x50) {
1675 xf_emit(ctx, 0x13, 0);
1676 } else {
1677 xf_emit(ctx, 4, 0);
1678 xf_emit(ctx, 1, 1);
1679 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
1680 xf_emit(ctx, 0x11, 0);
1681 else
1682 xf_emit(ctx, 0x10, 0);
1683 }
1684 xf_emit(ctx, 0x10, 0x3f800000);
1685 xf_emit(ctx, 1, 0x10);
1686 xf_emit(ctx, 0x26, 0);
1687 xf_emit(ctx, 1, 0x8100c12);
1688 xf_emit(ctx, 1, 5);
1689 xf_emit(ctx, 2, 0);
1690 xf_emit(ctx, 1, 1);
1691 xf_emit(ctx, 1, 0);
1692 xf_emit(ctx, 4, 0xffff);
1693 if (dev_priv->chipset != 0x50)
1694 xf_emit(ctx, 1, 3);
1695 if (dev_priv->chipset < 0xa0)
1696 xf_emit(ctx, 0x1f, 0);
1697 else if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
1698 xf_emit(ctx, 0xc, 0);
1699 else
1700 xf_emit(ctx, 3, 0);
1701 xf_emit(ctx, 1, 0x00ffff00);
1702 xf_emit(ctx, 1, 0x1a);
1703 if (dev_priv->chipset != 0x50) {
1704 xf_emit(ctx, 1, 0);
1705 xf_emit(ctx, 1, 3);
1706 }
1707 if (dev_priv->chipset < 0xa0)
1708 xf_emit(ctx, 0x26, 0);
1709 else
1710 xf_emit(ctx, 0x3c, 0);
1711 xf_emit(ctx, 1, 0x102);
1712 xf_emit(ctx, 1, 0);
1713 xf_emit(ctx, 4, 4);
1714 if (dev_priv->chipset >= 0xa0)
1715 xf_emit(ctx, 8, 0);
1716 xf_emit(ctx, 2, 4);
1717 xf_emit(ctx, 1, 0);
1718 if (dev_priv->chipset == 0x50)
1719 xf_emit(ctx, 1, 0x3ff);
1720 else
1721 xf_emit(ctx, 1, 0x7ff);
1722 xf_emit(ctx, 1, 0);
1723 xf_emit(ctx, 1, 0x102);
1724 xf_emit(ctx, 9, 0);
1725 xf_emit(ctx, 4, 4);
1726 xf_emit(ctx, 0x2c, 0);
1727}
1728
1729static void
1730nv50_graph_construct_gene_ropc(struct nouveau_grctx *ctx)
1731{
1732 struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
1733 int magic2;
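	/* magic2 selects a chipset-specific default: NV50, the NV84..NVA0/NVAA+
	 * group, and the 0xa3..0xa8 family each get their own value. The same
	 * constants reappear in nv50_graph_construct_xfer_tp_x2() below; their
	 * hardware meaning is unknown and the values are presumably taken from
	 * reverse-engineered context images. */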
1734 if (dev_priv->chipset == 0x50) {
1735 magic2 = 0x00003e60;
1736 } else if (dev_priv->chipset <= 0xa0 || dev_priv->chipset >= 0xaa) {
1737 magic2 = 0x001ffe67;
1738 } else {
1739 magic2 = 0x00087e67;
1740 }
1741 xf_emit(ctx, 8, 0);
1742 xf_emit(ctx, 1, 2);
1743 xf_emit(ctx, 1, 0);
1744 xf_emit(ctx, 1, magic2);
1745 xf_emit(ctx, 4, 0);
1746 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
1747 xf_emit(ctx, 1, 1);
1748 xf_emit(ctx, 7, 0);
1749 if (dev_priv->chipset >= 0xa0 && dev_priv->chipset < 0xaa)
1750 xf_emit(ctx, 1, 0x15);
1751 xf_emit(ctx, 1, 0);
1752 xf_emit(ctx, 1, 1);
1753 xf_emit(ctx, 1, 0x10);
1754 xf_emit(ctx, 2, 0);
1755 xf_emit(ctx, 1, 1);
1756 xf_emit(ctx, 4, 0);
1757 if (dev_priv->chipset == 0x86 || dev_priv->chipset == 0x92 || dev_priv->chipset == 0x98 || dev_priv->chipset >= 0xa0) {
1758 xf_emit(ctx, 1, 4);
1759 xf_emit(ctx, 1, 0x400);
1760 xf_emit(ctx, 1, 0x300);
1761 xf_emit(ctx, 1, 0x1001);
1762 if (dev_priv->chipset != 0xa0) {
1763 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
1764 xf_emit(ctx, 1, 0);
1765 else
1766 xf_emit(ctx, 1, 0x15);
1767 }
1768 xf_emit(ctx, 3, 0);
1769 }
1770 xf_emit(ctx, 2, 0);
1771 xf_emit(ctx, 1, 2);
1772 xf_emit(ctx, 8, 0);
1773 xf_emit(ctx, 1, 1);
1774 xf_emit(ctx, 1, 0x10);
1775 xf_emit(ctx, 1, 0);
1776 xf_emit(ctx, 1, 1);
1777 xf_emit(ctx, 0x13, 0);
1778 xf_emit(ctx, 1, 0x10);
1779 xf_emit(ctx, 0x10, 0);
1780 xf_emit(ctx, 0x10, 0x3f800000);
1781 xf_emit(ctx, 0x19, 0);
1782 xf_emit(ctx, 1, 0x10);
1783 xf_emit(ctx, 1, 0);
1784 xf_emit(ctx, 1, 0x3f);
1785 xf_emit(ctx, 6, 0);
1786 xf_emit(ctx, 1, 1);
1787 xf_emit(ctx, 1, 0);
1788 xf_emit(ctx, 1, 1);
1789 xf_emit(ctx, 1, 0);
1790 xf_emit(ctx, 1, 1);
1791 if (dev_priv->chipset >= 0xa0) {
1792 xf_emit(ctx, 2, 0);
1793 xf_emit(ctx, 1, 0x1001);
1794 xf_emit(ctx, 0xb, 0);
1795 } else {
1796 xf_emit(ctx, 0xc, 0);
1797 }
1798 xf_emit(ctx, 1, 0x11);
1799 xf_emit(ctx, 7, 0);
1800 xf_emit(ctx, 1, 0xf);
1801 xf_emit(ctx, 7, 0);
1802 xf_emit(ctx, 1, 0x11);
1803 if (dev_priv->chipset == 0x50)
1804 xf_emit(ctx, 4, 0);
1805 else
1806 xf_emit(ctx, 6, 0);
1807 xf_emit(ctx, 3, 1);
1808 xf_emit(ctx, 1, 2);
1809 xf_emit(ctx, 1, 1);
1810 xf_emit(ctx, 1, 2);
1811 xf_emit(ctx, 1, 1);
1812 xf_emit(ctx, 1, 0);
1813 xf_emit(ctx, 1, magic2);
1814 xf_emit(ctx, 1, 0);
1815 xf_emit(ctx, 1, 0x0fac6881);
1816 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) {
1817 xf_emit(ctx, 1, 0);
1818 xf_emit(ctx, 0x18, 1);
1819 xf_emit(ctx, 8, 2);
1820 xf_emit(ctx, 8, 1);
1821 xf_emit(ctx, 8, 2);
1822 xf_emit(ctx, 8, 1);
1823 xf_emit(ctx, 3, 0);
1824 xf_emit(ctx, 1, 1);
1825 xf_emit(ctx, 5, 0);
1826 xf_emit(ctx, 1, 1);
1827 xf_emit(ctx, 0x16, 0);
1828 } else {
1829 if (dev_priv->chipset >= 0xa0)
1830 xf_emit(ctx, 0x1b, 0);
1831 else
1832 xf_emit(ctx, 0x15, 0);
1833 }
1834 xf_emit(ctx, 1, 1);
1835 xf_emit(ctx, 1, 2);
1836 xf_emit(ctx, 2, 1);
1837 xf_emit(ctx, 1, 2);
1838 xf_emit(ctx, 2, 1);
1839 if (dev_priv->chipset >= 0xa0)
1840 xf_emit(ctx, 4, 0);
1841 else
1842 xf_emit(ctx, 3, 0);
1843 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) {
1844 xf_emit(ctx, 0x10, 1);
1845 xf_emit(ctx, 8, 2);
1846 xf_emit(ctx, 0x10, 1);
1847 xf_emit(ctx, 8, 2);
1848 xf_emit(ctx, 8, 1);
1849 xf_emit(ctx, 3, 0);
1850 }
1851 xf_emit(ctx, 1, 0x11);
1852 xf_emit(ctx, 1, 1);
1853 xf_emit(ctx, 0x5b, 0);
1854}
1855
1856static void
1857nv50_graph_construct_xfer_tp_x1(struct nouveau_grctx *ctx)
1858{
1859 struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
1860 int magic3;
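	/* magic3 is another per-chipset constant; 0 means the extra word is
	 * simply not emitted on those chipsets (see the two "if (magic3)"
	 * sites below). */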
1861 if (dev_priv->chipset == 0x50)
1862 magic3 = 0x1000;
1863 else if (dev_priv->chipset == 0x86 || dev_priv->chipset == 0x98 || dev_priv->chipset >= 0xa8)
1864 magic3 = 0x1e00;
1865 else
1866 magic3 = 0;
1867 xf_emit(ctx, 1, 0);
1868 xf_emit(ctx, 1, 4);
1869 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
1870 xf_emit(ctx, 0x24, 0);
1871 else if (dev_priv->chipset >= 0xa0)
1872 xf_emit(ctx, 0x14, 0);
1873 else
1874 xf_emit(ctx, 0x15, 0);
1875 xf_emit(ctx, 2, 4);
1876 if (dev_priv->chipset >= 0xa0)
1877 xf_emit(ctx, 1, 0x03020100);
1878 else
1879 xf_emit(ctx, 1, 0x00608080);
1880 xf_emit(ctx, 4, 0);
1881 xf_emit(ctx, 1, 4);
1882 xf_emit(ctx, 2, 0);
1883 xf_emit(ctx, 2, 4);
1884 xf_emit(ctx, 1, 0x80);
1885 if (magic3)
1886 xf_emit(ctx, 1, magic3);
1887 xf_emit(ctx, 1, 4);
1888 xf_emit(ctx, 0x24, 0);
1889 xf_emit(ctx, 1, 4);
1890 xf_emit(ctx, 1, 0x80);
1891 xf_emit(ctx, 1, 4);
1892 xf_emit(ctx, 1, 0x03020100);
1893 xf_emit(ctx, 1, 3);
1894 if (magic3)
1895 xf_emit(ctx, 1, magic3);
1896 xf_emit(ctx, 1, 4);
1897 xf_emit(ctx, 4, 0);
1898 xf_emit(ctx, 1, 4);
1899 xf_emit(ctx, 1, 3);
1900 xf_emit(ctx, 3, 0);
1901 xf_emit(ctx, 1, 4);
1902 if (dev_priv->chipset == 0x94 || dev_priv->chipset == 0x96)
1903 xf_emit(ctx, 0x1024, 0);
1904 else if (dev_priv->chipset < 0xa0)
1905 xf_emit(ctx, 0xa24, 0);
1906 else if (dev_priv->chipset == 0xa0 || dev_priv->chipset >= 0xaa)
1907 xf_emit(ctx, 0x214, 0);
1908 else
1909 xf_emit(ctx, 0x414, 0);
1910 xf_emit(ctx, 1, 4);
1911 xf_emit(ctx, 1, 3);
1912 xf_emit(ctx, 2, 0);
1913}
1914
1915static void
1916nv50_graph_construct_xfer_tp_x2(struct nouveau_grctx *ctx)
1917{
1918 struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
1919 int magic1, magic2;
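	/* Same chipset buckets and magic2 values as in
	 * nv50_graph_construct_gene_ropc() above; magic1 is the 0x3ff (NV50)
	 * vs 0x7ff limit also emitted by the gene_unk6/unk9 blocks. */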
1920 if (dev_priv->chipset == 0x50) {
1921 magic1 = 0x3ff;
1922 magic2 = 0x00003e60;
1923 } else if (dev_priv->chipset <= 0xa0 || dev_priv->chipset >= 0xaa) {
1924 magic1 = 0x7ff;
1925 magic2 = 0x001ffe67;
1926 } else {
1927 magic1 = 0x7ff;
1928 magic2 = 0x00087e67;
1929 }
1930 xf_emit(ctx, 3, 0);
1931 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
1932 xf_emit(ctx, 1, 1);
1933 xf_emit(ctx, 0xc, 0);
1934 xf_emit(ctx, 1, 0xf);
1935 xf_emit(ctx, 0xb, 0);
1936 xf_emit(ctx, 1, 4);
1937 xf_emit(ctx, 4, 0xffff);
1938 xf_emit(ctx, 8, 0);
1939 xf_emit(ctx, 1, 1);
1940 xf_emit(ctx, 3, 0);
1941 xf_emit(ctx, 1, 1);
1942 xf_emit(ctx, 5, 0);
1943 xf_emit(ctx, 1, 1);
1944 xf_emit(ctx, 2, 0);
1945 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) {
1946 xf_emit(ctx, 1, 3);
1947 xf_emit(ctx, 1, 0);
1948 } else if (dev_priv->chipset >= 0xa0)
1949 xf_emit(ctx, 1, 1);
1950 xf_emit(ctx, 0xa, 0);
1951 xf_emit(ctx, 2, 1);
1952 xf_emit(ctx, 1, 2);
1953 xf_emit(ctx, 2, 1);
1954 xf_emit(ctx, 1, 2);
1955 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) {
1956 xf_emit(ctx, 1, 0);
1957 xf_emit(ctx, 0x18, 1);
1958 xf_emit(ctx, 8, 2);
1959 xf_emit(ctx, 8, 1);
1960 xf_emit(ctx, 8, 2);
1961 xf_emit(ctx, 8, 1);
1962 xf_emit(ctx, 1, 0);
1963 }
1964 xf_emit(ctx, 1, 1);
1965 xf_emit(ctx, 1, 0);
1966 xf_emit(ctx, 1, 0x11);
1967 xf_emit(ctx, 7, 0);
1968 xf_emit(ctx, 1, 0x0fac6881);
1969 xf_emit(ctx, 2, 0);
1970 xf_emit(ctx, 1, 4);
1971 xf_emit(ctx, 3, 0);
1972 xf_emit(ctx, 1, 0x11);
1973 xf_emit(ctx, 1, 1);
1974 xf_emit(ctx, 1, 0);
1975 xf_emit(ctx, 3, 0xcf);
1976 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
1977 xf_emit(ctx, 1, 1);
1978 xf_emit(ctx, 0xa, 0);
1979 xf_emit(ctx, 2, 1);
1980 xf_emit(ctx, 1, 2);
1981 xf_emit(ctx, 2, 1);
1982 xf_emit(ctx, 1, 2);
1983 xf_emit(ctx, 1, 1);
1984 xf_emit(ctx, 1, 0);
1985 xf_emit(ctx, 8, 1);
1986 xf_emit(ctx, 1, 0x11);
1987 xf_emit(ctx, 7, 0);
1988 xf_emit(ctx, 1, 0x0fac6881);
1989 xf_emit(ctx, 1, 0xf);
1990 xf_emit(ctx, 7, 0);
1991 xf_emit(ctx, 1, magic2);
1992 xf_emit(ctx, 2, 0);
1993 xf_emit(ctx, 1, 0x11);
1994 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
1995 xf_emit(ctx, 2, 1);
1996 else
1997 xf_emit(ctx, 1, 1);
1998	if (dev_priv->chipset == 0x50)
1999 xf_emit(ctx, 1, 0);
2000 else
2001 xf_emit(ctx, 3, 0);
2002 xf_emit(ctx, 1, 4);
2003 xf_emit(ctx, 5, 0);
2004 xf_emit(ctx, 1, 1);
2005 xf_emit(ctx, 4, 0);
2006 xf_emit(ctx, 1, 0x11);
2007 xf_emit(ctx, 7, 0);
2008 xf_emit(ctx, 1, 0x0fac6881);
2009 xf_emit(ctx, 3, 0);
2010 xf_emit(ctx, 1, 0x11);
2011 xf_emit(ctx, 1, 1);
2012 xf_emit(ctx, 1, 0);
2013 xf_emit(ctx, 1, 1);
2014 xf_emit(ctx, 1, 0);
2015 xf_emit(ctx, 1, 1);
2016 xf_emit(ctx, 1, 0);
2017 xf_emit(ctx, 1, magic1);
2018 xf_emit(ctx, 1, 0);
2019 xf_emit(ctx, 1, 1);
2020 xf_emit(ctx, 1, 0);
2021 xf_emit(ctx, 1, 1);
2022 xf_emit(ctx, 2, 0);
2023 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
2024 xf_emit(ctx, 1, 1);
2025 xf_emit(ctx, 0x28, 0);
2026 xf_emit(ctx, 8, 8);
2027 xf_emit(ctx, 1, 0x11);
2028 xf_emit(ctx, 7, 0);
2029 xf_emit(ctx, 1, 0x0fac6881);
2030 xf_emit(ctx, 8, 0x400);
2031 xf_emit(ctx, 8, 0x300);
2032 xf_emit(ctx, 1, 1);
2033 xf_emit(ctx, 1, 0xf);
2034 xf_emit(ctx, 7, 0);
2035 xf_emit(ctx, 1, 0x20);
2036 xf_emit(ctx, 1, 0x11);
2037 xf_emit(ctx, 1, 0x100);
2038 xf_emit(ctx, 1, 0);
2039 xf_emit(ctx, 1, 1);
2040 xf_emit(ctx, 2, 0);
2041 xf_emit(ctx, 1, 0x40);
2042 xf_emit(ctx, 1, 0x100);
2043 xf_emit(ctx, 1, 0);
2044 xf_emit(ctx, 1, 3);
2045 xf_emit(ctx, 4, 0);
2046 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
2047 xf_emit(ctx, 1, 1);
2048 xf_emit(ctx, 1, magic2);
2049 xf_emit(ctx, 3, 0);
2050 xf_emit(ctx, 1, 2);
2051 xf_emit(ctx, 1, 0x0fac6881);
2052 xf_emit(ctx, 9, 0);
2053 xf_emit(ctx, 1, 1);
2054 xf_emit(ctx, 4, 0);
2055 xf_emit(ctx, 1, 4);
2056 xf_emit(ctx, 1, 0);
2057 xf_emit(ctx, 1, 1);
2058 xf_emit(ctx, 1, 0x400);
2059 xf_emit(ctx, 1, 0x300);
2060 xf_emit(ctx, 1, 0x1001);
2061 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
2062 xf_emit(ctx, 4, 0);
2063 else
2064 xf_emit(ctx, 3, 0);
2065 xf_emit(ctx, 1, 0x11);
2066 xf_emit(ctx, 7, 0);
2067 xf_emit(ctx, 1, 0x0fac6881);
2068 xf_emit(ctx, 1, 0xf);
2069 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) {
2070 xf_emit(ctx, 0x15, 0);
2071 xf_emit(ctx, 1, 1);
2072 xf_emit(ctx, 3, 0);
2073 } else
2074 xf_emit(ctx, 0x17, 0);
2075 if (dev_priv->chipset >= 0xa0)
2076 xf_emit(ctx, 1, 0x0fac6881);
2077 xf_emit(ctx, 1, magic2);
2078 xf_emit(ctx, 3, 0);
2079 xf_emit(ctx, 1, 0x11);
2080 xf_emit(ctx, 2, 0);
2081 xf_emit(ctx, 1, 4);
2082 xf_emit(ctx, 1, 0);
2083 xf_emit(ctx, 2, 1);
2084 xf_emit(ctx, 3, 0);
2085 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
2086 xf_emit(ctx, 2, 1);
2087 else
2088 xf_emit(ctx, 1, 1);
2089 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
2090 xf_emit(ctx, 2, 0);
2091 else if (dev_priv->chipset != 0x50)
2092 xf_emit(ctx, 1, 0);
2093}
2094
2095static void
2096nv50_graph_construct_xfer_tp_x3(struct nouveau_grctx *ctx)
2097{
2098 struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
2099 xf_emit(ctx, 3, 0);
2100 xf_emit(ctx, 1, 1);
2101 xf_emit(ctx, 1, 0);
2102 xf_emit(ctx, 1, 1);
2103 if (dev_priv->chipset == 0x50)
2104 xf_emit(ctx, 2, 0);
2105 else
2106 xf_emit(ctx, 3, 0);
2107 xf_emit(ctx, 1, 0x2a712488);
2108 xf_emit(ctx, 1, 0);
2109 xf_emit(ctx, 1, 0x4085c000);
2110 xf_emit(ctx, 1, 0x40);
2111 xf_emit(ctx, 1, 0x100);
2112 xf_emit(ctx, 1, 0x10100);
2113 xf_emit(ctx, 1, 0x02800000);
2114}
2115
2116static void
2117nv50_graph_construct_xfer_tp_x4(struct nouveau_grctx *ctx)
2118{
2119 struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
2120 xf_emit(ctx, 2, 0x04e3bfdf);
2121 xf_emit(ctx, 1, 1);
2122 xf_emit(ctx, 1, 0);
2123 xf_emit(ctx, 1, 0x00ffff00);
2124 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
2125 xf_emit(ctx, 2, 1);
2126 else
2127 xf_emit(ctx, 1, 1);
2128 xf_emit(ctx, 2, 0);
2129 xf_emit(ctx, 1, 0x00ffff00);
2130 xf_emit(ctx, 8, 0);
2131 xf_emit(ctx, 1, 1);
2132 xf_emit(ctx, 1, 0);
2133 xf_emit(ctx, 1, 1);
2134 xf_emit(ctx, 1, 0x30201000);
2135 xf_emit(ctx, 1, 0x70605040);
2136 xf_emit(ctx, 1, 0xb8a89888);
2137 xf_emit(ctx, 1, 0xf8e8d8c8);
2138 xf_emit(ctx, 1, 0);
2139 xf_emit(ctx, 1, 0x1a);
2140}
2141
2142static void
2143nv50_graph_construct_xfer_tp_x5(struct nouveau_grctx *ctx)
2144{
2145 struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
2146 xf_emit(ctx, 3, 0);
2147 xf_emit(ctx, 1, 0xfac6881);
2148 xf_emit(ctx, 4, 0);
2149 xf_emit(ctx, 1, 4);
2150 xf_emit(ctx, 1, 0);
2151 xf_emit(ctx, 2, 1);
2152 xf_emit(ctx, 2, 0);
2153 xf_emit(ctx, 1, 1);
2154 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
2155 xf_emit(ctx, 0xb, 0);
2156 else
2157 xf_emit(ctx, 0xa, 0);
2158 xf_emit(ctx, 8, 1);
2159 xf_emit(ctx, 1, 0x11);
2160 xf_emit(ctx, 7, 0);
2161 xf_emit(ctx, 1, 0xfac6881);
2162 xf_emit(ctx, 1, 0xf);
2163 xf_emit(ctx, 7, 0);
2164 xf_emit(ctx, 1, 0x11);
2165 xf_emit(ctx, 1, 1);
2166 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) {
2167 xf_emit(ctx, 6, 0);
2168 xf_emit(ctx, 1, 1);
2169 xf_emit(ctx, 6, 0);
2170 } else {
2171 xf_emit(ctx, 0xb, 0);
2172 }
2173}
2174
2175static void
2176nv50_graph_construct_xfer_tp(struct nouveau_grctx *ctx)
2177{
2178 struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
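	/* The same per-TP sub-blocks are emitted in a different order, and
	 * with different zero padding between them, on pre-NVA0 vs NVA0+
	 * chipsets. */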
2179 if (dev_priv->chipset < 0xa0) {
2180 nv50_graph_construct_xfer_tp_x1(ctx);
2181 nv50_graph_construct_xfer_tp_x2(ctx);
2182 nv50_graph_construct_xfer_tp_x3(ctx);
2183 if (dev_priv->chipset == 0x50)
2184 xf_emit(ctx, 0xf, 0);
2185 else
2186 xf_emit(ctx, 0x12, 0);
2187 nv50_graph_construct_xfer_tp_x4(ctx);
2188 } else {
2189 nv50_graph_construct_xfer_tp_x3(ctx);
2190 if (dev_priv->chipset < 0xaa)
2191 xf_emit(ctx, 0xc, 0);
2192 else
2193 xf_emit(ctx, 0xa, 0);
2194 nv50_graph_construct_xfer_tp_x2(ctx);
2195 nv50_graph_construct_xfer_tp_x5(ctx);
2196 nv50_graph_construct_xfer_tp_x4(ctx);
2197 nv50_graph_construct_xfer_tp_x1(ctx);
2198 }
2199}
2200
2201static void
2202nv50_graph_construct_xfer_tp2(struct nouveau_grctx *ctx)
2203{
2204 struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
2205 int i, mpcnt;
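	/* mpcnt is apparently the number of MPs per TP on this chipset
	 * (1 on NV98/NVAA, 3 on NVA0/NVA3/NVA5, 2 elsewhere); one block of
	 * state is emitted per MP, followed by the shared per-TP tail. */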
2206 if (dev_priv->chipset == 0x98 || dev_priv->chipset == 0xaa)
2207 mpcnt = 1;
2208 else if (dev_priv->chipset < 0xa0 || dev_priv->chipset >= 0xa8)
2209 mpcnt = 2;
2210 else
2211 mpcnt = 3;
2212 for (i = 0; i < mpcnt; i++) {
2213 xf_emit(ctx, 1, 0);
2214 xf_emit(ctx, 1, 0x80);
2215 xf_emit(ctx, 1, 0x80007004);
2216 xf_emit(ctx, 1, 0x04000400);
2217 if (dev_priv->chipset >= 0xa0)
2218 xf_emit(ctx, 1, 0xc0);
2219 xf_emit(ctx, 1, 0x1000);
2220 xf_emit(ctx, 2, 0);
2221 if (dev_priv->chipset == 0x86 || dev_priv->chipset == 0x98 || dev_priv->chipset >= 0xa8) {
2222 xf_emit(ctx, 1, 0xe00);
2223 xf_emit(ctx, 1, 0x1e00);
2224 }
2225 xf_emit(ctx, 1, 1);
2226 xf_emit(ctx, 2, 0);
2227 if (dev_priv->chipset == 0x50)
2228 xf_emit(ctx, 2, 0x1000);
2229 xf_emit(ctx, 1, 1);
2230 xf_emit(ctx, 1, 0);
2231 xf_emit(ctx, 1, 4);
2232 xf_emit(ctx, 1, 2);
2233 if (dev_priv->chipset >= 0xaa)
2234 xf_emit(ctx, 0xb, 0);
2235 else if (dev_priv->chipset >= 0xa0)
2236 xf_emit(ctx, 0xc, 0);
2237 else
2238 xf_emit(ctx, 0xa, 0);
2239 }
2240 xf_emit(ctx, 1, 0x08100c12);
2241 xf_emit(ctx, 1, 0);
2242 if (dev_priv->chipset >= 0xa0) {
2243 xf_emit(ctx, 1, 0x1fe21);
2244 }
2245 xf_emit(ctx, 5, 0);
2246 xf_emit(ctx, 4, 0xffff);
2247 xf_emit(ctx, 1, 1);
2248 xf_emit(ctx, 2, 0x10001);
2249 xf_emit(ctx, 1, 1);
2250 xf_emit(ctx, 1, 0);
2251 xf_emit(ctx, 1, 0x1fe21);
2252 xf_emit(ctx, 1, 0);
2253 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
2254 xf_emit(ctx, 1, 1);
2255 xf_emit(ctx, 4, 0);
2256 xf_emit(ctx, 1, 0x08100c12);
2257 xf_emit(ctx, 1, 4);
2258 xf_emit(ctx, 1, 0);
2259 xf_emit(ctx, 1, 2);
2260 xf_emit(ctx, 1, 0x11);
2261 xf_emit(ctx, 8, 0);
2262 xf_emit(ctx, 1, 0xfac6881);
2263 xf_emit(ctx, 1, 0);
2264 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
2265 xf_emit(ctx, 1, 3);
2266 xf_emit(ctx, 3, 0);
2267 xf_emit(ctx, 1, 4);
2268 xf_emit(ctx, 9, 0);
2269 xf_emit(ctx, 1, 2);
2270 xf_emit(ctx, 2, 1);
2271 xf_emit(ctx, 1, 2);
2272 xf_emit(ctx, 3, 1);
2273 xf_emit(ctx, 1, 0);
2274 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) {
2275 xf_emit(ctx, 8, 2);
2276 xf_emit(ctx, 0x10, 1);
2277 xf_emit(ctx, 8, 2);
2278 xf_emit(ctx, 0x18, 1);
2279 xf_emit(ctx, 3, 0);
2280 }
2281 xf_emit(ctx, 1, 4);
2282 if (dev_priv->chipset == 0x50)
2283 xf_emit(ctx, 0x3a0, 0);
2284 else if (dev_priv->chipset < 0x94)
2285 xf_emit(ctx, 0x3a2, 0);
2286 else if (dev_priv->chipset == 0x98 || dev_priv->chipset == 0xaa)
2287 xf_emit(ctx, 0x39f, 0);
2288 else
2289 xf_emit(ctx, 0x3a3, 0);
2290 xf_emit(ctx, 1, 0x11);
2291 xf_emit(ctx, 1, 0);
2292 xf_emit(ctx, 1, 1);
2293 xf_emit(ctx, 0x2d, 0);
2294}
2295
2296static void
2297nv50_graph_construct_xfer2(struct nouveau_grctx *ctx)
2298{
2299 struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
2300 int i;
2301 uint32_t offset;
2302 uint32_t units = nv_rd32 (ctx->dev, 0x1540);
2303 int size = 0;
2304
2305 offset = (ctx->ctxvals_pos+0x3f)&~0x3f;
2306
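	/* Xfer area 2 is stored as eight interleaved strands (stride 8 in
	 * ctxvals_pos); register 0x1540 is assumed to be the unit/TP enable
	 * mask. Pre-NVA0 gives each TP its own strand, NVA0+ packs the TPs
	 * into four strands, and size tracks the longest strand. */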
2307 if (dev_priv->chipset < 0xa0) {
2308 for (i = 0; i < 8; i++) {
2309 ctx->ctxvals_pos = offset + i;
2310 if (i == 0)
2311 xf_emit(ctx, 1, 0x08100c12);
2312 if (units & (1 << i))
2313 nv50_graph_construct_xfer_tp2(ctx);
2314 if ((ctx->ctxvals_pos-offset)/8 > size)
2315 size = (ctx->ctxvals_pos-offset)/8;
2316 }
2317 } else {
2318 /* Strand 0: TPs 0, 1 */
2319 ctx->ctxvals_pos = offset;
2320 xf_emit(ctx, 1, 0x08100c12);
2321 if (units & (1 << 0))
2322 nv50_graph_construct_xfer_tp2(ctx);
2323 if (units & (1 << 1))
2324 nv50_graph_construct_xfer_tp2(ctx);
2325 if ((ctx->ctxvals_pos-offset)/8 > size)
2326 size = (ctx->ctxvals_pos-offset)/8;
2327
2328		/* Strand 1: TPs 2, 3 */
2329 ctx->ctxvals_pos = offset + 1;
2330 if (units & (1 << 2))
2331 nv50_graph_construct_xfer_tp2(ctx);
2332 if (units & (1 << 3))
2333 nv50_graph_construct_xfer_tp2(ctx);
2334 if ((ctx->ctxvals_pos-offset)/8 > size)
2335 size = (ctx->ctxvals_pos-offset)/8;
2336
2337		/* Strand 2: TPs 4, 5, 6 */
2338 ctx->ctxvals_pos = offset + 2;
2339 if (units & (1 << 4))
2340 nv50_graph_construct_xfer_tp2(ctx);
2341 if (units & (1 << 5))
2342 nv50_graph_construct_xfer_tp2(ctx);
2343 if (units & (1 << 6))
2344 nv50_graph_construct_xfer_tp2(ctx);
2345 if ((ctx->ctxvals_pos-offset)/8 > size)
2346 size = (ctx->ctxvals_pos-offset)/8;
2347
2348		/* Strand 3: TPs 7, 8, 9 */
2349 ctx->ctxvals_pos = offset + 3;
2350 if (units & (1 << 7))
2351 nv50_graph_construct_xfer_tp2(ctx);
2352 if (units & (1 << 8))
2353 nv50_graph_construct_xfer_tp2(ctx);
2354 if (units & (1 << 9))
2355 nv50_graph_construct_xfer_tp2(ctx);
2356 if ((ctx->ctxvals_pos-offset)/8 > size)
2357 size = (ctx->ctxvals_pos-offset)/8;
2358 }
2359 ctx->ctxvals_pos = offset + size * 8;
2360 ctx->ctxvals_pos = (ctx->ctxvals_pos+0x3f)&~0x3f;
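	/* Hand the ctxprog the location and per-strand size of this area,
	 * then kick transfer set 2 and wait for the xfer engine to go idle. */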
2361 cp_lsr (ctx, offset);
2362 cp_out (ctx, CP_SET_XFER_POINTER);
2363 cp_lsr (ctx, size);
2364 cp_out (ctx, CP_SEEK_2);
2365 cp_out (ctx, CP_XFER_2);
2366 cp_wait(ctx, XFER, BUSY);
2367}
diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c
index f0dc4e36ef05..de1f5b0062c5 100644
--- a/drivers/gpu/drm/nouveau/nv50_instmem.c
+++ b/drivers/gpu/drm/nouveau/nv50_instmem.c
@@ -390,7 +390,7 @@ nv50_instmem_populate(struct drm_device *dev, struct nouveau_gpuobj *gpuobj,
 	if (gpuobj->im_backing)
 		return -EINVAL;
 
-	*sz = (*sz + (NV50_INSTMEM_PAGE_SIZE-1)) & ~(NV50_INSTMEM_PAGE_SIZE-1);
+	*sz = ALIGN(*sz, NV50_INSTMEM_PAGE_SIZE);
 	if (*sz == 0)
 		return -EINVAL;
 
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile
index 1cc7b937b1ea..ed38262d9985 100644
--- a/drivers/gpu/drm/radeon/Makefile
+++ b/drivers/gpu/drm/radeon/Makefile
@@ -30,6 +30,9 @@ $(obj)/r420_reg_safe.h: $(src)/reg_srcs/r420 $(obj)/mkregtable
 $(obj)/rs600_reg_safe.h: $(src)/reg_srcs/rs600 $(obj)/mkregtable
 	$(call if_changed,mkregtable)
 
+$(obj)/r600_reg_safe.h: $(src)/reg_srcs/r600 $(obj)/mkregtable
+	$(call if_changed,mkregtable)
+
 $(obj)/r100.o: $(obj)/r100_reg_safe.h $(obj)/rn50_reg_safe.h
 
 $(obj)/r200.o: $(obj)/r200_reg_safe.h
@@ -42,6 +45,8 @@ $(obj)/r420.o: $(obj)/r420_reg_safe.h
 
 $(obj)/rs600.o: $(obj)/rs600_reg_safe.h
 
+$(obj)/r600_cs.o: $(obj)/r600_reg_safe.h
+
 radeon-y := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o \
 	radeon_irq.o r300_cmdbuf.o r600_cp.o
 # add KMS driver
@@ -54,8 +59,10 @@ radeon-y += radeon_device.o radeon_kms.o \
 	radeon_cs.o radeon_bios.o radeon_benchmark.o r100.o r300.o r420.o \
 	rs400.o rs600.o rs690.o rv515.o r520.o r600.o rv770.o radeon_test.o \
 	r200.o radeon_legacy_tv.o r600_cs.o r600_blit.o r600_blit_shaders.o \
-	r600_blit_kms.o radeon_pm.o atombios_dp.o r600_audio.o r600_hdmi.o
+	r600_blit_kms.o radeon_pm.o atombios_dp.o r600_audio.o r600_hdmi.o \
+	evergreen.o
 
 radeon-$(CONFIG_COMPAT) += radeon_ioc32.o
+radeon-$(CONFIG_VGA_SWITCHEROO) += radeon_atpx_handler.o
 
 obj-$(CONFIG_DRM_RADEON)+= radeon.o
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
index 7f152f66f196..d75788feac6c 100644
--- a/drivers/gpu/drm/radeon/atom.c
+++ b/drivers/gpu/drm/radeon/atom.c
@@ -881,8 +881,6 @@ static void atom_op_shl(atom_exec_context *ctx, int *ptr, int arg)
 	uint8_t attr = U8((*ptr)++), shift;
 	uint32_t saved, dst;
 	int dptr = *ptr;
-	attr &= 0x38;
-	attr |= atom_def_dst[attr >> 3] << 6;
 	SDEBUG("   dst: ");
 	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
 	shift = atom_get_src(ctx, attr, ptr);
@@ -897,8 +895,6 @@ static void atom_op_shr(atom_exec_context *ctx, int *ptr, int arg)
 	uint8_t attr = U8((*ptr)++), shift;
 	uint32_t saved, dst;
 	int dptr = *ptr;
-	attr &= 0x38;
-	attr |= atom_def_dst[attr >> 3] << 6;
 	SDEBUG("   dst: ");
 	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
 	shift = atom_get_src(ctx, attr, ptr);
diff --git a/drivers/gpu/drm/radeon/atombios.h b/drivers/gpu/drm/radeon/atombios.h
index 91ad0d1c1b17..6732b5dd8ff4 100644
--- a/drivers/gpu/drm/radeon/atombios.h
+++ b/drivers/gpu/drm/radeon/atombios.h
@@ -1,5 +1,5 @@
 /*
  * Copyright 2006-2007 Advanced Micro Devices, Inc.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -20,10 +20,12 @@
  * OTHER DEALINGS IN THE SOFTWARE.
  */
 
+
 /****************************************************************************/
 /*Portion I: Definitions shared between VBIOS and Driver */
 /****************************************************************************/
 
+
 #ifndef _ATOMBIOS_H
 #define _ATOMBIOS_H
 
@@ -40,39 +42,46 @@
 #endif
 
 #ifdef _H2INC
-#ifndef ULONG
-typedef unsigned long ULONG;
-#endif
+  #ifndef ULONG
+  typedef unsigned long ULONG;
+  #endif
 
-#ifndef UCHAR
-typedef unsigned char UCHAR;
-#endif
+  #ifndef UCHAR
+  typedef unsigned char UCHAR;
+  #endif
 
-#ifndef USHORT
-typedef unsigned short USHORT;
-#endif
+  #ifndef USHORT
+  typedef unsigned short USHORT;
+  #endif
 #endif
 
 #define ATOM_DAC_A 0
 #define ATOM_DAC_B 1
 #define ATOM_EXT_DAC 2
 
 #define ATOM_CRTC1 0
 #define ATOM_CRTC2 1
+#define ATOM_CRTC3 2
+#define ATOM_CRTC4 3
+#define ATOM_CRTC5 4
+#define ATOM_CRTC6 5
+#define ATOM_CRTC_INVALID 0xFF
 
 #define ATOM_DIGA 0
 #define ATOM_DIGB 1
 
 #define ATOM_PPLL1 0
 #define ATOM_PPLL2 1
+#define ATOM_DCPLL 2
+#define ATOM_PPLL_INVALID 0xFF
 
 #define ATOM_SCALER1 0
 #define ATOM_SCALER2 1
 
 #define ATOM_SCALER_DISABLE 0
 #define ATOM_SCALER_CENTER 1
 #define ATOM_SCALER_EXPANSION 2
 #define ATOM_SCALER_MULTI_EX 3
 
 #define ATOM_DISABLE 0
 #define ATOM_ENABLE 1
@@ -82,6 +91,7 @@ typedef unsigned short USHORT;
 #define ATOM_LCD_SELFTEST_START (ATOM_DISABLE+5)
 #define ATOM_LCD_SELFTEST_STOP (ATOM_ENABLE+5)
 #define ATOM_ENCODER_INIT (ATOM_DISABLE+7)
+#define ATOM_GET_STATUS (ATOM_DISABLE+8)
 
 #define ATOM_BLANKING 1
 #define ATOM_BLANKING_OFF 0
@@ -114,7 +124,7 @@ typedef unsigned short USHORT;
 #define ATOM_DAC2_CV ATOM_DAC1_CV
 #define ATOM_DAC2_NTSC ATOM_DAC1_NTSC
 #define ATOM_DAC2_PAL ATOM_DAC1_PAL
 
 #define ATOM_PM_ON 0
 #define ATOM_PM_STANDBY 1
 #define ATOM_PM_SUSPEND 2
@@ -134,6 +144,7 @@ typedef unsigned short USHORT;
 #define ATOM_PANEL_MISC_TEMPORAL 0x00000040
 #define ATOM_PANEL_MISC_API_ENABLED 0x00000080
 
+
 #define MEMTYPE_DDR1 "DDR1"
 #define MEMTYPE_DDR2 "DDR2"
 #define MEMTYPE_DDR3 "DDR3"
@@ -145,19 +156,19 @@ typedef unsigned short USHORT;
 
 /* Maximum size of that FireGL flag string */
 
-#define ATOM_FIREGL_FLAG_STRING "FGL" /* Flag used to enable FireGL Support */
-#define ATOM_MAX_SIZE_OF_FIREGL_FLAG_STRING 3 /* sizeof( ATOM_FIREGL_FLAG_STRING ) */
+#define ATOM_FIREGL_FLAG_STRING "FGL" //Flag used to enable FireGL Support
+#define ATOM_MAX_SIZE_OF_FIREGL_FLAG_STRING 3 //sizeof( ATOM_FIREGL_FLAG_STRING )
 
-#define ATOM_FAKE_DESKTOP_STRING "DSK" /* Flag used to enable mobile ASIC on Desktop */
+#define ATOM_FAKE_DESKTOP_STRING "DSK" //Flag used to enable mobile ASIC on Desktop
 #define ATOM_MAX_SIZE_OF_FAKE_DESKTOP_STRING ATOM_MAX_SIZE_OF_FIREGL_FLAG_STRING
 
-#define ATOM_M54T_FLAG_STRING "M54T" /* Flag used to enable M54T Support */
-#define ATOM_MAX_SIZE_OF_M54T_FLAG_STRING 4 /* sizeof( ATOM_M54T_FLAG_STRING ) */
+#define ATOM_M54T_FLAG_STRING "M54T" //Flag used to enable M54T Support
+#define ATOM_MAX_SIZE_OF_M54T_FLAG_STRING 4 //sizeof( ATOM_M54T_FLAG_STRING )
 
 #define HW_ASSISTED_I2C_STATUS_FAILURE 2
 #define HW_ASSISTED_I2C_STATUS_SUCCESS 1
 
 #pragma pack(1) /* BIOS data must use byte aligment */
 
 /* Define offset to location of ROM header. */
 
@@ -165,367 +176,410 @@ typedef unsigned short USHORT;
165#define OFFSET_TO_ATOM_ROM_IMAGE_SIZE 0x00000002L 176#define OFFSET_TO_ATOM_ROM_IMAGE_SIZE 0x00000002L
166 177
167#define OFFSET_TO_ATOMBIOS_ASIC_BUS_MEM_TYPE 0x94 178#define OFFSET_TO_ATOMBIOS_ASIC_BUS_MEM_TYPE 0x94
168#define MAXSIZE_OF_ATOMBIOS_ASIC_BUS_MEM_TYPE 20 /* including the terminator 0x0! */ 179#define MAXSIZE_OF_ATOMBIOS_ASIC_BUS_MEM_TYPE 20 /* including the terminator 0x0! */
169#define OFFSET_TO_GET_ATOMBIOS_STRINGS_NUMBER 0x002f 180#define OFFSET_TO_GET_ATOMBIOS_STRINGS_NUMBER 0x002f
170#define OFFSET_TO_GET_ATOMBIOS_STRINGS_START 0x006e 181#define OFFSET_TO_GET_ATOMBIOS_STRINGS_START 0x006e
171 182
172/* Common header for all ROM Data tables. 183/* Common header for all ROM Data tables.
173 Every table pointed _ATOM_MASTER_DATA_TABLE has this common header. 184 Every table pointed _ATOM_MASTER_DATA_TABLE has this common header.
174 And the pointer actually points to this header. */ 185 And the pointer actually points to this header. */
175 186
176typedef struct _ATOM_COMMON_TABLE_HEADER { 187typedef struct _ATOM_COMMON_TABLE_HEADER
177 USHORT usStructureSize; 188{
178 UCHAR ucTableFormatRevision; /*Change it when the Parser is not backward compatible */ 189 USHORT usStructureSize;
179 UCHAR ucTableContentRevision; /*Change it only when the table needs to change but the firmware */ 190 UCHAR ucTableFormatRevision; /*Change it when the Parser is not backward compatible */
180 /*Image can't be updated, while Driver needs to carry the new table! */ 191 UCHAR ucTableContentRevision; /*Change it only when the table needs to change but the firmware */
181} ATOM_COMMON_TABLE_HEADER; 192 /*Image can't be updated, while Driver needs to carry the new table! */
182 193}ATOM_COMMON_TABLE_HEADER;
183typedef struct _ATOM_ROM_HEADER { 194
184 ATOM_COMMON_TABLE_HEADER sHeader; 195typedef struct _ATOM_ROM_HEADER
185 UCHAR uaFirmWareSignature[4]; /*Signature to distinguish between Atombios and non-atombios, 196{
186 atombios should init it as "ATOM", don't change the position */ 197 ATOM_COMMON_TABLE_HEADER sHeader;
187 USHORT usBiosRuntimeSegmentAddress; 198 UCHAR uaFirmWareSignature[4]; /*Signature to distinguish between Atombios and non-atombios,
188 USHORT usProtectedModeInfoOffset; 199 atombios should init it as "ATOM", don't change the position */
189 USHORT usConfigFilenameOffset; 200 USHORT usBiosRuntimeSegmentAddress;
190 USHORT usCRC_BlockOffset; 201 USHORT usProtectedModeInfoOffset;
191 USHORT usBIOS_BootupMessageOffset; 202 USHORT usConfigFilenameOffset;
192 USHORT usInt10Offset; 203 USHORT usCRC_BlockOffset;
193 USHORT usPciBusDevInitCode; 204 USHORT usBIOS_BootupMessageOffset;
194 USHORT usIoBaseAddress; 205 USHORT usInt10Offset;
195 USHORT usSubsystemVendorID; 206 USHORT usPciBusDevInitCode;
196 USHORT usSubsystemID; 207 USHORT usIoBaseAddress;
197 USHORT usPCI_InfoOffset; 208 USHORT usSubsystemVendorID;
198 USHORT usMasterCommandTableOffset; /*Offset for SW to get all command table offsets, Don't change the position */ 209 USHORT usSubsystemID;
199 USHORT usMasterDataTableOffset; /*Offset for SW to get all data table offsets, Don't change the position */ 210 USHORT usPCI_InfoOffset;
200 UCHAR ucExtendedFunctionCode; 211 USHORT usMasterCommandTableOffset; /*Offset for SW to get all command table offsets, Don't change the position */
201 UCHAR ucReserved; 212 USHORT usMasterDataTableOffset; /*Offset for SW to get all data table offsets, Don't change the position */
202} ATOM_ROM_HEADER; 213 UCHAR ucExtendedFunctionCode;
214 UCHAR ucReserved;
215}ATOM_ROM_HEADER;
203 216
204/*==============================Command Table Portion==================================== */ 217/*==============================Command Table Portion==================================== */
205 218
206#ifdef UEFI_BUILD 219#ifdef UEFI_BUILD
207#define UTEMP USHORT 220 #define UTEMP USHORT
208#define USHORT void* 221 #define USHORT void*
209#endif 222#endif
210 223
211typedef struct _ATOM_MASTER_LIST_OF_COMMAND_TABLES { 224typedef struct _ATOM_MASTER_LIST_OF_COMMAND_TABLES{
212 USHORT ASIC_Init; /* Function Table, used by various SW components,latest version 1.1 */ 225 USHORT ASIC_Init; //Function Table, used by various SW components,latest version 1.1
213 USHORT GetDisplaySurfaceSize; /* Atomic Table, Used by Bios when enabling HW ICON */ 226 USHORT GetDisplaySurfaceSize; //Atomic Table, Used by Bios when enabling HW ICON
214 USHORT ASIC_RegistersInit; /* Atomic Table, indirectly used by various SW components,called from ASIC_Init */ 227 USHORT ASIC_RegistersInit; //Atomic Table, indirectly used by various SW components,called from ASIC_Init
215 USHORT VRAM_BlockVenderDetection; /* Atomic Table, used only by Bios */ 228 USHORT VRAM_BlockVenderDetection; //Atomic Table, used only by Bios
216 USHORT DIGxEncoderControl; /* Only used by Bios */ 229 USHORT DIGxEncoderControl; //Only used by Bios
217 USHORT MemoryControllerInit; /* Atomic Table, indirectly used by various SW components,called from ASIC_Init */ 230 USHORT MemoryControllerInit; //Atomic Table, indirectly used by various SW components,called from ASIC_Init
218 USHORT EnableCRTCMemReq; /* Function Table,directly used by various SW components,latest version 2.1 */ 231 USHORT EnableCRTCMemReq; //Function Table,directly used by various SW components,latest version 2.1
219 USHORT MemoryParamAdjust; /* Atomic Table, indirectly used by various SW components,called from SetMemoryClock if needed */ 232 USHORT MemoryParamAdjust; //Atomic Table, indirectly used by various SW components,called from SetMemoryClock if needed
220 USHORT DVOEncoderControl; /* Function Table,directly used by various SW components,latest version 1.2 */ 233 USHORT DVOEncoderControl; //Function Table,directly used by various SW components,latest version 1.2
221 USHORT GPIOPinControl; /* Atomic Table, only used by Bios */ 234 USHORT GPIOPinControl; //Atomic Table, only used by Bios
222 USHORT SetEngineClock; /*Function Table,directly used by various SW components,latest version 1.1 */ 235 USHORT SetEngineClock; //Function Table,directly used by various SW components,latest version 1.1
223 USHORT SetMemoryClock; /* Function Table,directly used by various SW components,latest version 1.1 */ 236 USHORT SetMemoryClock; //Function Table,directly used by various SW components,latest version 1.1
224 USHORT SetPixelClock; /*Function Table,directly used by various SW components,latest version 1.2 */ 237 USHORT SetPixelClock; //Function Table,directly used by various SW components,latest version 1.2
225 USHORT DynamicClockGating; /* Atomic Table, indirectly used by various SW components,called from ASIC_Init */ 238 USHORT DynamicClockGating; //Atomic Table, indirectly used by various SW components,called from ASIC_Init
226 USHORT ResetMemoryDLL; /* Atomic Table, indirectly used by various SW components,called from SetMemoryClock */ 239 USHORT ResetMemoryDLL; //Atomic Table, indirectly used by various SW components,called from SetMemoryClock
227 USHORT ResetMemoryDevice; /* Atomic Table, indirectly used by various SW components,called from SetMemoryClock */ 240 USHORT ResetMemoryDevice; //Atomic Table, indirectly used by various SW components,called from SetMemoryClock
228 USHORT MemoryPLLInit; 241 USHORT MemoryPLLInit;
229 USHORT AdjustDisplayPll; /* only used by Bios */ 242 USHORT AdjustDisplayPll; //only used by Bios
230 USHORT AdjustMemoryController; /* Atomic Table, indirectly used by various SW components,called from SetMemoryClock */ 243 USHORT AdjustMemoryController; //Atomic Table, indirectly used by various SW components,called from SetMemoryClock
231 USHORT EnableASIC_StaticPwrMgt; /* Atomic Table, only used by Bios */ 244 USHORT EnableASIC_StaticPwrMgt; //Atomic Table, only used by Bios
232 USHORT ASIC_StaticPwrMgtStatusChange; /* Obsolete, only used by Bios */ 245 USHORT ASIC_StaticPwrMgtStatusChange; //Obsolete , only used by Bios
233 USHORT DAC_LoadDetection; /* Atomic Table, directly used by various SW components,latest version 1.2 */ 246 USHORT DAC_LoadDetection; //Atomic Table, directly used by various SW components,latest version 1.2
234 USHORT LVTMAEncoderControl; /* Atomic Table,directly used by various SW components,latest version 1.3 */ 247 USHORT LVTMAEncoderControl; //Atomic Table,directly used by various SW components,latest version 1.3
235 USHORT LCD1OutputControl; /* Atomic Table, directly used by various SW components,latest version 1.1 */ 248 USHORT LCD1OutputControl; //Atomic Table, directly used by various SW components,latest version 1.1
236 USHORT DAC1EncoderControl; /* Atomic Table, directly used by various SW components,latest version 1.1 */ 249 USHORT DAC1EncoderControl; //Atomic Table, directly used by various SW components,latest version 1.1
237 USHORT DAC2EncoderControl; /* Atomic Table, directly used by various SW components,latest version 1.1 */ 250 USHORT DAC2EncoderControl; //Atomic Table, directly used by various SW components,latest version 1.1
238 USHORT DVOOutputControl; /* Atomic Table, directly used by various SW components,latest version 1.1 */ 251 USHORT DVOOutputControl; //Atomic Table, directly used by various SW components,latest version 1.1
239 USHORT CV1OutputControl; /* Atomic Table, directly used by various SW components,latest version 1.1 */ 252 USHORT CV1OutputControl; //Atomic Table, Atomic Table, Obsolete from Ry6xx, use DAC2 Output instead
240 USHORT GetConditionalGoldenSetting; /* only used by Bios */ 253 USHORT GetConditionalGoldenSetting; //only used by Bios
241 USHORT TVEncoderControl; /* Function Table,directly used by various SW components,latest version 1.1 */ 254 USHORT TVEncoderControl; //Function Table,directly used by various SW components,latest version 1.1
242 USHORT TMDSAEncoderControl; /* Atomic Table, directly used by various SW components,latest version 1.3 */ 255 USHORT TMDSAEncoderControl; //Atomic Table, directly used by various SW components,latest version 1.3
243 USHORT LVDSEncoderControl; /* Atomic Table, directly used by various SW components,latest version 1.3 */ 256 USHORT LVDSEncoderControl; //Atomic Table, directly used by various SW components,latest version 1.3
244 USHORT TV1OutputControl; /* Atomic Table, directly used by various SW components,latest version 1.1 */ 257 USHORT TV1OutputControl; //Atomic Table, Obsolete from Ry6xx, use DAC2 Output instead
245 USHORT EnableScaler; /* Atomic Table, used only by Bios */ 258 USHORT EnableScaler; //Atomic Table, used only by Bios
246 USHORT BlankCRTC; /* Atomic Table, directly used by various SW components,latest version 1.1 */ 259 USHORT BlankCRTC; //Atomic Table, directly used by various SW components,latest version 1.1
247 USHORT EnableCRTC; /* Atomic Table, directly used by various SW components,latest version 1.1 */ 260 USHORT EnableCRTC; //Atomic Table, directly used by various SW components,latest version 1.1
248 USHORT GetPixelClock; /* Atomic Table, directly used by various SW components,latest version 1.1 */ 261 USHORT GetPixelClock; //Atomic Table, directly used by various SW components,latest version 1.1
249 USHORT EnableVGA_Render; /* Function Table,directly used by various SW components,latest version 1.1 */ 262 USHORT EnableVGA_Render; //Function Table,directly used by various SW components,latest version 1.1
250 USHORT EnableVGA_Access; /* Obsolete , only used by Bios */ 263 USHORT GetSCLKOverMCLKRatio; //Atomic Table, only used by Bios
251 USHORT SetCRTC_Timing; /* Atomic Table, directly used by various SW components,latest version 1.1 */ 264 USHORT SetCRTC_Timing; //Atomic Table, directly used by various SW components,latest version 1.1
252 USHORT SetCRTC_OverScan; /* Atomic Table, used by various SW components,latest version 1.1 */ 265 USHORT SetCRTC_OverScan; //Atomic Table, used by various SW components,latest version 1.1
253 USHORT SetCRTC_Replication; /* Atomic Table, used only by Bios */ 266 USHORT SetCRTC_Replication; //Atomic Table, used only by Bios
254 USHORT SelectCRTC_Source; /* Atomic Table, directly used by various SW components,latest version 1.1 */ 267 USHORT SelectCRTC_Source; //Atomic Table, directly used by various SW components,latest version 1.1
255 USHORT EnableGraphSurfaces; /* Atomic Table, used only by Bios */ 268 USHORT EnableGraphSurfaces; //Atomic Table, used only by Bios
256 USHORT UpdateCRTC_DoubleBufferRegisters; 269 USHORT UpdateCRTC_DoubleBufferRegisters;
257 USHORT LUT_AutoFill; /* Atomic Table, only used by Bios */ 270 USHORT LUT_AutoFill; //Atomic Table, only used by Bios
258 USHORT EnableHW_IconCursor; /* Atomic Table, only used by Bios */ 271 USHORT EnableHW_IconCursor; //Atomic Table, only used by Bios
259 USHORT GetMemoryClock; /* Atomic Table, directly used by various SW components,latest version 1.1 */ 272 USHORT GetMemoryClock; //Atomic Table, directly used by various SW components,latest version 1.1
260 USHORT GetEngineClock; /* Atomic Table, directly used by various SW components,latest version 1.1 */ 273 USHORT GetEngineClock; //Atomic Table, directly used by various SW components,latest version 1.1
261 USHORT SetCRTC_UsingDTDTiming; /* Atomic Table, directly used by various SW components,latest version 1.1 */ 274 USHORT SetCRTC_UsingDTDTiming; //Atomic Table, directly used by various SW components,latest version 1.1
262 USHORT ExternalEncoderControl; /* Atomic Table, directly used by various SW components,latest version 2.1 */ 275 USHORT ExternalEncoderControl; //Atomic Table, directly used by various SW components,latest version 2.1
263 USHORT LVTMAOutputControl; /* Atomic Table, directly used by various SW components,latest version 1.1 */ 276 USHORT LVTMAOutputControl; //Atomic Table, directly used by various SW components,latest version 1.1
264 USHORT VRAM_BlockDetectionByStrap; /* Atomic Table, used only by Bios */ 277 USHORT VRAM_BlockDetectionByStrap; //Atomic Table, used only by Bios
265 USHORT MemoryCleanUp; /* Atomic Table, only used by Bios */ 278 USHORT MemoryCleanUp; //Atomic Table, only used by Bios
266 USHORT ProcessI2cChannelTransaction; /* Function Table,only used by Bios */ 279 USHORT ProcessI2cChannelTransaction; //Function Table,only used by Bios
267 USHORT WriteOneByteToHWAssistedI2C; /* Function Table,indirectly used by various SW components */ 280 USHORT WriteOneByteToHWAssistedI2C; //Function Table,indirectly used by various SW components
268 USHORT ReadHWAssistedI2CStatus; /* Atomic Table, indirectly used by various SW components */ 281 USHORT ReadHWAssistedI2CStatus; //Atomic Table, indirectly used by various SW components
269 USHORT SpeedFanControl; /* Function Table,indirectly used by various SW components,called from ASIC_Init */ 282 USHORT SpeedFanControl; //Function Table,indirectly used by various SW components,called from ASIC_Init
270 USHORT PowerConnectorDetection; /* Atomic Table, directly used by various SW components,latest version 1.1 */ 283 USHORT PowerConnectorDetection; //Atomic Table, directly used by various SW components,latest version 1.1
271 USHORT MC_Synchronization; /* Atomic Table, indirectly used by various SW components,called from SetMemoryClock */ 284 USHORT MC_Synchronization; //Atomic Table, indirectly used by various SW components,called from SetMemoryClock
272 USHORT ComputeMemoryEnginePLL; /* Atomic Table, indirectly used by various SW components,called from SetMemory/EngineClock */ 285 USHORT ComputeMemoryEnginePLL; //Atomic Table, indirectly used by various SW components,called from SetMemory/EngineClock
273 USHORT MemoryRefreshConversion; /* Atomic Table, indirectly used by various SW components,called from SetMemory or SetEngineClock */ 286 USHORT MemoryRefreshConversion; //Atomic Table, indirectly used by various SW components,called from SetMemory or SetEngineClock
274 USHORT VRAM_GetCurrentInfoBlock; /* Atomic Table, used only by Bios */ 287 USHORT VRAM_GetCurrentInfoBlock; //Atomic Table, used only by Bios
275 USHORT DynamicMemorySettings; /* Atomic Table, indirectly used by various SW components,called from SetMemoryClock */ 288 USHORT DynamicMemorySettings; //Atomic Table, indirectly used by various SW components,called from SetMemoryClock
276 USHORT MemoryTraining; /* Atomic Table, used only by Bios */ 289 USHORT MemoryTraining; //Atomic Table, used only by Bios
277 USHORT EnableSpreadSpectrumOnPPLL; /* Atomic Table, directly used by various SW components,latest version 1.2 */ 290 USHORT EnableSpreadSpectrumOnPPLL; //Atomic Table, directly used by various SW components,latest version 1.2
278 USHORT TMDSAOutputControl; /* Atomic Table, directly used by various SW components,latest version 1.1 */ 291 USHORT TMDSAOutputControl; //Atomic Table, directly used by various SW components,latest version 1.1
279 USHORT SetVoltage; /* Function Table,directly and/or indirectly used by various SW components,latest version 1.1 */ 292 USHORT SetVoltage; //Function Table,directly and/or indirectly used by various SW components,latest version 1.1
280 USHORT DAC1OutputControl; /* Atomic Table, directly used by various SW components,latest version 1.1 */ 293 USHORT DAC1OutputControl; //Atomic Table, directly used by various SW components,latest version 1.1
281 USHORT DAC2OutputControl; /* Atomic Table, directly used by various SW components,latest version 1.1 */ 294 USHORT DAC2OutputControl; //Atomic Table, directly used by various SW components,latest version 1.1
282 USHORT SetupHWAssistedI2CStatus; /* Function Table,only used by Bios, obsolete soon.Switch to use "ReadEDIDFromHWAssistedI2C" */ 295 USHORT SetupHWAssistedI2CStatus; //Function Table,only used by Bios, obsolete soon.Switch to use "ReadEDIDFromHWAssistedI2C"
283 USHORT ClockSource; /* Atomic Table, indirectly used by various SW components,called from ASIC_Init */ 296 USHORT ClockSource; //Atomic Table, indirectly used by various SW components,called from ASIC_Init
284 USHORT MemoryDeviceInit; /* Atomic Table, indirectly used by various SW components,called from SetMemoryClock */ 297 USHORT MemoryDeviceInit; //Atomic Table, indirectly used by various SW components,called from SetMemoryClock
285 USHORT EnableYUV; /* Atomic Table, indirectly used by various SW components,called from EnableVGARender */ 298 USHORT EnableYUV; //Atomic Table, indirectly used by various SW components,called from EnableVGARender
286 USHORT DIG1EncoderControl; /* Atomic Table,directly used by various SW components,latest version 1.1 */ 299 USHORT DIG1EncoderControl; //Atomic Table,directly used by various SW components,latest version 1.1
287 USHORT DIG2EncoderControl; /* Atomic Table,directly used by various SW components,latest version 1.1 */ 300 USHORT DIG2EncoderControl; //Atomic Table,directly used by various SW components,latest version 1.1
288 USHORT DIG1TransmitterControl; /* Atomic Table,directly used by various SW components,latest version 1.1 */ 301 USHORT DIG1TransmitterControl; //Atomic Table,directly used by various SW components,latest version 1.1
289 USHORT DIG2TransmitterControl; /* Atomic Table,directly used by various SW components,latest version 1.1 */ 302 USHORT DIG2TransmitterControl; //Atomic Table,directly used by various SW components,latest version 1.1
290 USHORT ProcessAuxChannelTransaction; /* Function Table,only used by Bios */ 303 USHORT ProcessAuxChannelTransaction; //Function Table,only used by Bios
291 USHORT DPEncoderService; /* Function Table,only used by Bios */ 304 USHORT DPEncoderService; //Function Table,only used by Bios
292} ATOM_MASTER_LIST_OF_COMMAND_TABLES; 305}ATOM_MASTER_LIST_OF_COMMAND_TABLES;
293 306
294/* For backward compatible */ 307// For backward compatible
295#define ReadEDIDFromHWAssistedI2C ProcessI2cChannelTransaction 308#define ReadEDIDFromHWAssistedI2C ProcessI2cChannelTransaction
296#define UNIPHYTransmitterControl DIG1TransmitterControl 309#define UNIPHYTransmitterControl DIG1TransmitterControl
297#define LVTMATransmitterControl DIG2TransmitterControl 310#define LVTMATransmitterControl DIG2TransmitterControl
298#define SetCRTC_DPM_State GetConditionalGoldenSetting 311#define SetCRTC_DPM_State GetConditionalGoldenSetting
299#define SetUniphyInstance ASIC_StaticPwrMgtStatusChange 312#define SetUniphyInstance ASIC_StaticPwrMgtStatusChange
313#define HPDInterruptService ReadHWAssistedI2CStatus
314#define EnableVGA_Access GetSCLKOverMCLKRatio
300 315
301typedef struct _ATOM_MASTER_COMMAND_TABLE { 316typedef struct _ATOM_MASTER_COMMAND_TABLE
302 ATOM_COMMON_TABLE_HEADER sHeader; 317{
303 ATOM_MASTER_LIST_OF_COMMAND_TABLES ListOfCommandTables; 318 ATOM_COMMON_TABLE_HEADER sHeader;
304} ATOM_MASTER_COMMAND_TABLE; 319 ATOM_MASTER_LIST_OF_COMMAND_TABLES ListOfCommandTables;
305 320}ATOM_MASTER_COMMAND_TABLE;
306/****************************************************************************/ 321
307/* Structures used in every command table */ 322/****************************************************************************/
308/****************************************************************************/ 323// Structures used in every command table
309typedef struct _ATOM_TABLE_ATTRIBUTE { 324/****************************************************************************/
325typedef struct _ATOM_TABLE_ATTRIBUTE
326{
310#if ATOM_BIG_ENDIAN 327#if ATOM_BIG_ENDIAN
311 USHORT UpdatedByUtility:1; /* [15]=Table updated by utility flag */ 328 USHORT UpdatedByUtility:1; //[15]=Table updated by utility flag
312 USHORT PS_SizeInBytes:7; /* [14:8]=Size of parameter space in Bytes (multiple of a dword), */ 329 USHORT PS_SizeInBytes:7; //[14:8]=Size of parameter space in Bytes (multiple of a dword),
313 USHORT WS_SizeInBytes:8; /* [7:0]=Size of workspace in Bytes (in multiple of a dword), */ 330 USHORT WS_SizeInBytes:8; //[7:0]=Size of workspace in Bytes (in multiple of a dword),
314#else 331#else
315 USHORT WS_SizeInBytes:8; /* [7:0]=Size of workspace in Bytes (in multiple of a dword), */ 332 USHORT WS_SizeInBytes:8; //[7:0]=Size of workspace in Bytes (in multiple of a dword),
316 USHORT PS_SizeInBytes:7; /* [14:8]=Size of parameter space in Bytes (multiple of a dword), */ 333 USHORT PS_SizeInBytes:7; //[14:8]=Size of parameter space in Bytes (multiple of a dword),
317 USHORT UpdatedByUtility:1; /* [15]=Table updated by utility flag */ 334 USHORT UpdatedByUtility:1; //[15]=Table updated by utility flag
318#endif 335#endif
319} ATOM_TABLE_ATTRIBUTE; 336}ATOM_TABLE_ATTRIBUTE;
320
321typedef union _ATOM_TABLE_ATTRIBUTE_ACCESS {
322 ATOM_TABLE_ATTRIBUTE sbfAccess;
323 USHORT susAccess;
324} ATOM_TABLE_ATTRIBUTE_ACCESS;
325 337
326/****************************************************************************/ 338typedef union _ATOM_TABLE_ATTRIBUTE_ACCESS
327/* Common header for all command tables. */ 339{
328/* Every table pointed by _ATOM_MASTER_COMMAND_TABLE has this common header. */ 340 ATOM_TABLE_ATTRIBUTE sbfAccess;
329/* And the pointer actually points to this header. */ 341 USHORT susAccess;
330/****************************************************************************/ 342}ATOM_TABLE_ATTRIBUTE_ACCESS;

/****************************************************************************/
// Common header for all command tables.
// Every table pointed to by _ATOM_MASTER_COMMAND_TABLE has this common header.
// And the pointer actually points to this header.
/****************************************************************************/
typedef struct _ATOM_COMMON_ROM_COMMAND_TABLE_HEADER
{
  ATOM_COMMON_TABLE_HEADER CommonHeader;
  ATOM_TABLE_ATTRIBUTE     TableAttribute;
}ATOM_COMMON_ROM_COMMAND_TABLE_HEADER;

/****************************************************************************/
// Structures used by ComputeMemoryEnginePLLTable
/****************************************************************************/
#define COMPUTE_MEMORY_PLL_PARAM    1
#define COMPUTE_ENGINE_PLL_PARAM    2

typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS
{
  ULONG ulClock;        //When returned, this is the re-calculated clock based on the given Fb_div, Post_Div and ref_div
  UCHAR ucAction;       //0:reserved //1:Memory //2:Engine
  UCHAR ucReserved;     //may expand to return a larger Fbdiv later
  UCHAR ucFbDiv;        //return value
  UCHAR ucPostDiv;      //return value
}COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS;

typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V2
{
  ULONG  ulClock;       //When returned, [23:0] holds the real clock
  UCHAR  ucAction;      //0:reserved; COMPUTE_MEMORY_PLL_PARAM:Memory; COMPUTE_ENGINE_PLL_PARAM:Engine. Returns ref_div to be written to register
  USHORT usFbDiv;       //returns the Feedback value to be written to register
  UCHAR  ucPostDiv;     //returns the post div to be written to register
}COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V2;
#define COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_PS_ALLOCATION   COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS

#define SET_CLOCK_FREQ_MASK                     0x00FFFFFF  //Clock change tables only take bits [23:0] as the requested clock value
#define USE_NON_BUS_CLOCK_MASK                  0x01000000  //Applicable to both memory and engine clock change: when set, use another clock as the temporary clock (engine uses memory and vice versa)
#define USE_MEMORY_SELF_REFRESH_MASK            0x02000000  //Only applicable to memory clock change: when set, use memory self refresh during the clock transition
#define SKIP_INTERNAL_MEMORY_PARAMETER_CHANGE   0x04000000  //Only applicable to memory clock change: when set, the table will skip the predefined internal memory parameter change
#define FIRST_TIME_CHANGE_CLOCK                 0x08000000  //Applicable to both memory and engine clock change: when set, this is the first clock change after ASIC bootup
#define SKIP_SW_PROGRAM_PLL                     0x10000000  //Applicable to both memory and engine clock change: when set, the table will not program SPLL/MPLL
#define USE_SS_ENABLED_PIXEL_CLOCK              USE_NON_BUS_CLOCK_MASK

#define b3USE_NON_BUS_CLOCK_MASK                  0x01      //Applicable to both memory and engine clock change: when set, use another clock as the temporary clock (engine uses memory and vice versa)
#define b3USE_MEMORY_SELF_REFRESH                 0x02      //Only applicable to memory clock change: when set, use memory self refresh during the clock transition
#define b3SKIP_INTERNAL_MEMORY_PARAMETER_CHANGE   0x04      //Only applicable to memory clock change: when set, the table will skip the predefined internal memory parameter change
#define b3FIRST_TIME_CHANGE_CLOCK                 0x08      //Applicable to both memory and engine clock change: when set, this is the first clock change after ASIC bootup
#define b3SKIP_SW_PROGRAM_PLL                     0x10      //Applicable to both memory and engine clock change: when set, the table will not program SPLL/MPLL

typedef struct _ATOM_COMPUTE_CLOCK_FREQ
{
#if ATOM_BIG_ENDIAN
  ULONG ulComputeClockFlag:8;       // =1: COMPUTE_MEMORY_PLL_PARAM, =2: COMPUTE_ENGINE_PLL_PARAM
  ULONG ulClockFreq:24;             // in units of 10kHz
#else
  ULONG ulClockFreq:24;             // in units of 10kHz
  ULONG ulComputeClockFlag:8;       // =1: COMPUTE_MEMORY_PLL_PARAM, =2: COMPUTE_ENGINE_PLL_PARAM
#endif
}ATOM_COMPUTE_CLOCK_FREQ;

typedef struct _ATOM_S_MPLL_FB_DIVIDER
{
  USHORT usFbDivFrac;
  USHORT usFbDiv;
}ATOM_S_MPLL_FB_DIVIDER;

typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V3
{
  union
  {
    ATOM_COMPUTE_CLOCK_FREQ  ulClock;     //Input Parameter
    ATOM_S_MPLL_FB_DIVIDER   ulFbDiv;     //Output Parameter
  };
  UCHAR ucRefDiv;                         //Output Parameter
  UCHAR ucPostDiv;                        //Output Parameter
  UCHAR ucCntlFlag;                       //Output Parameter
  UCHAR ucReserved;
}COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V3;

// ucCntlFlag
#define ATOM_PLL_CNTL_FLAG_PLL_POST_DIV_EN    1
#define ATOM_PLL_CNTL_FLAG_MPLL_VCO_MODE      2
#define ATOM_PLL_CNTL_FLAG_FRACTION_DISABLE   4
#define ATOM_PLL_CNTL_FLAG_SPLL_ISPARE_9      8

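/* Illustrative sketch (not part of the original header): how a caller might
 * drive ComputeMemoryEnginePLLTable with the V3 block. The union carries the
 * target clock on input and the feedback divider on output; atom_exec() is a
 * hypothetical stand-in for the driver's command-table executor (the radeon
 * driver performs this call through atom_execute_table()). */
extern void atom_exec(int table_index, void *params);

static void compute_engine_pll_10khz(ULONG clock_10khz, int table_index)
{
  COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V3 args = {0};

  /* input: select the engine PLL and pack the 24-bit clock, 10 kHz units */
  args.ulClock.ulComputeClockFlag = COMPUTE_ENGINE_PLL_PARAM;
  args.ulClock.ulClockFreq = clock_10khz & SET_CLOCK_FREQ_MASK;

  atom_exec(table_index, &args);

  /* output: args.ulFbDiv, args.ucRefDiv, args.ucPostDiv and args.ucCntlFlag
   * now hold the divider values to program */
  if (args.ucCntlFlag & ATOM_PLL_CNTL_FLAG_FRACTION_DISABLE) {
    /* args.ulFbDiv.usFbDivFrac is not meaningful in this case */
  }
}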

// V4 is only used for APUs whose PLL is outside the GPU
typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V4
{
#if ATOM_BIG_ENDIAN
  ULONG ucPostDiv:8;        //return parameter: post divider which is programmed to the register directly
  ULONG ulClock:24;         //Input = target clock, output = actual clock
#else
  ULONG ulClock:24;         //Input = target clock, output = actual clock
  ULONG ucPostDiv:8;        //return parameter: post divider which is programmed to the register directly
#endif
}COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V4;

typedef struct _DYNAMICE_MEMORY_SETTINGS_PARAMETER
{
  ATOM_COMPUTE_CLOCK_FREQ ulClock;
  ULONG ulReserved[2];
}DYNAMICE_MEMORY_SETTINGS_PARAMETER;

typedef struct _DYNAMICE_ENGINE_SETTINGS_PARAMETER
{
  ATOM_COMPUTE_CLOCK_FREQ ulClock;
  ULONG ulMemoryClock;
  ULONG ulReserved;
}DYNAMICE_ENGINE_SETTINGS_PARAMETER;

/****************************************************************************/
// Structures used by SetEngineClockTable
/****************************************************************************/
typedef struct _SET_ENGINE_CLOCK_PARAMETERS
{
  ULONG ulTargetEngineClock;          //In 10Khz unit
}SET_ENGINE_CLOCK_PARAMETERS;

typedef struct _SET_ENGINE_CLOCK_PS_ALLOCATION
{
  ULONG ulTargetEngineClock;          //In 10Khz unit
  COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_PS_ALLOCATION sReserved;
}SET_ENGINE_CLOCK_PS_ALLOCATION;

/****************************************************************************/
// Structures used by SetMemoryClockTable
/****************************************************************************/
typedef struct _SET_MEMORY_CLOCK_PARAMETERS
{
  ULONG ulTargetMemoryClock;          //In 10Khz unit
}SET_MEMORY_CLOCK_PARAMETERS;

typedef struct _SET_MEMORY_CLOCK_PS_ALLOCATION
{
  ULONG ulTargetMemoryClock;          //In 10Khz unit
  COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_PS_ALLOCATION sReserved;
}SET_MEMORY_CLOCK_PS_ALLOCATION;

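/* Editorial note: the *_PS_ALLOCATION variants exist because a command table
 * may internally call other tables that reuse the same parameter space.
 * SetEngineClockTable/SetMemoryClockTable can invoke
 * ComputeMemoryEnginePLLTable, so the caller reserves room for its
 * parameters via sReserved. A hypothetical sketch: */
static void set_memory_clock_10khz(ULONG clock_10khz)
{
  SET_MEMORY_CLOCK_PS_ALLOCATION args = {0};

  args.ulTargetMemoryClock = clock_10khz;   /* e.g. 40000 => 400 MHz */
  /* args.sReserved needs no initialization; it only pads the parameter
   * space for the nested PLL-compute call. Hand &args to the table
   * executor (see atom_exec() above). */
}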
/****************************************************************************/
// Structures used by ASIC_Init.ctb
/****************************************************************************/
typedef struct _ASIC_INIT_PARAMETERS
{
  ULONG ulDefaultEngineClock;         //In 10Khz unit
  ULONG ulDefaultMemoryClock;         //In 10Khz unit
}ASIC_INIT_PARAMETERS;

typedef struct _ASIC_INIT_PS_ALLOCATION
{
  ASIC_INIT_PARAMETERS sASICInitClocks;
  SET_ENGINE_CLOCK_PS_ALLOCATION sReserved;   //Caller doesn't need to init this structure
}ASIC_INIT_PS_ALLOCATION;

/****************************************************************************/
// Structure used by DynamicClockGatingTable.ctb
/****************************************************************************/
typedef struct _DYNAMIC_CLOCK_GATING_PARAMETERS
{
  UCHAR ucEnable;                     // ATOM_ENABLE or ATOM_DISABLE
  UCHAR ucPadding[3];
}DYNAMIC_CLOCK_GATING_PARAMETERS;
#define DYNAMIC_CLOCK_GATING_PS_ALLOCATION  DYNAMIC_CLOCK_GATING_PARAMETERS

/****************************************************************************/
// Structure used by EnableASIC_StaticPwrMgtTable.ctb
/****************************************************************************/
typedef struct _ENABLE_ASIC_STATIC_PWR_MGT_PARAMETERS
{
  UCHAR ucEnable;                     // ATOM_ENABLE or ATOM_DISABLE
  UCHAR ucPadding[3];
}ENABLE_ASIC_STATIC_PWR_MGT_PARAMETERS;
#define ENABLE_ASIC_STATIC_PWR_MGT_PS_ALLOCATION  ENABLE_ASIC_STATIC_PWR_MGT_PARAMETERS

/****************************************************************************/
// Structures used by DAC_LoadDetectionTable.ctb
/****************************************************************************/
typedef struct _DAC_LOAD_DETECTION_PARAMETERS
{
  USHORT usDeviceID;                  //{ATOM_DEVICE_CRTx_SUPPORT,ATOM_DEVICE_TVx_SUPPORT,ATOM_DEVICE_CVx_SUPPORT}
  UCHAR  ucDacType;                   //{ATOM_DAC_A, ATOM_DAC_B, ATOM_EXT_DAC}
  UCHAR  ucMisc;                      //Valid only when table revision is 1.3 and above
}DAC_LOAD_DETECTION_PARAMETERS;

// DAC_LOAD_DETECTION_PARAMETERS.ucMisc
#define DAC_LOAD_MISC_YPrPb           0x01

typedef struct _DAC_LOAD_DETECTION_PS_ALLOCATION
{
  DAC_LOAD_DETECTION_PARAMETERS sDacload;
  ULONG Reserved[2];                  // Don't set this one; allocation for the EXT DAC
}DAC_LOAD_DETECTION_PS_ALLOCATION;

/****************************************************************************/
// Structures used by DAC1EncoderControlTable.ctb and DAC2EncoderControlTable.ctb
/****************************************************************************/
typedef struct _DAC_ENCODER_CONTROL_PARAMETERS
{
  USHORT usPixelClock;                // in 10KHz; for BIOS convenience
  UCHAR  ucDacStandard;               // See definition of ATOM_DACx_xxx; for DEC3.0, bit 7 is used as an internal flag to indicate DAC2 (==1) or DAC1 (==0)
  UCHAR  ucAction;                    // 0: turn off encoder
                                      // 1: setup and turn on encoder
                                      // 7: ATOM_ENCODER_INIT Initialize DAC
}DAC_ENCODER_CONTROL_PARAMETERS;

#define DAC_ENCODER_CONTROL_PS_ALLOCATION  DAC_ENCODER_CONTROL_PARAMETERS

/****************************************************************************/
// Structures used by DIG1EncoderControlTable
//                    DIG2EncoderControlTable
//                    ExternalEncoderControlTable
/****************************************************************************/
typedef struct _DIG_ENCODER_CONTROL_PARAMETERS
{
  USHORT usPixelClock;      // in 10KHz; for BIOS convenience
  UCHAR  ucConfig;
                            // [2] Link Select:
                            // =0: PHY linkA if bfLane<3
                            // =1: PHY linkB if bfLanes<3
                            // =0: PHY linkA+B if bfLanes=3
                            // [3] Transmitter Sel
                            // =0: UNIPHY or PCIEPHY
                            // =1: LVTMA
  UCHAR  ucAction;          // =0: turn off encoder
                            // =1: turn on encoder
  UCHAR  ucEncoderMode;
                            // =0: DP encoder
                            // =1: LVDS encoder
                            // =2: DVI encoder
                            // =3: HDMI encoder
                            // =4: SDVO encoder
  UCHAR  ucLaneNum;         // how many lanes to enable
  UCHAR  ucReserved[2];
}DIG_ENCODER_CONTROL_PARAMETERS;
#define DIG_ENCODER_CONTROL_PS_ALLOCATION   DIG_ENCODER_CONTROL_PARAMETERS
#define EXTERNAL_ENCODER_CONTROL_PARAMETER  DIG_ENCODER_CONTROL_PARAMETERS

//ucConfig
#define ATOM_ENCODER_CONFIG_DPLINKRATE_MASK       0x01
#define ATOM_ENCODER_CONFIG_DPLINKRATE_1_62GHZ    0x00
#define ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ    0x01
@@ -539,52 +593,57 @@ typedef struct _DIG_ENCODER_CONTROL_PARAMETERS {
#define ATOM_ENCODER_CONFIG_LVTMA                 0x08
#define ATOM_ENCODER_CONFIG_TRANSMITTER1          0x00
#define ATOM_ENCODER_CONFIG_TRANSMITTER2          0x08
#define ATOM_ENCODER_CONFIG_DIGB                  0x80  // VBIOS internal use only; outside SW should set this bit to 0
// ucAction
// ATOM_ENABLE:  Enable Encoder
// ATOM_DISABLE: Disable Encoder

//ucEncoderMode
#define ATOM_ENCODER_MODE_DP                      0
#define ATOM_ENCODER_MODE_LVDS                    1
#define ATOM_ENCODER_MODE_DVI                     2
#define ATOM_ENCODER_MODE_HDMI                    3
#define ATOM_ENCODER_MODE_SDVO                    4
#define ATOM_ENCODER_MODE_DP_AUDIO                5
#define ATOM_ENCODER_MODE_TV                      13
#define ATOM_ENCODER_MODE_CV                      14
#define ATOM_ENCODER_MODE_CRT                     15

typedef struct _ATOM_DIG_ENCODER_CONFIG_V2
{
#if ATOM_BIG_ENDIAN
    UCHAR ucReserved1:2;
    UCHAR ucTransmitterSel:2;     // =0: UniphyAB, =1: UniphyCD, =2: UniphyEF
    UCHAR ucLinkSel:1;            // =0: linkA/C/E, =1: linkB/D/F
    UCHAR ucReserved:1;
    UCHAR ucDPLinkRate:1;         // =0: 1.62GHz, =1: 2.7GHz
#else
    UCHAR ucDPLinkRate:1;         // =0: 1.62GHz, =1: 2.7GHz
    UCHAR ucReserved:1;
    UCHAR ucLinkSel:1;            // =0: linkA/C/E, =1: linkB/D/F
    UCHAR ucTransmitterSel:2;     // =0: UniphyAB, =1: UniphyCD, =2: UniphyEF
    UCHAR ucReserved1:2;
#endif
}ATOM_DIG_ENCODER_CONFIG_V2;

typedef struct _DIG_ENCODER_CONTROL_PARAMETERS_V2
{
  USHORT usPixelClock;            // in 10KHz; for BIOS convenience
  ATOM_DIG_ENCODER_CONFIG_V2 acConfig;
  UCHAR  ucAction;
  UCHAR  ucEncoderMode;
                                  // =0: DP encoder
                                  // =1: LVDS encoder
                                  // =2: DVI encoder
                                  // =3: HDMI encoder
                                  // =4: SDVO encoder
  UCHAR  ucLaneNum;               // how many lanes to enable
  UCHAR  ucStatus;                // = DP_LINK_TRAINING_COMPLETE or DP_LINK_TRAINING_INCOMPLETE; only used by the VBIOS with command ATOM_ENCODER_CMD_QUERY_DP_LINK_TRAINING_STATUS
  UCHAR  ucReserved;
}DIG_ENCODER_CONTROL_PARAMETERS_V2;

//ucConfig
#define ATOM_ENCODER_CONFIG_V2_DPLINKRATE_MASK            0x01
#define ATOM_ENCODER_CONFIG_V2_DPLINKRATE_1_62GHZ         0x00
#define ATOM_ENCODER_CONFIG_V2_DPLINKRATE_2_70GHZ         0x01
@@ -596,58 +655,122 @@ typedef struct _DIG_ENCODER_CONTROL_PARAMETERS_V2 {
#define ATOM_ENCODER_CONFIG_V2_TRANSMITTER2               0x08
#define ATOM_ENCODER_CONFIG_V2_TRANSMITTER3               0x10

// ucAction:
// ATOM_DISABLE
// ATOM_ENABLE
#define ATOM_ENCODER_CMD_DP_LINK_TRAINING_START           0x08
#define ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN1        0x09
#define ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN2        0x0a
#define ATOM_ENCODER_CMD_DP_LINK_TRAINING_COMPLETE        0x0b
#define ATOM_ENCODER_CMD_DP_VIDEO_OFF                     0x0c
#define ATOM_ENCODER_CMD_DP_VIDEO_ON                      0x0d
#define ATOM_ENCODER_CMD_QUERY_DP_LINK_TRAINING_STATUS    0x0e
#define ATOM_ENCODER_CMD_SETUP                            0x0f

// ucStatus
#define ATOM_ENCODER_STATUS_LINK_TRAINING_COMPLETE        0x10
#define ATOM_ENCODER_STATUS_LINK_TRAINING_INCOMPLETE      0x00

675typedef struct _ATOM_DIG_ENCODER_CONFIG_V3
676{
677#if ATOM_BIG_ENDIAN
678 UCHAR ucReserved1:1;
679 UCHAR ucDigSel:3; // =0: DIGA/B/C/D/E/F
680 UCHAR ucReserved:3;
681 UCHAR ucDPLinkRate:1; // =0: 1.62Ghz, =1: 2.7Ghz
682#else
683 UCHAR ucDPLinkRate:1; // =0: 1.62Ghz, =1: 2.7Ghz
684 UCHAR ucReserved:3;
685 UCHAR ucDigSel:3; // =0: DIGA/B/C/D/E/F
686 UCHAR ucReserved1:1;
687#endif
688}ATOM_DIG_ENCODER_CONFIG_V3;
689
690#define ATOM_ENCODER_CONFIG_V3_ENCODER_SEL 0x70
691
692
693typedef struct _DIG_ENCODER_CONTROL_PARAMETERS_V3
694{
695 USHORT usPixelClock; // in 10KHz; for bios convenient
696 ATOM_DIG_ENCODER_CONFIG_V3 acConfig;
697 UCHAR ucAction;
698 UCHAR ucEncoderMode;
699 // =0: DP encoder
700 // =1: LVDS encoder
701 // =2: DVI encoder
702 // =3: HDMI encoder
703 // =4: SDVO encoder
704 // =5: DP audio
705 UCHAR ucLaneNum; // how many lanes to enable
706 UCHAR ucBitPerColor; // only valid for DP mode when ucAction = ATOM_ENCODER_CMD_SETUP
707 UCHAR ucReserved;
708}DIG_ENCODER_CONTROL_PARAMETERS_V3;
709
710
711// define ucBitPerColor:
712#define PANEL_BPC_UNDEFINE 0x00
713#define PANEL_6BIT_PER_COLOR 0x01
714#define PANEL_8BIT_PER_COLOR 0x02
715#define PANEL_10BIT_PER_COLOR 0x03
716#define PANEL_12BIT_PER_COLOR 0x04
717#define PANEL_16BIT_PER_COLOR 0x05
718
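/* Illustrative sketch (not part of the original header): a V3 SETUP call
 * selects the DIG block through acConfig.ucDigSel and, for DP only, the
 * panel depth through ucBitPerColor. The executor wrapper is hypothetical. */
extern void dig_encoder_v3_exec(DIG_ENCODER_CONTROL_PARAMETERS_V3 *args);

static void dig_v3_setup_dp(UCHAR dig_block, USHORT pclk_10khz, UCHAR lane_num)
{
  DIG_ENCODER_CONTROL_PARAMETERS_V3 args = {0};

  args.usPixelClock = pclk_10khz;
  args.acConfig.ucDigSel = dig_block;        /* 0..5 => DIGA..DIGF */
  args.ucAction = ATOM_ENCODER_CMD_SETUP;
  args.ucEncoderMode = ATOM_ENCODER_MODE_DP;
  args.ucLaneNum = lane_num;
  args.ucBitPerColor = PANEL_8BIT_PER_COLOR; /* only valid with CMD_SETUP */
  dig_encoder_v3_exec(&args);
}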
/****************************************************************************/
// Structures used by UNIPHYTransmitterControlTable
//                    LVTMATransmitterControlTable
//                    DVOOutputControlTable
/****************************************************************************/
typedef struct _ATOM_DP_VS_MODE
{
  UCHAR ucLaneSel;
  UCHAR ucLaneSet;
}ATOM_DP_VS_MODE;

typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS
{
  union
  {
    USHORT usPixelClock;      // in 10KHz; for BIOS convenience
    USHORT usInitInfo;        // when initializing uniphy, the lower 8 bits hold the connector type defined in objectid.h
    ATOM_DP_VS_MODE asMode;   // DP voltage swing mode
  };
  UCHAR ucConfig;
                              // [0]=0: 4 lane Link,
                              //    =1: 8 lane Link (Dual Link TMDS)
                              // [1]=0: InCoherent mode
                              //    =1: Coherent Mode
                              // [2] Link Select:
                              // =0: PHY linkA if bfLane<3
                              // =1: PHY linkB if bfLanes<3
                              // =0: PHY linkA+B if bfLanes=3
                              // [5:4] PCIE lane Sel
                              // =0: lane 0~3 or 0~7
                              // =1: lane 4~7
                              // =2: lane 8~11 or 8~15
                              // =3: lane 12~15
  UCHAR ucAction;             // =0: turn off encoder
                              // =1: turn on encoder
  UCHAR ucReserved[4];
}DIG_TRANSMITTER_CONTROL_PARAMETERS;

#define DIG_TRANSMITTER_CONTROL_PS_ALLOCATION     DIG_TRANSMITTER_CONTROL_PARAMETERS

//ucInitInfo
#define ATOM_TRAMITTER_INITINFO_CONNECTOR_MASK    0x00ff

//ucConfig
#define ATOM_TRANSMITTER_CONFIG_8LANE_LINK        0x01
#define ATOM_TRANSMITTER_CONFIG_COHERENT          0x02
#define ATOM_TRANSMITTER_CONFIG_LINK_SEL_MASK     0x04
#define ATOM_TRANSMITTER_CONFIG_LINKA             0x00
#define ATOM_TRANSMITTER_CONFIG_LINKB             0x04
#define ATOM_TRANSMITTER_CONFIG_LINKA_B           0x00
#define ATOM_TRANSMITTER_CONFIG_LINKB_A           0x04

#define ATOM_TRANSMITTER_CONFIG_ENCODER_SEL_MASK  0x08  // only used when ATOM_TRANSMITTER_ACTION_ENABLE
#define ATOM_TRANSMITTER_CONFIG_DIG1_ENCODER      0x00  // only used when ATOM_TRANSMITTER_ACTION_ENABLE
#define ATOM_TRANSMITTER_CONFIG_DIG2_ENCODER      0x08  // only used when ATOM_TRANSMITTER_ACTION_ENABLE

#define ATOM_TRANSMITTER_CONFIG_CLKSRC_MASK       0x30
#define ATOM_TRANSMITTER_CONFIG_CLKSRC_PPLL       0x00
@@ -661,7 +784,7 @@ typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS {
#define ATOM_TRANSMITTER_CONFIG_LANE_8_15         0x80
#define ATOM_TRANSMITTER_CONFIG_LANE_12_15        0xc0

//ucAction
#define ATOM_TRANSMITTER_ACTION_DISABLE           0
#define ATOM_TRANSMITTER_ACTION_ENABLE            1
#define ATOM_TRANSMITTER_ACTION_LCD_BLOFF         2
@@ -674,93 +797,168 @@ typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS {
#define ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT     9
#define ATOM_TRANSMITTER_ACTION_SETUP             10
#define ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH      11
#define ATOM_TRANSMITTER_ACTION_POWER_ON          12
#define ATOM_TRANSMITTER_ACTION_POWER_OFF         13

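/* Illustrative sketch (not part of the original header): the leading union
 * of DIG_TRANSMITTER_CONTROL_PARAMETERS is interpreted according to
 * ucAction; an enable call passes the pixel clock, while an init call would
 * pass the connector id in usInitInfo instead. transmitter_exec() is a
 * hypothetical executor wrapper. */
extern void transmitter_exec(DIG_TRANSMITTER_CONTROL_PARAMETERS *args);

static void transmitter_enable_sketch(USHORT pclk_10khz)
{
  DIG_TRANSMITTER_CONTROL_PARAMETERS args = {0};

  args.ucAction = ATOM_TRANSMITTER_ACTION_ENABLE;
  args.usPixelClock = pclk_10khz;            /* union member used for enable */
  args.ucConfig = ATOM_TRANSMITTER_CONFIG_COHERENT |
                  ATOM_TRANSMITTER_CONFIG_DIG1_ENCODER;
  transmitter_exec(&args);
}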
// The following are used for DigTransmitterControlTable ver1.2
typedef struct _ATOM_DIG_TRANSMITTER_CONFIG_V2
{
#if ATOM_BIG_ENDIAN
  UCHAR ucTransmitterSel:2;     //bit7:6: =0 Dig Transmitter 1 ( Uniphy AB )
                                //        =1 Dig Transmitter 2 ( Uniphy CD )
                                //        =2 Dig Transmitter 3 ( Uniphy EF )
  UCHAR ucReserved:1;
  UCHAR fDPConnector:1;         //bit4=0: DP connector  =1: None DP connector
  UCHAR ucEncoderSel:1;         //bit3=0: Data/Clk path source from DIGA ( DIG inst0 ). =1: Data/clk path source from DIGB ( DIG inst1 )
  UCHAR ucLinkSel:1;            //bit2=0: Uniphy LINKA or C or E when fDualLinkConnector=0. When fDualLinkConnector=1, the master link of the dual link is A or C or E
                                //    =1: Uniphy LINKB or D or F when fDualLinkConnector=0. When fDualLinkConnector=1, the master link of the dual link is B or D or F
  UCHAR fCoherentMode:1;        //bit1=1: Coherent Mode ( for DVI/HDMI mode )
  UCHAR fDualLinkConnector:1;   //bit0=1: Dual Link DVI connector
#else
  UCHAR fDualLinkConnector:1;   //bit0=1: Dual Link DVI connector
  UCHAR fCoherentMode:1;        //bit1=1: Coherent Mode ( for DVI/HDMI mode )
  UCHAR ucLinkSel:1;            //bit2=0: Uniphy LINKA or C or E when fDualLinkConnector=0. When fDualLinkConnector=1, the master link of the dual link is A or C or E
                                //    =1: Uniphy LINKB or D or F when fDualLinkConnector=0. When fDualLinkConnector=1, the master link of the dual link is B or D or F
  UCHAR ucEncoderSel:1;         //bit3=0: Data/Clk path source from DIGA ( DIG inst0 ). =1: Data/clk path source from DIGB ( DIG inst1 )
  UCHAR fDPConnector:1;         //bit4=0: DP connector  =1: None DP connector
  UCHAR ucReserved:1;
  UCHAR ucTransmitterSel:2;     //bit7:6: =0 Dig Transmitter 1 ( Uniphy AB )
                                //        =1 Dig Transmitter 2 ( Uniphy CD )
                                //        =2 Dig Transmitter 3 ( Uniphy EF )
#endif
}ATOM_DIG_TRANSMITTER_CONFIG_V2;

//ucConfig
//Bit0
#define ATOM_TRANSMITTER_CONFIG_V2_DUAL_LINK_CONNECTOR    0x01

//Bit1
#define ATOM_TRANSMITTER_CONFIG_V2_COHERENT               0x02

//Bit2
#define ATOM_TRANSMITTER_CONFIG_V2_LINK_SEL_MASK          0x04
#define ATOM_TRANSMITTER_CONFIG_V2_LINKA                  0x00
#define ATOM_TRANSMITTER_CONFIG_V2_LINKB                  0x04

//Bit3
#define ATOM_TRANSMITTER_CONFIG_V2_ENCODER_SEL_MASK       0x08
#define ATOM_TRANSMITTER_CONFIG_V2_DIG1_ENCODER           0x00  // only used when ucAction == ATOM_TRANSMITTER_ACTION_ENABLE or ATOM_TRANSMITTER_ACTION_SETUP
#define ATOM_TRANSMITTER_CONFIG_V2_DIG2_ENCODER           0x08  // only used when ucAction == ATOM_TRANSMITTER_ACTION_ENABLE or ATOM_TRANSMITTER_ACTION_SETUP

//Bit4
#define ATOM_TRASMITTER_CONFIG_V2_DP_CONNECTOR            0x10

//Bit7:6
#define ATOM_TRANSMITTER_CONFIG_V2_TRANSMITTER_SEL_MASK   0xC0
#define ATOM_TRANSMITTER_CONFIG_V2_TRANSMITTER1           0x00  //AB
#define ATOM_TRANSMITTER_CONFIG_V2_TRANSMITTER2           0x40  //CD
#define ATOM_TRANSMITTER_CONFIG_V2_TRANSMITTER3           0x80  //EF

typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS_V2
{
  union
  {
    USHORT usPixelClock;      // in 10KHz; for BIOS convenience
    USHORT usInitInfo;        // when initializing uniphy, the lower 8 bits hold the connector type defined in objectid.h
    ATOM_DP_VS_MODE asMode;   // DP voltage swing mode
  };
  ATOM_DIG_TRANSMITTER_CONFIG_V2 acConfig;
  UCHAR ucAction;             // defined as ATOM_TRANSMITER_ACTION_XXX
  UCHAR ucReserved[4];
}DIG_TRANSMITTER_CONTROL_PARAMETERS_V2;

typedef struct _ATOM_DIG_TRANSMITTER_CONFIG_V3
{
#if ATOM_BIG_ENDIAN
  UCHAR ucTransmitterSel:2;     //bit7:6: =0 Dig Transmitter 1 ( Uniphy AB )
                                //        =1 Dig Transmitter 2 ( Uniphy CD )
                                //        =2 Dig Transmitter 3 ( Uniphy EF )
  UCHAR ucRefClkSource:2;       //bit5:4: PPLL1=0, PPLL2=1, EXT_CLK=2
  UCHAR ucEncoderSel:1;         //bit3=0: Data/Clk path source from DIGA/C/E. =1: Data/clk path source from DIGB/D/F
  UCHAR ucLinkSel:1;            //bit2=0: Uniphy LINKA or C or E when fDualLinkConnector=0. When fDualLinkConnector=1, the master link of the dual link is A or C or E
                                //    =1: Uniphy LINKB or D or F when fDualLinkConnector=0. When fDualLinkConnector=1, the master link of the dual link is B or D or F
  UCHAR fCoherentMode:1;        //bit1=1: Coherent Mode ( for DVI/HDMI mode )
  UCHAR fDualLinkConnector:1;   //bit0=1: Dual Link DVI connector
#else
  UCHAR fDualLinkConnector:1;   //bit0=1: Dual Link DVI connector
  UCHAR fCoherentMode:1;        //bit1=1: Coherent Mode ( for DVI/HDMI mode )
  UCHAR ucLinkSel:1;            //bit2=0: Uniphy LINKA or C or E when fDualLinkConnector=0. When fDualLinkConnector=1, the master link of the dual link is A or C or E
                                //    =1: Uniphy LINKB or D or F when fDualLinkConnector=0. When fDualLinkConnector=1, the master link of the dual link is B or D or F
  UCHAR ucEncoderSel:1;         //bit3=0: Data/Clk path source from DIGA/C/E. =1: Data/clk path source from DIGB/D/F
  UCHAR ucRefClkSource:2;       //bit5:4: PPLL1=0, PPLL2=1, EXT_CLK=2
  UCHAR ucTransmitterSel:2;     //bit7:6: =0 Dig Transmitter 1 ( Uniphy AB )
                                //        =1 Dig Transmitter 2 ( Uniphy CD )
                                //        =2 Dig Transmitter 3 ( Uniphy EF )
#endif
}ATOM_DIG_TRANSMITTER_CONFIG_V3;

typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS_V3
{
  union
  {
    USHORT usPixelClock;      // in 10KHz; for BIOS convenience
    USHORT usInitInfo;        // when initializing uniphy, the lower 8 bits hold the connector type defined in objectid.h
    ATOM_DP_VS_MODE asMode;   // DP voltage swing mode
  };
  ATOM_DIG_TRANSMITTER_CONFIG_V3 acConfig;
  UCHAR ucAction;             // defined as ATOM_TRANSMITER_ACTION_XXX
  UCHAR ucLaneNum;
  UCHAR ucReserved[3];
}DIG_TRANSMITTER_CONTROL_PARAMETERS_V3;

//ucConfig
//Bit0
#define ATOM_TRANSMITTER_CONFIG_V3_DUAL_LINK_CONNECTOR    0x01

//Bit1
#define ATOM_TRANSMITTER_CONFIG_V3_COHERENT               0x02

//Bit2
#define ATOM_TRANSMITTER_CONFIG_V3_LINK_SEL_MASK          0x04
#define ATOM_TRANSMITTER_CONFIG_V3_LINKA                  0x00
#define ATOM_TRANSMITTER_CONFIG_V3_LINKB                  0x04

//Bit3
#define ATOM_TRANSMITTER_CONFIG_V3_ENCODER_SEL_MASK       0x08
#define ATOM_TRANSMITTER_CONFIG_V3_DIG1_ENCODER           0x00
#define ATOM_TRANSMITTER_CONFIG_V3_DIG2_ENCODER           0x08

//Bit5:4
#define ATOM_TRASMITTER_CONFIG_V3_REFCLK_SEL_MASK         0x30
#define ATOM_TRASMITTER_CONFIG_V3_P1PLL                   0x00
#define ATOM_TRASMITTER_CONFIG_V3_P2PLL                   0x10
#define ATOM_TRASMITTER_CONFIG_V3_REFCLK_SRC_EXT          0x20

//Bit7:6
#define ATOM_TRANSMITTER_CONFIG_V3_TRANSMITTER_SEL_MASK   0xC0
#define ATOM_TRANSMITTER_CONFIG_V3_TRANSMITTER1           0x00  //AB
#define ATOM_TRANSMITTER_CONFIG_V3_TRANSMITTER2           0x40  //CD
#define ATOM_TRANSMITTER_CONFIG_V3_TRANSMITTER3           0x80  //EF

/****************************************************************************/
// Structures used by DAC1OutputControlTable
//                    DAC2OutputControlTable
//                    LVTMAOutputControlTable  (Before DEC30)
//                    TMDSAOutputControlTable  (Before DEC30)
/****************************************************************************/
typedef struct _DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS
{
  UCHAR ucAction;               // Possible input: ATOM_ENABLE || ATOM_DISABLE
                                // When the display is LCD, in addition to the above:
                                // ATOM_LCD_BLOFF || ATOM_LCD_BLON || ATOM_LCD_BL_BRIGHTNESS_CONTROL || ATOM_LCD_SELFTEST_START ||
                                // ATOM_LCD_SELFTEST_STOP

  UCHAR aucPadding[3];          // padding to DWORD aligned
}DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS;

#define DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS

#define CRT1_OUTPUT_CONTROL_PARAMETERS     DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS
#define CRT1_OUTPUT_CONTROL_PS_ALLOCATION  DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION

#define CRT2_OUTPUT_CONTROL_PARAMETERS     DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS
#define CRT2_OUTPUT_CONTROL_PS_ALLOCATION  DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION

#define CV1_OUTPUT_CONTROL_PARAMETERS      DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS
@@ -782,397 +980,550 @@ typedef struct _DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS {
#define DVO_OUTPUT_CONTROL_PS_ALLOCATION   DIG_TRANSMITTER_CONTROL_PS_ALLOCATION
#define DVO_OUTPUT_CONTROL_PARAMETERS_V3   DIG_TRANSMITTER_CONTROL_PARAMETERS

/****************************************************************************/
// Structures used by BlankCRTCTable
/****************************************************************************/
typedef struct _BLANK_CRTC_PARAMETERS
{
  UCHAR  ucCRTC;                // ATOM_CRTC1 or ATOM_CRTC2
  UCHAR  ucBlanking;            // ATOM_BLANKING or ATOM_BLANKINGOFF
  USHORT usBlackColorRCr;
  USHORT usBlackColorGY;
  USHORT usBlackColorBCb;
}BLANK_CRTC_PARAMETERS;
#define BLANK_CRTC_PS_ALLOCATION BLANK_CRTC_PARAMETERS

/****************************************************************************/
// Structures used by EnableCRTCTable
//                    EnableCRTCMemReqTable
//                    UpdateCRTC_DoubleBufferRegistersTable
/****************************************************************************/
typedef struct _ENABLE_CRTC_PARAMETERS
{
  UCHAR ucCRTC;                 // ATOM_CRTC1 or ATOM_CRTC2
  UCHAR ucEnable;               // ATOM_ENABLE or ATOM_DISABLE
  UCHAR ucPadding[2];
}ENABLE_CRTC_PARAMETERS;
#define ENABLE_CRTC_PS_ALLOCATION ENABLE_CRTC_PARAMETERS

/****************************************************************************/
// Structures used by SetCRTC_OverScanTable
/****************************************************************************/
typedef struct _SET_CRTC_OVERSCAN_PARAMETERS
{
  USHORT usOverscanRight;       // right
  USHORT usOverscanLeft;        // left
  USHORT usOverscanBottom;      // bottom
  USHORT usOverscanTop;         // top
  UCHAR  ucCRTC;                // ATOM_CRTC1 or ATOM_CRTC2
  UCHAR  ucPadding[3];
}SET_CRTC_OVERSCAN_PARAMETERS;
#define SET_CRTC_OVERSCAN_PS_ALLOCATION SET_CRTC_OVERSCAN_PARAMETERS

/****************************************************************************/
// Structures used by SetCRTC_ReplicationTable
/****************************************************************************/
typedef struct _SET_CRTC_REPLICATION_PARAMETERS
{
  UCHAR ucH_Replication;        // horizontal replication
  UCHAR ucV_Replication;        // vertical replication
  UCHAR usCRTC;                 // ATOM_CRTC1 or ATOM_CRTC2
  UCHAR ucPadding;
}SET_CRTC_REPLICATION_PARAMETERS;
#define SET_CRTC_REPLICATION_PS_ALLOCATION SET_CRTC_REPLICATION_PARAMETERS

/****************************************************************************/
// Structures used by SelectCRTC_SourceTable
/****************************************************************************/
typedef struct _SELECT_CRTC_SOURCE_PARAMETERS
{
  UCHAR ucCRTC;                 // ATOM_CRTC1 or ATOM_CRTC2
  UCHAR ucDevice;               // ATOM_DEVICE_CRT1|ATOM_DEVICE_CRT2|....
  UCHAR ucPadding[2];
}SELECT_CRTC_SOURCE_PARAMETERS;
#define SELECT_CRTC_SOURCE_PS_ALLOCATION SELECT_CRTC_SOURCE_PARAMETERS

typedef struct _SELECT_CRTC_SOURCE_PARAMETERS_V2
{
  UCHAR ucCRTC;                 // ATOM_CRTC1 or ATOM_CRTC2
  UCHAR ucEncoderID;            // DAC1/DAC2/TVOUT/DIG1/DIG2/DVO
  UCHAR ucEncodeMode;           // Encoding mode, only valid when using DIG1/DIG2/DVO
  UCHAR ucPadding;
}SELECT_CRTC_SOURCE_PARAMETERS_V2;

//ucEncoderID
//#define ASIC_INT_DAC1_ENCODER_ID    0x00
//#define ASIC_INT_TV_ENCODER_ID      0x02
//#define ASIC_INT_DIG1_ENCODER_ID    0x03
//#define ASIC_INT_DAC2_ENCODER_ID    0x04
//#define ASIC_EXT_TV_ENCODER_ID      0x06
//#define ASIC_INT_DVO_ENCODER_ID     0x07
//#define ASIC_INT_DIG2_ENCODER_ID    0x09
//#define ASIC_EXT_DIG_ENCODER_ID     0x05

//ucEncodeMode
//#define ATOM_ENCODER_MODE_DP        0
//#define ATOM_ENCODER_MODE_LVDS      1
//#define ATOM_ENCODER_MODE_DVI       2
//#define ATOM_ENCODER_MODE_HDMI      3
//#define ATOM_ENCODER_MODE_SDVO      4
//#define ATOM_ENCODER_MODE_TV        13
//#define ATOM_ENCODER_MODE_CV        14
//#define ATOM_ENCODER_MODE_CRT       15
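
/* Illustrative sketch (not part of the original header): routing CRTC2 to
 * the internal DIG1 encoder in DP mode. ATOM_CRTC2 is defined earlier in
 * this header; 0x03 is ASIC_INT_DIG1_ENCODER_ID from the commented list
 * above. The executor wrapper is hypothetical. */
extern void select_crtc_source_exec(SELECT_CRTC_SOURCE_PARAMETERS_V2 *args);

static void route_crtc2_to_dig1_dp(void)
{
  SELECT_CRTC_SOURCE_PARAMETERS_V2 args = {0};

  args.ucCRTC = ATOM_CRTC2;
  args.ucEncoderID = 0x03;                  /* ASIC_INT_DIG1_ENCODER_ID */
  args.ucEncodeMode = ATOM_ENCODER_MODE_DP;
  select_crtc_source_exec(&args);
}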
870/****************************************************************************/ 1073
871/* Structures used by SetPixelClockTable */ 1074/****************************************************************************/
872/* GetPixelClockTable */ 1075// Structures used by SetPixelClockTable
873/****************************************************************************/ 1076// GetPixelClockTable
874/* Major revision=1., Minor revision=1 */ 1077/****************************************************************************/
875typedef struct _PIXEL_CLOCK_PARAMETERS { 1078//Major revision=1., Minor revision=1
876 USHORT usPixelClock; /* in 10kHz unit; for bios convenient = (RefClk*FB_Div)/(Ref_Div*Post_Div) */ 1079typedef struct _PIXEL_CLOCK_PARAMETERS
877 /* 0 means disable PPLL */ 1080{
878 USHORT usRefDiv; /* Reference divider */ 1081 USHORT usPixelClock; // in 10kHz unit; for bios convenient = (RefClk*FB_Div)/(Ref_Div*Post_Div)
879 USHORT usFbDiv; /* feedback divider */ 1082 // 0 means disable PPLL
880 UCHAR ucPostDiv; /* post divider */ 1083 USHORT usRefDiv; // Reference divider
881 UCHAR ucFracFbDiv; /* fractional feedback divider */ 1084 USHORT usFbDiv; // feedback divider
882 UCHAR ucPpll; /* ATOM_PPLL1 or ATOM_PPL2 */ 1085 UCHAR ucPostDiv; // post divider
883 UCHAR ucRefDivSrc; /* ATOM_PJITTER or ATO_NONPJITTER */ 1086 UCHAR ucFracFbDiv; // fractional feedback divider
884 UCHAR ucCRTC; /* Which CRTC uses this Ppll */ 1087 UCHAR ucPpll; // ATOM_PPLL1 or ATOM_PPL2
885 UCHAR ucPadding; 1088 UCHAR ucRefDivSrc; // ATOM_PJITTER or ATO_NONPJITTER
886} PIXEL_CLOCK_PARAMETERS; 1089 UCHAR ucCRTC; // Which CRTC uses this Ppll
887 1090 UCHAR ucPadding;
888/* Major revision=1., Minor revision=2, add ucMiscIfno */ 1091}PIXEL_CLOCK_PARAMETERS;
889/* ucMiscInfo: */ 1092
1093//Major revision=1., Minor revision=2, add ucMiscIfno
1094//ucMiscInfo:
890#define MISC_FORCE_REPROG_PIXEL_CLOCK 0x1 1095#define MISC_FORCE_REPROG_PIXEL_CLOCK 0x1
891#define MISC_DEVICE_INDEX_MASK 0xF0 1096#define MISC_DEVICE_INDEX_MASK 0xF0
892#define MISC_DEVICE_INDEX_SHIFT 4 1097#define MISC_DEVICE_INDEX_SHIFT 4
893 1098
894typedef struct _PIXEL_CLOCK_PARAMETERS_V2 { 1099typedef struct _PIXEL_CLOCK_PARAMETERS_V2
895 USHORT usPixelClock; /* in 10kHz unit; for bios convenient = (RefClk*FB_Div)/(Ref_Div*Post_Div) */ 1100{
896 /* 0 means disable PPLL */ 1101 USHORT usPixelClock; // in 10kHz unit; for bios convenient = (RefClk*FB_Div)/(Ref_Div*Post_Div)
897 USHORT usRefDiv; /* Reference divider */ 1102 // 0 means disable PPLL
898 USHORT usFbDiv; /* feedback divider */ 1103 USHORT usRefDiv; // Reference divider
899 UCHAR ucPostDiv; /* post divider */ 1104 USHORT usFbDiv; // feedback divider
900 UCHAR ucFracFbDiv; /* fractional feedback divider */ 1105 UCHAR ucPostDiv; // post divider
901 UCHAR ucPpll; /* ATOM_PPLL1 or ATOM_PPL2 */ 1106 UCHAR ucFracFbDiv; // fractional feedback divider
902 UCHAR ucRefDivSrc; /* ATOM_PJITTER or ATO_NONPJITTER */ 1107 UCHAR ucPpll; // ATOM_PPLL1 or ATOM_PPL2
903 UCHAR ucCRTC; /* Which CRTC uses this Ppll */ 1108 UCHAR ucRefDivSrc; // ATOM_PJITTER or ATO_NONPJITTER
904 UCHAR ucMiscInfo; /* Different bits for different purpose, bit [7:4] as device index, bit[0]=Force prog */ 1109 UCHAR ucCRTC; // Which CRTC uses this Ppll
905} PIXEL_CLOCK_PARAMETERS_V2; 1110 UCHAR ucMiscInfo; // Different bits for different purpose, bit [7:4] as device index, bit[0]=Force prog
906 1111}PIXEL_CLOCK_PARAMETERS_V2;

//Major revision=1., Minor revision=3, structure/definition change
//ucEncoderMode:
//ATOM_ENCODER_MODE_DP
//ATOM_ENCODER_MODE_LVDS
//ATOM_ENCODER_MODE_DVI
//ATOM_ENCODER_MODE_HDMI
//ATOM_ENCODER_MODE_SDVO
//ATOM_ENCODER_MODE_TV     13
//ATOM_ENCODER_MODE_CV     14
//ATOM_ENCODER_MODE_CRT    15

//ucDVOConfig
//#define DVO_ENCODER_CONFIG_RATE_SEL       0x01
//#define DVO_ENCODER_CONFIG_DDR_SPEED      0x00
//#define DVO_ENCODER_CONFIG_SDR_SPEED      0x01
//#define DVO_ENCODER_CONFIG_OUTPUT_SEL     0x0c
//#define DVO_ENCODER_CONFIG_LOW12BIT       0x00
//#define DVO_ENCODER_CONFIG_UPPER12BIT     0x04
//#define DVO_ENCODER_CONFIG_24BIT          0x08

//ucMiscInfo: also changed, see below
#define PIXEL_CLOCK_MISC_FORCE_PROG_PPLL          0x01
#define PIXEL_CLOCK_MISC_VGA_MODE                 0x02
#define PIXEL_CLOCK_MISC_CRTC_SEL_MASK            0x04
#define PIXEL_CLOCK_MISC_CRTC_SEL_CRTC1           0x00
#define PIXEL_CLOCK_MISC_CRTC_SEL_CRTC2           0x04
#define PIXEL_CLOCK_MISC_USE_ENGINE_FOR_DISPCLK   0x08
#define PIXEL_CLOCK_MISC_REF_DIV_SRC              0x10
// V1.4 for RoadRunner
#define PIXEL_CLOCK_V4_MISC_SS_ENABLE             0x10
#define PIXEL_CLOCK_V4_MISC_COHERENT_MODE         0x20

typedef struct _PIXEL_CLOCK_PARAMETERS_V3
{
  USHORT usPixelClock;     // in 10kHz unit; for bios convenience = (RefClk*FB_Div)/(Ref_Div*Post_Div)
                           // 0 means disable PPLL. For the VGA PPLL, make sure this value is not 0.
  USHORT usRefDiv;         // Reference divider
  USHORT usFbDiv;          // feedback divider
  UCHAR  ucPostDiv;        // post divider
  UCHAR  ucFracFbDiv;      // fractional feedback divider
  UCHAR  ucPpll;           // ATOM_PPLL1 or ATOM_PPLL2
  UCHAR  ucTransmitterId;  // graphic encoder id defined in objectId.h
  union
  {
    UCHAR  ucEncoderMode;  // encoder type defined as ATOM_ENCODER_MODE_DP/DVI/HDMI/...
    UCHAR  ucDVOConfig;    // when using DVO, need to know SDR/DDR, 12bit or 24bit
  };
  UCHAR  ucMiscInfo;       // bit[0]=Force program, bit[1]=set pclk for VGA, bit[2]=CRTC sel
                           // bit[3]=0: use PPLL for dispclk source, =1: use engine clock for dispclk source
                           // bit[4]=0: use XTALIN as the reference divider source, =1: use the pre-defined clock as the reference divider source
}PIXEL_CLOCK_PARAMETERS_V3;

#define PIXEL_CLOCK_PARAMETERS_LAST           PIXEL_CLOCK_PARAMETERS_V2
#define GET_PIXEL_CLOCK_PS_ALLOCATION         PIXEL_CLOCK_PARAMETERS_LAST

typedef struct _PIXEL_CLOCK_PARAMETERS_V5
{
  UCHAR  ucCRTC;            // ATOM_CRTC1~6, indicates which CRTC controller
                            // drives the pixel clock. Not used for the DCPLL case.
  union
  {
    UCHAR  ucReserved;
    UCHAR  ucFracFbDiv;     // [gphan] temporary to prevent a build problem; remove it after the driver code is changed.
  };
  USHORT usPixelClock;      // target pixel clock to drive the CRTC timing
                            // 0 means disable PPLL/DCPLL.
  USHORT usFbDiv;           // feedback divider, integer part.
  UCHAR  ucPostDiv;         // post divider.
  UCHAR  ucRefDiv;          // Reference divider
  UCHAR  ucPpll;            // ATOM_PPLL1/ATOM_PPLL2/ATOM_DCPLL
  UCHAR  ucTransmitterID;   // ASIC encoder id defined in objectId.h,
                            // indicates which graphic encoder will be used.
  UCHAR  ucEncoderMode;     // Encoder mode:
  UCHAR  ucMiscInfo;        // bit[0]= Force program PPLL
                            // bit[1]= set when VGA timing is used.
                            // bit[3:2]= HDMI panel bit depth: =0: 24bpp, =1: 30bpp, =2: 32bpp
                            // bit[4]= RefClock source for PPLL.
                            //         =0: XTALIN (default mode)
                            //         =1: other external clock source, which is pre-defined
                            //             by the VBIOS depending on the feature required.
                            // bit[7:5]: reserved.
  ULONG  ulFbDivDecFrac;    // 20-bit feedback divider decimal fraction part, range 1~999999 (0.000001 to 0.999999)

}PIXEL_CLOCK_PARAMETERS_V5;

#define PIXEL_CLOCK_V5_MISC_FORCE_PROG_PPLL       0x01
#define PIXEL_CLOCK_V5_MISC_VGA_MODE              0x02
#define PIXEL_CLOCK_V5_MISC_HDMI_BPP_MASK         0x0c
#define PIXEL_CLOCK_V5_MISC_HDMI_24BPP            0x00
#define PIXEL_CLOCK_V5_MISC_HDMI_30BPP            0x04
#define PIXEL_CLOCK_V5_MISC_HDMI_32BPP            0x08
#define PIXEL_CLOCK_V5_MISC_REF_DIV_SRC           0x10

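// Sketch of filling the V5 split feedback divider (all values are
// illustrative assumptions, not a validated clock setup): a divide of 83.25
// is expressed as usFbDiv = 83 plus ulFbDivDecFrac = 250000, per the
// 0.000001-step encoding documented on ulFbDivDecFrac above.
static inline void atom_example_fill_pclk_v5(PIXEL_CLOCK_PARAMETERS_V5 *p)
{
  p->ucCRTC         = 0;        // ATOM_CRTC1
  p->usPixelClock   = 16200;    // 162 MHz in 10 kHz units
  p->usFbDiv        = 83;       // integer part of the feedback divider
  p->ulFbDivDecFrac = 250000;   // fractional part: 0.250000
  p->ucPostDiv      = 4;        // assumed post divider
  p->ucRefDiv       = 1;        // assumed reference divider
  p->ucPpll         = 0;        // ATOM_PPLL1
  p->ucMiscInfo     = PIXEL_CLOCK_V5_MISC_FORCE_PROG_PPLL |
                      PIXEL_CLOCK_V5_MISC_HDMI_30BPP;
}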
typedef struct _GET_DISP_PLL_STATUS_INPUT_PARAMETERS_V2
{
  PIXEL_CLOCK_PARAMETERS_V3 sDispClkInput;
}GET_DISP_PLL_STATUS_INPUT_PARAMETERS_V2;

typedef struct _GET_DISP_PLL_STATUS_OUTPUT_PARAMETERS_V2
{
  UCHAR  ucStatus;
  UCHAR  ucRefDivSrc;       // =1: reference clock source from XTALIN, =0: source from PCIE ref clock
  UCHAR  ucReserved[2];
}GET_DISP_PLL_STATUS_OUTPUT_PARAMETERS_V2;

typedef struct _GET_DISP_PLL_STATUS_INPUT_PARAMETERS_V3
{
  PIXEL_CLOCK_PARAMETERS_V5 sDispClkInput;
}GET_DISP_PLL_STATUS_INPUT_PARAMETERS_V3;

/****************************************************************************/
// Structures used by AdjustDisplayPllTable
/****************************************************************************/
typedef struct _ADJUST_DISPLAY_PLL_PARAMETERS
{
  USHORT usPixelClock;
  UCHAR  ucTransmitterID;
  UCHAR  ucEncodeMode;
  union
  {
    UCHAR  ucDVOConfig;    // if DVO, need to pass the link rate and whether output is 12-bit low or 24-bit
    UCHAR  ucConfig;       // if not DVO, not defined yet
  };
  UCHAR  ucReserved[3];
}ADJUST_DISPLAY_PLL_PARAMETERS;

#define ADJUST_DISPLAY_CONFIG_SS_ENABLE       0x10
#define ADJUST_DISPLAY_PLL_PS_ALLOCATION      ADJUST_DISPLAY_PLL_PARAMETERS

typedef struct _ADJUST_DISPLAY_PLL_INPUT_PARAMETERS_V3
{
  USHORT usPixelClock;      // target pixel clock
  UCHAR  ucTransmitterID;   // transmitter id defined in objectid.h
  UCHAR  ucEncodeMode;      // encoder mode: CRT, LVDS, DP, TMDS or HDMI
  UCHAR  ucDispPllConfig;   // display pll configure parameter defined as the DISPPLL_CONFIG_XXXX values below
  UCHAR  ucReserved[3];
}ADJUST_DISPLAY_PLL_INPUT_PARAMETERS_V3;

// usDispPllConfig v1.2 for RoadRunner
#define DISPPLL_CONFIG_DVO_RATE_SEL           0x0001    // needed only when ucTransmitterID = DVO
#define DISPPLL_CONFIG_DVO_DDR_SPEED          0x0000    // needed only when ucTransmitterID = DVO
#define DISPPLL_CONFIG_DVO_SDR_SPEED          0x0001    // needed only when ucTransmitterID = DVO
#define DISPPLL_CONFIG_DVO_OUTPUT_SEL         0x000c    // needed only when ucTransmitterID = DVO
#define DISPPLL_CONFIG_DVO_LOW12BIT           0x0000    // needed only when ucTransmitterID = DVO
#define DISPPLL_CONFIG_DVO_UPPER12BIT         0x0004    // needed only when ucTransmitterID = DVO
#define DISPPLL_CONFIG_DVO_24BIT              0x0008    // needed only when ucTransmitterID = DVO
#define DISPPLL_CONFIG_SS_ENABLE              0x0010    // only used when ucEncoderMode = DP or LVDS
#define DISPPLL_CONFIG_COHERENT_MODE          0x0020    // only used when ucEncoderMode = TMDS or HDMI
#define DISPPLL_CONFIG_DUAL_LINK              0x0040    // only used when ucEncoderMode = TMDS or LVDS


typedef struct _ADJUST_DISPLAY_PLL_OUTPUT_PARAMETERS_V3
{
  ULONG ulDispPllFreq;      // returns the display PPLL freq used to generate the pixel clock and the related idclk, symclk etc.
  UCHAR ucRefDiv;           // if non-zero, it is used to calculate the other ppll parameters fb_divider and post_div (if they are not given)
  UCHAR ucPostDiv;          // if non-zero, it is used to calculate the other ppll parameter fb_divider
  UCHAR ucReserved[2];
}ADJUST_DISPLAY_PLL_OUTPUT_PARAMETERS_V3;

typedef struct _ADJUST_DISPLAY_PLL_PS_ALLOCATION_V3
{
  union
  {
    ADJUST_DISPLAY_PLL_INPUT_PARAMETERS_V3  sInput;
    ADJUST_DISPLAY_PLL_OUTPUT_PARAMETERS_V3 sOutput;
  };
} ADJUST_DISPLAY_PLL_PS_ALLOCATION_V3;
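// The _PS_ALLOCATION_V3 union above reflects the usual AtomBIOS calling
// convention: the caller fills sInput, executes the AdjustDisplayPll table,
// then rereads the same bytes through sOutput. A hedged sketch with a
// hypothetical table-executor callback:
typedef void (*atom_exec_fn)(void *pParameterSpace);    // hypothetical executor

static ULONG atom_example_adjust_pll(atom_exec_fn exec, USHORT usPclk10KHz)
{
  ADJUST_DISPLAY_PLL_PS_ALLOCATION_V3 ps;
  ps.sInput.usPixelClock    = usPclk10KHz;
  ps.sInput.ucTransmitterID = 0;    // encoder id from objectid.h (assumed)
  ps.sInput.ucEncodeMode    = 0;    // mode ids are defined elsewhere in this header
  ps.sInput.ucDispPllConfig = DISPPLL_CONFIG_COHERENT_MODE;
  ps.sInput.ucReserved[0]   = ps.sInput.ucReserved[1] = ps.sInput.ucReserved[2] = 0;
  exec(&ps);                        // run the command table
  return ps.sOutput.ulDispPllFreq;  // same buffer, reinterpreted as output
}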

/****************************************************************************/
// Structures used by EnableYUVTable
/****************************************************************************/
typedef struct _ENABLE_YUV_PARAMETERS
{
  UCHAR ucEnable;           // ATOM_ENABLE: Enable YUV or ATOM_DISABLE: Disable YUV (RGB)
  UCHAR ucCRTC;             // Which CRTC needs this YUV or RGB format
  UCHAR ucPadding[2];
}ENABLE_YUV_PARAMETERS;
#define ENABLE_YUV_PS_ALLOCATION ENABLE_YUV_PARAMETERS

/****************************************************************************/
// Structures used by GetMemoryClockTable
/****************************************************************************/
typedef struct _GET_MEMORY_CLOCK_PARAMETERS
{
  ULONG ulReturnMemoryClock;    // current memory speed in 10KHz unit
} GET_MEMORY_CLOCK_PARAMETERS;
#define GET_MEMORY_CLOCK_PS_ALLOCATION  GET_MEMORY_CLOCK_PARAMETERS

/****************************************************************************/
// Structures used by GetEngineClockTable
/****************************************************************************/
typedef struct _GET_ENGINE_CLOCK_PARAMETERS
{
  ULONG ulReturnEngineClock;    // current engine speed in 10KHz unit
} GET_ENGINE_CLOCK_PARAMETERS;
#define GET_ENGINE_CLOCK_PS_ALLOCATION  GET_ENGINE_CLOCK_PARAMETERS

/****************************************************************************/
// Following structures and constants may be obsolete
/****************************************************************************/
//Maximum 8 bytes; the data read in will be placed in the parameter space.
//A read operation is successful when the parameter space is non-zero, otherwise the read operation failed.
typedef struct _READ_EDID_FROM_HW_I2C_DATA_PARAMETERS
{
  USHORT usPrescale;        // Ratio between Engine clock and I2C clock
  USHORT usVRAMAddress;     // Address in Frame Buffer where to place the raw EDID
  USHORT usStatus;          // When used as output: lower byte EDID checksum, high byte hardware status
                            // When used as input: lower byte as 'bytes to read': currently limited to 128 bytes or 1 byte
  UCHAR  ucSlaveAddr;       // Read from which slave
  UCHAR  ucLineNumber;      // Read from which HW assisted line
}READ_EDID_FROM_HW_I2C_DATA_PARAMETERS;
#define READ_EDID_FROM_HW_I2C_DATA_PS_ALLOCATION  READ_EDID_FROM_HW_I2C_DATA_PARAMETERS
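// Sketch of staging an EDID read request with the structure above. 0xA0 is
// the standard DDC EDID slave address; the prescale and line number are
// board-specific assumptions:
static void atom_example_request_edid(READ_EDID_FROM_HW_I2C_DATA_PS_ALLOCATION *ps)
{
  ps->usPrescale    = 0x7F;    // engine-clock to I2C-clock ratio (assumed)
  ps->usVRAMAddress = 0;       // frame-buffer offset receiving the raw EDID
  ps->usStatus      = 128;     // on input: bytes to read (128 or 1, per above)
  ps->ucSlaveAddr   = 0xA0;    // DDC EDID slave address
  ps->ucLineNumber  = 0;       // HW-assisted I2C line (assumed)
  // After execution, the low byte of usStatus holds the EDID checksum and
  // the high byte the hardware status, as documented above.
}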


#define ATOM_WRITE_I2C_FORMAT_PSOFFSET_PSDATABYTE                  0
#define ATOM_WRITE_I2C_FORMAT_PSOFFSET_PSTWODATABYTES              1
#define ATOM_WRITE_I2C_FORMAT_PSCOUNTER_PSOFFSET_IDDATABLOCK       2
#define ATOM_WRITE_I2C_FORMAT_PSCOUNTER_IDOFFSET_PLUS_IDDATABLOCK  3
#define ATOM_WRITE_I2C_FORMAT_IDCOUNTER_IDOFFSET_IDDATABLOCK       4

typedef struct _WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS
{
  USHORT usPrescale;        // Ratio between Engine clock and I2C clock
  USHORT usByteOffset;      // Write to which byte
                            // Upper portion of usByteOffset is the format of the data:
                            // 1bytePS+offsetPS
                            // 2bytesPS+offsetPS
                            // blockID+offsetPS
                            // blockID+offsetID
                            // blockID+counterID+offsetID
  UCHAR  ucData;            // PS data1
  UCHAR  ucStatus;          // Status byte 1=success, 2=failure; also used as PS data2
  UCHAR  ucSlaveAddr;       // Write to which slave
  UCHAR  ucLineNumber;      // Write from which HW assisted line
}WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS;

#define WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION  WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS

typedef struct _SET_UP_HW_I2C_DATA_PARAMETERS
{
  USHORT usPrescale;        // Ratio between Engine clock and I2C clock
  UCHAR  ucSlaveAddr;       // Write to which slave
  UCHAR  ucLineNumber;      // Write from which HW assisted line
}SET_UP_HW_I2C_DATA_PARAMETERS;


/**************************************************************************/
#define SPEED_FAN_CONTROL_PS_ALLOCATION   WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS

/****************************************************************************/
// Structures used by PowerConnectorDetectionTable
/****************************************************************************/
typedef struct _POWER_CONNECTOR_DETECTION_PARAMETERS
{
  UCHAR  ucPowerConnectorStatus;    // Used for return value: 0: detected, 1: not detected
  UCHAR  ucPwrBehaviorId;
  USHORT usPwrBudget;               // how much power is currently booted to, in units of watts
}POWER_CONNECTOR_DETECTION_PARAMETERS;

typedef struct POWER_CONNECTOR_DETECTION_PS_ALLOCATION
{
  UCHAR  ucPowerConnectorStatus;    // Used for return value: 0: detected, 1: not detected
  UCHAR  ucReserved;
  USHORT usPwrBudget;               // how much power is currently booted to, in units of watts
  WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved;
}POWER_CONNECTOR_DETECTION_PS_ALLOCATION;

/****************************LVDS SS Command Table Definitions**********************/

/****************************************************************************/
// Structures used by EnableSpreadSpectrumOnPPLLTable
/****************************************************************************/
typedef struct _ENABLE_LVDS_SS_PARAMETERS
{
  USHORT usSpreadSpectrumPercentage;
  UCHAR  ucSpreadSpectrumType;           // Bit0=0 Down Spread, =1 Center Spread. Bit1=1 Ext., =0 Int. Others: TBD
  UCHAR  ucSpreadSpectrumStepSize_Delay; // bits3:2 SS_STEP_SIZE; bit 6:4 SS_DELAY
  UCHAR  ucEnable;                       // ATOM_ENABLE or ATOM_DISABLE
  UCHAR  ucPadding[3];
}ENABLE_LVDS_SS_PARAMETERS;

//ucTableFormatRevision=1,ucTableContentRevision=2
typedef struct _ENABLE_LVDS_SS_PARAMETERS_V2
{
  USHORT usSpreadSpectrumPercentage;
  UCHAR  ucSpreadSpectrumType;      // Bit0=0 Down Spread, =1 Center Spread. Bit1=1 Ext., =0 Int. Others: TBD
  UCHAR  ucSpreadSpectrumStep;
  UCHAR  ucEnable;                  // ATOM_ENABLE or ATOM_DISABLE
  UCHAR  ucSpreadSpectrumDelay;
  UCHAR  ucSpreadSpectrumRange;
  UCHAR  ucPadding;
}ENABLE_LVDS_SS_PARAMETERS_V2;

//This new structure is based on ENABLE_LVDS_SS_PARAMETERS but expands to SS on PPLL, so other devices can use SS.
typedef struct _ENABLE_SPREAD_SPECTRUM_ON_PPLL
{
  USHORT usSpreadSpectrumPercentage;
  UCHAR  ucSpreadSpectrumType;      // Bit0=0 Down Spread, =1 Center Spread. Bit1=1 Ext., =0 Int. Others: TBD
  UCHAR  ucSpreadSpectrumStep;
  UCHAR  ucEnable;                  // ATOM_ENABLE or ATOM_DISABLE
  UCHAR  ucSpreadSpectrumDelay;
  UCHAR  ucSpreadSpectrumRange;
  UCHAR  ucPpll;                    // ATOM_PPLL1/ATOM_PPLL2
}ENABLE_SPREAD_SPECTRUM_ON_PPLL;

typedef struct _ENABLE_SPREAD_SPECTRUM_ON_PPLL_V2
{
  USHORT usSpreadSpectrumPercentage;
  UCHAR  ucSpreadSpectrumType;      // Bit[0]: 0-Down Spread, 1-Center Spread.
                                    // Bit[1]: 1-Ext., 0-Int.
                                    // Bit[3:2]: =0 P1PLL, =1 P2PLL, =2 DCPLL
                                    // Bits[7:4] reserved
  UCHAR  ucEnable;                  // ATOM_ENABLE or ATOM_DISABLE
  USHORT usSpreadSpectrumAmount;    // Includes SS_AMOUNT_FBDIV[7:0] and SS_AMOUNT_NFRAC_SLIP[11:8]
  USHORT usSpreadSpectrumStep;      // SS_STEP_SIZE_DSFRAC
}ENABLE_SPREAD_SPECTRUM_ON_PPLL_V2;

#define ATOM_PPLL_SS_TYPE_V2_DOWN_SPREAD      0x00
#define ATOM_PPLL_SS_TYPE_V2_CENTRE_SPREAD    0x01
#define ATOM_PPLL_SS_TYPE_V2_EXT_SPREAD       0x02
#define ATOM_PPLL_SS_TYPE_V2_PPLL_SEL_MASK    0x0c
#define ATOM_PPLL_SS_TYPE_V2_P1PLL            0x00
#define ATOM_PPLL_SS_TYPE_V2_P2PLL            0x04
#define ATOM_PPLL_SS_TYPE_V2_DCPLL            0x08
#define ATOM_PPLL_SS_AMOUNT_V2_FBDIV_MASK     0x00FF
#define ATOM_PPLL_SS_AMOUNT_V2_FBDIV_SHIFT    0
#define ATOM_PPLL_SS_AMOUNT_V2_NFRAC_MASK     0x0F00
#define ATOM_PPLL_SS_AMOUNT_V2_NFRAC_SHIFT    8
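// Sketch: decoding the packed spread-spectrum amount of the V2 structure with
// the masks above; pure bit manipulation, no table call involved:
static inline UCHAR atom_ss_v2_fbdiv_amount(const ENABLE_SPREAD_SPECTRUM_ON_PPLL_V2 *ss)
{
  return (UCHAR)((ss->usSpreadSpectrumAmount & ATOM_PPLL_SS_AMOUNT_V2_FBDIV_MASK)
                 >> ATOM_PPLL_SS_AMOUNT_V2_FBDIV_SHIFT);
}

static inline UCHAR atom_ss_v2_nfrac_slip(const ENABLE_SPREAD_SPECTRUM_ON_PPLL_V2 *ss)
{
  return (UCHAR)((ss->usSpreadSpectrumAmount & ATOM_PPLL_SS_AMOUNT_V2_NFRAC_MASK)
                 >> ATOM_PPLL_SS_AMOUNT_V2_NFRAC_SHIFT);
}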

#define ENABLE_SPREAD_SPECTRUM_ON_PPLL_PS_ALLOCATION  ENABLE_SPREAD_SPECTRUM_ON_PPLL

/**************************************************************************/

typedef struct _SET_PIXEL_CLOCK_PS_ALLOCATION
{
  PIXEL_CLOCK_PARAMETERS sPCLKInput;
  ENABLE_SPREAD_SPECTRUM_ON_PPLL sReserved;   //Caller doesn't need to init this portion
}SET_PIXEL_CLOCK_PS_ALLOCATION;

#define ENABLE_VGA_RENDER_PS_ALLOCATION   SET_PIXEL_CLOCK_PS_ALLOCATION

/****************************************************************************/
// Structures used by ###
/****************************************************************************/
typedef struct _MEMORY_TRAINING_PARAMETERS
{
  ULONG ulTargetMemoryClock;    //In 10Khz unit
}MEMORY_TRAINING_PARAMETERS;
#define MEMORY_TRAINING_PS_ALLOCATION MEMORY_TRAINING_PARAMETERS


/****************************LVDS and other encoder command table definitions **********************/


/****************************************************************************/
// Structures used by LVDSEncoderControlTable   (Before DCE30)
//                    LVTMAEncoderControlTable  (Before DCE30)
//                    TMDSAEncoderControlTable  (Before DCE30)
/****************************************************************************/
typedef struct _LVDS_ENCODER_CONTROL_PARAMETERS
{
  USHORT usPixelClock;  // in 10KHz; for bios convenience
  UCHAR  ucMisc;        // bit0=0: Enable single link
                        //     =1: Enable dual link
                        // Bit1=0: 666RGB
                        //     =1: 888RGB
  UCHAR  ucAction;      // 0: turn off encoder
                        // 1: setup and turn on encoder
}LVDS_ENCODER_CONTROL_PARAMETERS;

#define LVDS_ENCODER_CONTROL_PS_ALLOCATION  LVDS_ENCODER_CONTROL_PARAMETERS

#define TMDS1_ENCODER_CONTROL_PARAMETERS    LVDS_ENCODER_CONTROL_PARAMETERS
#define TMDS1_ENCODER_CONTROL_PS_ALLOCATION TMDS1_ENCODER_CONTROL_PARAMETERS

#define TMDS2_ENCODER_CONTROL_PARAMETERS    TMDS1_ENCODER_CONTROL_PARAMETERS
#define TMDS2_ENCODER_CONTROL_PS_ALLOCATION TMDS2_ENCODER_CONTROL_PARAMETERS
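// Sketch: ucMisc packs link width and color depth for the pre-DCE30 encoder
// tables, per the bit layout documented in the structure above (the symbolic
// PANEL_ENCODER_MISC_* names appear later in this file):
static inline void atom_example_lvds_enable(LVDS_ENCODER_CONTROL_PARAMETERS *p,
                                            USHORT usPclk10KHz, int bDualLink, int bRGB888)
{
  p->usPixelClock = usPclk10KHz;
  p->ucMisc   = (UCHAR)((bDualLink ? 0x01 : 0x00) |   // bit0: dual link
                        (bRGB888   ? 0x02 : 0x00));   // bit1: 888RGB
  p->ucAction = 1;    // 1: setup and turn on encoder; 0: turn off
}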


//ucTableFormatRevision=1,ucTableContentRevision=2
typedef struct _LVDS_ENCODER_CONTROL_PARAMETERS_V2
{
  USHORT usPixelClock;  // in 10KHz; for bios convenience
  UCHAR  ucMisc;        // see PANEL_ENCODER_MISC_xx definitions below
  UCHAR  ucAction;      // 0: turn off encoder
                        // 1: setup and turn on encoder
  UCHAR  ucTruncate;    // bit0=0: Disable truncate
                        //     =1: Enable truncate
                        // bit4=0: 666RGB
                        //     =1: 888RGB
  UCHAR  ucSpatial;     // bit0=0: Disable spatial dithering
                        //     =1: Enable spatial dithering
                        // bit4=0: 666RGB
                        //     =1: 888RGB
  UCHAR  ucTemporal;    // bit0=0: Disable temporal dithering
                        //     =1: Enable temporal dithering
                        // bit4=0: 666RGB
                        //     =1: 888RGB
                        // bit5=0: Gray level 2
                        //     =1: Gray level 4
  UCHAR  ucFRC;         // bit4=0: 25FRC_SEL pattern E
                        //     =1: 25FRC_SEL pattern F
                        // bit6:5=0: 50FRC_SEL pattern A
                        //       =1: 50FRC_SEL pattern B
                        //       =2: 50FRC_SEL pattern C
                        //       =3: 50FRC_SEL pattern D
                        // bit7=0: 75FRC_SEL pattern E
                        //     =1: 75FRC_SEL pattern F
}LVDS_ENCODER_CONTROL_PARAMETERS_V2;

#define LVDS_ENCODER_CONTROL_PS_ALLOCATION_V2   LVDS_ENCODER_CONTROL_PARAMETERS_V2

#define TMDS1_ENCODER_CONTROL_PARAMETERS_V2     LVDS_ENCODER_CONTROL_PARAMETERS_V2
#define TMDS1_ENCODER_CONTROL_PS_ALLOCATION_V2  TMDS1_ENCODER_CONTROL_PARAMETERS_V2

#define TMDS2_ENCODER_CONTROL_PARAMETERS_V2     TMDS1_ENCODER_CONTROL_PARAMETERS_V2
#define TMDS2_ENCODER_CONTROL_PS_ALLOCATION_V2  TMDS2_ENCODER_CONTROL_PARAMETERS_V2

@@ -1185,38 +1536,42 @@ typedef struct _LVDS_ENCODER_CONTROL_PARAMETERS_V2 {
#define TMDS2_ENCODER_CONTROL_PARAMETERS_V3     LVDS_ENCODER_CONTROL_PARAMETERS_V3
#define TMDS2_ENCODER_CONTROL_PS_ALLOCATION_V3  TMDS2_ENCODER_CONTROL_PARAMETERS_V3

/****************************************************************************/
// Structures used by ###
/****************************************************************************/
typedef struct _ENABLE_EXTERNAL_TMDS_ENCODER_PARAMETERS
{
  UCHAR ucEnable;       // Enable or Disable External TMDS encoder
  UCHAR ucMisc;         // Bit0=0: Enable Single link; =1: Enable Dual link; Bit1 {=0: 666RGB, =1: 888RGB}
  UCHAR ucPadding[2];
}ENABLE_EXTERNAL_TMDS_ENCODER_PARAMETERS;

typedef struct _ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION
{
  ENABLE_EXTERNAL_TMDS_ENCODER_PARAMETERS    sXTmdsEncoder;
  WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION   sReserved;    //Caller doesn't need to init this portion
}ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION;

#define ENABLE_EXTERNAL_TMDS_ENCODER_PARAMETERS_V2  LVDS_ENCODER_CONTROL_PARAMETERS_V2

typedef struct _ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION_V2
{
  ENABLE_EXTERNAL_TMDS_ENCODER_PARAMETERS_V2 sXTmdsEncoder;
  WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION   sReserved;    //Caller doesn't need to init this portion
}ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION_V2;

typedef struct _EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION
{
  DIG_ENCODER_CONTROL_PARAMETERS             sDigEncoder;
  WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION   sReserved;
}EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION;

/****************************************************************************/
// Structures used by DVOEncoderControlTable
/****************************************************************************/
//ucTableFormatRevision=1,ucTableContentRevision=3

//ucDVOConfig:
#define DVO_ENCODER_CONFIG_RATE_SEL       0x01
#define DVO_ENCODER_CONFIG_DDR_SPEED      0x00
#define DVO_ENCODER_CONFIG_SDR_SPEED      0x01
@@ -1225,21 +1580,22 @@ typedef struct _EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION {
#define DVO_ENCODER_CONFIG_UPPER12BIT     0x04
#define DVO_ENCODER_CONFIG_24BIT          0x08

typedef struct _DVO_ENCODER_CONTROL_PARAMETERS_V3
{
  USHORT usPixelClock;
  UCHAR  ucDVOConfig;
  UCHAR  ucAction;        //ATOM_ENABLE/ATOM_DISABLE/ATOM_HPD_INIT
  UCHAR  ucReseved[4];
}DVO_ENCODER_CONTROL_PARAMETERS_V3;
#define DVO_ENCODER_CONTROL_PS_ALLOCATION_V3  DVO_ENCODER_CONTROL_PARAMETERS_V3
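// Sketch: composing ucDVOConfig from the DVO_ENCODER_CONFIG_* bits above,
// here SDR rate with the upper 12 bits selected (the ucAction value 1 assumes
// ATOM_ENABLE as defined elsewhere in this header):
static inline void atom_example_dvo_enable(DVO_ENCODER_CONTROL_PARAMETERS_V3 *p,
                                           USHORT usPclk10KHz)
{
  p->usPixelClock = usPclk10KHz;
  p->ucDVOConfig  = DVO_ENCODER_CONFIG_SDR_SPEED | DVO_ENCODER_CONFIG_UPPER12BIT;
  p->ucAction     = 1;    // ATOM_ENABLE
}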

//ucTableFormatRevision=1
//ucTableContentRevision=3  structure is unchanged, but usMisc adds bit 1 as another input:
//  bit1=0: non-coherent mode
//      =1: coherent mode

//==========================================================================================
//Only change is here next time when changing encoder parameter definitions again!
#define LVDS_ENCODER_CONTROL_PARAMETERS_LAST    LVDS_ENCODER_CONTROL_PARAMETERS_V3
#define LVDS_ENCODER_CONTROL_PS_ALLOCATION_LAST LVDS_ENCODER_CONTROL_PARAMETERS_LAST

@@ -1252,7 +1608,7 @@ typedef struct _DVO_ENCODER_CONTROL_PARAMETERS_V3 {
#define DVO_ENCODER_CONTROL_PARAMETERS_LAST     DVO_ENCODER_CONTROL_PARAMETERS
#define DVO_ENCODER_CONTROL_PS_ALLOCATION_LAST  DVO_ENCODER_CONTROL_PS_ALLOCATION

//==========================================================================================
#define PANEL_ENCODER_MISC_DUAL                0x01
#define PANEL_ENCODER_MISC_COHERENT            0x02
#define PANEL_ENCODER_MISC_TMDS_LINKB          0x04
@@ -1281,159 +1637,159 @@ typedef struct _DVO_ENCODER_CONTROL_PARAMETERS_V3 {
#define PANEL_ENCODER_75FRC_E                  0x00
#define PANEL_ENCODER_75FRC_F                  0x80

/****************************************************************************/
// Structures used by SetVoltageTable
/****************************************************************************/
#define SET_VOLTAGE_TYPE_ASIC_VDDC             1
#define SET_VOLTAGE_TYPE_ASIC_MVDDC            2
#define SET_VOLTAGE_TYPE_ASIC_MVDDQ            3
#define SET_VOLTAGE_TYPE_ASIC_VDDCI            4
#define SET_VOLTAGE_INIT_MODE                  5
#define SET_VOLTAGE_GET_MAX_VOLTAGE            6    //Gets the max. voltage for the soldered ASIC

#define SET_ASIC_VOLTAGE_MODE_ALL_SOURCE       0x1
#define SET_ASIC_VOLTAGE_MODE_SOURCE_A         0x2
#define SET_ASIC_VOLTAGE_MODE_SOURCE_B         0x4

#define SET_ASIC_VOLTAGE_MODE_SET_VOLTAGE      0x0
#define SET_ASIC_VOLTAGE_MODE_GET_GPIOVAL      0x1
#define SET_ASIC_VOLTAGE_MODE_GET_GPIOMASK     0x2

typedef struct _SET_VOLTAGE_PARAMETERS
{
  UCHAR ucVoltageType;      // To tell which voltage to set up, VDDC/MVDDC/MVDDQ
  UCHAR ucVoltageMode;      // To set all, to set source A or source B or ...
  UCHAR ucVoltageIndex;     // An index to tell which voltage level
  UCHAR ucReserved;
}SET_VOLTAGE_PARAMETERS;

typedef struct _SET_VOLTAGE_PARAMETERS_V2
{
  UCHAR  ucVoltageType;     // To tell which voltage to set up, VDDC/MVDDC/MVDDQ
  UCHAR  ucVoltageMode;     // Not used, may be used for a state machine for different power modes
  USHORT usVoltageLevel;    // real voltage level
}SET_VOLTAGE_PARAMETERS_V2;

typedef struct _SET_VOLTAGE_PS_ALLOCATION
{
  SET_VOLTAGE_PARAMETERS sASICSetVoltage;
  WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved;
}SET_VOLTAGE_PS_ALLOCATION;
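// Sketch: a VDDC set request through SET_VOLTAGE_PS_ALLOCATION using the
// SET_VOLTAGE_TYPE/MODE constants above; sReserved is I2C scratch space the
// caller leaves alone, per the convention used throughout this file:
static void atom_example_set_vddc(SET_VOLTAGE_PS_ALLOCATION *ps, UCHAR ucLevelIndex)
{
  ps->sASICSetVoltage.ucVoltageType  = SET_VOLTAGE_TYPE_ASIC_VDDC;
  ps->sASICSetVoltage.ucVoltageMode  = SET_ASIC_VOLTAGE_MODE_ALL_SOURCE;
  ps->sASICSetVoltage.ucVoltageIndex = ucLevelIndex;    // which voltage level
  ps->sASICSetVoltage.ucReserved     = 0;
}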

/****************************************************************************/
// Structures used by TVEncoderControlTable
/****************************************************************************/
typedef struct _TV_ENCODER_CONTROL_PARAMETERS
{
  USHORT usPixelClock;      // in 10KHz; for bios convenience
  UCHAR  ucTvStandard;      // See definition "ATOM_TV_NTSC ..."
  UCHAR  ucAction;          // 0: turn off encoder
                            // 1: setup and turn on encoder
}TV_ENCODER_CONTROL_PARAMETERS;

typedef struct _TV_ENCODER_CONTROL_PS_ALLOCATION
{
  TV_ENCODER_CONTROL_PARAMETERS sTVEncoder;
  WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved;   // Don't set this one
}TV_ENCODER_CONTROL_PS_ALLOCATION;

//==============================Data Table Portion====================================

/****************************************************************************/
// Structure used in Data.mtb
/****************************************************************************/
typedef struct _ATOM_MASTER_LIST_OF_DATA_TABLES
{
  USHORT UtilityPipeLine;           // Offset for the utility to get parser info; don't change this position!
  USHORT MultimediaCapabilityInfo;  // Only used by MM Lib, latest version 1.1, not configurable from Bios, need to include the table to build Bios
  USHORT MultimediaConfigInfo;      // Only used by MM Lib, latest version 2.1, not configurable from Bios, need to include the table to build Bios
  USHORT StandardVESA_Timing;       // Only used by Bios
  USHORT FirmwareInfo;              // Shared by various SW components, latest version 1.4
  USHORT DAC_Info;                  // Will be obsolete from R600
  USHORT LVDS_Info;                 // Shared by various SW components, latest version 1.1
  USHORT TMDS_Info;                 // Will be obsolete from R600
  USHORT AnalogTV_Info;             // Shared by various SW components, latest version 1.1
  USHORT SupportedDevicesInfo;      // Will be obsolete from R600
  USHORT GPIO_I2C_Info;             // Shared by various SW components, latest version 1.2 will be used from R600
  USHORT VRAM_UsageByFirmware;      // Shared by various SW components, latest version 1.3 will be used from R600
  USHORT GPIO_Pin_LUT;              // Shared by various SW components, latest version 1.1
  USHORT VESA_ToInternalModeLUT;    // Only used by Bios
  USHORT ComponentVideoInfo;        // Shared by various SW components, latest version 2.1 will be used from R600
  USHORT PowerPlayInfo;             // Shared by various SW components, latest version 2.1, new design from R600
  USHORT CompassionateData;         // Will be obsolete from R600
  USHORT SaveRestoreInfo;           // Only used by Bios
  USHORT PPLL_SS_Info;              // Shared by various SW components, latest version 1.2, used to be called SS_Info; changed to the new name because of the internal ASIC SS info
  USHORT OemInfo;                   // Defined and used by external SW, should be obsolete soon
  USHORT XTMDS_Info;                // Will be obsolete from R600
  USHORT MclkSS_Info;               // Shared by various SW components, latest version 1.1, only enabled when an external SS chip is used
  USHORT Object_Header;             // Shared by various SW components, latest version 1.1
  USHORT IndirectIOAccess;          // Only used by Bios; this table position can't change at all!!
  USHORT MC_InitParameter;          // Only used by command table
  USHORT ASIC_VDDC_Info;            // Will be obsolete from R600
  USHORT ASIC_InternalSS_Info;      // New table name from R600, used to be called "ASIC_MVDDC_Info"
  USHORT TV_VideoMode;              // Only used by command table
  USHORT VRAM_Info;                 // Only used by command table, latest version 1.3
  USHORT MemoryTrainingInfo;        // Used for VBIOS and Diag utility for memory training purposes since R600; the new table rev starts from 2.1
  USHORT IntegratedSystemInfo;      // Shared by various SW components
  USHORT ASIC_ProfilingInfo;        // New table name from R600, used to be called "ASIC_VDDCI_Info" for pre-R600
  USHORT VoltageObjectInfo;         // Shared by various SW components, latest version 1.1
  USHORT PowerSourceInfo;           // Shared by various SW components, latest version 1.1
}ATOM_MASTER_LIST_OF_DATA_TABLES;

typedef struct _ATOM_MASTER_DATA_TABLE
{
  ATOM_COMMON_TABLE_HEADER sHeader;
  ATOM_MASTER_LIST_OF_DATA_TABLES ListOfDataTables;
}ATOM_MASTER_DATA_TABLE;
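// Sketch of the usual data-table lookup: each USHORT member of
// ATOM_MASTER_LIST_OF_DATA_TABLES is an offset to that table, with 0 meaning
// "not present". The offsets are assumed to be relative to the BIOS image
// base; the helper below is hypothetical:
static void *atom_example_get_data_table(UCHAR *pBiosBase, USHORT usTableOffset)
{
  if (usTableOffset == 0)
    return (void *)0;                 // table absent in this image
  return pBiosBase + usTableOffset;   // e.g. pass ListOfDataTables.FirmwareInfo
}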

/****************************************************************************/
// Structure used in MultimediaCapabilityInfoTable
/****************************************************************************/
typedef struct _ATOM_MULTIMEDIA_CAPABILITY_INFO
{
  ATOM_COMMON_TABLE_HEADER sHeader;
  ULONG ulSignature;        // HW info table signature string "$ATI"
  UCHAR ucI2C_Type;         // I2C type (normal GP_IO, ImpactTV GP_IO, Dedicated I2C pin, etc)
  UCHAR ucTV_OutInfo;       // Type of TV out supported (3:0), video out crystal frequency (6:4) and TV data port (7)
  UCHAR ucVideoPortInfo;    // Provides the video port capabilities
  UCHAR ucHostPortInfo;     // Provides host port configuration information
}ATOM_MULTIMEDIA_CAPABILITY_INFO;

/****************************************************************************/
// Structure used in MultimediaConfigInfoTable
/****************************************************************************/
typedef struct _ATOM_MULTIMEDIA_CONFIG_INFO
{
  ATOM_COMMON_TABLE_HEADER sHeader;
  ULONG ulSignature;        // MM info table signature string "$MMT"
  UCHAR ucTunerInfo;        // Type of tuner installed on the adapter (4:0) and video input for tuner (7:5)
  UCHAR ucAudioChipInfo;    // Lists the audio chip type (3:0), product type (4) and OEM revision (7:5)
  UCHAR ucProductID;        // Defined as OEM ID or ATI board ID depending on the product type setting
  UCHAR ucMiscInfo1;        // Tuner voltage (1:0), HW teletext support (3:2), FM audio decoder (5:4), reserved (6), audio scrambling (7)
  UCHAR ucMiscInfo2;        // I2S input config (0), I2S output config (1), I2S Audio Chip (4:2), SPDIF Output Config (5), reserved (7:6)
  UCHAR ucMiscInfo3;        // Video Decoder Type (3:0), Video In Standard/Crystal (7:4)
  UCHAR ucMiscInfo4;        // Video Decoder Host Config (2:0), reserved (7:3)
  UCHAR ucVideoInput0Info;  // Video Input 0 Type (1:0), F/B setting (2), physical connector ID (5:3), reserved (7:6)
  UCHAR ucVideoInput1Info;  // Video Input 1 Type (1:0), F/B setting (2), physical connector ID (5:3), reserved (7:6)
  UCHAR ucVideoInput2Info;  // Video Input 2 Type (1:0), F/B setting (2), physical connector ID (5:3), reserved (7:6)
  UCHAR ucVideoInput3Info;  // Video Input 3 Type (1:0), F/B setting (2), physical connector ID (5:3), reserved (7:6)
  UCHAR ucVideoInput4Info;  // Video Input 4 Type (1:0), F/B setting (2), physical connector ID (5:3), reserved (7:6)
}ATOM_MULTIMEDIA_CONFIG_INFO;

/****************************************************************************/
// Structures used in FirmwareInfoTable
/****************************************************************************/

// usBIOSCapability Definition:
// Bit 0 = 0: Bios image is not Posted, =1: Bios image is Posted;
// Bit 1 = 0: Dual CRTC is not supported, =1: Dual CRTC is supported;
// Bit 2 = 0: Extended Desktop is not supported, =1: Extended Desktop is supported;
// Others: Reserved
#define ATOM_BIOS_INFO_ATOM_FIRMWARE_POSTED         0x0001
#define ATOM_BIOS_INFO_DUAL_CRTC_SUPPORT            0x0002
#define ATOM_BIOS_INFO_EXTENDED_DESKTOP_SUPPORT     0x0004
#define ATOM_BIOS_INFO_MEMORY_CLOCK_SS_SUPPORT      0x0008    // (valid from v1.1 ~ v1.4): =1: memclk SS enable, =0: memclk SS disable
#define ATOM_BIOS_INFO_ENGINE_CLOCK_SS_SUPPORT      0x0010    // (valid from v1.1 ~ v1.4): =1: engclk SS enable, =0: engclk SS disable
#define ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU         0x0020
#define ATOM_BIOS_INFO_WMI_SUPPORT                  0x0040
#define ATOM_BIOS_INFO_PPMODE_ASSIGNGED_BY_SYSTEM   0x0080
@@ -1441,242 +1797,292 @@ typedef struct _ATOM_MULTIMEDIA_CONFIG_INFO {
#define ATOM_BIOS_INFO_HYPERMEMORY_SIZE_MASK        0x1E00
#define ATOM_BIOS_INFO_VPOST_WITHOUT_FIRST_MODE_SET 0x2000
#define ATOM_BIOS_INFO_BIOS_SCRATCH6_SCL2_REDEFINE  0x4000
#define ATOM_BIOS_INFO_MEMORY_CLOCK_EXT_SS_SUPPORT  0x0008    // (valid from v2.1): =1: memclk SS enable with external SS chip
#define ATOM_BIOS_INFO_ENGINE_CLOCK_EXT_SS_SUPPORT  0x0010    // (valid from v2.1): =1: engclk SS enable with external SS chip

#ifndef _H2INC

// Please don't add to or expand this bitfield structure below; it will be retired soon.
typedef struct _ATOM_FIRMWARE_CAPABILITY
{
#if ATOM_BIG_ENDIAN
  USHORT Reserved:3;
  USHORT HyperMemory_Size:4;
  USHORT HyperMemory_Support:1;
  USHORT PPMode_Assigned:1;
  USHORT WMI_SUPPORT:1;
  USHORT GPUControlsBL:1;
  USHORT EngineClockSS_Support:1;
  USHORT MemoryClockSS_Support:1;
  USHORT ExtendedDesktopSupport:1;
  USHORT DualCRTC_Support:1;
  USHORT FirmwarePosted:1;
#else
  USHORT FirmwarePosted:1;
  USHORT DualCRTC_Support:1;
  USHORT ExtendedDesktopSupport:1;
  USHORT MemoryClockSS_Support:1;
  USHORT EngineClockSS_Support:1;
  USHORT GPUControlsBL:1;
  USHORT WMI_SUPPORT:1;
  USHORT PPMode_Assigned:1;
  USHORT HyperMemory_Support:1;
  USHORT HyperMemory_Size:4;
  USHORT Reserved:3;
#endif
} ATOM_FIRMWARE_CAPABILITY;

typedef union _ATOM_FIRMWARE_CAPABILITY_ACCESS
{
  ATOM_FIRMWARE_CAPABILITY sbfAccess;
  USHORT                   susAccess;
} ATOM_FIRMWARE_CAPABILITY_ACCESS;

#else

typedef union _ATOM_FIRMWARE_CAPABILITY_ACCESS
{
  USHORT susAccess;
} ATOM_FIRMWARE_CAPABILITY_ACCESS;

#endif
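
// Example (illustrative, not part of the header): the union above gives two
// views of the same 16-bit word, so a capability can be tested either through
// the bitfield or against the ATOM_BIOS_INFO_* masks; the two tests below are
// equivalent on both endiannesses because the bitfield order is switched.
static int firmware_is_posted(ATOM_FIRMWARE_CAPABILITY_ACCESS cap)
{
  // return cap.sbfAccess.FirmwarePosted;    // bitfield view, same result
  return (cap.susAccess & ATOM_BIOS_INFO_ATOM_FIRMWARE_POSTED) != 0;  // raw view
}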

typedef struct _ATOM_FIRMWARE_INFO
{
  ATOM_COMMON_TABLE_HEADER sHeader;
  ULONG  ulFirmwareRevision;
  ULONG  ulDefaultEngineClock;         // In 10Khz unit
  ULONG  ulDefaultMemoryClock;         // In 10Khz unit
  ULONG  ulDriverTargetEngineClock;    // In 10Khz unit
  ULONG  ulDriverTargetMemoryClock;    // In 10Khz unit
  ULONG  ulMaxEngineClockPLL_Output;   // In 10Khz unit
  ULONG  ulMaxMemoryClockPLL_Output;   // In 10Khz unit
  ULONG  ulMaxPixelClockPLL_Output;    // In 10Khz unit
  ULONG  ulASICMaxEngineClock;         // In 10Khz unit
  ULONG  ulASICMaxMemoryClock;         // In 10Khz unit
  UCHAR  ucASICMaxTemperature;
  UCHAR  ucPadding[3];                 // Don't use them
  ULONG  aulReservedForBIOS[3];        // Don't use them
  USHORT usMinEngineClockPLL_Input;    // In 10Khz unit
  USHORT usMaxEngineClockPLL_Input;    // In 10Khz unit
  USHORT usMinEngineClockPLL_Output;   // In 10Khz unit
  USHORT usMinMemoryClockPLL_Input;    // In 10Khz unit
  USHORT usMaxMemoryClockPLL_Input;    // In 10Khz unit
  USHORT usMinMemoryClockPLL_Output;   // In 10Khz unit
  USHORT usMaxPixelClock;              // In 10Khz unit, Max. Pclk
  USHORT usMinPixelClockPLL_Input;     // In 10Khz unit
  USHORT usMaxPixelClockPLL_Input;     // In 10Khz unit
  USHORT usMinPixelClockPLL_Output;    // In 10Khz unit; the definitions above can't change!!!
  ATOM_FIRMWARE_CAPABILITY_ACCESS usFirmwareCapability;
  USHORT usReferenceClock;             // In 10Khz unit
  USHORT usPM_RTS_Location;            // RTS PM4 starting location in ROM, in 1Kb unit
  UCHAR  ucPM_RTS_StreamSize;          // RTS PM4 packets, in Kb unit
  UCHAR  ucDesign_ID;                  // Indicates the board design
  UCHAR  ucMemoryModule_ID;            // Indicates the board design
} ATOM_FIRMWARE_INFO;

typedef struct _ATOM_FIRMWARE_INFO_V1_2
{
  ATOM_COMMON_TABLE_HEADER sHeader;
  ULONG  ulFirmwareRevision;
  ULONG  ulDefaultEngineClock;         // In 10Khz unit
  ULONG  ulDefaultMemoryClock;         // In 10Khz unit
  ULONG  ulDriverTargetEngineClock;    // In 10Khz unit
  ULONG  ulDriverTargetMemoryClock;    // In 10Khz unit
  ULONG  ulMaxEngineClockPLL_Output;   // In 10Khz unit
  ULONG  ulMaxMemoryClockPLL_Output;   // In 10Khz unit
  ULONG  ulMaxPixelClockPLL_Output;    // In 10Khz unit
  ULONG  ulASICMaxEngineClock;         // In 10Khz unit
  ULONG  ulASICMaxMemoryClock;         // In 10Khz unit
  UCHAR  ucASICMaxTemperature;
  UCHAR  ucMinAllowedBL_Level;
  UCHAR  ucPadding[2];                 // Don't use them
  ULONG  aulReservedForBIOS[2];        // Don't use them
  ULONG  ulMinPixelClockPLL_Output;    // In 10Khz unit
  USHORT usMinEngineClockPLL_Input;    // In 10Khz unit
  USHORT usMaxEngineClockPLL_Input;    // In 10Khz unit
  USHORT usMinEngineClockPLL_Output;   // In 10Khz unit
  USHORT usMinMemoryClockPLL_Input;    // In 10Khz unit
  USHORT usMaxMemoryClockPLL_Input;    // In 10Khz unit
  USHORT usMinMemoryClockPLL_Output;   // In 10Khz unit
  USHORT usMaxPixelClock;              // In 10Khz unit, Max. Pclk
  USHORT usMinPixelClockPLL_Input;     // In 10Khz unit
  USHORT usMaxPixelClockPLL_Input;     // In 10Khz unit
  USHORT usMinPixelClockPLL_Output;    // In 10Khz unit - lower 16 bits of ulMinPixelClockPLL_Output
  ATOM_FIRMWARE_CAPABILITY_ACCESS usFirmwareCapability;
  USHORT usReferenceClock;             // In 10Khz unit
  USHORT usPM_RTS_Location;            // RTS PM4 starting location in ROM, in 1Kb unit
  UCHAR  ucPM_RTS_StreamSize;          // RTS PM4 packets, in Kb unit
  UCHAR  ucDesign_ID;                  // Indicates the board design
  UCHAR  ucMemoryModule_ID;            // Indicates the board design
} ATOM_FIRMWARE_INFO_V1_2;

typedef struct _ATOM_FIRMWARE_INFO_V1_3
{
  ATOM_COMMON_TABLE_HEADER sHeader;
  ULONG  ulFirmwareRevision;
  ULONG  ulDefaultEngineClock;         // In 10Khz unit
  ULONG  ulDefaultMemoryClock;         // In 10Khz unit
  ULONG  ulDriverTargetEngineClock;    // In 10Khz unit
  ULONG  ulDriverTargetMemoryClock;    // In 10Khz unit
  ULONG  ulMaxEngineClockPLL_Output;   // In 10Khz unit
  ULONG  ulMaxMemoryClockPLL_Output;   // In 10Khz unit
  ULONG  ulMaxPixelClockPLL_Output;    // In 10Khz unit
  ULONG  ulASICMaxEngineClock;         // In 10Khz unit
  ULONG  ulASICMaxMemoryClock;         // In 10Khz unit
  UCHAR  ucASICMaxTemperature;
  UCHAR  ucMinAllowedBL_Level;
  UCHAR  ucPadding[2];                 // Don't use them
  ULONG  aulReservedForBIOS;           // Don't use them
  ULONG  ul3DAccelerationEngineClock;  // In 10Khz unit
  ULONG  ulMinPixelClockPLL_Output;    // In 10Khz unit
  USHORT usMinEngineClockPLL_Input;    // In 10Khz unit
  USHORT usMaxEngineClockPLL_Input;    // In 10Khz unit
  USHORT usMinEngineClockPLL_Output;   // In 10Khz unit
  USHORT usMinMemoryClockPLL_Input;    // In 10Khz unit
  USHORT usMaxMemoryClockPLL_Input;    // In 10Khz unit
  USHORT usMinMemoryClockPLL_Output;   // In 10Khz unit
  USHORT usMaxPixelClock;              // In 10Khz unit, Max. Pclk
  USHORT usMinPixelClockPLL_Input;     // In 10Khz unit
  USHORT usMaxPixelClockPLL_Input;     // In 10Khz unit
  USHORT usMinPixelClockPLL_Output;    // In 10Khz unit - lower 16 bits of ulMinPixelClockPLL_Output
  ATOM_FIRMWARE_CAPABILITY_ACCESS usFirmwareCapability;
  USHORT usReferenceClock;             // In 10Khz unit
  USHORT usPM_RTS_Location;            // RTS PM4 starting location in ROM, in 1Kb unit
  UCHAR  ucPM_RTS_StreamSize;          // RTS PM4 packets, in Kb unit
  UCHAR  ucDesign_ID;                  // Indicates the board design
  UCHAR  ucMemoryModule_ID;            // Indicates the board design
} ATOM_FIRMWARE_INFO_V1_3;

typedef struct _ATOM_FIRMWARE_INFO_V1_4
{
  ATOM_COMMON_TABLE_HEADER sHeader;
  ULONG  ulFirmwareRevision;
  ULONG  ulDefaultEngineClock;         // In 10Khz unit
  ULONG  ulDefaultMemoryClock;         // In 10Khz unit
  ULONG  ulDriverTargetEngineClock;    // In 10Khz unit
  ULONG  ulDriverTargetMemoryClock;    // In 10Khz unit
  ULONG  ulMaxEngineClockPLL_Output;   // In 10Khz unit
  ULONG  ulMaxMemoryClockPLL_Output;   // In 10Khz unit
  ULONG  ulMaxPixelClockPLL_Output;    // In 10Khz unit
  ULONG  ulASICMaxEngineClock;         // In 10Khz unit
  ULONG  ulASICMaxMemoryClock;         // In 10Khz unit
  UCHAR  ucASICMaxTemperature;
  UCHAR  ucMinAllowedBL_Level;
  USHORT usBootUpVDDCVoltage;          // In MV unit
  USHORT usLcdMinPixelClockPLL_Output; // In MHz unit
  USHORT usLcdMaxPixelClockPLL_Output; // In MHz unit
  ULONG  ul3DAccelerationEngineClock;  // In 10Khz unit
  ULONG  ulMinPixelClockPLL_Output;    // In 10Khz unit
  USHORT usMinEngineClockPLL_Input;    // In 10Khz unit
  USHORT usMaxEngineClockPLL_Input;    // In 10Khz unit
  USHORT usMinEngineClockPLL_Output;   // In 10Khz unit
  USHORT usMinMemoryClockPLL_Input;    // In 10Khz unit
  USHORT usMaxMemoryClockPLL_Input;    // In 10Khz unit
  USHORT usMinMemoryClockPLL_Output;   // In 10Khz unit
  USHORT usMaxPixelClock;              // In 10Khz unit, Max. Pclk
  USHORT usMinPixelClockPLL_Input;     // In 10Khz unit
  USHORT usMaxPixelClockPLL_Input;     // In 10Khz unit
  USHORT usMinPixelClockPLL_Output;    // In 10Khz unit - lower 16 bits of ulMinPixelClockPLL_Output
  ATOM_FIRMWARE_CAPABILITY_ACCESS usFirmwareCapability;
  USHORT usReferenceClock;             // In 10Khz unit
  USHORT usPM_RTS_Location;            // RTS PM4 starting location in ROM, in 1Kb unit
  UCHAR  ucPM_RTS_StreamSize;          // RTS PM4 packets, in Kb unit
  UCHAR  ucDesign_ID;                  // Indicates the board design
  UCHAR  ucMemoryModule_ID;            // Indicates the board design
} ATOM_FIRMWARE_INFO_V1_4;

// The structure below is used from Cypress onward
typedef struct _ATOM_FIRMWARE_INFO_V2_1
{
  ATOM_COMMON_TABLE_HEADER sHeader;
  ULONG  ulFirmwareRevision;
  ULONG  ulDefaultEngineClock;         // In 10Khz unit
  ULONG  ulDefaultMemoryClock;         // In 10Khz unit
  ULONG  ulReserved1;
  ULONG  ulReserved2;
  ULONG  ulMaxEngineClockPLL_Output;   // In 10Khz unit
  ULONG  ulMaxMemoryClockPLL_Output;   // In 10Khz unit
  ULONG  ulMaxPixelClockPLL_Output;    // In 10Khz unit
  ULONG  ulBinaryAlteredInfo;          // Was ulASICMaxEngineClock
  ULONG  ulDefaultDispEngineClkFreq;   // In 10Khz unit
  UCHAR  ucReserved1;                  // Was ucASICMaxTemperature
  UCHAR  ucMinAllowedBL_Level;
  USHORT usBootUpVDDCVoltage;          // In MV unit
  USHORT usLcdMinPixelClockPLL_Output; // In MHz unit
  USHORT usLcdMaxPixelClockPLL_Output; // In MHz unit
  ULONG  ulReserved4;                  // Was ulAsicMaximumVoltage
  ULONG  ulMinPixelClockPLL_Output;    // In 10Khz unit
  USHORT usMinEngineClockPLL_Input;    // In 10Khz unit
  USHORT usMaxEngineClockPLL_Input;    // In 10Khz unit
  USHORT usMinEngineClockPLL_Output;   // In 10Khz unit
  USHORT usMinMemoryClockPLL_Input;    // In 10Khz unit
  USHORT usMaxMemoryClockPLL_Input;    // In 10Khz unit
  USHORT usMinMemoryClockPLL_Output;   // In 10Khz unit
  USHORT usMaxPixelClock;              // In 10Khz unit, Max. Pclk
  USHORT usMinPixelClockPLL_Input;     // In 10Khz unit
  USHORT usMaxPixelClockPLL_Input;     // In 10Khz unit
  USHORT usMinPixelClockPLL_Output;    // In 10Khz unit - lower 16 bits of ulMinPixelClockPLL_Output
  ATOM_FIRMWARE_CAPABILITY_ACCESS usFirmwareCapability;
  USHORT usCoreReferenceClock;         // In 10Khz unit
  USHORT usMemoryReferenceClock;       // In 10Khz unit
  USHORT usUniphyDPModeExtClkFreq;     // In 10Khz unit; if 0, the Uniphy input clock in DP mode comes from the internal PPLL, otherwise from the external spread-spectrum clock
  UCHAR  ucMemoryModule_ID;            // Indicates the board design
  UCHAR  ucReserved4[3];
} ATOM_FIRMWARE_INFO_V2_1;

#define ATOM_FIRMWARE_INFO_LAST  ATOM_FIRMWARE_INFO_V2_1

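// Example (illustrative, not part of the header): every firmware info layout
// starts with ATOM_COMMON_TABLE_HEADER, so a parser can key on the header's
// format revision to pick the right struct. A sketch only, assuming the usual
// revision fields of the common header.
static ULONG default_engine_clock_10khz(const ATOM_COMMON_TABLE_HEADER *hdr)
{
  if (hdr->ucTableFormatRevision >= 2)
    return ((const ATOM_FIRMWARE_INFO_V2_1 *)hdr)->ulDefaultEngineClock;
  // the v1.1 - v1.4 layouts keep this field at the same offset
  return ((const ATOM_FIRMWARE_INFO *)hdr)->ulDefaultEngineClock;
}
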
/****************************************************************************/
// Structures used in IntegratedSystemInfoTable
/****************************************************************************/
#define IGP_CAP_FLAG_DYNAMIC_CLOCK_EN     0x2
#define IGP_CAP_FLAG_AC_CARD              0x4
#define IGP_CAP_FLAG_SDVO_CARD            0x8
#define IGP_CAP_FLAG_POSTDIV_BY_2_MODE    0x10

typedef struct _ATOM_INTEGRATED_SYSTEM_INFO
{
  ATOM_COMMON_TABLE_HEADER sHeader;
  ULONG  ulBootUpEngineClock;        // in 10kHz unit
  ULONG  ulBootUpMemoryClock;        // in 10kHz unit
  ULONG  ulMaxSystemMemoryClock;     // in 10kHz unit
  ULONG  ulMinSystemMemoryClock;     // in 10kHz unit
  UCHAR  ucNumberOfCyclesInPeriodHi;
  UCHAR  ucLCDTimingSel;             // =0: not valid; !=0: select this timing descriptor from the LCD EDID
  USHORT usReserved1;
  USHORT usInterNBVoltageLow;        // An intermediate PWM value to set the voltage
  USHORT usInterNBVoltageHigh;       // Another intermediate PWM value to set the voltage
  ULONG  ulReserved[2];

  USHORT usFSBClock;                 // In MHz unit
  USHORT usCapabilityFlag;           // Bit0=1 indicates fake HDMI support; Bit1=0/1 disables/enables dynamic clocking
                                     // Bit[3:2]== 0: No PCIE card, 1: AC card, 2: SDVO card
                                     // Bit[4]==1: P/2 mode, ==0: P/1 mode
  USHORT usPCIENBCfgReg7;            // bit[7:0]=MUX_Sel, bit[9:8]=MUX_SEL_LEVEL2, bit[10]=Lane_Reversal
  USHORT usK8MemoryClock;            // in MHz unit
  USHORT usK8SyncStartDelay;         // in 0.01 us unit
  USHORT usK8DataReturnTime;         // in 0.01 us unit
  UCHAR  ucMaxNBVoltage;
  UCHAR  ucMinNBVoltage;
  UCHAR  ucMemoryType;               // [7:4]=1:DDR1; =2:DDR2; =3:DDR3. [3:0] is reserved
  UCHAR  ucNumberOfCyclesInPeriod;   // CG.FVTHROT_PWM_CTRL_REG0.NumberOfCyclesInPeriod
  UCHAR  ucStartingPWM_HighTime;     // CG.FVTHROT_PWM_CTRL_REG0.StartingPWM_HighTime
  UCHAR  ucHTLinkWidth;              // 16 bit vs. 8 bit
  UCHAR  ucMaxNBVoltageHigh;
  UCHAR  ucMinNBVoltageHigh;
} ATOM_INTEGRATED_SYSTEM_INFO;

/* Explanation of entries in ATOM_INTEGRATED_SYSTEM_INFO
ulBootUpMemoryClock:    For Intel IGP, it's the UMA system memory clock.
                        For AMD IGP, it's 0 if no SidePort memory is installed, otherwise the boot-up SidePort memory clock.
ulMaxSystemMemoryClock: For Intel IGP, it's the max frequency from the memory SPD if memory runs in ASYNC mode; otherwise (SYNC mode) it's 0.
                        For AMD IGP, for now this can be 0.
ulMinSystemMemoryClock: For Intel IGP, it's 133MHz if memory runs in ASYNC mode; otherwise (SYNC mode) it's 0.
                        For AMD IGP, for now this can be 0.

usFSBClock:             For Intel IGP, it's the FSB frequency.
                        For AMD IGP, it's the HT Link speed.

usK8MemoryClock:        For AMD IGP only. For a RevF CPU, set it to 200.
@@ -1687,98 +2093,113 @@ VC:Voltage Control
ucMaxNBVoltage:   Voltage-regulator-dependent PWM value. Low 8 bits of the value for the max voltage. Set this to 0xFF if VC without PWM. Set this to 0x0 if no VC at all.
ucMinNBVoltage:   Voltage-regulator-dependent PWM value. Low 8 bits of the value for the min voltage. Set this to 0x00 if VC without PWM or no VC at all.

ucNumberOfCyclesInPeriod:   Indicates how many cycles when PWM duty is 100%; low 8 bits of the value.
ucNumberOfCyclesInPeriodHi: Indicates how many cycles when PWM duty is 100%; high 8 bits of the value. If the PWM has an inverter, set bit [7]==1, otherwise set it to 0.

ucMaxNBVoltageHigh: Voltage-regulator-dependent PWM value. High 8 bits of the value for the max voltage. Set this to 0xFF if VC without PWM. Set this to 0x0 if no VC at all.
ucMinNBVoltageHigh: Voltage-regulator-dependent PWM value. High 8 bits of the value for the min voltage. Set this to 0x00 if VC without PWM or no VC at all.

usInterNBVoltageLow:  Voltage-regulator-dependent PWM value. The value makes the voltage >= min NB voltage but <= InterNBVoltageHigh. Set this to 0x0000 if VC without PWM or no VC at all.
usInterNBVoltageHigh: Voltage-regulator-dependent PWM value. The value makes the voltage >= InterNBVoltageLow but <= max NB voltage. Set this to 0x0000 if VC without PWM or no VC at all.
*/
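
// Example (illustrative, not part of the header): usCapabilityFlag decodes
// with the IGP_CAP_FLAG_* masks defined above the struct; bits [3:2] carry
// the PCIE card type, per the field comments. A minimal sketch.
static int igp_dynamic_clocking_enabled(USHORT usCapabilityFlag)
{
  return (usCapabilityFlag & IGP_CAP_FLAG_DYNAMIC_CLOCK_EN) != 0;
}
static int igp_pcie_card_type(USHORT usCapabilityFlag)
{
  return (usCapabilityFlag >> 2) & 0x3;   // 0: no PCIE card, 1: AC card, 2: SDVO card
}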

/*
The following IGP table is introduced with RS780 and is supposed to be put into FB by the SBIOS before the IGP VBIOS starts VPOST;
the VBIOS will then copy the whole structure into its image so that all GPU SW components can access this data structure to get whatever they need.
The reservation should be large enough that we never need to change the table revision. Whenever needed, a GPU SW component can use the reserved portion for new data entries.

SW components can access the IGP system info structure in the same way as before.
*/

typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V2
{
  ATOM_COMMON_TABLE_HEADER sHeader;
  ULONG  ulBootUpEngineClock;        // in 10kHz unit
  ULONG  ulReserved1[2];             // must be 0x0 for the reserved
  ULONG  ulBootUpUMAClock;           // in 10kHz unit
  ULONG  ulBootUpSidePortClock;      // in 10kHz unit
  ULONG  ulMinSidePortClock;         // in 10kHz unit
  ULONG  ulReserved2[6];             // must be 0x0 for the reserved
  ULONG  ulSystemConfig;             // see explanation below
  ULONG  ulBootUpReqDisplayVector;
  ULONG  ulOtherDisplayMisc;
  ULONG  ulDDISlot1Config;
  ULONG  ulDDISlot2Config;
  UCHAR  ucMemoryType;               // [3:0]=1:DDR1; =2:DDR2; =3:DDR3. [7:4] is reserved
  UCHAR  ucUMAChannelNumber;
  UCHAR  ucDockingPinBit;
  UCHAR  ucDockingPinPolarity;
  ULONG  ulDockingPinCFGInfo;
  ULONG  ulCPUCapInfo;
  USHORT usNumberOfCyclesInPeriod;
  USHORT usMaxNBVoltage;
  USHORT usMinNBVoltage;
  USHORT usBootUpNBVoltage;
  ULONG  ulHTLinkFreq;               // in 10Khz
  USHORT usMinHTLinkWidth;
  USHORT usMaxHTLinkWidth;
  USHORT usUMASyncStartDelay;
  USHORT usUMADataReturnTime;
  USHORT usLinkStatusZeroTime;
  USHORT usDACEfuse;                 // for storing the bandgap value (for RS880 only)
  ULONG  ulHighVoltageHTLinkFreq;    // in 10Khz
  ULONG  ulLowVoltageHTLinkFreq;     // in 10Khz
  USHORT usMaxUpStreamHTLinkWidth;
  USHORT usMaxDownStreamHTLinkWidth;
  USHORT usMinUpStreamHTLinkWidth;
  USHORT usMinDownStreamHTLinkWidth;
  USHORT usFirmwareVersion;          // 0 means FW is not supported; otherwise it's the FW version loaded by SBIOS, and the driver should enable FW.
  USHORT usFullT0Time;               // Input to calculate the minimum HT link change time required by NB P-State. Unit is 0.01us.
  ULONG  ulReserved3[96];            // must be 0x0
} ATOM_INTEGRATED_SYSTEM_INFO_V2;

/*
ulBootUpEngineClock:   Boot-up Engine Clock in 10Khz.
ulBootUpUMAClock:      Boot-up UMA Clock in 10Khz; it must be 0x0 when UMA is not present.
ulBootUpSidePortClock: Boot-up SidePort Clock in 10Khz; it must be 0x0 when SidePort memory is not present. This can be equal to or less than the maximum supported SidePort memory clock.

ulSystemConfig:
Bit[0]=1: PowerExpress mode; =0: Non-PowerExpress mode;
Bit[1]=1: system boots up in AMD overdriven state or a user-customized mode. In this case, the driver will just stick to this boot-up mode; no other PowerPlay state.
      =0: system boots up in driver control state. Power state depends on the PowerPlay table.
Bit[2]=1: PWM method is used for NB voltage control. =0: GPIO method is used.
Bit[3]=1: Only one power state (Performance) will be supported.
      =0: Multiple power states supported from the PowerPlay table.
Bit[4]=1: CLMC is supported and enabled on the current system.
      =0: CLMC is not supported or enabled on the current system. SBIOS needs to support HT link/freq change through the ATIF interface.
Bit[5]=1: Enable CDLW for all driver-control power states. Max HT width is from SBIOS, while min HT width is determined by display requirements.
      =0: CDLW is disabled. If CLMC is enabled, min HT width will be set equal to max HT width. If CLMC is disabled, max HT width will be applied.
Bit[6]=1: High voltage requested for all power states. In this case, the voltage will be forced to 1.1v and PowerPlay table voltage drop/throttling requests will be ignored.
      =0: Voltage settings are determined by the PowerPlay table.
Bit[7]=1: Enable CLMC as hybrid mode. CDLD and CILR will be disabled in this case and legacy C1E is used. This is a workaround for a CPU (Griffin) performance issue.
      =0: Enable CLMC as regular mode; CDLD and CILR will be enabled.
Bit[8]=1: CDLF is supported and enabled on the current system.
      =0: CDLF is not supported or enabled on the current system.
Bit[9]=1: DLL Shut Down feature is enabled on the current system.
      =0: DLL Shut Down feature is not enabled or supported on the current system.

ulBootUpReqDisplayVector: This dword is a bit vector that indicates which display devices are requested during boot-up. Refer to ATOM_DEVICE_xxx_SUPPORT for the bit vector definitions.

ulOtherDisplayMisc:  [15:8] - Boot-up LCD expansion selection; 0-center, 1-full panel size expansion;
                     [7:0]  - Boot-up TV standard selection; this is a bit vector indicating which TV standards are supported by the system. Refer to the ucTVSupportedStd definition;

ulDDISlot1Config: Describes the PCIE lane configuration on this DDI PCIE slot (ADD2 card) or connector (Mobile design).
      [3:0]  - Bit vector indicating the PCIE lane config of the DDI slot/connector on the chassis (bit 0=1: lane 3:0; bit 1=1: lane 7:4; bit 2=1: lane 11:8; bit 3=1: lane 15:12)
      [7:4]  - Bit vector indicating the PCIE lane config of the same DDI slot/connector on the docking station (bit 4=1: lane 3:0; bit 5=1: lane 7:4; bit 6=1: lane 11:8; bit 7=1: lane 15:12)
               When a DDI connector is not "paired" (meaning the two connections are mutually exclusive between chassis and docking, i.e. only one of them can be connected at a time),
               SBIOS has to duplicate the same PCIE lane info from chassis to docking or vice versa. For example:
               if one DDI connector is only populated on docking with PCIE lanes 8-11, but there is no paired connection on the chassis, SBIOS has to copy bit 6 to bit 2.

      [15:8] - Lane configuration attribute;
      [23:16]- Connector type, possible values:
               CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D
               CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D
               CONNECTOR_OBJECT_ID_HDMI_TYPE_A
               CONNECTOR_OBJECT_ID_DISPLAYPORT
               CONNECTOR_OBJECT_ID_eDP
      [31:24]- Reserved

ulDDISlot2Config: Same as Slot1.
@@ -1787,29 +2208,31 @@ For IGP, Hypermemory is the only memory type showed in CCC.
1787 2208
1788ucUMAChannelNumber: how many channels for the UMA; 2209ucUMAChannelNumber: how many channels for the UMA;
1789 2210
1790ulDockingPinCFGInfo: [15:0]-Bus/Device/Function # to CFG to read this Docking Pin; [31:16]-reg offset in CFG to read this pin 2211ulDockingPinCFGInfo: [15:0]-Bus/Device/Function # to CFG to read this Docking Pin; [31:16]-reg offset in CFG to read this pin
1791ucDockingPinBit: which bit in this register to read the pin status; 2212ucDockingPinBit: which bit in this register to read the pin status;
1792ucDockingPinPolarity:Polarity of the pin when docked; 2213ucDockingPinPolarity:Polarity of the pin when docked;
1793 2214
1794ulCPUCapInfo: [7:0]=1:Griffin;[7:0]=2:Greyhound;[7:0]=3:K8, other bits reserved for now and must be 0x0 2215ulCPUCapInfo: [7:0]=1:Griffin;[7:0]=2:Greyhound;[7:0]=3:K8, other bits reserved for now and must be 0x0
1795 2216
1796usNumberOfCyclesInPeriod:Indicate how many cycles when PWM duty is 100%. 2217usNumberOfCyclesInPeriod:Indicate how many cycles when PWM duty is 100%.
1797usMaxNBVoltage:Max. voltage control value in either PWM or GPIO mode. 2218
2219usMaxNBVoltage:Max. voltage control value in either PWM or GPIO mode.
1798usMinNBVoltage:Min. voltage control value in either PWM or GPIO mode. 2220usMinNBVoltage:Min. voltage control value in either PWM or GPIO mode.
1799 GPIO mode: both usMaxNBVoltage & usMinNBVoltage have a valid value ulSystemConfig.SYSTEM_CONFIG_USE_PWM_ON_VOLTAGE=0 2221 GPIO mode: both usMaxNBVoltage & usMinNBVoltage have a valid value ulSystemConfig.SYSTEM_CONFIG_USE_PWM_ON_VOLTAGE=0
1800 PWM mode: both usMaxNBVoltage & usMinNBVoltage have a valid value ulSystemConfig.SYSTEM_CONFIG_USE_PWM_ON_VOLTAGE=1 2222 PWM mode: both usMaxNBVoltage & usMinNBVoltage have a valid value ulSystemConfig.SYSTEM_CONFIG_USE_PWM_ON_VOLTAGE=1
1801 GPU SW don't control mode: usMaxNBVoltage & usMinNBVoltage=0 and no care about ulSystemConfig.SYSTEM_CONFIG_USE_PWM_ON_VOLTAGE 2223 GPU SW don't control mode: usMaxNBVoltage & usMinNBVoltage=0 and no care about ulSystemConfig.SYSTEM_CONFIG_USE_PWM_ON_VOLTAGE
2224
1802usBootUpNBVoltage:Boot-up voltage regulator dependent PWM value. 2225usBootUpNBVoltage:Boot-up voltage regulator dependent PWM value.
1803 2226
1804ulHTLinkFreq: Bootup HT link Frequency in 10Khz. 2227ulHTLinkFreq: Bootup HT link Frequency in 10Khz.
1805usMinHTLinkWidth: Bootup minimum HT link width. If CDLW disabled, this is equal to usMaxHTLinkWidth. 2228usMinHTLinkWidth: Bootup minimum HT link width. If CDLW disabled, this is equal to usMaxHTLinkWidth.
1806 If CDLW enabled, both upstream and downstream width should be the same during bootup.
1807usMaxHTLinkWidth: Bootup maximum HT link width. If CDLW disabled, this is equal to usMinHTLinkWidth.
1808 If CDLW enabled, both upstream and downstream width should be the same during bootup. 2229 If CDLW enabled, both upstream and downstream width should be the same during bootup.
2230usMaxHTLinkWidth: Bootup maximum HT link width. If CDLW disabled, this is equal to usMinHTLinkWidth.
2231 If CDLW enabled, both upstream and downstream width should be the same during bootup.
1809 2232
1810usUMASyncStartDelay: Memory access latency, required for watermark calculation 2233usUMASyncStartDelay: Memory access latency, required for watermark calculation
1811usUMADataReturnTime: Memory access latency, required for watermark calculation 2234usUMADataReturnTime: Memory access latency, required for watermark calculation
1812usLinkStatusZeroTime:Memory access latency required for watermark calculation, set this to 0x0 for K8 CPU, set a proper value in 0.01 the unit of us 2235usLinkStatusZeroTime:Memory access latency required for watermark calculation, set this to 0x0 for K8 CPU, set a proper value in 0.01 the unit of us
1813for Griffin or Greyhound. SBIOS needs to convert to actual time by: 2236for Griffin or Greyhound. SBIOS needs to convert to actual time by:
1814 if T0Ttime [5:4]=00b, then usLinkStatusZeroTime=T0Ttime [3:0]*0.1us (0.0 to 1.5us) 2237 if T0Ttime [5:4]=00b, then usLinkStatusZeroTime=T0Ttime [3:0]*0.1us (0.0 to 1.5us)
1815 if T0Ttime [5:4]=01b, then usLinkStatusZeroTime=T0Ttime [3:0]*0.5us (0.0 to 7.5us) 2238 if T0Ttime [5:4]=01b, then usLinkStatusZeroTime=T0Ttime [3:0]*0.5us (0.0 to 7.5us)
@@ -1817,7 +2240,7 @@ for Griffin or Greyhound. SBIOS needs to convert to actual time by:
    if T0Ttime [5:4]=11b, and T0Ttime [3:0]=0x0 to 0xa, then usLinkStatusZeroTime=T0Ttime [3:0]*20us (0.0 to 200us)

ulHighVoltageHTLinkFreq: HT link frequency for power state with low voltage. If boot-up runs in HT1, this must be 0.
                         This must be less than or equal to ulHTLinkFreq (boot-up frequency).
ulLowVoltageHTLinkFreq:  HT link frequency for power state with low voltage or voltage scaling 1.0v~1.1v. If boot-up runs in HT1, this must be 0.
                         This must be less than or equal to ulHighVoltageHTLinkFreq.

@@ -1827,14 +2250,17 @@ usMinUpStreamHTLinkWidth: Asymmetric link width support in the future, to rep
usMinDownStreamHTLinkWidth: same as above.
*/

#define SYSTEM_CONFIG_POWEREXPRESS_ENABLE           0x00000001
#define SYSTEM_CONFIG_RUN_AT_OVERDRIVE_ENGINE       0x00000002
#define SYSTEM_CONFIG_USE_PWM_ON_VOLTAGE            0x00000004
#define SYSTEM_CONFIG_PERFORMANCE_POWERSTATE_ONLY   0x00000008
#define SYSTEM_CONFIG_CLMC_ENABLED                  0x00000010
#define SYSTEM_CONFIG_CDLW_ENABLED                  0x00000020
#define SYSTEM_CONFIG_HIGH_VOLTAGE_REQUESTED        0x00000040
#define SYSTEM_CONFIG_CLMC_HYBRID_MODE_ENABLED      0x00000080
#define SYSTEM_CONFIG_CDLF_ENABLED                  0x00000100
#define SYSTEM_CONFIG_DLL_SHUTDOWN_ENABLED          0x00000200

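// Example (illustrative, not part of the header): ulSystemConfig is tested
// bit-by-bit against the masks above, mirroring the Bit[0]..Bit[9]
// description in the comment block. A minimal sketch.
static int nb_voltage_uses_pwm(const ATOM_INTEGRATED_SYSTEM_INFO_V2 *info)
{
  return (info->ulSystemConfig & SYSTEM_CONFIG_USE_PWM_ON_VOLTAGE) != 0;
}
static int cdlw_enabled(const ATOM_INTEGRATED_SYSTEM_INFO_V2 *info)
{
  return (info->ulSystemConfig & SYSTEM_CONFIG_CDLW_ENABLED) != 0;
}
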
#define IGP_DDI_SLOT_LANE_CONFIG_MASK               0x000000FF

@@ -1851,6 +2277,41 @@ usMinDownStreamHTLinkWidth: same as above.

#define IGP_DDI_SLOT_CONNECTOR_TYPE_MASK            0x00FF0000

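// Example (illustrative, not part of the header): pulling the documented
// fields out of a ulDDISlotXConfig dword. The 16-bit shift for the connector
// type follows from its 0x00FF0000 mask; the helper names are ours.
static UCHAR ddi_slot_lane_config(ULONG ulDDISlotConfig)
{
  return (UCHAR)(ulDDISlotConfig & IGP_DDI_SLOT_LANE_CONFIG_MASK);   // chassis [3:0], docking [7:4]
}
static UCHAR ddi_slot_connector_type(ULONG ulDDISlotConfig)
{
  return (UCHAR)((ulDDISlotConfig & IGP_DDI_SLOT_CONNECTOR_TYPE_MASK) >> 16);
}
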
// The IntegratedSystemInfoTable revision after V2 is V5, because the real revision of V2 is 1.4. This revision is used for RR.
typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V5
{
  ATOM_COMMON_TABLE_HEADER sHeader;
  ULONG  ulBootUpEngineClock;        // in 10kHz unit
  ULONG  ulDentistVCOFreq;           // Dentist VCO clock in 10kHz unit; the source of GPU SCLK, LCLK, UCLK and VCLK.
  ULONG  ulLClockFreq;               // GPU Lclk freq in 10kHz unit; related to NCLK in the NorthBridge
  ULONG  ulBootUpUMAClock;           // in 10kHz unit
  ULONG  ulReserved1[8];             // must be 0x0 for the reserved
  ULONG  ulBootUpReqDisplayVector;
  ULONG  ulOtherDisplayMisc;
  ULONG  ulReserved2[4];             // must be 0x0 for the reserved
  ULONG  ulSystemConfig;             // TBD
  ULONG  ulCPUCapInfo;               // TBD
  USHORT usMaxNBVoltage;             // high NB voltage, calculated using current VDDNB (D24F2xDC) and the VDDNB offset fuse
  USHORT usMinNBVoltage;             // low NB voltage, calculated using current VDDNB (D24F2xDC) and the VDDNB offset fuse
  USHORT usBootUpNBVoltage;          // boot-up NB voltage
  UCHAR  ucHtcTmpLmt;                // bits [22:16] of the D24F3x64 Hardware Thermal Control (HTC) Register; may not be needed, TBD
  UCHAR  ucTjOffset;                 // bits [28:22] of the D24F3xE4 Thermtrip Status Register; may not be needed, TBD
  ULONG  ulReserved3[4];             // must be 0x0 for the reserved
  ULONG  ulDDISlot1Config;           // see the ulDDISlot1Config definition above
  ULONG  ulDDISlot2Config;
  ULONG  ulDDISlot3Config;
  ULONG  ulDDISlot4Config;
  ULONG  ulReserved4[4];             // must be 0x0 for the reserved
  UCHAR  ucMemoryType;               // [3:0]=1:DDR1; =2:DDR2; =3:DDR3. [7:4] is reserved
  UCHAR  ucUMAChannelNumber;
  USHORT usReserved;
  ULONG  ulReserved5[4];             // must be 0x0 for the reserved
  ULONG  ulCSR_M3_ARB_CNTL_DEFAULT[10]; // arrays with values for the CSR M3 arbiter by default
  ULONG  ulCSR_M3_ARB_CNTL_UVD[10];     // arrays with values for the CSR M3 arbiter for UVD playback
  ULONG  ulCSR_M3_ARB_CNTL_FS3D[10];    // arrays with values for the CSR M3 arbiter for full-screen 3D applications
  ULONG  ulReserved6[61];            // must be 0x0
} ATOM_INTEGRATED_SYSTEM_INFO_V5;

#define ATOM_CRT_INT_ENCODER1_INDEX                 0x00000000
#define ATOM_LCD_INT_ENCODER1_INDEX                 0x00000001
#define ATOM_TV_INT_ENCODER1_INDEX                  0x00000002
@@ -1866,8 +2327,8 @@ usMinDownStreamHTLinkWidth: same as above.
#define ATOM_DFP_INT_ENCODER3_INDEX                 0x0000000C
#define ATOM_DFP_INT_ENCODER4_INDEX                 0x0000000D

// define ASIC internal encoder id ( bit vector ), used for CRTC_SourceSelTable
#define ASIC_INT_DAC1_ENCODER_ID                    0x00
#define ASIC_INT_TV_ENCODER_ID                      0x02
#define ASIC_INT_DIG1_ENCODER_ID                    0x03
#define ASIC_INT_DAC2_ENCODER_ID                    0x04
@@ -1875,10 +2336,24 @@ usMinDownStreamHTLinkWidth: same as above.
#define ASIC_INT_DVO_ENCODER_ID                     0x07
#define ASIC_INT_DIG2_ENCODER_ID                    0x09
#define ASIC_EXT_DIG_ENCODER_ID                     0x05
#define ASIC_EXT_DIG2_ENCODER_ID                    0x08
#define ASIC_INT_DIG3_ENCODER_ID                    0x0a
#define ASIC_INT_DIG4_ENCODER_ID                    0x0b
#define ASIC_INT_DIG5_ENCODER_ID                    0x0c
#define ASIC_INT_DIG6_ENCODER_ID                    0x0d

// define Encoder attribute
#define ATOM_ANALOG_ENCODER                         0
#define ATOM_DIGITAL_ENCODER                        1
#define ATOM_DP_ENCODER                             2

#define ATOM_ENCODER_ENUM_MASK                      0x70
#define ATOM_ENCODER_ENUM_ID1                       0x00
#define ATOM_ENCODER_ENUM_ID2                       0x10
#define ATOM_ENCODER_ENUM_ID3                       0x20
#define ATOM_ENCODER_ENUM_ID4                       0x30
#define ATOM_ENCODER_ENUM_ID5                       0x40
#define ATOM_ENCODER_ENUM_ID6                       0x50

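// Example (our reading, not stated by the header): ATOM_ENCODER_ENUM_MASK
// suggests the instance ("enum") rides in bits [6:4] alongside a base encoder
// ID in the low bits, e.g. (ASIC_INT_DIG2_ENCODER_ID | ATOM_ENCODER_ENUM_ID2)
// would name the second DIG2 instance. A sketch under that assumption:
static UCHAR encoder_enum(UCHAR encoder_id)
{
  return encoder_id & ATOM_ENCODER_ENUM_MASK;
}
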
#define ATOM_DEVICE_CRT1_INDEX                      0x00000000
#define ATOM_DEVICE_LCD1_INDEX                      0x00000001
@@ -1886,45 +2361,40 @@ usMinDownStreamHTLinkWidth: same as above.
#define ATOM_DEVICE_DFP1_INDEX                      0x00000003
#define ATOM_DEVICE_CRT2_INDEX                      0x00000004
#define ATOM_DEVICE_LCD2_INDEX                      0x00000005
#define ATOM_DEVICE_DFP6_INDEX                      0x00000006
#define ATOM_DEVICE_DFP2_INDEX                      0x00000007
#define ATOM_DEVICE_CV_INDEX                        0x00000008
#define ATOM_DEVICE_DFP3_INDEX                      0x00000009
#define ATOM_DEVICE_DFP4_INDEX                      0x0000000A
#define ATOM_DEVICE_DFP5_INDEX                      0x0000000B

#define ATOM_DEVICE_RESERVEDC_INDEX                 0x0000000C
#define ATOM_DEVICE_RESERVEDD_INDEX                 0x0000000D
#define ATOM_DEVICE_RESERVEDE_INDEX                 0x0000000E
#define ATOM_DEVICE_RESERVEDF_INDEX                 0x0000000F
#define ATOM_MAX_SUPPORTED_DEVICE_INFO              (ATOM_DEVICE_DFP3_INDEX+1)
#define ATOM_MAX_SUPPORTED_DEVICE_INFO_2            ATOM_MAX_SUPPORTED_DEVICE_INFO
#define ATOM_MAX_SUPPORTED_DEVICE_INFO_3            (ATOM_DEVICE_DFP5_INDEX+1)

#define ATOM_MAX_SUPPORTED_DEVICE                   (ATOM_DEVICE_RESERVEDF_INDEX+1)

#define ATOM_DEVICE_CRT1_SUPPORT                    (0x1L << ATOM_DEVICE_CRT1_INDEX)
#define ATOM_DEVICE_LCD1_SUPPORT                    (0x1L << ATOM_DEVICE_LCD1_INDEX)
#define ATOM_DEVICE_TV1_SUPPORT                     (0x1L << ATOM_DEVICE_TV1_INDEX)
#define ATOM_DEVICE_DFP1_SUPPORT                    (0x1L << ATOM_DEVICE_DFP1_INDEX)
#define ATOM_DEVICE_CRT2_SUPPORT                    (0x1L << ATOM_DEVICE_CRT2_INDEX)
#define ATOM_DEVICE_LCD2_SUPPORT                    (0x1L << ATOM_DEVICE_LCD2_INDEX)
#define ATOM_DEVICE_DFP6_SUPPORT                    (0x1L << ATOM_DEVICE_DFP6_INDEX)
#define ATOM_DEVICE_DFP2_SUPPORT                    (0x1L << ATOM_DEVICE_DFP2_INDEX)
#define ATOM_DEVICE_CV_SUPPORT                      (0x1L << ATOM_DEVICE_CV_INDEX)
#define ATOM_DEVICE_DFP3_SUPPORT                    (0x1L << ATOM_DEVICE_DFP3_INDEX)
#define ATOM_DEVICE_DFP4_SUPPORT                    (0x1L << ATOM_DEVICE_DFP4_INDEX)
#define ATOM_DEVICE_DFP5_SUPPORT                    (0x1L << ATOM_DEVICE_DFP5_INDEX)

#define ATOM_DEVICE_CRT_SUPPORT  (ATOM_DEVICE_CRT1_SUPPORT | ATOM_DEVICE_CRT2_SUPPORT)
#define ATOM_DEVICE_DFP_SUPPORT  (ATOM_DEVICE_DFP1_SUPPORT | ATOM_DEVICE_DFP2_SUPPORT | ATOM_DEVICE_DFP3_SUPPORT | ATOM_DEVICE_DFP4_SUPPORT | ATOM_DEVICE_DFP5_SUPPORT | ATOM_DEVICE_DFP6_SUPPORT)
#define ATOM_DEVICE_TV_SUPPORT   (ATOM_DEVICE_TV1_SUPPORT)
#define ATOM_DEVICE_LCD_SUPPORT  (ATOM_DEVICE_LCD1_SUPPORT | ATOM_DEVICE_LCD2_SUPPORT)

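// Example (illustrative, not part of the header): a device bitmap such as
// usDeviceSupport (described further below) is tested against the
// ATOM_DEVICE_xxx_SUPPORT masks, e.g. "is any digital flat panel supported".
static int any_dfp_supported(USHORT usDeviceSupport)
{
  return (usDeviceSupport & ATOM_DEVICE_DFP_SUPPORT) != 0;
}
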
#define ATOM_DEVICE_CONNECTOR_TYPE_MASK             0x000000F0
#define ATOM_DEVICE_CONNECTOR_TYPE_SHIFT            0x00000004
@@ -1942,6 +2412,7 @@ usMinDownStreamHTLinkWidth: same as above.
#define ATOM_DEVICE_CONNECTOR_CASE_1                0x0000000E
#define ATOM_DEVICE_CONNECTOR_DISPLAYPORT           0x0000000F

#define ATOM_DEVICE_DAC_INFO_MASK                   0x0000000F
#define ATOM_DEVICE_DAC_INFO_SHIFT                  0x00000000
#define ATOM_DEVICE_DAC_INFO_NODAC                  0x00000000
@@ -1958,139 +2429,150 @@ usMinDownStreamHTLinkWidth: same as above.
#define ATOM_DEVICE_I2C_ID_SHIFT                    0x00000004
#define ATOM_DEVICE_I2C_ID_IS_FOR_NON_MM_USE        0x00000001
#define ATOM_DEVICE_I2C_ID_IS_FOR_MM_USE            0x00000002
#define ATOM_DEVICE_I2C_ID_IS_FOR_SDVO_USE          0x00000003  // For IGP RS600
#define ATOM_DEVICE_I2C_ID_IS_FOR_DAC_SCL           0x00000004  // For IGP RS690

#define ATOM_DEVICE_I2C_HARDWARE_CAP_MASK           0x00000080
#define ATOM_DEVICE_I2C_HARDWARE_CAP_SHIFT          0x00000007
#define ATOM_DEVICE_USES_SOFTWARE_ASSISTED_I2C      0x00000000
#define ATOM_DEVICE_USES_HARDWARE_ASSISTED_I2C      0x00000001

1969/* usDeviceSupport: */ 2440// usDeviceSupport:
1970/* Bits0 = 0 - no CRT1 support= 1- CRT1 is supported */ 2441// Bits0 = 0 - no CRT1 support= 1- CRT1 is supported
1971/* Bit 1 = 0 - no LCD1 support= 1- LCD1 is supported */ 2442// Bit 1 = 0 - no LCD1 support= 1- LCD1 is supported
1972/* Bit 2 = 0 - no TV1 support= 1- TV1 is supported */ 2443// Bit 2 = 0 - no TV1 support= 1- TV1 is supported
1973/* Bit 3 = 0 - no DFP1 support= 1- DFP1 is supported */ 2444// Bit 3 = 0 - no DFP1 support= 1- DFP1 is supported
1974/* Bit 4 = 0 - no CRT2 support= 1- CRT2 is supported */ 2445// Bit 4 = 0 - no CRT2 support= 1- CRT2 is supported
1975/* Bit 5 = 0 - no LCD2 support= 1- LCD2 is supported */ 2446// Bit 5 = 0 - no LCD2 support= 1- LCD2 is supported
1976/* Bit 6 = 0 - no TV2 support= 1- TV2 is supported */ 2447// Bit 6 = 0 - no DFP6 support= 1- DFP6 is supported
1977/* Bit 7 = 0 - no DFP2 support= 1- DFP2 is supported */ 2448// Bit 7 = 0 - no DFP2 support= 1- DFP2 is supported
1978/* Bit 8 = 0 - no CV support= 1- CV is supported */ 2449// Bit 8 = 0 - no CV support= 1- CV is supported
1979/* Bit 9 = 0 - no DFP3 support= 1- DFP3 is supported */ 2450// Bit 9 = 0 - no DFP3 support= 1- DFP3 is supported
1980/* Byte1 (Supported Device Info) */ 2451// Bit 10 = 0 - no DFP4 support= 1- DFP4 is supported
1981/* Bit 0 = 0 - no CV support= 1- CV is supported */ 2452// Bit 11 = 0 - no DFP5 support= 1- DFP5 is supported
1982/* */ 2453//
1983/* */ 2454//
1984
1985/* ucI2C_ConfigID */
1986/* [7:0] - I2C LINE Associate ID */
1987/* = 0 - no I2C */
1988/* [7] - HW_Cap = 1, [6:0]=HW assisted I2C ID(HW line selection) */
1989/* = 0, [6:0]=SW assisted I2C ID */
1990/* [6-4] - HW_ENGINE_ID = 1, HW engine for NON multimedia use */
1991/* = 2, HW engine for Multimedia use */
1992/* = 3-7 Reserved for future I2C engines */
1993/* [3-0] - I2C_LINE_MUX = A Mux number when it's HW assisted I2C or GPIO ID when it's SW I2C */
1994
1995typedef struct _ATOM_I2C_ID_CONFIG {
1996#if ATOM_BIG_ENDIAN
1997 UCHAR bfHW_Capable:1;
1998 UCHAR bfHW_EngineID:3;
1999 UCHAR bfI2C_LineMux:4;
2000#else
2001 UCHAR bfI2C_LineMux:4;
2002 UCHAR bfHW_EngineID:3;
2003 UCHAR bfHW_Capable:1;
2004#endif
2005} ATOM_I2C_ID_CONFIG;
2006
2007typedef union _ATOM_I2C_ID_CONFIG_ACCESS {
2008 ATOM_I2C_ID_CONFIG sbfAccess;
2009 UCHAR ucAccess;
2010} ATOM_I2C_ID_CONFIG_ACCESS;
2011 2455
2012/****************************************************************************/ 2456/****************************************************************************/
2013/* Structure used in GPIO_I2C_InfoTable */ 2457/* Structure used in MclkSS_InfoTable */
2014/****************************************************************************/ 2458/****************************************************************************/
2015typedef struct _ATOM_GPIO_I2C_ASSIGMENT { 2459// ucI2C_ConfigID
2016 USHORT usClkMaskRegisterIndex; 2460// [7:0] - I2C LINE Associate ID
2017 USHORT usClkEnRegisterIndex; 2461// = 0 - no I2C
2018 USHORT usClkY_RegisterIndex; 2462// [7] - HW_Cap = 1, [6:0]=HW assisted I2C ID(HW line selection)
2019 USHORT usClkA_RegisterIndex; 2463// = 0, [6:0]=SW assisted I2C ID
2020 USHORT usDataMaskRegisterIndex; 2464// [6-4] - HW_ENGINE_ID = 1, HW engine for NON multimedia use
2021 USHORT usDataEnRegisterIndex; 2465// = 2, HW engine for Multimedia use
2022 USHORT usDataY_RegisterIndex; 2466// = 3-7 Reserved for future I2C engines
2023 USHORT usDataA_RegisterIndex; 2467// [3-0] - I2C_LINE_MUX = A Mux number when it's HW assisted I2C or GPIO ID when it's SW I2C
2024 ATOM_I2C_ID_CONFIG_ACCESS sucI2cId; 2468
2025 UCHAR ucClkMaskShift; 2469typedef struct _ATOM_I2C_ID_CONFIG
2026 UCHAR ucClkEnShift; 2470{
2027 UCHAR ucClkY_Shift; 2471#if ATOM_BIG_ENDIAN
2028 UCHAR ucClkA_Shift; 2472 UCHAR bfHW_Capable:1;
2029 UCHAR ucDataMaskShift; 2473 UCHAR bfHW_EngineID:3;
2030 UCHAR ucDataEnShift; 2474 UCHAR bfI2C_LineMux:4;
2031 UCHAR ucDataY_Shift; 2475#else
2032 UCHAR ucDataA_Shift; 2476 UCHAR bfI2C_LineMux:4;
2033 UCHAR ucReserved1; 2477 UCHAR bfHW_EngineID:3;
2034 UCHAR ucReserved2; 2478 UCHAR bfHW_Capable:1;
2035} ATOM_GPIO_I2C_ASSIGMENT; 2479#endif
2036 2480}ATOM_I2C_ID_CONFIG;
2037typedef struct _ATOM_GPIO_I2C_INFO {
2038 ATOM_COMMON_TABLE_HEADER sHeader;
2039 ATOM_GPIO_I2C_ASSIGMENT asGPIO_Info[ATOM_MAX_SUPPORTED_DEVICE];
2040} ATOM_GPIO_I2C_INFO;
2041 2481
2042/****************************************************************************/ 2482typedef union _ATOM_I2C_ID_CONFIG_ACCESS
2043/* Common Structure used in other structures */ 2483{
2044/****************************************************************************/ 2484 ATOM_I2C_ID_CONFIG sbfAccess;
2485 UCHAR ucAccess;
2486}ATOM_I2C_ID_CONFIG_ACCESS;
2487
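// A minimal sketch, assuming only this header's types: decoding a raw
// ucI2C_ConfigID byte through the access union, per the bit layout documented
// above. The helper name is illustrative.
static UCHAR atom_i2c_line_mux(UCHAR raw, int *hw_assisted)
{
    ATOM_I2C_ID_CONFIG_ACCESS cfg;
    cfg.ucAccess = raw;
    *hw_assisted = cfg.sbfAccess.bfHW_Capable;   /* 1: HW engine; 0: SW assisted */
    return cfg.sbfAccess.bfI2C_LineMux;          /* mux number (HW) or GPIO ID (SW) */
}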
2488
2489/****************************************************************************/
2490// Structure used in GPIO_I2C_InfoTable
2491/****************************************************************************/
2492typedef struct _ATOM_GPIO_I2C_ASSIGMENT
2493{
2494 USHORT usClkMaskRegisterIndex;
2495 USHORT usClkEnRegisterIndex;
2496 USHORT usClkY_RegisterIndex;
2497 USHORT usClkA_RegisterIndex;
2498 USHORT usDataMaskRegisterIndex;
2499 USHORT usDataEnRegisterIndex;
2500 USHORT usDataY_RegisterIndex;
2501 USHORT usDataA_RegisterIndex;
2502 ATOM_I2C_ID_CONFIG_ACCESS sucI2cId;
2503 UCHAR ucClkMaskShift;
2504 UCHAR ucClkEnShift;
2505 UCHAR ucClkY_Shift;
2506 UCHAR ucClkA_Shift;
2507 UCHAR ucDataMaskShift;
2508 UCHAR ucDataEnShift;
2509 UCHAR ucDataY_Shift;
2510 UCHAR ucDataA_Shift;
2511 UCHAR ucReserved1;
2512 UCHAR ucReserved2;
2513}ATOM_GPIO_I2C_ASSIGMENT;
2514
2515typedef struct _ATOM_GPIO_I2C_INFO
2516{
2517 ATOM_COMMON_TABLE_HEADER sHeader;
2518 ATOM_GPIO_I2C_ASSIGMENT asGPIO_Info[ATOM_MAX_SUPPORTED_DEVICE];
2519}ATOM_GPIO_I2C_INFO;
2520
2521/****************************************************************************/
2522// Common Structure used in other structures
2523/****************************************************************************/
2045 2524
2046#ifndef _H2INC 2525#ifndef _H2INC
2047 2526
2048/* Please don't add or expand this bitfield structure below, this one will retire soon! */ 2527//Please don't add or expand this bitfield structure below, this one will retire soon!
2049typedef struct _ATOM_MODE_MISC_INFO { 2528typedef struct _ATOM_MODE_MISC_INFO
2529{
2050#if ATOM_BIG_ENDIAN 2530#if ATOM_BIG_ENDIAN
2051 USHORT Reserved:6; 2531 USHORT Reserved:6;
2052 USHORT RGB888:1; 2532 USHORT RGB888:1;
2053 USHORT DoubleClock:1; 2533 USHORT DoubleClock:1;
2054 USHORT Interlace:1; 2534 USHORT Interlace:1;
2055 USHORT CompositeSync:1; 2535 USHORT CompositeSync:1;
2056 USHORT V_ReplicationBy2:1; 2536 USHORT V_ReplicationBy2:1;
2057 USHORT H_ReplicationBy2:1; 2537 USHORT H_ReplicationBy2:1;
2058 USHORT VerticalCutOff:1; 2538 USHORT VerticalCutOff:1;
2059 USHORT VSyncPolarity:1; /* 0=Active High, 1=Active Low */ 2539 USHORT VSyncPolarity:1; //0=Active High, 1=Active Low
2060 USHORT HSyncPolarity:1; /* 0=Active High, 1=Active Low */ 2540 USHORT HSyncPolarity:1; //0=Active High, 1=Active Low
2061 USHORT HorizontalCutOff:1; 2541 USHORT HorizontalCutOff:1;
2062#else 2542#else
2063 USHORT HorizontalCutOff:1; 2543 USHORT HorizontalCutOff:1;
2064 USHORT HSyncPolarity:1; /* 0=Active High, 1=Active Low */ 2544 USHORT HSyncPolarity:1; //0=Active High, 1=Active Low
2065 USHORT VSyncPolarity:1; /* 0=Active High, 1=Active Low */ 2545 USHORT VSyncPolarity:1; //0=Active High, 1=Active Low
2066 USHORT VerticalCutOff:1; 2546 USHORT VerticalCutOff:1;
2067 USHORT H_ReplicationBy2:1; 2547 USHORT H_ReplicationBy2:1;
2068 USHORT V_ReplicationBy2:1; 2548 USHORT V_ReplicationBy2:1;
2069 USHORT CompositeSync:1; 2549 USHORT CompositeSync:1;
2070 USHORT Interlace:1; 2550 USHORT Interlace:1;
2071 USHORT DoubleClock:1; 2551 USHORT DoubleClock:1;
2072 USHORT RGB888:1; 2552 USHORT RGB888:1;
2073 USHORT Reserved:6; 2553 USHORT Reserved:6;
2074#endif 2554#endif
2075} ATOM_MODE_MISC_INFO; 2555}ATOM_MODE_MISC_INFO;
2076 2556
2077typedef union _ATOM_MODE_MISC_INFO_ACCESS { 2557typedef union _ATOM_MODE_MISC_INFO_ACCESS
2078 ATOM_MODE_MISC_INFO sbfAccess; 2558{
2079 USHORT usAccess; 2559 ATOM_MODE_MISC_INFO sbfAccess;
2080} ATOM_MODE_MISC_INFO_ACCESS; 2560 USHORT usAccess;
2081 2561}ATOM_MODE_MISC_INFO_ACCESS;
2562
2082#else 2563#else
2083 2564
2084typedef union _ATOM_MODE_MISC_INFO_ACCESS { 2565typedef union _ATOM_MODE_MISC_INFO_ACCESS
2085 USHORT usAccess; 2566{
2086} ATOM_MODE_MISC_INFO_ACCESS; 2567 USHORT usAccess;
2087 2568}ATOM_MODE_MISC_INFO_ACCESS;
2569
2088#endif 2570#endif
2089 2571
2090/* usModeMiscInfo- */ 2572// usModeMiscInfo-
2091#define ATOM_H_CUTOFF 0x01 2573#define ATOM_H_CUTOFF 0x01
2092#define ATOM_HSYNC_POLARITY 0x02 /* 0=Active High, 1=Active Low */ 2574#define ATOM_HSYNC_POLARITY 0x02 //0=Active High, 1=Active Low
2093#define ATOM_VSYNC_POLARITY 0x04 /* 0=Active High, 1=Active Low */ 2575#define ATOM_VSYNC_POLARITY 0x04 //0=Active High, 1=Active Low
2094#define ATOM_V_CUTOFF 0x08 2576#define ATOM_V_CUTOFF 0x08
2095#define ATOM_H_REPLICATIONBY2 0x10 2577#define ATOM_H_REPLICATIONBY2 0x10
2096#define ATOM_V_REPLICATIONBY2 0x20 2578#define ATOM_V_REPLICATIONBY2 0x20
@@ -2099,10 +2581,10 @@ typedef union _ATOM_MODE_MISC_INFO_ACCESS {
2099#define ATOM_DOUBLE_CLOCK_MODE 0x100 2581#define ATOM_DOUBLE_CLOCK_MODE 0x100
2100#define ATOM_RGB888_MODE 0x200 2582#define ATOM_RGB888_MODE 0x200
2101 2583
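// Hedged example of reading these flags from a susModeMiscInfo word; per the
// comments above, a set polarity bit means active low.
static int atom_hsync_active_low(ATOM_MODE_MISC_INFO_ACCESS info)
{
    return (info.usAccess & ATOM_HSYNC_POLARITY) != 0;
}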
2102/* usRefreshRate- */ 2584//usRefreshRate-
2103#define ATOM_REFRESH_43 43 2585#define ATOM_REFRESH_43 43
2104#define ATOM_REFRESH_47 47 2586#define ATOM_REFRESH_47 47
2105#define ATOM_REFRESH_56 56 2587#define ATOM_REFRESH_56 56
2106#define ATOM_REFRESH_60 60 2588#define ATOM_REFRESH_60 60
2107#define ATOM_REFRESH_65 65 2589#define ATOM_REFRESH_65 65
2108#define ATOM_REFRESH_70 70 2590#define ATOM_REFRESH_70 70
@@ -2110,192 +2592,233 @@ typedef union _ATOM_MODE_MISC_INFO_ACCESS {
2110#define ATOM_REFRESH_75 75 2592#define ATOM_REFRESH_75 75
2111#define ATOM_REFRESH_85 85 2593#define ATOM_REFRESH_85 85
2112 2594
2113/* ATOM_MODE_TIMING data are exactly the same as VESA timing data. */ 2595// ATOM_MODE_TIMING data are exactly the same as VESA timing data.
2114/* Translation from EDID to ATOM_MODE_TIMING, use the following formula. */ 2596// Translation from EDID to ATOM_MODE_TIMING, use the following formula.
2115/* */ 2597//
2116/* VESA_HTOTAL = VESA_ACTIVE + 2* VESA_BORDER + VESA_BLANK */ 2598// VESA_HTOTAL = VESA_ACTIVE + 2* VESA_BORDER + VESA_BLANK
2117/* = EDID_HA + EDID_HBL */ 2599// = EDID_HA + EDID_HBL
2118/* VESA_HDISP = VESA_ACTIVE = EDID_HA */ 2600// VESA_HDISP = VESA_ACTIVE = EDID_HA
2119/* VESA_HSYNC_START = VESA_ACTIVE + VESA_BORDER + VESA_FRONT_PORCH */ 2601// VESA_HSYNC_START = VESA_ACTIVE + VESA_BORDER + VESA_FRONT_PORCH
2120/* = EDID_HA + EDID_HSO */ 2602// = EDID_HA + EDID_HSO
2121/* VESA_HSYNC_WIDTH = VESA_HSYNC_TIME = EDID_HSPW */ 2603// VESA_HSYNC_WIDTH = VESA_HSYNC_TIME = EDID_HSPW
2122/* VESA_BORDER = EDID_BORDER */ 2604// VESA_BORDER = EDID_BORDER
2123 2605
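// The horizontal half of the translation above, written out as C; the edid_*
// parameters are assumptions standing in for fields parsed from an EDID
// detailed timing block.
static void atom_h_timing_from_edid(USHORT edid_ha, USHORT edid_hbl,
                                    USHORT edid_hso, USHORT edid_hspw,
                                    USHORT *h_total, USHORT *h_sync_start,
                                    USHORT *h_sync_width)
{
    *h_total      = edid_ha + edid_hbl;  /* VESA_HTOTAL = EDID_HA + EDID_HBL */
    *h_sync_start = edid_ha + edid_hso;  /* VESA_HSYNC_START = EDID_HA + EDID_HSO */
    *h_sync_width = edid_hspw;           /* VESA_HSYNC_WIDTH = EDID_HSPW */
}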
2124/****************************************************************************/ 2606/****************************************************************************/
2125/* Structure used in SetCRTC_UsingDTDTimingTable */ 2607// Structure used in SetCRTC_UsingDTDTimingTable
2126/****************************************************************************/ 2608/****************************************************************************/
2127typedef struct _SET_CRTC_USING_DTD_TIMING_PARAMETERS { 2609typedef struct _SET_CRTC_USING_DTD_TIMING_PARAMETERS
2128 USHORT usH_Size; 2610{
2129 USHORT usH_Blanking_Time; 2611 USHORT usH_Size;
2130 USHORT usV_Size; 2612 USHORT usH_Blanking_Time;
2131 USHORT usV_Blanking_Time; 2613 USHORT usV_Size;
2132 USHORT usH_SyncOffset; 2614 USHORT usV_Blanking_Time;
2133 USHORT usH_SyncWidth; 2615 USHORT usH_SyncOffset;
2134 USHORT usV_SyncOffset; 2616 USHORT usH_SyncWidth;
2135 USHORT usV_SyncWidth; 2617 USHORT usV_SyncOffset;
2136 ATOM_MODE_MISC_INFO_ACCESS susModeMiscInfo; 2618 USHORT usV_SyncWidth;
2137 UCHAR ucH_Border; /* From DFP EDID */ 2619 ATOM_MODE_MISC_INFO_ACCESS susModeMiscInfo;
2138 UCHAR ucV_Border; 2620 UCHAR ucH_Border; // From DFP EDID
2139 UCHAR ucCRTC; /* ATOM_CRTC1 or ATOM_CRTC2 */ 2621 UCHAR ucV_Border;
2140 UCHAR ucPadding[3]; 2622 UCHAR ucCRTC; // ATOM_CRTC1 or ATOM_CRTC2
2141} SET_CRTC_USING_DTD_TIMING_PARAMETERS; 2623 UCHAR ucPadding[3];
2142 2624}SET_CRTC_USING_DTD_TIMING_PARAMETERS;
2143/****************************************************************************/ 2625
2144/* Structure used in SetCRTC_TimingTable */ 2626/****************************************************************************/
2145/****************************************************************************/ 2627// Structure used in SetCRTC_TimingTable
2146typedef struct _SET_CRTC_TIMING_PARAMETERS { 2628/****************************************************************************/
2147 USHORT usH_Total; /* horizontal total */ 2629typedef struct _SET_CRTC_TIMING_PARAMETERS
2148 USHORT usH_Disp; /* horizontal display */ 2630{
2149 USHORT usH_SyncStart; /* horizontal Sync start */ 2631 USHORT usH_Total; // horizontal total
2150 USHORT usH_SyncWidth; /* horizontal Sync width */ 2632 USHORT usH_Disp; // horizontal display
2151 USHORT usV_Total; /* vertical total */ 2633 USHORT usH_SyncStart; // horizontal Sync start
2152 USHORT usV_Disp; /* vertical display */ 2634 USHORT usH_SyncWidth; // horizontal Sync width
2153 USHORT usV_SyncStart; /* vertical Sync start */ 2635 USHORT usV_Total; // vertical total
2154 USHORT usV_SyncWidth; /* vertical Sync width */ 2636 USHORT usV_Disp; // vertical display
2155 ATOM_MODE_MISC_INFO_ACCESS susModeMiscInfo; 2637 USHORT usV_SyncStart; // vertical Sync start
2156 UCHAR ucCRTC; /* ATOM_CRTC1 or ATOM_CRTC2 */ 2638 USHORT usV_SyncWidth; // vertical Sync width
2157 UCHAR ucOverscanRight; /* right */ 2639 ATOM_MODE_MISC_INFO_ACCESS susModeMiscInfo;
2158 UCHAR ucOverscanLeft; /* left */ 2640 UCHAR ucCRTC; // ATOM_CRTC1 or ATOM_CRTC2
2159 UCHAR ucOverscanBottom; /* bottom */ 2641 UCHAR ucOverscanRight; // right
2160 UCHAR ucOverscanTop; /* top */ 2642 UCHAR ucOverscanLeft; // left
2161 UCHAR ucReserved; 2643 UCHAR ucOverscanBottom; // bottom
2162} SET_CRTC_TIMING_PARAMETERS; 2644 UCHAR ucOverscanTop; // top
2645 UCHAR ucReserved;
2646}SET_CRTC_TIMING_PARAMETERS;
2163#define SET_CRTC_TIMING_PARAMETERS_PS_ALLOCATION SET_CRTC_TIMING_PARAMETERS 2647#define SET_CRTC_TIMING_PARAMETERS_PS_ALLOCATION SET_CRTC_TIMING_PARAMETERS
2164 2648
2165/****************************************************************************/ 2649/****************************************************************************/
2166/* Structure used in StandardVESA_TimingTable */ 2650// Structure used in StandardVESA_TimingTable
2167/* AnalogTV_InfoTable */ 2651// AnalogTV_InfoTable
2168/* ComponentVideoInfoTable */ 2652// ComponentVideoInfoTable
2169/****************************************************************************/ 2653/****************************************************************************/
2170typedef struct _ATOM_MODE_TIMING { 2654typedef struct _ATOM_MODE_TIMING
2171 USHORT usCRTC_H_Total; 2655{
2172 USHORT usCRTC_H_Disp; 2656 USHORT usCRTC_H_Total;
2173 USHORT usCRTC_H_SyncStart; 2657 USHORT usCRTC_H_Disp;
2174 USHORT usCRTC_H_SyncWidth; 2658 USHORT usCRTC_H_SyncStart;
2175 USHORT usCRTC_V_Total; 2659 USHORT usCRTC_H_SyncWidth;
2176 USHORT usCRTC_V_Disp; 2660 USHORT usCRTC_V_Total;
2177 USHORT usCRTC_V_SyncStart; 2661 USHORT usCRTC_V_Disp;
2178 USHORT usCRTC_V_SyncWidth; 2662 USHORT usCRTC_V_SyncStart;
2179 USHORT usPixelClock; /* in 10Khz unit */ 2663 USHORT usCRTC_V_SyncWidth;
2180 ATOM_MODE_MISC_INFO_ACCESS susModeMiscInfo; 2664 USHORT usPixelClock; //in 10Khz unit
2181 USHORT usCRTC_OverscanRight; 2665 ATOM_MODE_MISC_INFO_ACCESS susModeMiscInfo;
2182 USHORT usCRTC_OverscanLeft; 2666 USHORT usCRTC_OverscanRight;
2183 USHORT usCRTC_OverscanBottom; 2667 USHORT usCRTC_OverscanLeft;
2184 USHORT usCRTC_OverscanTop; 2668 USHORT usCRTC_OverscanBottom;
2185 USHORT usReserve; 2669 USHORT usCRTC_OverscanTop;
2186 UCHAR ucInternalModeNumber; 2670 USHORT usReserve;
2187 UCHAR ucRefreshRate; 2671 UCHAR ucInternalModeNumber;
2188} ATOM_MODE_TIMING; 2672 UCHAR ucRefreshRate;
2189 2673}ATOM_MODE_TIMING;
2190typedef struct _ATOM_DTD_FORMAT { 2674
2191 USHORT usPixClk; 2675typedef struct _ATOM_DTD_FORMAT
2192 USHORT usHActive; 2676{
2193 USHORT usHBlanking_Time; 2677 USHORT usPixClk;
2194 USHORT usVActive; 2678 USHORT usHActive;
2195 USHORT usVBlanking_Time; 2679 USHORT usHBlanking_Time;
2196 USHORT usHSyncOffset; 2680 USHORT usVActive;
2197 USHORT usHSyncWidth; 2681 USHORT usVBlanking_Time;
2198 USHORT usVSyncOffset; 2682 USHORT usHSyncOffset;
2199 USHORT usVSyncWidth; 2683 USHORT usHSyncWidth;
2200 USHORT usImageHSize; 2684 USHORT usVSyncOffset;
2201 USHORT usImageVSize; 2685 USHORT usVSyncWidth;
2202 UCHAR ucHBorder; 2686 USHORT usImageHSize;
2203 UCHAR ucVBorder; 2687 USHORT usImageVSize;
2204 ATOM_MODE_MISC_INFO_ACCESS susModeMiscInfo; 2688 UCHAR ucHBorder;
2205 UCHAR ucInternalModeNumber; 2689 UCHAR ucVBorder;
2206 UCHAR ucRefreshRate; 2690 ATOM_MODE_MISC_INFO_ACCESS susModeMiscInfo;
2207} ATOM_DTD_FORMAT; 2691 UCHAR ucInternalModeNumber;
2208 2692 UCHAR ucRefreshRate;
2209/****************************************************************************/ 2693}ATOM_DTD_FORMAT;
2210/* Structure used in LVDS_InfoTable */ 2694
2211/* * Need a document to describe this table */ 2695/****************************************************************************/
2212/****************************************************************************/ 2696// Structure used in LVDS_InfoTable
2697// * Need a document to describe this table
2698/****************************************************************************/
2213#define SUPPORTED_LCD_REFRESHRATE_30Hz 0x0004 2699#define SUPPORTED_LCD_REFRESHRATE_30Hz 0x0004
2214#define SUPPORTED_LCD_REFRESHRATE_40Hz 0x0008 2700#define SUPPORTED_LCD_REFRESHRATE_40Hz 0x0008
2215#define SUPPORTED_LCD_REFRESHRATE_50Hz 0x0010 2701#define SUPPORTED_LCD_REFRESHRATE_50Hz 0x0010
2216#define SUPPORTED_LCD_REFRESHRATE_60Hz 0x0020 2702#define SUPPORTED_LCD_REFRESHRATE_60Hz 0x0020
2217 2703
2218/* Once DAL sees this CAP is set, it will read EDID from LCD on its own instead of using sLCDTiming in ATOM_LVDS_INFO_V12. */ 2704//ucTableFormatRevision=1
2219/* Other entries in ATOM_LVDS_INFO_V12 are still valid/useful to DAL */ 2705//ucTableContentRevision=1
2220#define LCDPANEL_CAP_READ_EDID 0x1 2706typedef struct _ATOM_LVDS_INFO
2221 2707{
2222/* ucTableFormatRevision=1 */ 2708 ATOM_COMMON_TABLE_HEADER sHeader;
2223/* ucTableContentRevision=1 */ 2709 ATOM_DTD_FORMAT sLCDTiming;
2224typedef struct _ATOM_LVDS_INFO { 2710 USHORT usModePatchTableOffset;
2225 ATOM_COMMON_TABLE_HEADER sHeader; 2711 USHORT usSupportedRefreshRate; //Refer to panel info table in ATOMBIOS extension Spec.
2226 ATOM_DTD_FORMAT sLCDTiming; 2712 USHORT usOffDelayInMs;
2227 USHORT usModePatchTableOffset; 2713 UCHAR ucPowerSequenceDigOntoDEin10Ms;
2228 USHORT usSupportedRefreshRate; /* Refer to panel info table in ATOMBIOS extension Spec. */ 2714 UCHAR ucPowerSequenceDEtoBLOnin10Ms;
2229 USHORT usOffDelayInMs; 2715 UCHAR ucLVDS_Misc; // Bit0:{=0:single, =1:dual},Bit1 {=0:666RGB, =1:888RGB},Bit2:3:{Grey level}
2230 UCHAR ucPowerSequenceDigOntoDEin10Ms; 2716 // Bit4:{=0:LDI format for RGB888, =1 FPDI format for RGB888}
2231 UCHAR ucPowerSequenceDEtoBLOnin10Ms; 2717 // Bit5:{=0:Spatial Dithering disabled;1 Spatial Dithering enabled}
2232 UCHAR ucLVDS_Misc; /* Bit0:{=0:single, =1:dual},Bit1 {=0:666RGB, =1:888RGB},Bit2:3:{Grey level} */ 2718 // Bit6:{=0:Temporal Dithering disabled;1 Temporal Dithering enabled}
2233 /* Bit4:{=0:LDI format for RGB888, =1 FPDI format for RGB888} */ 2719 UCHAR ucPanelDefaultRefreshRate;
2234 /* Bit5:{=0:Spatial Dithering disabled;1 Spatial Dithering enabled} */ 2720 UCHAR ucPanelIdentification;
2235 /* Bit6:{=0:Temporal Dithering disabled;1 Temporal Dithering enabled} */ 2721 UCHAR ucSS_Id;
2236 UCHAR ucPanelDefaultRefreshRate; 2722}ATOM_LVDS_INFO;
2237 UCHAR ucPanelIdentification; 2723
2238 UCHAR ucSS_Id; 2724//ucTableFormatRevision=1
2239} ATOM_LVDS_INFO; 2725//ucTableContentRevision=2
2240 2726typedef struct _ATOM_LVDS_INFO_V12
2241/* ucTableFormatRevision=1 */ 2727{
2242/* ucTableContentRevision=2 */ 2728 ATOM_COMMON_TABLE_HEADER sHeader;
2243typedef struct _ATOM_LVDS_INFO_V12 { 2729 ATOM_DTD_FORMAT sLCDTiming;
2244 ATOM_COMMON_TABLE_HEADER sHeader; 2730 USHORT usExtInfoTableOffset;
2245 ATOM_DTD_FORMAT sLCDTiming; 2731 USHORT usSupportedRefreshRate; //Refer to panel info table in ATOMBIOS extension Spec.
2246 USHORT usExtInfoTableOffset; 2732 USHORT usOffDelayInMs;
2247 USHORT usSupportedRefreshRate; /* Refer to panel info table in ATOMBIOS extension Spec. */ 2733 UCHAR ucPowerSequenceDigOntoDEin10Ms;
2248 USHORT usOffDelayInMs; 2734 UCHAR ucPowerSequenceDEtoBLOnin10Ms;
2249 UCHAR ucPowerSequenceDigOntoDEin10Ms; 2735 UCHAR ucLVDS_Misc; // Bit0:{=0:single, =1:dual},Bit1 {=0:666RGB, =1:888RGB},Bit2:3:{Grey level}
2250 UCHAR ucPowerSequenceDEtoBLOnin10Ms; 2736 // Bit4:{=0:LDI format for RGB888, =1 FPDI format for RGB888}
2251 UCHAR ucLVDS_Misc; /* Bit0:{=0:single, =1:dual},Bit1 {=0:666RGB, =1:888RGB},Bit2:3:{Grey level} */ 2737 // Bit5:{=0:Spatial Dithering disabled;1 Spatial Dithering enabled}
2252 /* Bit4:{=0:LDI format for RGB888, =1 FPDI format for RGB888} */ 2738 // Bit6:{=0:Temporal Dithering disabled;1 Temporal Dithering enabled}
2253 /* Bit5:{=0:Spatial Dithering disabled;1 Spatial Dithering enabled} */ 2739 UCHAR ucPanelDefaultRefreshRate;
2254 /* Bit6:{=0:Temporal Dithering disabled;1 Temporal Dithering enabled} */ 2740 UCHAR ucPanelIdentification;
2255 UCHAR ucPanelDefaultRefreshRate; 2741 UCHAR ucSS_Id;
2256 UCHAR ucPanelIdentification; 2742 USHORT usLCDVenderID;
2257 UCHAR ucSS_Id; 2743 USHORT usLCDProductID;
2258 USHORT usLCDVenderID; 2744 UCHAR ucLCDPanel_SpecialHandlingCap;
2259 USHORT usLCDProductID; 2745 UCHAR ucPanelInfoSize; // start from ATOM_DTD_FORMAT to end of panel info, include ExtInfoTable
2260 UCHAR ucLCDPanel_SpecialHandlingCap; 2746 UCHAR ucReserved[2];
2261 UCHAR ucPanelInfoSize; /* start from ATOM_DTD_FORMAT to end of panel info, include ExtInfoTable */ 2747}ATOM_LVDS_INFO_V12;
2262 UCHAR ucReserved[2]; 2748
2263} ATOM_LVDS_INFO_V12; 2749//Definitions for ucLCDPanel_SpecialHandlingCap:
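// Hedged decode of ucLVDS_Misc per the bit comments in the two structs above;
// only the bit positions come from the table, the helper names are invented.
static int atom_lvds_is_dual_link(UCHAR lvds_misc)
{
    return (lvds_misc & 0x01) != 0;      /* Bit0: 0=single, 1=dual */
}
static int atom_lvds_is_888rgb(UCHAR lvds_misc)
{
    return (lvds_misc & 0x02) != 0;      /* Bit1: 0=666RGB, 1=888RGB */
}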
2750
2751//Once DAL sees this CAP is set, it will read EDID from LCD on its own instead of using sLCDTiming in ATOM_LVDS_INFO_V12.
2752//Other entries in ATOM_LVDS_INFO_V12 are still valid/useful to DAL
2753#define LCDPANEL_CAP_READ_EDID 0x1
2754
2755//If a design supports DRR (dynamic refresh rate) on internal panels (LVDS or EDP), this cap is set in ucLCDPanel_SpecialHandlingCap together
2756//with multiple supported refresh rates in usSupportedRefreshRate. This cap should not be set when only a slow refresh rate is supported (static
2757//refresh rate switch by SW). This is only valid from ATOM_LVDS_INFO_V12 onward.
2758#define LCDPANEL_CAP_DRR_SUPPORTED 0x2
2759
2760//Use this cap bit for a quick reference whether an embedded panel (LCD1) is LVDS or eDP.
2761#define LCDPANEL_CAP_eDP 0x4
2762
2763
2764//Color Bit Depth definition in EDID V1.4 @BYTE 14h
2765//Bit 6 5 4
2766 // 0 0 0 - Color bit depth is undefined
2767 // 0 0 1 - 6 Bits per Primary Color
2768 // 0 1 0 - 8 Bits per Primary Color
2769 // 0 1 1 - 10 Bits per Primary Color
2770 // 1 0 0 - 12 Bits per Primary Color
2771 // 1 0 1 - 14 Bits per Primary Color
2772 // 1 1 0 - 16 Bits per Primary Color
2773 // 1 1 1 - Reserved
2774
2775#define PANEL_COLOR_BIT_DEPTH_MASK 0x70
2776
2777// Bit7:{=0:Random Dithering disabled;1 Random Dithering enabled}
2778#define PANEL_RANDOM_DITHER 0x80
2779#define PANEL_RANDOM_DITHER_MASK 0x80
2780
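// A sketch of decoding the EDID 1.4 color depth field tabulated above; field
// values 1..6 map linearly to 6..16 bits per primary color, so the table
// reduces to one expression. The helper name is illustrative.
static int atom_panel_bpc(UCHAR misc)
{
    UCHAR field = (misc & PANEL_COLOR_BIT_DEPTH_MASK) >> 4;
    if (field >= 1 && field <= 6)
        return 4 + 2 * field;            /* 6, 8, 10, 12, 14 or 16 bpc */
    return 0;                            /* undefined or reserved */
}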
2264 2781
2265#define ATOM_LVDS_INFO_LAST ATOM_LVDS_INFO_V12 2782#define ATOM_LVDS_INFO_LAST ATOM_LVDS_INFO_V12
2266 2783
2267typedef struct _ATOM_PATCH_RECORD_MODE { 2784typedef struct _ATOM_PATCH_RECORD_MODE
2268 UCHAR ucRecordType; 2785{
2269 USHORT usHDisp; 2786 UCHAR ucRecordType;
2270 USHORT usVDisp; 2787 USHORT usHDisp;
2271} ATOM_PATCH_RECORD_MODE; 2788 USHORT usVDisp;
2789}ATOM_PATCH_RECORD_MODE;
2272 2790
2273typedef struct _ATOM_LCD_RTS_RECORD { 2791typedef struct _ATOM_LCD_RTS_RECORD
2274 UCHAR ucRecordType; 2792{
2275 UCHAR ucRTSValue; 2793 UCHAR ucRecordType;
2276} ATOM_LCD_RTS_RECORD; 2794 UCHAR ucRTSValue;
2795}ATOM_LCD_RTS_RECORD;
2277 2796
2278/* !! If the record below exists, it should always be the first record for easy use in command table!!! */ 2797//!! If the record below exists, it should always be the first record for easy use in command table!!!
2279typedef struct _ATOM_LCD_MODE_CONTROL_CAP { 2798// The record below is only used when LVDS_Info is present. From ATOM_LVDS_INFO_V12, use ucLCDPanel_SpecialHandlingCap instead.
2280 UCHAR ucRecordType; 2799typedef struct _ATOM_LCD_MODE_CONTROL_CAP
2281 USHORT usLCDCap; 2800{
2282} ATOM_LCD_MODE_CONTROL_CAP; 2801 UCHAR ucRecordType;
2802 USHORT usLCDCap;
2803}ATOM_LCD_MODE_CONTROL_CAP;
2283 2804
2284#define LCD_MODE_CAP_BL_OFF 1 2805#define LCD_MODE_CAP_BL_OFF 1
2285#define LCD_MODE_CAP_CRTC_OFF 2 2806#define LCD_MODE_CAP_CRTC_OFF 2
2286#define LCD_MODE_CAP_PANEL_OFF 4 2807#define LCD_MODE_CAP_PANEL_OFF 4
2287 2808
2288typedef struct _ATOM_FAKE_EDID_PATCH_RECORD { 2809typedef struct _ATOM_FAKE_EDID_PATCH_RECORD
2289 UCHAR ucRecordType; 2810{
2290 UCHAR ucFakeEDIDLength; 2811 UCHAR ucRecordType;
2291 UCHAR ucFakeEDIDString[1]; /* This actually has ucFakeEdidLength elements. */ 2812 UCHAR ucFakeEDIDLength;
2813 UCHAR ucFakeEDIDString[1]; // This actually has ucFakeEdidLength elements.
2292} ATOM_FAKE_EDID_PATCH_RECORD; 2814} ATOM_FAKE_EDID_PATCH_RECORD;
2293 2815
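// Because ucFakeEDIDString really holds ucFakeEDIDLength bytes, a record's
// true size must be computed when walking patch records (illustrative sketch):
static int atom_fake_edid_record_size(ATOM_FAKE_EDID_PATCH_RECORD *rec)
{
    /* sizeof already counts the one declared element, hence the -1 */
    return sizeof(ATOM_FAKE_EDID_PATCH_RECORD) - 1 + rec->ucFakeEDIDLength;
}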
2294typedef struct _ATOM_PANEL_RESOLUTION_PATCH_RECORD { 2816typedef struct _ATOM_PANEL_RESOLUTION_PATCH_RECORD
2295 UCHAR ucRecordType; 2817{
2296 USHORT usHSize; 2818 UCHAR ucRecordType;
2297 USHORT usVSize; 2819 USHORT usHSize;
2298} ATOM_PANEL_RESOLUTION_PATCH_RECORD; 2820 USHORT usVSize;
2821}ATOM_PANEL_RESOLUTION_PATCH_RECORD;
2299 2822
2300#define LCD_MODE_PATCH_RECORD_MODE_TYPE 1 2823#define LCD_MODE_PATCH_RECORD_MODE_TYPE 1
2301#define LCD_RTS_RECORD_TYPE 2 2824#define LCD_RTS_RECORD_TYPE 2
@@ -2306,21 +2829,25 @@ typedef struct _ATOM_PANEL_RESOLUTION_PATCH_RECORD {
2306 2829
2307/****************************Spread Spectrum Info Table Definitions **********************/ 2830/****************************Spread Spectrum Info Table Definitions **********************/
2308 2831
2309/* ucTableFormatRevision=1 */ 2832//ucTableFormatRevision=1
2310/* ucTableContentRevision=2 */ 2833//ucTableContentRevision=2
2311typedef struct _ATOM_SPREAD_SPECTRUM_ASSIGNMENT { 2834typedef struct _ATOM_SPREAD_SPECTRUM_ASSIGNMENT
2312 USHORT usSpreadSpectrumPercentage; 2835{
2313 UCHAR ucSpreadSpectrumType; /* Bit0=0 Down Spread,=1 Center Spread. Bit1=1 Ext. =0 Int. Others:TBD */ 2836 USHORT usSpreadSpectrumPercentage;
2314 UCHAR ucSS_Step; 2837 UCHAR ucSpreadSpectrumType; //Bit0=0 Down Spread,=1 Center Spread. Bit1=1 Ext. =0 Int. Bit2=1: PCIE REFCLK SS =0 internal PPLL SS Others:TBD
2315 UCHAR ucSS_Delay; 2838 UCHAR ucSS_Step;
2316 UCHAR ucSS_Id; 2839 UCHAR ucSS_Delay;
2317 UCHAR ucRecommendedRef_Div; 2840 UCHAR ucSS_Id;
2318 UCHAR ucSS_Range; /* it was reserved for V11 */ 2841 UCHAR ucRecommendedRef_Div;
2319} ATOM_SPREAD_SPECTRUM_ASSIGNMENT; 2842 UCHAR ucSS_Range; //it was reserved for V11
2843}ATOM_SPREAD_SPECTRUM_ASSIGNMENT;
2320 2844
2321#define ATOM_MAX_SS_ENTRY 16 2845#define ATOM_MAX_SS_ENTRY 16
2322#define ATOM_DP_SS_ID1 0x0f1 /* SS modulation freq=30k */ 2846#define ATOM_DP_SS_ID1 0x0f1 // SS ID for internal DP stream at 2.7Ghz. if ATOM_DP_SS_ID2 does not exist in SS_InfoTable, it is used for internal DP stream at 1.62Ghz as well.
2323#define ATOM_DP_SS_ID2 0x0f2 /* SS modulation freq=33k */ 2847#define ATOM_DP_SS_ID2 0x0f2 // SS ID for internal DP stream at 1.62Ghz, if it exists in SS_InfoTable.
2848#define ATOM_LVLINK_2700MHz_SS_ID 0x0f3 // SS ID for LV link translator chip at 2.7Ghz
2849#define ATOM_LVLINK_1620MHz_SS_ID 0x0f4 // SS ID for LV link translator chip at 1.62Ghz
2850
2324 2851
2325#define ATOM_SS_DOWN_SPREAD_MODE_MASK 0x00000000 2852#define ATOM_SS_DOWN_SPREAD_MODE_MASK 0x00000000
2326#define ATOM_SS_DOWN_SPREAD_MODE 0x00000000 2853#define ATOM_SS_DOWN_SPREAD_MODE 0x00000000
@@ -2329,29 +2856,30 @@ typedef struct _ATOM_SPREAD_SPECTRUM_ASSIGNMENT {
2329#define ATOM_INTERNAL_SS_MASK 0x00000000 2856#define ATOM_INTERNAL_SS_MASK 0x00000000
2330#define ATOM_EXTERNAL_SS_MASK 0x00000002 2857#define ATOM_EXTERNAL_SS_MASK 0x00000002
2331#define EXEC_SS_STEP_SIZE_SHIFT 2 2858#define EXEC_SS_STEP_SIZE_SHIFT 2
2332#define EXEC_SS_DELAY_SHIFT 4 2859#define EXEC_SS_DELAY_SHIFT 4
2333#define ACTIVEDATA_TO_BLON_DELAY_SHIFT 4 2860#define ACTIVEDATA_TO_BLON_DELAY_SHIFT 4
2334 2861
2335typedef struct _ATOM_SPREAD_SPECTRUM_INFO { 2862typedef struct _ATOM_SPREAD_SPECTRUM_INFO
2336 ATOM_COMMON_TABLE_HEADER sHeader; 2863{
2337 ATOM_SPREAD_SPECTRUM_ASSIGNMENT asSS_Info[ATOM_MAX_SS_ENTRY]; 2864 ATOM_COMMON_TABLE_HEADER sHeader;
2338} ATOM_SPREAD_SPECTRUM_INFO; 2865 ATOM_SPREAD_SPECTRUM_ASSIGNMENT asSS_Info[ATOM_MAX_SS_ENTRY];
2339 2866}ATOM_SPREAD_SPECTRUM_INFO;
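// Sketch: locating the spread spectrum entry for one of the SS IDs above
// (e.g. ATOM_DP_SS_ID1); returns 0 when the ID is absent from the table.
static ATOM_SPREAD_SPECTRUM_ASSIGNMENT *atom_find_ss(ATOM_SPREAD_SPECTRUM_INFO *info,
                                                     UCHAR id)
{
    int i;
    for (i = 0; i < ATOM_MAX_SS_ENTRY; i++)
        if (info->asSS_Info[i].ucSS_Id == id)
            return &info->asSS_Info[i];
    return 0;
}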
2340/****************************************************************************/ 2867
2341/* Structure used in AnalogTV_InfoTable (Top level) */ 2868/****************************************************************************/
2342/****************************************************************************/ 2869// Structure used in AnalogTV_InfoTable (Top level)
2343/* ucTVBootUpDefaultStd definition: */ 2870/****************************************************************************/
2344 2871//ucTVBootUpDefaultStd definition:
2345/* ATOM_TV_NTSC 1 */ 2872
2346/* ATOM_TV_NTSCJ 2 */ 2873//ATOM_TV_NTSC 1
2347/* ATOM_TV_PAL 3 */ 2874//ATOM_TV_NTSCJ 2
2348/* ATOM_TV_PALM 4 */ 2875//ATOM_TV_PAL 3
2349/* ATOM_TV_PALCN 5 */ 2876//ATOM_TV_PALM 4
2350/* ATOM_TV_PALN 6 */ 2877//ATOM_TV_PALCN 5
2351/* ATOM_TV_PAL60 7 */ 2878//ATOM_TV_PALN 6
2352/* ATOM_TV_SECAM 8 */ 2879//ATOM_TV_PAL60 7
2353 2880//ATOM_TV_SECAM 8
2354/* ucTVSupportedStd definition: */ 2881
2882//ucTVSupportedStd definition:
2355#define NTSC_SUPPORT 0x1 2883#define NTSC_SUPPORT 0x1
2356#define NTSCJ_SUPPORT 0x2 2884#define NTSCJ_SUPPORT 0x2
2357 2885
@@ -2364,46 +2892,58 @@ typedef struct _ATOM_SPREAD_SPECTRUM_INFO {
2364 2892
2365#define MAX_SUPPORTED_TV_TIMING 2 2893#define MAX_SUPPORTED_TV_TIMING 2
2366 2894
2367typedef struct _ATOM_ANALOG_TV_INFO { 2895typedef struct _ATOM_ANALOG_TV_INFO
2368 ATOM_COMMON_TABLE_HEADER sHeader; 2896{
2369 UCHAR ucTV_SupportedStandard; 2897 ATOM_COMMON_TABLE_HEADER sHeader;
2370 UCHAR ucTV_BootUpDefaultStandard; 2898 UCHAR ucTV_SupportedStandard;
2371 UCHAR ucExt_TV_ASIC_ID; 2899 UCHAR ucTV_BootUpDefaultStandard;
2372 UCHAR ucExt_TV_ASIC_SlaveAddr; 2900 UCHAR ucExt_TV_ASIC_ID;
2373 /*ATOM_DTD_FORMAT aModeTimings[MAX_SUPPORTED_TV_TIMING]; */ 2901 UCHAR ucExt_TV_ASIC_SlaveAddr;
2374 ATOM_MODE_TIMING aModeTimings[MAX_SUPPORTED_TV_TIMING]; 2902 /*ATOM_DTD_FORMAT aModeTimings[MAX_SUPPORTED_TV_TIMING];*/
2375} ATOM_ANALOG_TV_INFO; 2903 ATOM_MODE_TIMING aModeTimings[MAX_SUPPORTED_TV_TIMING];
2904}ATOM_ANALOG_TV_INFO;
2376 2905
2377#define MAX_SUPPORTED_TV_TIMING_V1_2 3 2906#define MAX_SUPPORTED_TV_TIMING_V1_2 3
2378 2907
2379typedef struct _ATOM_ANALOG_TV_INFO_V1_2 { 2908typedef struct _ATOM_ANALOG_TV_INFO_V1_2
2380 ATOM_COMMON_TABLE_HEADER sHeader; 2909{
2381 UCHAR ucTV_SupportedStandard; 2910 ATOM_COMMON_TABLE_HEADER sHeader;
2382 UCHAR ucTV_BootUpDefaultStandard; 2911 UCHAR ucTV_SupportedStandard;
2383 UCHAR ucExt_TV_ASIC_ID; 2912 UCHAR ucTV_BootUpDefaultStandard;
2384 UCHAR ucExt_TV_ASIC_SlaveAddr; 2913 UCHAR ucExt_TV_ASIC_ID;
2385 ATOM_DTD_FORMAT aModeTimings[MAX_SUPPORTED_TV_TIMING]; 2914 UCHAR ucExt_TV_ASIC_SlaveAddr;
2386} ATOM_ANALOG_TV_INFO_V1_2; 2915 ATOM_DTD_FORMAT aModeTimings[MAX_SUPPORTED_TV_TIMING];
2916}ATOM_ANALOG_TV_INFO_V1_2;
2917
2918typedef struct _ATOM_DPCD_INFO
2919{
2920 UCHAR ucRevisionNumber; //10h : Revision 1.0; 11h : Revision 1.1
2921 UCHAR ucMaxLinkRate; //06h : 1.62Gbps per lane; 0Ah = 2.7Gbps per lane
2922 UCHAR ucMaxLane; //Bits 4:0 = MAX_LANE_COUNT (1/2/4). Bit 7 = ENHANCED_FRAME_CAP
2923 UCHAR ucMaxDownSpread; //Bit0 = 0: No Down spread; Bit0 = 1: 0.5% (Subject to change according to DP spec)
2924}ATOM_DPCD_INFO;
2925
2926#define ATOM_DPCD_MAX_LANE_MASK 0x1F
2387 2927
2388/**************************************************************************/ 2928/**************************************************************************/
2389/* VRAM usage and their definitions */ 2929// VRAM usage and their definitions
2390 2930
2391/* One chunk of VRAM used by Bios is for HWICON surfaces, EDID data. */ 2931// One chunk of VRAM used by Bios is for HWICON surfaces, EDID data.
2392/* Current Mode timing and Dail Timing and/or STD timing data for EACH device. They can be broken down as below. */ 2932// Current Mode timing and Dail Timing and/or STD timing data for EACH device. They can be broken down as below.
2393/* All the addresses below are the offsets from the frame buffer start. They all MUST be Dword aligned! */ 2933// All the addresses below are the offsets from the frame buffer start. They all MUST be Dword aligned!
2394/* To driver: The physical address of this memory portion=mmFB_START(4K aligned)+ATOMBIOS_VRAM_USAGE_START_ADDR+ATOM_x_ADDR */ 2934// To driver: The physical address of this memory portion=mmFB_START(4K aligned)+ATOMBIOS_VRAM_USAGE_START_ADDR+ATOM_x_ADDR
2395/* To Bios: ATOMBIOS_VRAM_USAGE_START_ADDR+ATOM_x_ADDR->MM_INDEX */ 2935// To Bios: ATOMBIOS_VRAM_USAGE_START_ADDR+ATOM_x_ADDR->MM_INDEX
2396 2936
2397#ifndef VESA_MEMORY_IN_64K_BLOCK 2937#ifndef VESA_MEMORY_IN_64K_BLOCK
2398#define VESA_MEMORY_IN_64K_BLOCK 0x100 /* 256*64K=16Mb (Max. VESA memory is 16Mb!) */ 2938#define VESA_MEMORY_IN_64K_BLOCK 0x100 //256*64K=16Mb (Max. VESA memory is 16Mb!)
2399#endif 2939#endif
2400 2940
2401#define ATOM_EDID_RAW_DATASIZE 256 /* In Bytes */ 2941#define ATOM_EDID_RAW_DATASIZE 256 //In Bytes
2402#define ATOM_HWICON_SURFACE_SIZE 4096 /* In Bytes */ 2942#define ATOM_HWICON_SURFACE_SIZE 4096 //In Bytes
2403#define ATOM_HWICON_INFOTABLE_SIZE 32 2943#define ATOM_HWICON_INFOTABLE_SIZE 32
2404#define MAX_DTD_MODE_IN_VRAM 6 2944#define MAX_DTD_MODE_IN_VRAM 6
2405#define ATOM_DTD_MODE_SUPPORT_TBL_SIZE (MAX_DTD_MODE_IN_VRAM*28) /* 28= (SIZEOF ATOM_DTD_FORMAT) */ 2945#define ATOM_DTD_MODE_SUPPORT_TBL_SIZE (MAX_DTD_MODE_IN_VRAM*28) //28= (SIZEOF ATOM_DTD_FORMAT)
2406#define ATOM_STD_MODE_SUPPORT_TBL_SIZE (32*8) /* 32 is a predefined number,8= (SIZEOF ATOM_STD_FORMAT) */ 2946#define ATOM_STD_MODE_SUPPORT_TBL_SIZE (32*8) //32 is a predefined number,8= (SIZEOF ATOM_STD_FORMAT)
2407#define DFP_ENCODER_TYPE_OFFSET 0x80 2947#define DFP_ENCODER_TYPE_OFFSET 0x80
2408#define DP_ENCODER_LANE_NUM_OFFSET 0x84 2948#define DP_ENCODER_LANE_NUM_OFFSET 0x84
2409#define DP_ENCODER_LINK_RATE_OFFSET 0x88 2949#define DP_ENCODER_LINK_RATE_OFFSET 0x88
@@ -2417,7 +2957,7 @@ typedef struct _ATOM_ANALOG_TV_INFO_V1_2 {
2417 2957
2418#define ATOM_LCD1_EDID_ADDR (ATOM_CRT1_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE) 2958#define ATOM_LCD1_EDID_ADDR (ATOM_CRT1_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE)
2419#define ATOM_LCD1_DTD_MODE_TBL_ADDR (ATOM_LCD1_EDID_ADDR + ATOM_EDID_RAW_DATASIZE) 2959#define ATOM_LCD1_DTD_MODE_TBL_ADDR (ATOM_LCD1_EDID_ADDR + ATOM_EDID_RAW_DATASIZE)
2420#define ATOM_LCD1_STD_MODE_TBL_ADDR (ATOM_LCD1_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE) 2960#define ATOM_LCD1_STD_MODE_TBL_ADDR (ATOM_LCD1_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
2421 2961
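// Per the "To driver" note above, a hedged sketch of locating the LCD1 EDID
// block in VRAM; mm_fb_start and vram_usage_start are assumed inputs standing
// in for mmFB_START and ATOMBIOS_VRAM_USAGE_START_ADDR.
static ULONG atom_lcd1_edid_phys(ULONG mm_fb_start, ULONG vram_usage_start)
{
    return mm_fb_start + vram_usage_start + ATOM_LCD1_EDID_ADDR;
}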
2422#define ATOM_TV1_DTD_MODE_TBL_ADDR (ATOM_LCD1_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE) 2962#define ATOM_TV1_DTD_MODE_TBL_ADDR (ATOM_LCD1_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE)
2423 2963
@@ -2431,13 +2971,13 @@ typedef struct _ATOM_ANALOG_TV_INFO_V1_2 {
2431 2971
2432#define ATOM_LCD2_EDID_ADDR (ATOM_CRT2_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE) 2972#define ATOM_LCD2_EDID_ADDR (ATOM_CRT2_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE)
2433#define ATOM_LCD2_DTD_MODE_TBL_ADDR (ATOM_LCD2_EDID_ADDR + ATOM_EDID_RAW_DATASIZE) 2973#define ATOM_LCD2_DTD_MODE_TBL_ADDR (ATOM_LCD2_EDID_ADDR + ATOM_EDID_RAW_DATASIZE)
2434#define ATOM_LCD2_STD_MODE_TBL_ADDR (ATOM_LCD2_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE) 2974#define ATOM_LCD2_STD_MODE_TBL_ADDR (ATOM_LCD2_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
2435 2975
2436#define ATOM_TV2_EDID_ADDR (ATOM_LCD2_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE) 2976#define ATOM_DFP6_EDID_ADDR (ATOM_LCD2_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE)
2437#define ATOM_TV2_DTD_MODE_TBL_ADDR (ATOM_TV2_EDID_ADDR + ATOM_EDID_RAW_DATASIZE) 2977#define ATOM_DFP6_DTD_MODE_TBL_ADDR (ATOM_DFP6_EDID_ADDR + ATOM_EDID_RAW_DATASIZE)
2438#define ATOM_TV2_STD_MODE_TBL_ADDR (ATOM_TV2_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE) 2978#define ATOM_DFP6_STD_MODE_TBL_ADDR (ATOM_DFP6_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
2439 2979
2440#define ATOM_DFP2_EDID_ADDR (ATOM_TV2_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE) 2980#define ATOM_DFP2_EDID_ADDR (ATOM_DFP6_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE)
2441#define ATOM_DFP2_DTD_MODE_TBL_ADDR (ATOM_DFP2_EDID_ADDR + ATOM_EDID_RAW_DATASIZE) 2981#define ATOM_DFP2_DTD_MODE_TBL_ADDR (ATOM_DFP2_EDID_ADDR + ATOM_EDID_RAW_DATASIZE)
2442#define ATOM_DFP2_STD_MODE_TBL_ADDR (ATOM_DFP2_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE) 2982#define ATOM_DFP2_STD_MODE_TBL_ADDR (ATOM_DFP2_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
2443 2983
@@ -2457,533 +2997,850 @@ typedef struct _ATOM_ANALOG_TV_INFO_V1_2 {
2457#define ATOM_DFP5_DTD_MODE_TBL_ADDR (ATOM_DFP5_EDID_ADDR + ATOM_EDID_RAW_DATASIZE) 2997#define ATOM_DFP5_DTD_MODE_TBL_ADDR (ATOM_DFP5_EDID_ADDR + ATOM_EDID_RAW_DATASIZE)
2458#define ATOM_DFP5_STD_MODE_TBL_ADDR (ATOM_DFP5_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE) 2998#define ATOM_DFP5_STD_MODE_TBL_ADDR (ATOM_DFP5_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
2459 2999
2460#define ATOM_DP_TRAINING_TBL_ADDR (ATOM_DFP5_STD_MODE_TBL_ADDR+ATOM_STD_MODE_SUPPORT_TBL_SIZE) 3000#define ATOM_DP_TRAINING_TBL_ADDR (ATOM_DFP5_STD_MODE_TBL_ADDR+ATOM_STD_MODE_SUPPORT_TBL_SIZE)
2461 3001
2462#define ATOM_STACK_STORAGE_START (ATOM_DP_TRAINING_TBL_ADDR + 256) 3002#define ATOM_STACK_STORAGE_START (ATOM_DP_TRAINING_TBL_ADDR+256)
2463#define ATOM_STACK_STORAGE_END (ATOM_STACK_STORAGE_START + 512) 3003#define ATOM_STACK_STORAGE_END (ATOM_STACK_STORAGE_START+512)
2464 3004
2465/* The size below is in Kb! */ 3005//The size below is in Kb!
2466#define ATOM_VRAM_RESERVE_SIZE ((((ATOM_STACK_STORAGE_END - ATOM_HWICON1_SURFACE_ADDR)>>10)+4)&0xFFFC) 3006#define ATOM_VRAM_RESERVE_SIZE ((((ATOM_STACK_STORAGE_END - ATOM_HWICON1_SURFACE_ADDR)>>10)+4)&0xFFFC)
2467 3007
2468#define ATOM_VRAM_OPERATION_FLAGS_MASK 0xC0000000L 3008#define ATOM_VRAM_OPERATION_FLAGS_MASK 0xC0000000L
2469#define ATOM_VRAM_OPERATION_FLAGS_SHIFT 30 3009#define ATOM_VRAM_OPERATION_FLAGS_SHIFT 30
2470#define ATOM_VRAM_BLOCK_NEEDS_NO_RESERVATION 0x1 3010#define ATOM_VRAM_BLOCK_NEEDS_NO_RESERVATION 0x1
2471#define ATOM_VRAM_BLOCK_NEEDS_RESERVATION 0x0 3011#define ATOM_VRAM_BLOCK_NEEDS_RESERVATION 0x0
2472 3012
2473/***********************************************************************************/ 3013/***********************************************************************************/
2474/* Structure used in VRAM_UsageByFirmwareTable */ 3014// Structure used in VRAM_UsageByFirmwareTable
2475/* Note1: This table is filled by SetBiosReservationStartInFB in CoreCommSubs.asm */ 3015// Note1: This table is filled by SetBiosReservationStartInFB in CoreCommSubs.asm
2476/* at running time. */ 3016// at running time.
2477/* note2: From RV770, the memory is more than 32bit addressable, so we will change */ 3017// note2: From RV770, the memory is more than 32bit addressable, so we will change
2478/* ucTableFormatRevision=1,ucTableContentRevision=4, the structure remains */ 3018// ucTableFormatRevision=1,ucTableContentRevision=4, the structure remains
2479/* exactly same as 1.1 and 1.2 (1.3 is never in use), but ulStartAddrUsedByFirmware */ 3019// exactly same as 1.1 and 1.2 (1.3 is never in use), but ulStartAddrUsedByFirmware
2480/* (in offset to start of memory address) is KB aligned instead of byte aligned. */ 3020// (in offset to start of memory address) is KB aligned instead of byte aligned.
2481/***********************************************************************************/ 3021/***********************************************************************************/
3022// Note3:
3023/* If we change usReserved to "usFBUsedbyDrvInKB", then to VBIOS this usFBUsedbyDrvInKB is a predefined, unchanged constant across VGA or non VGA adapter,
3024for CAIL, the size of the FB access area is known; the only thing missing is the offset of the FB access area, so we can have:
3025
3026If (ulStartAddrUsedByFirmware!=0)
3027FBAccessAreaOffset= ulStartAddrUsedByFirmware - usFBUsedbyDrvInKB;
3028Reserved area has been claimed by VBIOS including this FB access area; CAIL doesn't need to reserve any extra area for this purpose
3029else //Non VGA case
3030 if (FB_Size<=2Gb)
3031 FBAccessAreaOffset= FB_Size - usFBUsedbyDrvInKB;
3032 else
3033 FBAccessAreaOffset= Aper_Size - usFBUsedbyDrvInKB
3034
3035CAIL needs to claim a reserved area defined by FBAccessAreaOffset and usFBUsedbyDrvInKB in the non VGA case.*/
3036
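// The Note3 pseudocode rendered as C for clarity; every parameter is an
// assumption standing in for a value CAIL or the driver already holds.
static ULONG fb_access_area_offset(ULONG start_addr_used_by_fw,
                                   ULONG fb_size, ULONG aper_size,
                                   ULONG fb_used_by_drv)
{
    if (start_addr_used_by_fw != 0)                 /* VGA: inside VBIOS reservation */
        return start_addr_used_by_fw - fb_used_by_drv;
    else if (fb_size <= 0x80000000UL)               /* non VGA, FB_Size <= 2Gb */
        return fb_size - fb_used_by_drv;
    else
        return aper_size - fb_used_by_drv;
}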
2482#define ATOM_MAX_FIRMWARE_VRAM_USAGE_INFO 1 3037#define ATOM_MAX_FIRMWARE_VRAM_USAGE_INFO 1
2483 3038
2484typedef struct _ATOM_FIRMWARE_VRAM_RESERVE_INFO { 3039typedef struct _ATOM_FIRMWARE_VRAM_RESERVE_INFO
2485 ULONG ulStartAddrUsedByFirmware; 3040{
2486 USHORT usFirmwareUseInKb; 3041 ULONG ulStartAddrUsedByFirmware;
2487 USHORT usReserved; 3042 USHORT usFirmwareUseInKb;
2488} ATOM_FIRMWARE_VRAM_RESERVE_INFO; 3043 USHORT usReserved;
3044}ATOM_FIRMWARE_VRAM_RESERVE_INFO;
2489 3045
2490typedef struct _ATOM_VRAM_USAGE_BY_FIRMWARE { 3046typedef struct _ATOM_VRAM_USAGE_BY_FIRMWARE
2491 ATOM_COMMON_TABLE_HEADER sHeader; 3047{
2492 ATOM_FIRMWARE_VRAM_RESERVE_INFO 3048 ATOM_COMMON_TABLE_HEADER sHeader;
2493 asFirmwareVramReserveInfo[ATOM_MAX_FIRMWARE_VRAM_USAGE_INFO]; 3049 ATOM_FIRMWARE_VRAM_RESERVE_INFO asFirmwareVramReserveInfo[ATOM_MAX_FIRMWARE_VRAM_USAGE_INFO];
2494} ATOM_VRAM_USAGE_BY_FIRMWARE; 3050}ATOM_VRAM_USAGE_BY_FIRMWARE;
2495 3051
2496/****************************************************************************/ 3052// changed version to 1.5, to allow the driver to allocate the vram area for command table access.
2497/* Structure used in GPIO_Pin_LUTTable */ 3053typedef struct _ATOM_FIRMWARE_VRAM_RESERVE_INFO_V1_5
2498/****************************************************************************/ 3054{
2499typedef struct _ATOM_GPIO_PIN_ASSIGNMENT { 3055 ULONG ulStartAddrUsedByFirmware;
2500 USHORT usGpioPin_AIndex; 3056 USHORT usFirmwareUseInKb;
2501 UCHAR ucGpioPinBitShift; 3057 USHORT usFBUsedByDrvInKb;
2502 UCHAR ucGPIO_ID; 3058}ATOM_FIRMWARE_VRAM_RESERVE_INFO_V1_5;
2503} ATOM_GPIO_PIN_ASSIGNMENT;
2504 3059
2505typedef struct _ATOM_GPIO_PIN_LUT { 3060typedef struct _ATOM_VRAM_USAGE_BY_FIRMWARE_V1_5
2506 ATOM_COMMON_TABLE_HEADER sHeader; 3061{
2507 ATOM_GPIO_PIN_ASSIGNMENT asGPIO_Pin[1]; 3062 ATOM_COMMON_TABLE_HEADER sHeader;
2508} ATOM_GPIO_PIN_LUT; 3063 ATOM_FIRMWARE_VRAM_RESERVE_INFO_V1_5 asFirmwareVramReserveInfo[ATOM_MAX_FIRMWARE_VRAM_USAGE_INFO];
3064}ATOM_VRAM_USAGE_BY_FIRMWARE_V1_5;
3065
3066/****************************************************************************/
3067// Structure used in GPIO_Pin_LUTTable
3068/****************************************************************************/
3069typedef struct _ATOM_GPIO_PIN_ASSIGNMENT
3070{
3071 USHORT usGpioPin_AIndex;
3072 UCHAR ucGpioPinBitShift;
3073 UCHAR ucGPIO_ID;
3074}ATOM_GPIO_PIN_ASSIGNMENT;
2509 3075
2510/****************************************************************************/ 3076typedef struct _ATOM_GPIO_PIN_LUT
2511/* Structure used in ComponentVideoInfoTable */ 3077{
2512/****************************************************************************/ 3078 ATOM_COMMON_TABLE_HEADER sHeader;
3079 ATOM_GPIO_PIN_ASSIGNMENT asGPIO_Pin[1];
3080}ATOM_GPIO_PIN_LUT;
3081
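// asGPIO_Pin is declared with one element but tables usually carry more; a
// common pattern (sketch, assuming sHeader.usStructureSize covers the whole
// table) derives the real entry count:
static int atom_gpio_pin_count(ATOM_GPIO_PIN_LUT *lut)
{
    return (lut->sHeader.usStructureSize - sizeof(ATOM_COMMON_TABLE_HEADER)) /
           sizeof(ATOM_GPIO_PIN_ASSIGNMENT);
}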
3082/****************************************************************************/
3083// Structure used in ComponentVideoInfoTable
3084/****************************************************************************/
2513#define GPIO_PIN_ACTIVE_HIGH 0x1 3085#define GPIO_PIN_ACTIVE_HIGH 0x1
2514 3086
2515#define MAX_SUPPORTED_CV_STANDARDS 5 3087#define MAX_SUPPORTED_CV_STANDARDS 5
2516 3088
2517/* definitions for ATOM_D_INFO.ucSettings */ 3089// definitions for ATOM_D_INFO.ucSettings
2518#define ATOM_GPIO_SETTINGS_BITSHIFT_MASK 0x1F /* [4:0] */ 3090#define ATOM_GPIO_SETTINGS_BITSHIFT_MASK 0x1F // [4:0]
2519#define ATOM_GPIO_SETTINGS_RESERVED_MASK 0x60 /* [6:5] = must be zeroed out */ 3091#define ATOM_GPIO_SETTINGS_RESERVED_MASK 0x60 // [6:5] = must be zeroed out
2520#define ATOM_GPIO_SETTINGS_ACTIVE_MASK 0x80 /* [7] */ 3092#define ATOM_GPIO_SETTINGS_ACTIVE_MASK 0x80 // [7]
2521 3093
2522typedef struct _ATOM_GPIO_INFO { 3094typedef struct _ATOM_GPIO_INFO
2523 USHORT usAOffset; 3095{
2524 UCHAR ucSettings; 3096 USHORT usAOffset;
2525 UCHAR ucReserved; 3097 UCHAR ucSettings;
2526} ATOM_GPIO_INFO; 3098 UCHAR ucReserved;
3099}ATOM_GPIO_INFO;
2527 3100
2528/* definitions for ATOM_COMPONENT_VIDEO_INFO.ucMiscInfo (bit vector) */ 3101// definitions for ATOM_COMPONENT_VIDEO_INFO.ucMiscInfo (bit vector)
2529#define ATOM_CV_RESTRICT_FORMAT_SELECTION 0x2 3102#define ATOM_CV_RESTRICT_FORMAT_SELECTION 0x2
2530 3103
2531/* definitions for ATOM_COMPONENT_VIDEO_INFO.uc480i/uc480p/uc720p/uc1080i */ 3104// definitions for ATOM_COMPONENT_VIDEO_INFO.uc480i/uc480p/uc720p/uc1080i
2532#define ATOM_GPIO_DEFAULT_MODE_EN 0x80 /* [7]; */ 3105#define ATOM_GPIO_DEFAULT_MODE_EN 0x80 //[7];
2533#define ATOM_GPIO_SETTING_PERMODE_MASK 0x7F /* [6:0] */ 3106#define ATOM_GPIO_SETTING_PERMODE_MASK 0x7F //[6:0]
2534 3107
2535/* definitions for ATOM_COMPONENT_VIDEO_INFO.ucLetterBoxMode */ 3108// definitions for ATOM_COMPONENT_VIDEO_INFO.ucLetterBoxMode
2536/* Line 3 out put 5V. */ 3109//Line 3 out put 5V.
2537#define ATOM_CV_LINE3_ASPECTRATIO_16_9_GPIO_A 0x01 /* represent gpio 3 state for 16:9 */ 3110#define ATOM_CV_LINE3_ASPECTRATIO_16_9_GPIO_A 0x01 //represent gpio 3 state for 16:9
2538#define ATOM_CV_LINE3_ASPECTRATIO_16_9_GPIO_B 0x02 /* represent gpio 4 state for 16:9 */ 3111#define ATOM_CV_LINE3_ASPECTRATIO_16_9_GPIO_B 0x02 //represent gpio 4 state for 16:9
2539#define ATOM_CV_LINE3_ASPECTRATIO_16_9_GPIO_SHIFT 0x0 3112#define ATOM_CV_LINE3_ASPECTRATIO_16_9_GPIO_SHIFT 0x0
2540 3113
2541/* Line 3 out put 2.2V */ 3114//Line 3 out put 2.2V
2542#define ATOM_CV_LINE3_ASPECTRATIO_4_3_LETBOX_GPIO_A 0x04 /* represent gpio 3 state for 4:3 Letter box */ 3115#define ATOM_CV_LINE3_ASPECTRATIO_4_3_LETBOX_GPIO_A 0x04 //represent gpio 3 state for 4:3 Letter box
2543#define ATOM_CV_LINE3_ASPECTRATIO_4_3_LETBOX_GPIO_B 0x08 /* represent gpio 4 state for 4:3 Letter box */ 3116#define ATOM_CV_LINE3_ASPECTRATIO_4_3_LETBOX_GPIO_B 0x08 //represent gpio 4 state for 4:3 Letter box
2544#define ATOM_CV_LINE3_ASPECTRATIO_4_3_LETBOX_GPIO_SHIFT 0x2 3117#define ATOM_CV_LINE3_ASPECTRATIO_4_3_LETBOX_GPIO_SHIFT 0x2
2545 3118
2546/* Line 3 out put 0V */ 3119//Line 3 out put 0V
2547#define ATOM_CV_LINE3_ASPECTRATIO_4_3_GPIO_A 0x10 /* represent gpio 3 state for 4:3 */ 3120#define ATOM_CV_LINE3_ASPECTRATIO_4_3_GPIO_A 0x10 //represent gpio 3 state for 4:3
2548#define ATOM_CV_LINE3_ASPECTRATIO_4_3_GPIO_B 0x20 /* represent gpio 4 state for 4:3 */ 3121#define ATOM_CV_LINE3_ASPECTRATIO_4_3_GPIO_B 0x20 //represent gpio 4 state for 4:3
2549#define ATOM_CV_LINE3_ASPECTRATIO_4_3_GPIO_SHIFT 0x4 3122#define ATOM_CV_LINE3_ASPECTRATIO_4_3_GPIO_SHIFT 0x4
2550 3123
2551#define ATOM_CV_LINE3_ASPECTRATIO_MASK 0x3F /* bit [5:0] */ 3124#define ATOM_CV_LINE3_ASPECTRATIO_MASK 0x3F // bit [5:0]
2552 3125
2553#define ATOM_CV_LINE3_ASPECTRATIO_EXIST 0x80 /* bit 7 */ 3126#define ATOM_CV_LINE3_ASPECTRATIO_EXIST 0x80 //bit 7
2554 3127
2555/* GPIO bit index in gpio setting per mode value, also represents the block no. in gpio blocks. */ 3128//GPIO bit index in gpio setting per mode value, also represents the block no. in gpio blocks.
2556#define ATOM_GPIO_INDEX_LINE3_ASPECRATIO_GPIO_A 3 /* bit 3 in uc480i/uc480p/uc720p/uc1080i, which represents the default gpio bit setting for the mode. */ 3129#define ATOM_GPIO_INDEX_LINE3_ASPECRATIO_GPIO_A 3 //bit 3 in uc480i/uc480p/uc720p/uc1080i, which represents the default gpio bit setting for the mode.
2557#define ATOM_GPIO_INDEX_LINE3_ASPECRATIO_GPIO_B 4 /* bit 4 in uc480i/uc480p/uc720p/uc1080i, which represents the default gpio bit setting for the mode. */ 3130#define ATOM_GPIO_INDEX_LINE3_ASPECRATIO_GPIO_B 4 //bit 4 in uc480i/uc480p/uc720p/uc1080i, which represents the default gpio bit setting for the mode.
2558 3131
2559typedef struct _ATOM_COMPONENT_VIDEO_INFO { 3132
2560 ATOM_COMMON_TABLE_HEADER sHeader; 3133typedef struct _ATOM_COMPONENT_VIDEO_INFO
2561 USHORT usMask_PinRegisterIndex; 3134{
2562 USHORT usEN_PinRegisterIndex; 3135 ATOM_COMMON_TABLE_HEADER sHeader;
2563 USHORT usY_PinRegisterIndex; 3136 USHORT usMask_PinRegisterIndex;
2564 USHORT usA_PinRegisterIndex; 3137 USHORT usEN_PinRegisterIndex;
2565 UCHAR ucBitShift; 3138 USHORT usY_PinRegisterIndex;
2566 UCHAR ucPinActiveState; /* ucPinActiveState: Bit0=1 active high, =0 active low */ 3139 USHORT usA_PinRegisterIndex;
2567 ATOM_DTD_FORMAT sReserved; /* must be zeroed out */ 3140 UCHAR ucBitShift;
2568 UCHAR ucMiscInfo; 3141 UCHAR ucPinActiveState; //ucPinActiveState: Bit0=1 active high, =0 active low
2569 UCHAR uc480i; 3142 ATOM_DTD_FORMAT sReserved; // must be zeroed out
2570 UCHAR uc480p; 3143 UCHAR ucMiscInfo;
2571 UCHAR uc720p; 3144 UCHAR uc480i;
2572 UCHAR uc1080i; 3145 UCHAR uc480p;
2573 UCHAR ucLetterBoxMode; 3146 UCHAR uc720p;
2574 UCHAR ucReserved[3]; 3147 UCHAR uc1080i;
2575 UCHAR ucNumOfWbGpioBlocks; /* For Component video D-Connector support. If zero, NTSC type connector */ 3148 UCHAR ucLetterBoxMode;
2576 ATOM_GPIO_INFO aWbGpioStateBlock[MAX_SUPPORTED_CV_STANDARDS]; 3149 UCHAR ucReserved[3];
2577 ATOM_DTD_FORMAT aModeTimings[MAX_SUPPORTED_CV_STANDARDS]; 3150 UCHAR ucNumOfWbGpioBlocks; //For Component video D-Connector support. If zero, NTSC type connector
2578} ATOM_COMPONENT_VIDEO_INFO; 3151 ATOM_GPIO_INFO aWbGpioStateBlock[MAX_SUPPORTED_CV_STANDARDS];
2579 3152 ATOM_DTD_FORMAT aModeTimings[MAX_SUPPORTED_CV_STANDARDS];
2580/* ucTableFormatRevision=2 */ 3153}ATOM_COMPONENT_VIDEO_INFO;
2581/* ucTableContentRevision=1 */ 3154
2582typedef struct _ATOM_COMPONENT_VIDEO_INFO_V21 { 3155//ucTableFormatRevision=2
2583 ATOM_COMMON_TABLE_HEADER sHeader; 3156//ucTableContentRevision=1
2584 UCHAR ucMiscInfo; 3157typedef struct _ATOM_COMPONENT_VIDEO_INFO_V21
2585 UCHAR uc480i; 3158{
2586 UCHAR uc480p; 3159 ATOM_COMMON_TABLE_HEADER sHeader;
2587 UCHAR uc720p; 3160 UCHAR ucMiscInfo;
2588 UCHAR uc1080i; 3161 UCHAR uc480i;
2589 UCHAR ucReserved; 3162 UCHAR uc480p;
2590 UCHAR ucLetterBoxMode; 3163 UCHAR uc720p;
2591 UCHAR ucNumOfWbGpioBlocks; /* For Component video D-Connector support. If zero, NTSC type connector */ 3164 UCHAR uc1080i;
2592 ATOM_GPIO_INFO aWbGpioStateBlock[MAX_SUPPORTED_CV_STANDARDS]; 3165 UCHAR ucReserved;
2593 ATOM_DTD_FORMAT aModeTimings[MAX_SUPPORTED_CV_STANDARDS]; 3166 UCHAR ucLetterBoxMode;
2594} ATOM_COMPONENT_VIDEO_INFO_V21; 3167 UCHAR ucNumOfWbGpioBlocks; //For Component video D-Connector support. If zero, NTSC type connector
3168 ATOM_GPIO_INFO aWbGpioStateBlock[MAX_SUPPORTED_CV_STANDARDS];
3169 ATOM_DTD_FORMAT aModeTimings[MAX_SUPPORTED_CV_STANDARDS];
3170}ATOM_COMPONENT_VIDEO_INFO_V21;
2595 3171
2596#define ATOM_COMPONENT_VIDEO_INFO_LAST ATOM_COMPONENT_VIDEO_INFO_V21 3172#define ATOM_COMPONENT_VIDEO_INFO_LAST ATOM_COMPONENT_VIDEO_INFO_V21
2597 3173
2598/****************************************************************************/ 3174/****************************************************************************/
2599/* Structure used in object_InfoTable */ 3175// Structure used in object_InfoTable
2600/****************************************************************************/ 3176/****************************************************************************/
2601typedef struct _ATOM_OBJECT_HEADER { 3177typedef struct _ATOM_OBJECT_HEADER
2602 ATOM_COMMON_TABLE_HEADER sHeader; 3178{
2603 USHORT usDeviceSupport; 3179 ATOM_COMMON_TABLE_HEADER sHeader;
2604 USHORT usConnectorObjectTableOffset; 3180 USHORT usDeviceSupport;
2605 USHORT usRouterObjectTableOffset; 3181 USHORT usConnectorObjectTableOffset;
2606 USHORT usEncoderObjectTableOffset; 3182 USHORT usRouterObjectTableOffset;
2607 USHORT usProtectionObjectTableOffset; /* only available when Protection block is independent. */ 3183 USHORT usEncoderObjectTableOffset;
2608 USHORT usDisplayPathTableOffset; 3184 USHORT usProtectionObjectTableOffset; //only available when Protection block is independent.
2609} ATOM_OBJECT_HEADER; 3185 USHORT usDisplayPathTableOffset;
2610 3186}ATOM_OBJECT_HEADER;
2611typedef struct _ATOM_DISPLAY_OBJECT_PATH { 3187
2612 USHORT usDeviceTag; /* supported device */ 3188typedef struct _ATOM_OBJECT_HEADER_V3
2613 USHORT usSize; /* the size of ATOM_DISPLAY_OBJECT_PATH */ 3189{
2614 USHORT usConnObjectId; /* Connector Object ID */ 3190 ATOM_COMMON_TABLE_HEADER sHeader;
2615 USHORT usGPUObjectId; /* GPU ID */ 3191 USHORT usDeviceSupport;
2616 USHORT usGraphicObjIds[1]; /* 1st Encoder Obj source from GPU to last Graphic Obj destined to connector. */ 3192 USHORT usConnectorObjectTableOffset;
2617} ATOM_DISPLAY_OBJECT_PATH; 3193 USHORT usRouterObjectTableOffset;
2618 3194 USHORT usEncoderObjectTableOffset;
2619typedef struct _ATOM_DISPLAY_OBJECT_PATH_TABLE { 3195 USHORT usProtectionObjectTableOffset; //only available when Protection block is independent.
2620 UCHAR ucNumOfDispPath; 3196 USHORT usDisplayPathTableOffset;
2621 UCHAR ucVersion; 3197 USHORT usMiscObjectTableOffset;
2622 UCHAR ucPadding[2]; 3198}ATOM_OBJECT_HEADER_V3;
2623 ATOM_DISPLAY_OBJECT_PATH asDispPath[1]; 3199
2624} ATOM_DISPLAY_OBJECT_PATH_TABLE; 3200typedef struct _ATOM_DISPLAY_OBJECT_PATH
2625 3201{
2626typedef struct _ATOM_OBJECT /* each object has this structure */ 3202 USHORT usDeviceTag; //supported device
2627{ 3203 USHORT usSize; //the size of ATOM_DISPLAY_OBJECT_PATH
2628 USHORT usObjectID; 3204 USHORT usConnObjectId; //Connector Object ID
2629 USHORT usSrcDstTableOffset; 3205 USHORT usGPUObjectId; //GPU ID
2630 USHORT usRecordOffset; /* this points to a bunch of records defined below */ 3206 USHORT usGraphicObjIds[1]; //1st Encoder Obj source from GPU to last Graphic Obj destined to connector.
2631 USHORT usReserved; 3207}ATOM_DISPLAY_OBJECT_PATH;
2632} ATOM_OBJECT; 3208
2633 3209typedef struct _ATOM_DISPLAY_OBJECT_PATH_TABLE
2634typedef struct _ATOM_OBJECT_TABLE /* The four object table offsets above point to a set of objects that all have this structure */ 3210{
2635{ 3211 UCHAR ucNumOfDispPath;
2636 UCHAR ucNumberOfObjects; 3212 UCHAR ucVersion;
2637 UCHAR ucPadding[3]; 3213 UCHAR ucPadding[2];
2638 ATOM_OBJECT asObjects[1]; 3214 ATOM_DISPLAY_OBJECT_PATH asDispPath[1];
2639} ATOM_OBJECT_TABLE; 3215}ATOM_DISPLAY_OBJECT_PATH_TABLE;
2640 3216
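// Since each path reports its own usSize, walking asDispPath is a byte offset
// iteration rather than plain array indexing (illustrative sketch):
static void atom_walk_disp_paths(ATOM_DISPLAY_OBJECT_PATH_TABLE *tbl)
{
    ATOM_DISPLAY_OBJECT_PATH *path = tbl->asDispPath;
    int i;
    for (i = 0; i < tbl->ucNumOfDispPath; i++) {
        /* inspect path->usDeviceTag / path->usConnObjectId here */
        path = (ATOM_DISPLAY_OBJECT_PATH *)((UCHAR *)path + path->usSize);
    }
}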
2641typedef struct _ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT /* usSrcDstTableOffset pointing to this structure */ 3217
2642{ 3218typedef struct _ATOM_OBJECT //each object has this structure
2643 UCHAR ucNumberOfSrc; 3219{
2644 USHORT usSrcObjectID[1]; 3220 USHORT usObjectID;
2645 UCHAR ucNumberOfDst; 3221 USHORT usSrcDstTableOffset;
2646 USHORT usDstObjectID[1]; 3222 USHORT usRecordOffset; //this points to a bunch of records defined below
2647} ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT; 3223 USHORT usReserved;
2648 3224}ATOM_OBJECT;
2649/* Related definitions, all records are different but they have a common header */ 3225
2650typedef struct _ATOM_COMMON_RECORD_HEADER { 3226typedef struct _ATOM_OBJECT_TABLE //The four object table offsets above point to a set of objects that all have this structure
2651 UCHAR ucRecordType; /* An emun to indicate the record type */ 3227{
2652 UCHAR ucRecordSize; /* The size of the whole record in byte */ 3228 UCHAR ucNumberOfObjects;
2653} ATOM_COMMON_RECORD_HEADER; 3229 UCHAR ucPadding[3];
2654 3230 ATOM_OBJECT asObjects[1];
2655#define ATOM_I2C_RECORD_TYPE 1 3231}ATOM_OBJECT_TABLE;
3232
3233typedef struct _ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT //usSrcDstTableOffset pointing to this structure
3234{
3235 UCHAR ucNumberOfSrc;
3236 USHORT usSrcObjectID[1];
3237 UCHAR ucNumberOfDst;
3238 USHORT usDstObjectID[1];
3239}ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT;
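[Editorial sketch, not part of the patch: the src/dst table above is variable sized; the [1] arrays are allocation placeholders. Assuming the on-disk layout is a count byte, that many USHORT IDs, then a second count and its IDs, the destination list could be located like this (helper name is ours):]

static const USHORT *get_dst_object_ids(const UCHAR *table, UCHAR *num_dst)
{
	UCHAR num_src = table[0];                         /* ucNumberOfSrc */
	const UCHAR *p = table + 1 + num_src * sizeof(USHORT);

	*num_dst = p[0];                                  /* ucNumberOfDst */
	return (const USHORT *)(p + 1);                   /* usDstObjectID[] */
}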
+
+
+//Two definitions below are for OPM on MXM module designs
+
+#define EXT_HPDPIN_LUTINDEX_0 0
+#define EXT_HPDPIN_LUTINDEX_1 1
+#define EXT_HPDPIN_LUTINDEX_2 2
+#define EXT_HPDPIN_LUTINDEX_3 3
+#define EXT_HPDPIN_LUTINDEX_4 4
+#define EXT_HPDPIN_LUTINDEX_5 5
+#define EXT_HPDPIN_LUTINDEX_6 6
+#define EXT_HPDPIN_LUTINDEX_7 7
+#define MAX_NUMBER_OF_EXT_HPDPIN_LUT_ENTRIES (EXT_HPDPIN_LUTINDEX_7+1)
+
+#define EXT_AUXDDC_LUTINDEX_0 0
+#define EXT_AUXDDC_LUTINDEX_1 1
+#define EXT_AUXDDC_LUTINDEX_2 2
+#define EXT_AUXDDC_LUTINDEX_3 3
+#define EXT_AUXDDC_LUTINDEX_4 4
+#define EXT_AUXDDC_LUTINDEX_5 5
+#define EXT_AUXDDC_LUTINDEX_6 6
+#define EXT_AUXDDC_LUTINDEX_7 7
+#define MAX_NUMBER_OF_EXT_AUXDDC_LUT_ENTRIES (EXT_AUXDDC_LUTINDEX_7+1)
+
+typedef struct _EXT_DISPLAY_PATH
+{
+ USHORT usDeviceTag; //A bit vector to show what devices are supported
+ USHORT usDeviceACPIEnum; //16bit device ACPI id.
+ USHORT usDeviceConnector; //A physical connector for displays to plug in, using object connector definitions
+ UCHAR ucExtAUXDDCLutIndex; //An index into external AUX/DDC channel LUT
+ UCHAR ucExtHPDPINLutIndex; //An index into external HPD pin LUT
+ USHORT usExtEncoderObjId; //external encoder object id
+ USHORT usReserved[3];
+}EXT_DISPLAY_PATH;
+
+#define NUMBER_OF_UCHAR_FOR_GUID 16
+#define MAX_NUMBER_OF_EXT_DISPLAY_PATH 7
+
+typedef struct _ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ UCHAR ucGuid [NUMBER_OF_UCHAR_FOR_GUID]; // a GUID is a 16 byte long string
+ EXT_DISPLAY_PATH sPath[MAX_NUMBER_OF_EXT_DISPLAY_PATH]; // total of fixed 7 entries.
+ UCHAR ucChecksum; // a simple Checksum of the sum of whole structure equal to 0x0.
+ UCHAR Reserved [7]; // for potential expansion
+}ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO;
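[Editorial sketch, not part of the patch: per the ucChecksum comment, the byte sum of the whole structure is expected to come out to 0x0. A minimal validity check under that reading:]

static int ext_display_conn_info_valid(const ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO *info)
{
	const UCHAR *p = (const UCHAR *)info;
	UCHAR sum = 0;
	unsigned int i;

	for (i = 0; i < sizeof(*info); i++)
		sum += p[i];             /* includes ucChecksum itself */

	return sum == 0;
}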
+
+//Related definitions, all records are differnt but they have a commond header
+typedef struct _ATOM_COMMON_RECORD_HEADER
+{
+ UCHAR ucRecordType; //An emun to indicate the record type
+ UCHAR ucRecordSize; //The size of the whole record in byte
+}ATOM_COMMON_RECORD_HEADER;
+
+
+#define ATOM_I2C_RECORD_TYPE 1
 #define ATOM_HPD_INT_RECORD_TYPE 2
 #define ATOM_OUTPUT_PROTECTION_RECORD_TYPE 3
 #define ATOM_CONNECTOR_DEVICE_TAG_RECORD_TYPE 4
-#define ATOM_CONNECTOR_DVI_EXT_INPUT_RECORD_TYPE 5 /* Obsolete, switch to use GPIO_CNTL_RECORD_TYPE */
-#define ATOM_ENCODER_FPGA_CONTROL_RECORD_TYPE 6 /* Obsolete, switch to use GPIO_CNTL_RECORD_TYPE */
+#define ATOM_CONNECTOR_DVI_EXT_INPUT_RECORD_TYPE 5 //Obsolete, switch to use GPIO_CNTL_RECORD_TYPE
+#define ATOM_ENCODER_FPGA_CONTROL_RECORD_TYPE 6 //Obsolete, switch to use GPIO_CNTL_RECORD_TYPE
 #define ATOM_CONNECTOR_CVTV_SHARE_DIN_RECORD_TYPE 7
-#define ATOM_JTAG_RECORD_TYPE 8 /* Obsolete, switch to use GPIO_CNTL_RECORD_TYPE */
+#define ATOM_JTAG_RECORD_TYPE 8 //Obsolete, switch to use GPIO_CNTL_RECORD_TYPE
 #define ATOM_OBJECT_GPIO_CNTL_RECORD_TYPE 9
 #define ATOM_ENCODER_DVO_CF_RECORD_TYPE 10
 #define ATOM_CONNECTOR_CF_RECORD_TYPE 11
 #define ATOM_CONNECTOR_HARDCODE_DTD_RECORD_TYPE 12
 #define ATOM_CONNECTOR_PCIE_SUBCONNECTOR_RECORD_TYPE 13
 #define ATOM_ROUTER_DDC_PATH_SELECT_RECORD_TYPE 14
 #define ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD_TYPE 15
-
-/* Must be updated when new record type is added,equal to that record definition! */
-#define ATOM_MAX_OBJECT_RECORD_NUMBER ATOM_CONNECTOR_CF_RECORD_TYPE
-
-typedef struct _ATOM_I2C_RECORD {
- ATOM_COMMON_RECORD_HEADER sheader;
- ATOM_I2C_ID_CONFIG sucI2cId;
- UCHAR ucI2CAddr; /* The slave address, it's 0 when the record is attached to connector for DDC */
-} ATOM_I2C_RECORD;
-
-typedef struct _ATOM_HPD_INT_RECORD {
- ATOM_COMMON_RECORD_HEADER sheader;
- UCHAR ucHPDIntGPIOID; /* Corresponding block in GPIO_PIN_INFO table gives the pin info */
- UCHAR ucPlugged_PinState;
-} ATOM_HPD_INT_RECORD;
-
-typedef struct _ATOM_OUTPUT_PROTECTION_RECORD {
- ATOM_COMMON_RECORD_HEADER sheader;
- UCHAR ucProtectionFlag;
- UCHAR ucReserved;
-} ATOM_OUTPUT_PROTECTION_RECORD;
-
-typedef struct _ATOM_CONNECTOR_DEVICE_TAG {
- ULONG ulACPIDeviceEnum; /* Reserved for now */
- USHORT usDeviceID; /* This Id is same as "ATOM_DEVICE_XXX_SUPPORT" */
- USHORT usPadding;
-} ATOM_CONNECTOR_DEVICE_TAG;
-
-typedef struct _ATOM_CONNECTOR_DEVICE_TAG_RECORD {
- ATOM_COMMON_RECORD_HEADER sheader;
- UCHAR ucNumberOfDevice;
- UCHAR ucReserved;
- ATOM_CONNECTOR_DEVICE_TAG asDeviceTag[1]; /* This Id is same as "ATOM_DEVICE_XXX_SUPPORT", 1 is only for allocation */
-} ATOM_CONNECTOR_DEVICE_TAG_RECORD;
-
-typedef struct _ATOM_CONNECTOR_DVI_EXT_INPUT_RECORD {
- ATOM_COMMON_RECORD_HEADER sheader;
- UCHAR ucConfigGPIOID;
- UCHAR ucConfigGPIOState; /* Set to 1 when it's active high to enable external flow in */
- UCHAR ucFlowinGPIPID;
- UCHAR ucExtInGPIPID;
-} ATOM_CONNECTOR_DVI_EXT_INPUT_RECORD;
-
-typedef struct _ATOM_ENCODER_FPGA_CONTROL_RECORD {
- ATOM_COMMON_RECORD_HEADER sheader;
- UCHAR ucCTL1GPIO_ID;
- UCHAR ucCTL1GPIOState; /* Set to 1 when it's active high */
- UCHAR ucCTL2GPIO_ID;
- UCHAR ucCTL2GPIOState; /* Set to 1 when it's active high */
- UCHAR ucCTL3GPIO_ID;
- UCHAR ucCTL3GPIOState; /* Set to 1 when it's active high */
- UCHAR ucCTLFPGA_IN_ID;
- UCHAR ucPadding[3];
-} ATOM_ENCODER_FPGA_CONTROL_RECORD;
-
-typedef struct _ATOM_CONNECTOR_CVTV_SHARE_DIN_RECORD {
- ATOM_COMMON_RECORD_HEADER sheader;
- UCHAR ucGPIOID; /* Corresponding block in GPIO_PIN_INFO table gives the pin info */
- UCHAR ucTVActiveState; /* Indicating when the pin==0 or 1 when TV is connected */
-} ATOM_CONNECTOR_CVTV_SHARE_DIN_RECORD;
-
-typedef struct _ATOM_JTAG_RECORD {
- ATOM_COMMON_RECORD_HEADER sheader;
- UCHAR ucTMSGPIO_ID;
- UCHAR ucTMSGPIOState; /* Set to 1 when it's active high */
- UCHAR ucTCKGPIO_ID;
- UCHAR ucTCKGPIOState; /* Set to 1 when it's active high */
- UCHAR ucTDOGPIO_ID;
- UCHAR ucTDOGPIOState; /* Set to 1 when it's active high */
- UCHAR ucTDIGPIO_ID;
- UCHAR ucTDIGPIOState; /* Set to 1 when it's active high */
- UCHAR ucPadding[2];
-} ATOM_JTAG_RECORD;
-
-/* The following generic object gpio pin control record type will replace JTAG_RECORD/FPGA_CONTROL_RECORD/DVI_EXT_INPUT_RECORD above gradually */
-typedef struct _ATOM_GPIO_PIN_CONTROL_PAIR {
- UCHAR ucGPIOID; /* GPIO_ID, find the corresponding ID in GPIO_LUT table */
- UCHAR ucGPIO_PinState; /* Pin state showing how to set-up the pin */
-} ATOM_GPIO_PIN_CONTROL_PAIR;
-
-typedef struct _ATOM_OBJECT_GPIO_CNTL_RECORD {
- ATOM_COMMON_RECORD_HEADER sheader;
- UCHAR ucFlags; /* Future expnadibility */
- UCHAR ucNumberOfPins; /* Number of GPIO pins used to control the object */
- ATOM_GPIO_PIN_CONTROL_PAIR asGpio[1]; /* the real gpio pin pair determined by number of pins ucNumberOfPins */
-} ATOM_OBJECT_GPIO_CNTL_RECORD;
-
-/* Definitions for GPIO pin state */
+#define ATOM_CONNECTOR_HPDPIN_LUT_RECORD_TYPE 16 //This is for the case when connectors are not known to object table
+#define ATOM_CONNECTOR_AUXDDC_LUT_RECORD_TYPE 17 //This is for the case when connectors are not known to object table
+#define ATOM_OBJECT_LINK_RECORD_TYPE 18 //Once this record is present under one object, it indicats the oobject is linked to another obj described by the record
+#define ATOM_CONNECTOR_REMOTE_CAP_RECORD_TYPE 19
+
+
+//Must be updated when new record type is added,equal to that record definition!
+#define ATOM_MAX_OBJECT_RECORD_NUMBER ATOM_CONNECTOR_REMOTE_CAP_RECORD_TYPE
+
+typedef struct _ATOM_I2C_RECORD
+{
+ ATOM_COMMON_RECORD_HEADER sheader;
+ ATOM_I2C_ID_CONFIG sucI2cId;
+ UCHAR ucI2CAddr; //The slave address, it's 0 when the record is attached to connector for DDC
+}ATOM_I2C_RECORD;
+
+typedef struct _ATOM_HPD_INT_RECORD
+{
+ ATOM_COMMON_RECORD_HEADER sheader;
+ UCHAR ucHPDIntGPIOID; //Corresponding block in GPIO_PIN_INFO table gives the pin info
+ UCHAR ucPlugged_PinState;
+}ATOM_HPD_INT_RECORD;
+
+
+typedef struct _ATOM_OUTPUT_PROTECTION_RECORD
+{
+ ATOM_COMMON_RECORD_HEADER sheader;
+ UCHAR ucProtectionFlag;
+ UCHAR ucReserved;
+}ATOM_OUTPUT_PROTECTION_RECORD;
+
+typedef struct _ATOM_CONNECTOR_DEVICE_TAG
+{
+ ULONG ulACPIDeviceEnum; //Reserved for now
+ USHORT usDeviceID; //This Id is same as "ATOM_DEVICE_XXX_SUPPORT"
+ USHORT usPadding;
+}ATOM_CONNECTOR_DEVICE_TAG;
+
+typedef struct _ATOM_CONNECTOR_DEVICE_TAG_RECORD
+{
+ ATOM_COMMON_RECORD_HEADER sheader;
+ UCHAR ucNumberOfDevice;
+ UCHAR ucReserved;
+ ATOM_CONNECTOR_DEVICE_TAG asDeviceTag[1]; //This Id is same as "ATOM_DEVICE_XXX_SUPPORT", 1 is only for allocation
+}ATOM_CONNECTOR_DEVICE_TAG_RECORD;
+
+
+typedef struct _ATOM_CONNECTOR_DVI_EXT_INPUT_RECORD
+{
+ ATOM_COMMON_RECORD_HEADER sheader;
+ UCHAR ucConfigGPIOID;
+ UCHAR ucConfigGPIOState; //Set to 1 when it's active high to enable external flow in
+ UCHAR ucFlowinGPIPID;
+ UCHAR ucExtInGPIPID;
+}ATOM_CONNECTOR_DVI_EXT_INPUT_RECORD;
+
+typedef struct _ATOM_ENCODER_FPGA_CONTROL_RECORD
+{
+ ATOM_COMMON_RECORD_HEADER sheader;
+ UCHAR ucCTL1GPIO_ID;
+ UCHAR ucCTL1GPIOState; //Set to 1 when it's active high
+ UCHAR ucCTL2GPIO_ID;
+ UCHAR ucCTL2GPIOState; //Set to 1 when it's active high
+ UCHAR ucCTL3GPIO_ID;
+ UCHAR ucCTL3GPIOState; //Set to 1 when it's active high
+ UCHAR ucCTLFPGA_IN_ID;
+ UCHAR ucPadding[3];
+}ATOM_ENCODER_FPGA_CONTROL_RECORD;
+
+typedef struct _ATOM_CONNECTOR_CVTV_SHARE_DIN_RECORD
+{
+ ATOM_COMMON_RECORD_HEADER sheader;
+ UCHAR ucGPIOID; //Corresponding block in GPIO_PIN_INFO table gives the pin info
+ UCHAR ucTVActiveState; //Indicating when the pin==0 or 1 when TV is connected
+}ATOM_CONNECTOR_CVTV_SHARE_DIN_RECORD;
+
+typedef struct _ATOM_JTAG_RECORD
+{
+ ATOM_COMMON_RECORD_HEADER sheader;
+ UCHAR ucTMSGPIO_ID;
+ UCHAR ucTMSGPIOState; //Set to 1 when it's active high
+ UCHAR ucTCKGPIO_ID;
+ UCHAR ucTCKGPIOState; //Set to 1 when it's active high
+ UCHAR ucTDOGPIO_ID;
+ UCHAR ucTDOGPIOState; //Set to 1 when it's active high
+ UCHAR ucTDIGPIO_ID;
+ UCHAR ucTDIGPIOState; //Set to 1 when it's active high
+ UCHAR ucPadding[2];
+}ATOM_JTAG_RECORD;
+
+
+//The following generic object gpio pin control record type will replace JTAG_RECORD/FPGA_CONTROL_RECORD/DVI_EXT_INPUT_RECORD above gradually
+typedef struct _ATOM_GPIO_PIN_CONTROL_PAIR
+{
+ UCHAR ucGPIOID; // GPIO_ID, find the corresponding ID in GPIO_LUT table
+ UCHAR ucGPIO_PinState; // Pin state showing how to set-up the pin
+}ATOM_GPIO_PIN_CONTROL_PAIR;
+
+typedef struct _ATOM_OBJECT_GPIO_CNTL_RECORD
+{
+ ATOM_COMMON_RECORD_HEADER sheader;
+ UCHAR ucFlags; // Future expnadibility
+ UCHAR ucNumberOfPins; // Number of GPIO pins used to control the object
+ ATOM_GPIO_PIN_CONTROL_PAIR asGpio[1]; // the real gpio pin pair determined by number of pins ucNumberOfPins
+}ATOM_OBJECT_GPIO_CNTL_RECORD;
+
+//Definitions for GPIO pin state
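[Editorial sketch, not part of the patch: every record type above starts with ATOM_COMMON_RECORD_HEADER, so the list behind an object's usRecordOffset can be walked by stepping ucRecordSize bytes at a time. The loop shape and terminator test are assumptions, not the driver's code:]

static void walk_object_records(const UCHAR *rec)
{
	const ATOM_COMMON_RECORD_HEADER *hdr;

	for (;;) {
		hdr = (const ATOM_COMMON_RECORD_HEADER *)rec;
		if (hdr->ucRecordType == 0 ||
		    hdr->ucRecordType > ATOM_MAX_OBJECT_RECORD_NUMBER)
			break;                    /* assumed end of list */
		if (hdr->ucRecordType == ATOM_I2C_RECORD_TYPE) {
			/* cast rec to ATOM_I2C_RECORD and read sucI2cId */
		}
		rec += hdr->ucRecordSize;         /* size covers the whole record */
	}
}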
 #define GPIO_PIN_TYPE_INPUT 0x00
 #define GPIO_PIN_TYPE_OUTPUT 0x10
 #define GPIO_PIN_TYPE_HW_CONTROL 0x20

-/* For GPIO_PIN_TYPE_OUTPUT the following is defined */
+//For GPIO_PIN_TYPE_OUTPUT the following is defined
 #define GPIO_PIN_OUTPUT_STATE_MASK 0x01
 #define GPIO_PIN_OUTPUT_STATE_SHIFT 0
 #define GPIO_PIN_STATE_ACTIVE_LOW 0x0
 #define GPIO_PIN_STATE_ACTIVE_HIGH 0x1
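[Editorial sketch, not part of the patch: for an output pin the desired level sits in bit 0 of ucGPIO_PinState per the mask/shift pair above; a one-line decode with an assumed helper name:]

static int gpio_pin_active_high(UCHAR uc_pin_state)
{
	return ((uc_pin_state & GPIO_PIN_OUTPUT_STATE_MASK) >>
		GPIO_PIN_OUTPUT_STATE_SHIFT) == GPIO_PIN_STATE_ACTIVE_HIGH;
}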

-typedef struct _ATOM_ENCODER_DVO_CF_RECORD {
- ATOM_COMMON_RECORD_HEADER sheader;
- ULONG ulStrengthControl; /* DVOA strength control for CF */
- UCHAR ucPadding[2];
-} ATOM_ENCODER_DVO_CF_RECORD;
+// Indexes to GPIO array in GLSync record
+#define ATOM_GPIO_INDEX_GLSYNC_REFCLK 0
+#define ATOM_GPIO_INDEX_GLSYNC_HSYNC 1
+#define ATOM_GPIO_INDEX_GLSYNC_VSYNC 2
+#define ATOM_GPIO_INDEX_GLSYNC_SWAP_REQ 3
+#define ATOM_GPIO_INDEX_GLSYNC_SWAP_GNT 4
+#define ATOM_GPIO_INDEX_GLSYNC_INTERRUPT 5
+#define ATOM_GPIO_INDEX_GLSYNC_V_RESET 6
+#define ATOM_GPIO_INDEX_GLSYNC_MAX 7
+
+typedef struct _ATOM_ENCODER_DVO_CF_RECORD
+{
+ ATOM_COMMON_RECORD_HEADER sheader;
+ ULONG ulStrengthControl; // DVOA strength control for CF
+ UCHAR ucPadding[2];
+}ATOM_ENCODER_DVO_CF_RECORD;

-/* value for ATOM_CONNECTOR_CF_RECORD.ucConnectedDvoBundle */
+// value for ATOM_CONNECTOR_CF_RECORD.ucConnectedDvoBundle
 #define ATOM_CONNECTOR_CF_RECORD_CONNECTED_UPPER12BITBUNDLEA 1
 #define ATOM_CONNECTOR_CF_RECORD_CONNECTED_LOWER12BITBUNDLEB 2

-typedef struct _ATOM_CONNECTOR_CF_RECORD {
- ATOM_COMMON_RECORD_HEADER sheader;
- USHORT usMaxPixClk;
- UCHAR ucFlowCntlGpioId;
- UCHAR ucSwapCntlGpioId;
- UCHAR ucConnectedDvoBundle;
- UCHAR ucPadding;
-} ATOM_CONNECTOR_CF_RECORD;
-
-typedef struct _ATOM_CONNECTOR_HARDCODE_DTD_RECORD {
- ATOM_COMMON_RECORD_HEADER sheader;
- ATOM_DTD_FORMAT asTiming;
-} ATOM_CONNECTOR_HARDCODE_DTD_RECORD;
-
-typedef struct _ATOM_CONNECTOR_PCIE_SUBCONNECTOR_RECORD {
- ATOM_COMMON_RECORD_HEADER sheader; /* ATOM_CONNECTOR_PCIE_SUBCONNECTOR_RECORD_TYPE */
- UCHAR ucSubConnectorType; /* CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D|X_ID_DUAL_LINK_DVI_D|HDMI_TYPE_A */
- UCHAR ucReserved;
-} ATOM_CONNECTOR_PCIE_SUBCONNECTOR_RECORD;
-
-typedef struct _ATOM_ROUTER_DDC_PATH_SELECT_RECORD {
- ATOM_COMMON_RECORD_HEADER sheader;
- UCHAR ucMuxType; /* decide the number of ucMuxState, =0, no pin state, =1: single state with complement, >1: multiple state */
- UCHAR ucMuxControlPin;
- UCHAR ucMuxState[2]; /* for alligment purpose */
-} ATOM_ROUTER_DDC_PATH_SELECT_RECORD;
-
-typedef struct _ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD {
- ATOM_COMMON_RECORD_HEADER sheader;
- UCHAR ucMuxType;
- UCHAR ucMuxControlPin;
- UCHAR ucMuxState[2]; /* for alligment purpose */
-} ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD;
-
-/* define ucMuxType */
+typedef struct _ATOM_CONNECTOR_CF_RECORD
+{
+ ATOM_COMMON_RECORD_HEADER sheader;
+ USHORT usMaxPixClk;
+ UCHAR ucFlowCntlGpioId;
+ UCHAR ucSwapCntlGpioId;
+ UCHAR ucConnectedDvoBundle;
+ UCHAR ucPadding;
+}ATOM_CONNECTOR_CF_RECORD;
+
+typedef struct _ATOM_CONNECTOR_HARDCODE_DTD_RECORD
+{
+ ATOM_COMMON_RECORD_HEADER sheader;
+ ATOM_DTD_FORMAT asTiming;
+}ATOM_CONNECTOR_HARDCODE_DTD_RECORD;
+
+typedef struct _ATOM_CONNECTOR_PCIE_SUBCONNECTOR_RECORD
+{
+ ATOM_COMMON_RECORD_HEADER sheader; //ATOM_CONNECTOR_PCIE_SUBCONNECTOR_RECORD_TYPE
+ UCHAR ucSubConnectorType; //CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D|X_ID_DUAL_LINK_DVI_D|HDMI_TYPE_A
+ UCHAR ucReserved;
+}ATOM_CONNECTOR_PCIE_SUBCONNECTOR_RECORD;
+
+
+typedef struct _ATOM_ROUTER_DDC_PATH_SELECT_RECORD
+{
+ ATOM_COMMON_RECORD_HEADER sheader;
+ UCHAR ucMuxType; //decide the number of ucMuxState, =0, no pin state, =1: single state with complement, >1: multiple state
+ UCHAR ucMuxControlPin;
+ UCHAR ucMuxState[2]; //for alligment purpose
+}ATOM_ROUTER_DDC_PATH_SELECT_RECORD;
+
+typedef struct _ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD
+{
+ ATOM_COMMON_RECORD_HEADER sheader;
+ UCHAR ucMuxType;
+ UCHAR ucMuxControlPin;
+ UCHAR ucMuxState[2]; //for alligment purpose
+}ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD;
+
+// define ucMuxType
 #define ATOM_ROUTER_MUX_PIN_STATE_MASK 0x0f
 #define ATOM_ROUTER_MUX_PIN_SINGLE_STATE_COMPLEMENT 0x01

-/****************************************************************************/
-/* ASIC voltage data table */
-/****************************************************************************/
-typedef struct _ATOM_VOLTAGE_INFO_HEADER {
- USHORT usVDDCBaseLevel; /* In number of 50mv unit */
- USHORT usReserved; /* For possible extension table offset */
- UCHAR ucNumOfVoltageEntries;
- UCHAR ucBytesPerVoltageEntry;
- UCHAR ucVoltageStep; /* Indicating in how many mv increament is one step, 0.5mv unit */
- UCHAR ucDefaultVoltageEntry;
- UCHAR ucVoltageControlI2cLine;
- UCHAR ucVoltageControlAddress;
- UCHAR ucVoltageControlOffset;
-} ATOM_VOLTAGE_INFO_HEADER;
-
-typedef struct _ATOM_VOLTAGE_INFO {
- ATOM_COMMON_TABLE_HEADER sHeader;
- ATOM_VOLTAGE_INFO_HEADER viHeader;
- UCHAR ucVoltageEntries[64]; /* 64 is for allocation, the actual number of entry is present at ucNumOfVoltageEntries*ucBytesPerVoltageEntry */
-} ATOM_VOLTAGE_INFO;
-
-typedef struct _ATOM_VOLTAGE_FORMULA {
- USHORT usVoltageBaseLevel; /* In number of 1mv unit */
- USHORT usVoltageStep; /* Indicating in how many mv increament is one step, 1mv unit */
- UCHAR ucNumOfVoltageEntries; /* Number of Voltage Entry, which indicate max Voltage */
- UCHAR ucFlag; /* bit0=0 :step is 1mv =1 0.5mv */
- UCHAR ucBaseVID; /* if there is no lookup table, VID= BaseVID + ( Vol - BaseLevle ) /VoltageStep */
- UCHAR ucReserved;
- UCHAR ucVIDAdjustEntries[32]; /* 32 is for allocation, the actual number of entry is present at ucNumOfVoltageEntries */
-} ATOM_VOLTAGE_FORMULA;
-
-typedef struct _ATOM_VOLTAGE_CONTROL {
- UCHAR ucVoltageControlId; /* Indicate it is controlled by I2C or GPIO or HW state machine */
- UCHAR ucVoltageControlI2cLine;
- UCHAR ucVoltageControlAddress;
- UCHAR ucVoltageControlOffset;
- USHORT usGpioPin_AIndex; /* GPIO_PAD register index */
- UCHAR ucGpioPinBitShift[9]; /* at most 8 pin support 255 VIDs, termintate with 0xff */
- UCHAR ucReserved;
-} ATOM_VOLTAGE_CONTROL;
-
-/* Define ucVoltageControlId */
+typedef struct _ATOM_CONNECTOR_HPDPIN_LUT_RECORD //record for ATOM_CONNECTOR_HPDPIN_LUT_RECORD_TYPE
+{
+ ATOM_COMMON_RECORD_HEADER sheader;
+ UCHAR ucHPDPINMap[MAX_NUMBER_OF_EXT_HPDPIN_LUT_ENTRIES]; //An fixed size array which maps external pins to internal GPIO_PIN_INFO table
+}ATOM_CONNECTOR_HPDPIN_LUT_RECORD;
+
+typedef struct _ATOM_CONNECTOR_AUXDDC_LUT_RECORD //record for ATOM_CONNECTOR_AUXDDC_LUT_RECORD_TYPE
+{
+ ATOM_COMMON_RECORD_HEADER sheader;
+ ATOM_I2C_ID_CONFIG ucAUXDDCMap[MAX_NUMBER_OF_EXT_AUXDDC_LUT_ENTRIES]; //An fixed size array which maps external pins to internal DDC ID
+}ATOM_CONNECTOR_AUXDDC_LUT_RECORD;
+
+typedef struct _ATOM_OBJECT_LINK_RECORD
+{
+ ATOM_COMMON_RECORD_HEADER sheader;
+ USHORT usObjectID; //could be connector, encorder or other object in object.h
+}ATOM_OBJECT_LINK_RECORD;
+
+typedef struct _ATOM_CONNECTOR_REMOTE_CAP_RECORD
+{
+ ATOM_COMMON_RECORD_HEADER sheader;
+ USHORT usReserved;
+}ATOM_CONNECTOR_REMOTE_CAP_RECORD;
+
+/****************************************************************************/
+// ASIC voltage data table
+/****************************************************************************/
+typedef struct _ATOM_VOLTAGE_INFO_HEADER
+{
+ USHORT usVDDCBaseLevel; //In number of 50mv unit
+ USHORT usReserved; //For possible extension table offset
+ UCHAR ucNumOfVoltageEntries;
+ UCHAR ucBytesPerVoltageEntry;
+ UCHAR ucVoltageStep; //Indicating in how many mv increament is one step, 0.5mv unit
+ UCHAR ucDefaultVoltageEntry;
+ UCHAR ucVoltageControlI2cLine;
+ UCHAR ucVoltageControlAddress;
+ UCHAR ucVoltageControlOffset;
+}ATOM_VOLTAGE_INFO_HEADER;
+
+typedef struct _ATOM_VOLTAGE_INFO
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ATOM_VOLTAGE_INFO_HEADER viHeader;
+ UCHAR ucVoltageEntries[64]; //64 is for allocation, the actual number of entry is present at ucNumOfVoltageEntries*ucBytesPerVoltageEntry
+}ATOM_VOLTAGE_INFO;
+
+
+typedef struct _ATOM_VOLTAGE_FORMULA
+{
+ USHORT usVoltageBaseLevel; // In number of 1mv unit
+ USHORT usVoltageStep; // Indicating in how many mv increament is one step, 1mv unit
+ UCHAR ucNumOfVoltageEntries; // Number of Voltage Entry, which indicate max Voltage
+ UCHAR ucFlag; // bit0=0 :step is 1mv =1 0.5mv
+ UCHAR ucBaseVID; // if there is no lookup table, VID= BaseVID + ( Vol - BaseLevle ) /VoltageStep
+ UCHAR ucReserved;
+ UCHAR ucVIDAdjustEntries[32]; // 32 is for allocation, the actual number of entry is present at ucNumOfVoltageEntries
+}ATOM_VOLTAGE_FORMULA;
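[Editorial sketch, not part of the patch: the ucBaseVID comment spells out the no-lookup-table conversion, VID = BaseVID + (Vol - BaseLevel) / VoltageStep. Written out as C (ucFlag's 0.5mv-step bit is ignored here for brevity):]

static UCHAR voltage_mv_to_vid(const ATOM_VOLTAGE_FORMULA *f, USHORT voltage_mv)
{
	return f->ucBaseVID +
	       (voltage_mv - f->usVoltageBaseLevel) / f->usVoltageStep;
}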
+
+typedef struct _VOLTAGE_LUT_ENTRY
+{
+ USHORT usVoltageCode; // The Voltage ID, either GPIO or I2C code
+ USHORT usVoltageValue; // The corresponding Voltage Value, in mV
+}VOLTAGE_LUT_ENTRY;
+
+typedef struct _ATOM_VOLTAGE_FORMULA_V2
+{
+ UCHAR ucNumOfVoltageEntries; // Number of Voltage Entry, which indicate max Voltage
+ UCHAR ucReserved[3];
+ VOLTAGE_LUT_ENTRY asVIDAdjustEntries[32];// 32 is for allocation, the actual number of entries is in ucNumOfVoltageEntries
+}ATOM_VOLTAGE_FORMULA_V2;
+
+typedef struct _ATOM_VOLTAGE_CONTROL
+{
+ UCHAR ucVoltageControlId; //Indicate it is controlled by I2C or GPIO or HW state machine
+ UCHAR ucVoltageControlI2cLine;
+ UCHAR ucVoltageControlAddress;
+ UCHAR ucVoltageControlOffset;
+ USHORT usGpioPin_AIndex; //GPIO_PAD register index
+ UCHAR ucGpioPinBitShift[9]; //at most 8 pin support 255 VIDs, termintate with 0xff
+ UCHAR ucReserved;
+}ATOM_VOLTAGE_CONTROL;
+
+// Define ucVoltageControlId
 #define VOLTAGE_CONTROLLED_BY_HW 0x00
 #define VOLTAGE_CONTROLLED_BY_I2C_MASK 0x7F
 #define VOLTAGE_CONTROLLED_BY_GPIO 0x80
-#define VOLTAGE_CONTROL_ID_LM64 0x01 /* I2C control, used for R5xx Core Voltage */
-#define VOLTAGE_CONTROL_ID_DAC 0x02 /* I2C control, used for R5xx/R6xx MVDDC,MVDDQ or VDDCI */
-#define VOLTAGE_CONTROL_ID_VT116xM 0x03 /* I2C control, used for R6xx Core Voltage */
+#define VOLTAGE_CONTROL_ID_LM64 0x01 //I2C control, used for R5xx Core Voltage
+#define VOLTAGE_CONTROL_ID_DAC 0x02 //I2C control, used for R5xx/R6xx MVDDC,MVDDQ or VDDCI
+#define VOLTAGE_CONTROL_ID_VT116xM 0x03 //I2C control, used for R6xx Core Voltage
 #define VOLTAGE_CONTROL_ID_DS4402 0x04

-typedef struct _ATOM_VOLTAGE_OBJECT {
- UCHAR ucVoltageType; /* Indicate Voltage Source: VDDC, MVDDC, MVDDQ or MVDDCI */
- UCHAR ucSize; /* Size of Object */
- ATOM_VOLTAGE_CONTROL asControl; /* describ how to control */
- ATOM_VOLTAGE_FORMULA asFormula; /* Indicate How to convert real Voltage to VID */
-} ATOM_VOLTAGE_OBJECT;
-
-typedef struct _ATOM_VOLTAGE_OBJECT_INFO {
- ATOM_COMMON_TABLE_HEADER sHeader;
- ATOM_VOLTAGE_OBJECT asVoltageObj[3]; /* Info for Voltage control */
-} ATOM_VOLTAGE_OBJECT_INFO;
-
-typedef struct _ATOM_LEAKID_VOLTAGE {
- UCHAR ucLeakageId;
- UCHAR ucReserved;
- USHORT usVoltage;
-} ATOM_LEAKID_VOLTAGE;
-
-typedef struct _ATOM_ASIC_PROFILE_VOLTAGE {
- UCHAR ucProfileId;
- UCHAR ucReserved;
- USHORT usSize;
- USHORT usEfuseSpareStartAddr;
- USHORT usFuseIndex[8]; /* from LSB to MSB, Max 8bit,end of 0xffff if less than 8 efuse id, */
- ATOM_LEAKID_VOLTAGE asLeakVol[2]; /* Leakid and relatd voltage */
-} ATOM_ASIC_PROFILE_VOLTAGE;
-
-/* ucProfileId */
-#define ATOM_ASIC_PROFILE_ID_EFUSE_VOLTAGE 1
+typedef struct _ATOM_VOLTAGE_OBJECT
+{
+ UCHAR ucVoltageType; //Indicate Voltage Source: VDDC, MVDDC, MVDDQ or MVDDCI
+ UCHAR ucSize; //Size of Object
+ ATOM_VOLTAGE_CONTROL asControl; //describ how to control
+ ATOM_VOLTAGE_FORMULA asFormula; //Indicate How to convert real Voltage to VID
+}ATOM_VOLTAGE_OBJECT;
+
+typedef struct _ATOM_VOLTAGE_OBJECT_V2
+{
+ UCHAR ucVoltageType; //Indicate Voltage Source: VDDC, MVDDC, MVDDQ or MVDDCI
+ UCHAR ucSize; //Size of Object
+ ATOM_VOLTAGE_CONTROL asControl; //describ how to control
+ ATOM_VOLTAGE_FORMULA_V2 asFormula; //Indicate How to convert real Voltage to VID
+}ATOM_VOLTAGE_OBJECT_V2;
+
+typedef struct _ATOM_VOLTAGE_OBJECT_INFO
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ATOM_VOLTAGE_OBJECT asVoltageObj[3]; //Info for Voltage control
+}ATOM_VOLTAGE_OBJECT_INFO;
+
+typedef struct _ATOM_VOLTAGE_OBJECT_INFO_V2
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ATOM_VOLTAGE_OBJECT_V2 asVoltageObj[3]; //Info for Voltage control
+}ATOM_VOLTAGE_OBJECT_INFO_V2;
+
+typedef struct _ATOM_LEAKID_VOLTAGE
+{
+ UCHAR ucLeakageId;
+ UCHAR ucReserved;
+ USHORT usVoltage;
+}ATOM_LEAKID_VOLTAGE;
+
+typedef struct _ATOM_ASIC_PROFILE_VOLTAGE
+{
+ UCHAR ucProfileId;
+ UCHAR ucReserved;
+ USHORT usSize;
+ USHORT usEfuseSpareStartAddr;
+ USHORT usFuseIndex[8]; //from LSB to MSB, Max 8bit,end of 0xffff if less than 8 efuse id,
+ ATOM_LEAKID_VOLTAGE asLeakVol[2]; //Leakid and relatd voltage
+}ATOM_ASIC_PROFILE_VOLTAGE;
+
+//ucProfileId
+#define ATOM_ASIC_PROFILE_ID_EFUSE_VOLTAGE 1
 #define ATOM_ASIC_PROFILE_ID_EFUSE_PERFORMANCE_VOLTAGE 1
 #define ATOM_ASIC_PROFILE_ID_EFUSE_THERMAL_VOLTAGE 2

-typedef struct _ATOM_ASIC_PROFILING_INFO {
- ATOM_COMMON_TABLE_HEADER asHeader;
- ATOM_ASIC_PROFILE_VOLTAGE asVoltage;
-} ATOM_ASIC_PROFILING_INFO;
-
-typedef struct _ATOM_POWER_SOURCE_OBJECT {
- UCHAR ucPwrSrcId; /* Power source */
- UCHAR ucPwrSensorType; /* GPIO, I2C or none */
- UCHAR ucPwrSensId; /* if GPIO detect, it is GPIO id, if I2C detect, it is I2C id */
- UCHAR ucPwrSensSlaveAddr; /* Slave address if I2C detect */
- UCHAR ucPwrSensRegIndex; /* I2C register Index if I2C detect */
- UCHAR ucPwrSensRegBitMask; /* detect which bit is used if I2C detect */
- UCHAR ucPwrSensActiveState; /* high active or low active */
- UCHAR ucReserve[3]; /* reserve */
- USHORT usSensPwr; /* in unit of watt */
-} ATOM_POWER_SOURCE_OBJECT;
-
-typedef struct _ATOM_POWER_SOURCE_INFO {
- ATOM_COMMON_TABLE_HEADER asHeader;
- UCHAR asPwrbehave[16];
- ATOM_POWER_SOURCE_OBJECT asPwrObj[1];
-} ATOM_POWER_SOURCE_INFO;
-
-/* Define ucPwrSrcId */
+typedef struct _ATOM_ASIC_PROFILING_INFO
+{
+ ATOM_COMMON_TABLE_HEADER asHeader;
+ ATOM_ASIC_PROFILE_VOLTAGE asVoltage;
+}ATOM_ASIC_PROFILING_INFO;
+
+typedef struct _ATOM_POWER_SOURCE_OBJECT
+{
+ UCHAR ucPwrSrcId; // Power source
+ UCHAR ucPwrSensorType; // GPIO, I2C or none
+ UCHAR ucPwrSensId; // if GPIO detect, it is GPIO id, if I2C detect, it is I2C id
+ UCHAR ucPwrSensSlaveAddr; // Slave address if I2C detect
+ UCHAR ucPwrSensRegIndex; // I2C register Index if I2C detect
+ UCHAR ucPwrSensRegBitMask; // detect which bit is used if I2C detect
+ UCHAR ucPwrSensActiveState; // high active or low active
+ UCHAR ucReserve[3]; // reserve
+ USHORT usSensPwr; // in unit of watt
+}ATOM_POWER_SOURCE_OBJECT;
+
+typedef struct _ATOM_POWER_SOURCE_INFO
+{
+ ATOM_COMMON_TABLE_HEADER asHeader;
+ UCHAR asPwrbehave[16];
+ ATOM_POWER_SOURCE_OBJECT asPwrObj[1];
+}ATOM_POWER_SOURCE_INFO;
+
+
+//Define ucPwrSrcId
 #define POWERSOURCE_PCIE_ID1 0x00
 #define POWERSOURCE_6PIN_CONNECTOR_ID1 0x01
 #define POWERSOURCE_8PIN_CONNECTOR_ID1 0x02
 #define POWERSOURCE_6PIN_CONNECTOR_ID2 0x04
 #define POWERSOURCE_8PIN_CONNECTOR_ID2 0x08

-/* define ucPwrSensorId */
+//define ucPwrSensorId
 #define POWER_SENSOR_ALWAYS 0x00
 #define POWER_SENSOR_GPIO 0x01
 #define POWER_SENSOR_I2C 0x02

+typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V6
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ULONG ulBootUpEngineClock;
+ ULONG ulDentistVCOFreq;
+ ULONG ulBootUpUMAClock;
+ ULONG ulReserved1[8];
+ ULONG ulBootUpReqDisplayVector;
+ ULONG ulOtherDisplayMisc;
+ ULONG ulGPUCapInfo;
+ ULONG ulReserved2[3];
+ ULONG ulSystemConfig;
+ ULONG ulCPUCapInfo;
+ USHORT usMaxNBVoltage;
+ USHORT usMinNBVoltage;
+ USHORT usBootUpNBVoltage;
+ USHORT usExtDispConnInfoOffset;
+ UCHAR ucHtcTmpLmt;
+ UCHAR ucTjOffset;
+ UCHAR ucMemoryType;
+ UCHAR ucUMAChannelNumber;
+ ULONG ulCSR_M3_ARB_CNTL_DEFAULT[10];
+ ULONG ulCSR_M3_ARB_CNTL_UVD[10];
+ ULONG ulCSR_M3_ARB_CNTL_FS3D[10];
+ ULONG ulReserved3[42];
+ ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO sExtDispConnInfo;
+}ATOM_INTEGRATED_SYSTEM_INFO_V6;
+
+/**********************************************************************************************************************
+// ATOM_INTEGRATED_SYSTEM_INFO_V6 Description
+//ulBootUpEngineClock: VBIOS bootup Engine clock frequency, in 10kHz unit.
+//ulDentistVCOFreq: Dentist VCO clock in 10kHz unit.
+//ulBootUpUMAClock: System memory boot up clock frequency in 10Khz unit.
+//ulReserved1[8] Reserved by now, must be 0x0.
+//ulBootUpReqDisplayVector VBIOS boot up display IDs
+// ATOM_DEVICE_CRT1_SUPPORT 0x0001
+// ATOM_DEVICE_CRT2_SUPPORT 0x0010
+// ATOM_DEVICE_DFP1_SUPPORT 0x0008
+// ATOM_DEVICE_DFP6_SUPPORT 0x0040
+// ATOM_DEVICE_DFP2_SUPPORT 0x0080
+// ATOM_DEVICE_DFP3_SUPPORT 0x0200
+// ATOM_DEVICE_DFP4_SUPPORT 0x0400
+// ATOM_DEVICE_DFP5_SUPPORT 0x0800
+// ATOM_DEVICE_LCD1_SUPPORT 0x0002
+//ulOtherDisplayMisc Other display related flags, not defined yet.
+//ulGPUCapInfo TBD
+//ulReserved2[3] must be 0x0 for the reserved.
+//ulSystemConfig TBD
+//ulCPUCapInfo TBD
+//usMaxNBVoltage High NB voltage in unit of mv, calculated using current VDDNB (D24F2xDC) and VDDNB offset fuse.
+//usMinNBVoltage Low NB voltage in unit of mv, calculated using current VDDNB (D24F2xDC) and VDDNB offset fuse.
+//usBootUpNBVoltage Boot up NB voltage in unit of mv.
+//ucHtcTmpLmt Bit [22:16] of D24F3x64 Thermal Control (HTC) Register.
+//ucTjOffset Bit [28:22] of D24F3xE4 Thermtrip Status Register,may not be needed.
+//ucMemoryType [3:0]=1:DDR1;=2:DDR2;=3:DDR3.[7:4] is reserved.
+//ucUMAChannelNumber System memory channel numbers.
+//usExtDispConnectionInfoOffset ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO offset relative to beginning of this table.
+//ulCSR_M3_ARB_CNTL_DEFAULT[10] Arrays with values for CSR M3 arbiter for default
+//ulCSR_M3_ARB_CNTL_UVD[10] Arrays with values for CSR M3 arbiter for UVD playback.
+//ulCSR_M3_ARB_CNTL_FS3D[10] Arrays with values for CSR M3 arbiter for Full Screen 3D applications.
+**********************************************************************************************************************/
+
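[Editorial sketch, not part of the patch: the clock fields documented above are all in 10 kHz units, so the boot-up engine clock converts to MHz by dividing by 100; the helper name is ours:]

static inline ULONG boot_up_engine_clock_mhz(const ATOM_INTEGRATED_SYSTEM_INFO_V6 *info)
{
	return info->ulBootUpEngineClock / 100;   /* 10 kHz units -> MHz */
}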
 /**************************************************************************/
-/* This portion is only used when ext thermal chip or engine/memory clock SS chip is populated on a design */
-/* Memory SS Info Table */
-/* Define Memory Clock SS chip ID */
+// This portion is only used when ext thermal chip or engine/memory clock SS chip is populated on a design
+//Memory SS Info Table
+//Define Memory Clock SS chip ID
 #define ICS91719 1
 #define ICS91720 2

-/* Define one structure to inform SW a "block of data" writing to external SS chip via I2C protocol */
-typedef struct _ATOM_I2C_DATA_RECORD {
- UCHAR ucNunberOfBytes; /* Indicates how many bytes SW needs to write to the external ASIC for one block, besides to "Start" and "Stop" */
- UCHAR ucI2CData[1]; /* I2C data in bytes, should be less than 16 bytes usually */
-} ATOM_I2C_DATA_RECORD;
-
-/* Define one structure to inform SW how many blocks of data writing to external SS chip via I2C protocol, in addition to other information */
-typedef struct _ATOM_I2C_DEVICE_SETUP_INFO {
- ATOM_I2C_ID_CONFIG_ACCESS sucI2cId; /* I2C line and HW/SW assisted cap. */
- UCHAR ucSSChipID; /* SS chip being used */
- UCHAR ucSSChipSlaveAddr; /* Slave Address to set up this SS chip */
- UCHAR ucNumOfI2CDataRecords; /* number of data block */
- ATOM_I2C_DATA_RECORD asI2CData[1];
-} ATOM_I2C_DEVICE_SETUP_INFO;
-
-/* ========================================================================================== */
-typedef struct _ATOM_ASIC_MVDD_INFO {
- ATOM_COMMON_TABLE_HEADER sHeader;
- ATOM_I2C_DEVICE_SETUP_INFO asI2CSetup[1];
-} ATOM_ASIC_MVDD_INFO;
-
-/* ========================================================================================== */
+//Define one structure to inform SW a "block of data" writing to external SS chip via I2C protocol
+typedef struct _ATOM_I2C_DATA_RECORD
+{
+ UCHAR ucNunberOfBytes; //Indicates how many bytes SW needs to write to the external ASIC for one block, besides to "Start" and "Stop"
+ UCHAR ucI2CData[1]; //I2C data in bytes, should be less than 16 bytes usually
+}ATOM_I2C_DATA_RECORD;
+
+
+//Define one structure to inform SW how many blocks of data writing to external SS chip via I2C protocol, in addition to other information
+typedef struct _ATOM_I2C_DEVICE_SETUP_INFO
+{
+ ATOM_I2C_ID_CONFIG_ACCESS sucI2cId; //I2C line and HW/SW assisted cap.
+ UCHAR ucSSChipID; //SS chip being used
+ UCHAR ucSSChipSlaveAddr; //Slave Address to set up this SS chip
+ UCHAR ucNumOfI2CDataRecords; //number of data block
+ ATOM_I2C_DATA_RECORD asI2CData[1];
+}ATOM_I2C_DEVICE_SETUP_INFO;
+
+//==========================================================================================
+typedef struct _ATOM_ASIC_MVDD_INFO
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ATOM_I2C_DEVICE_SETUP_INFO asI2CSetup[1];
+}ATOM_ASIC_MVDD_INFO;
+
+//==========================================================================================
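[Editorial sketch, not part of the patch: each ATOM_I2C_DATA_RECORD is one count byte plus ucNunberOfBytes of payload, so the ucNumOfI2CDataRecords blocks in asI2CData[] are packed back to back. Advancing to the next block under that reading:]

static const ATOM_I2C_DATA_RECORD *next_i2c_data_record(const ATOM_I2C_DATA_RECORD *rec)
{
	const UCHAR *p = (const UCHAR *)rec;

	/* 1 byte of count, then the payload bytes that follow it */
	return (const ATOM_I2C_DATA_RECORD *)(p + 1 + rec->ucNunberOfBytes);
}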
 #define ATOM_MCLK_SS_INFO ATOM_ASIC_MVDD_INFO

-/* ========================================================================================== */
+//==========================================================================================
 /**************************************************************************/

-typedef struct _ATOM_ASIC_SS_ASSIGNMENT {
- ULONG ulTargetClockRange; /* Clock Out frequence (VCO ), in unit of 10Khz */
- USHORT usSpreadSpectrumPercentage; /* in unit of 0.01% */
- USHORT usSpreadRateInKhz; /* in unit of kHz, modulation freq */
- UCHAR ucClockIndication; /* Indicate which clock source needs SS */
- UCHAR ucSpreadSpectrumMode; /* Bit1=0 Down Spread,=1 Center Spread. */
- UCHAR ucReserved[2];
-} ATOM_ASIC_SS_ASSIGNMENT;
-
-/* Define ucSpreadSpectrumType */
-#define ASIC_INTERNAL_MEMORY_SS 1
-#define ASIC_INTERNAL_ENGINE_SS 2
-#define ASIC_INTERNAL_UVD_SS 3
+typedef struct _ATOM_ASIC_SS_ASSIGNMENT
+{
+ ULONG ulTargetClockRange; //Clock Out frequence (VCO ), in unit of 10Khz
+ USHORT usSpreadSpectrumPercentage; //in unit of 0.01%
+ USHORT usSpreadRateInKhz; //in unit of kHz, modulation freq
+ UCHAR ucClockIndication; //Indicate which clock source needs SS
+ UCHAR ucSpreadSpectrumMode; //Bit1=0 Down Spread,=1 Center Spread.
+ UCHAR ucReserved[2];
+}ATOM_ASIC_SS_ASSIGNMENT;
+
+//Define ucClockIndication, SW uses the IDs below to search if the SS is requried/enabled on a clock branch/signal type.
+//SS is not required or enabled if a match is not found.
+#define ASIC_INTERNAL_MEMORY_SS 1
+#define ASIC_INTERNAL_ENGINE_SS 2
+#define ASIC_INTERNAL_UVD_SS 3
+#define ASIC_INTERNAL_SS_ON_TMDS 4
+#define ASIC_INTERNAL_SS_ON_HDMI 5
+#define ASIC_INTERNAL_SS_ON_LVDS 6
+#define ASIC_INTERNAL_SS_ON_DP 7
+#define ASIC_INTERNAL_SS_ON_DCPLL 8
+
+typedef struct _ATOM_ASIC_SS_ASSIGNMENT_V2
+{
+ ULONG ulTargetClockRange; //For mem/engine/uvd, Clock Out frequence (VCO ), in unit of 10Khz
+                           //For TMDS/HDMI/LVDS, it is pixel clock , for DP, it is link clock ( 27000 or 16200 )
+ USHORT usSpreadSpectrumPercentage; //in unit of 0.01%
+ USHORT usSpreadRateIn10Hz; //in unit of 10Hz, modulation freq
+ UCHAR ucClockIndication; //Indicate which clock source needs SS
+ UCHAR ucSpreadSpectrumMode; //Bit0=0 Down Spread,=1 Center Spread, bit1=0: internal SS bit1=1: external SS
+ UCHAR ucReserved[2];
+}ATOM_ASIC_SS_ASSIGNMENT_V2;
+
+//ucSpreadSpectrumMode
+//#define ATOM_SS_DOWN_SPREAD_MODE_MASK 0x00000000
+//#define ATOM_SS_DOWN_SPREAD_MODE 0x00000000
+//#define ATOM_SS_CENTRE_SPREAD_MODE_MASK 0x00000001
+//#define ATOM_SS_CENTRE_SPREAD_MODE 0x00000001
+//#define ATOM_INTERNAL_SS_MASK 0x00000000
+//#define ATOM_EXTERNAL_SS_MASK 0x00000002
+
+typedef struct _ATOM_ASIC_INTERNAL_SS_INFO
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ATOM_ASIC_SS_ASSIGNMENT asSpreadSpectrum[4];
+}ATOM_ASIC_INTERNAL_SS_INFO;
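[Editorial sketch, not part of the patch: usSpreadSpectrumPercentage is stored in 0.01% units, so a 0.5% spread is stored as 50. A rounded conversion to 0.1% units, purely for illustration:]

static USHORT ss_percentage_in_tenths(const ATOM_ASIC_SS_ASSIGNMENT *ss)
{
	return (ss->usSpreadSpectrumPercentage + 5) / 10;   /* 0.01% -> 0.1% */
}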

-typedef struct _ATOM_ASIC_INTERNAL_SS_INFO {
- ATOM_COMMON_TABLE_HEADER sHeader;
- ATOM_ASIC_SS_ASSIGNMENT asSpreadSpectrum[4];
-} ATOM_ASIC_INTERNAL_SS_INFO;
+typedef struct _ATOM_ASIC_INTERNAL_SS_INFO_V2
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ATOM_ASIC_SS_ASSIGNMENT_V2 asSpreadSpectrum[1]; //this is point only.
+}ATOM_ASIC_INTERNAL_SS_INFO_V2;

-/* ==============================Scratch Pad Definition Portion=============================== */
+typedef struct _ATOM_ASIC_SS_ASSIGNMENT_V3
+{
+ ULONG ulTargetClockRange; //For mem/engine/uvd, Clock Out frequence (VCO ), in unit of 10Khz
+                           //For TMDS/HDMI/LVDS, it is pixel clock , for DP, it is link clock ( 27000 or 16200 )
+ USHORT usSpreadSpectrumPercentage; //in unit of 0.01%
+ USHORT usSpreadRateIn10Hz; //in unit of 10Hz, modulation freq
+ UCHAR ucClockIndication; //Indicate which clock source needs SS
+ UCHAR ucSpreadSpectrumMode; //Bit0=0 Down Spread,=1 Center Spread, bit1=0: internal SS bit1=1: external SS
+ UCHAR ucReserved[2];
+}ATOM_ASIC_SS_ASSIGNMENT_V3;
+
+typedef struct _ATOM_ASIC_INTERNAL_SS_INFO_V3
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ATOM_ASIC_SS_ASSIGNMENT_V3 asSpreadSpectrum[1]; //this is pointer only.
+}ATOM_ASIC_INTERNAL_SS_INFO_V3;
+
+
+//==============================Scratch Pad Definition Portion===============================
 #define ATOM_DEVICE_CONNECT_INFO_DEF 0
 #define ATOM_ROM_LOCATION_DEF 1
 #define ATOM_TV_STANDARD_DEF 2
@@ -2995,7 +3852,8 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO {
 #define ATOM_I2C_CHANNEL_STATUS_DEF 8
 #define ATOM_I2C_CHANNEL_STATUS1_DEF 9

-/* BIOS_0_SCRATCH Definition */
+
+// BIOS_0_SCRATCH Definition
 #define ATOM_S0_CRT1_MONO 0x00000001L
 #define ATOM_S0_CRT1_COLOR 0x00000002L
 #define ATOM_S0_CRT1_MASK (ATOM_S0_CRT1_MONO+ATOM_S0_CRT1_COLOR)
@@ -3008,6 +3866,7 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO {
 #define ATOM_S0_CV_DIN_A 0x00000020L
 #define ATOM_S0_CV_MASK_A (ATOM_S0_CV_A+ATOM_S0_CV_DIN_A)

+
 #define ATOM_S0_CRT2_MONO 0x00000100L
 #define ATOM_S0_CRT2_COLOR 0x00000200L
 #define ATOM_S0_CRT2_MASK (ATOM_S0_CRT2_MONO+ATOM_S0_CRT2_COLOR)
@@ -3025,28 +3884,27 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO {
 #define ATOM_S0_DFP2 0x00020000L
 #define ATOM_S0_LCD1 0x00040000L
 #define ATOM_S0_LCD2 0x00080000L
-#define ATOM_S0_TV2 0x00100000L
+#define ATOM_S0_DFP6 0x00100000L
 #define ATOM_S0_DFP3 0x00200000L
 #define ATOM_S0_DFP4 0x00400000L
 #define ATOM_S0_DFP5 0x00800000L

-#define ATOM_S0_DFP_MASK \
-	(ATOM_S0_DFP1 | ATOM_S0_DFP2 | ATOM_S0_DFP3 | ATOM_S0_DFP4 | ATOM_S0_DFP5)
+#define ATOM_S0_DFP_MASK ATOM_S0_DFP1 | ATOM_S0_DFP2 | ATOM_S0_DFP3 | ATOM_S0_DFP4 | ATOM_S0_DFP5 | ATOM_S0_DFP6

-#define ATOM_S0_FAD_REGISTER_BUG 0x02000000L /* If set, indicates we are running a PCIE asic with */
-	/* the FAD/HDP reg access bug. Bit is read by DAL */
+#define ATOM_S0_FAD_REGISTER_BUG 0x02000000L // If set, indicates we are running a PCIE asic with
+	// the FAD/HDP reg access bug. Bit is read by DAL, this is obsolete from RV5xx

 #define ATOM_S0_THERMAL_STATE_MASK 0x1C000000L
 #define ATOM_S0_THERMAL_STATE_SHIFT 26

 #define ATOM_S0_SYSTEM_POWER_STATE_MASK 0xE0000000L
 #define ATOM_S0_SYSTEM_POWER_STATE_SHIFT 29

 #define ATOM_S0_SYSTEM_POWER_STATE_VALUE_AC 1
 #define ATOM_S0_SYSTEM_POWER_STATE_VALUE_DC 2
 #define ATOM_S0_SYSTEM_POWER_STATE_VALUE_LITEAC 3
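[Editorial sketch, not part of the patch: the scratch-register fields follow the usual mask-plus-shift pattern; pulling the system power state out of BIOS_0_SCRATCH, for example (value meanings per the defines above):]

static UCHAR s0_system_power_state(ULONG bios_0_scratch)
{
	return (bios_0_scratch & ATOM_S0_SYSTEM_POWER_STATE_MASK) >>
	       ATOM_S0_SYSTEM_POWER_STATE_SHIFT;    /* 1=AC, 2=DC, 3=LITEAC */
}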

-/* Byte aligned definition for BIOS usage */
+//Byte aligned defintion for BIOS usage
 #define ATOM_S0_CRT1_MONOb0 0x01
 #define ATOM_S0_CRT1_COLORb0 0x02
 #define ATOM_S0_CRT1_MASKb0 (ATOM_S0_CRT1_MONOb0+ATOM_S0_CRT1_COLORb0)
@@ -3076,8 +3934,11 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO {
 #define ATOM_S0_DFP2b2 0x02
 #define ATOM_S0_LCD1b2 0x04
 #define ATOM_S0_LCD2b2 0x08
-#define ATOM_S0_TV2b2 0x10
+#define ATOM_S0_DFP6b2 0x10
 #define ATOM_S0_DFP3b2 0x20
+#define ATOM_S0_DFP4b2 0x40
+#define ATOM_S0_DFP5b2 0x80
+

 #define ATOM_S0_THERMAL_STATE_MASKb3 0x1C
 #define ATOM_S0_THERMAL_STATE_SHIFTb3 2
@@ -3085,43 +3946,20 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO {
 #define ATOM_S0_SYSTEM_POWER_STATE_MASKb3 0xE0
 #define ATOM_S0_LCD1_SHIFT 18

-/* BIOS_1_SCRATCH Definition */
+// BIOS_1_SCRATCH Definition
 #define ATOM_S1_ROM_LOCATION_MASK 0x0000FFFFL
 #define ATOM_S1_PCI_BUS_DEV_MASK 0xFFFF0000L

-/* BIOS_2_SCRATCH Definition */
+// BIOS_2_SCRATCH Definition
 #define ATOM_S2_TV1_STANDARD_MASK 0x0000000FL
 #define ATOM_S2_CURRENT_BL_LEVEL_MASK 0x0000FF00L
 #define ATOM_S2_CURRENT_BL_LEVEL_SHIFT 8

-#define ATOM_S2_CRT1_DPMS_STATE 0x00010000L
-#define ATOM_S2_LCD1_DPMS_STATE 0x00020000L
-#define ATOM_S2_TV1_DPMS_STATE 0x00040000L
-#define ATOM_S2_DFP1_DPMS_STATE 0x00080000L
-#define ATOM_S2_CRT2_DPMS_STATE 0x00100000L
-#define ATOM_S2_LCD2_DPMS_STATE 0x00200000L
-#define ATOM_S2_TV2_DPMS_STATE 0x00400000L
-#define ATOM_S2_DFP2_DPMS_STATE 0x00800000L
-#define ATOM_S2_CV_DPMS_STATE 0x01000000L
-#define ATOM_S2_DFP3_DPMS_STATE 0x02000000L
-#define ATOM_S2_DFP4_DPMS_STATE 0x04000000L
-#define ATOM_S2_DFP5_DPMS_STATE 0x08000000L
-
-#define ATOM_S2_DFP_DPM_STATE \
-	(ATOM_S2_DFP1_DPMS_STATE | ATOM_S2_DFP2_DPMS_STATE | \
-	 ATOM_S2_DFP3_DPMS_STATE | ATOM_S2_DFP4_DPMS_STATE | \
-	 ATOM_S2_DFP5_DPMS_STATE)
-
-#define ATOM_S2_DEVICE_DPMS_STATE \
-	(ATOM_S2_CRT1_DPMS_STATE + ATOM_S2_LCD1_DPMS_STATE + \
-	 ATOM_S2_TV1_DPMS_STATE + ATOM_S2_DFP_DPMS_STATE + \
-	 ATOM_S2_CRT2_DPMS_STATE + ATOM_S2_LCD2_DPMS_STATE + \
-	 ATOM_S2_TV2_DPMS_STATE + ATOM_S2_CV_DPMS_STATE)
-
 #define ATOM_S2_FORCEDLOWPWRMODE_STATE_MASK 0x0C000000L
 #define ATOM_S2_FORCEDLOWPWRMODE_STATE_MASK_SHIFT 26
 #define ATOM_S2_FORCEDLOWPWRMODE_STATE_CHANGE 0x10000000L

+#define ATOM_S2_DEVICE_DPMS_STATE 0x00010000L
 #define ATOM_S2_VRI_BRIGHT_ENABLE 0x20000000L

 #define ATOM_S2_DISPLAY_ROTATION_0_DEGREE 0x0
@@ -3131,21 +3969,11 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO {
 #define ATOM_S2_DISPLAY_ROTATION_DEGREE_SHIFT 30
 #define ATOM_S2_DISPLAY_ROTATION_ANGLE_MASK 0xC0000000L

-/* Byte aligned definition for BIOS usage */
+
+//Byte aligned defintion for BIOS usage
 #define ATOM_S2_TV1_STANDARD_MASKb0 0x0F
 #define ATOM_S2_CURRENT_BL_LEVEL_MASKb1 0xFF
-#define ATOM_S2_CRT1_DPMS_STATEb2 0x01
-#define ATOM_S2_LCD1_DPMS_STATEb2 0x02
-#define ATOM_S2_TV1_DPMS_STATEb2 0x04
-#define ATOM_S2_DFP1_DPMS_STATEb2 0x08
-#define ATOM_S2_CRT2_DPMS_STATEb2 0x10
-#define ATOM_S2_LCD2_DPMS_STATEb2 0x20
-#define ATOM_S2_TV2_DPMS_STATEb2 0x40
-#define ATOM_S2_DFP2_DPMS_STATEb2 0x80
-#define ATOM_S2_CV_DPMS_STATEb3 0x01
-#define ATOM_S2_DFP3_DPMS_STATEb3 0x02
-#define ATOM_S2_DFP4_DPMS_STATEb3 0x04
-#define ATOM_S2_DFP5_DPMS_STATEb3 0x08
+#define ATOM_S2_DEVICE_DPMS_STATEb2 0x01

 #define ATOM_S2_DEVICE_DPMS_MASKw1 0x3FF
 #define ATOM_S2_FORCEDLOWPWRMODE_STATE_MASKb3 0x0C
@@ -3153,21 +3981,22 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO {
 #define ATOM_S2_VRI_BRIGHT_ENABLEb3 0x20
 #define ATOM_S2_ROTATION_STATE_MASKb3 0xC0

-/* BIOS_3_SCRATCH Definition */
+
+// BIOS_3_SCRATCH Definition
 #define ATOM_S3_CRT1_ACTIVE 0x00000001L
 #define ATOM_S3_LCD1_ACTIVE 0x00000002L
 #define ATOM_S3_TV1_ACTIVE 0x00000004L
 #define ATOM_S3_DFP1_ACTIVE 0x00000008L
 #define ATOM_S3_CRT2_ACTIVE 0x00000010L
 #define ATOM_S3_LCD2_ACTIVE 0x00000020L
-#define ATOM_S3_TV2_ACTIVE 0x00000040L
+#define ATOM_S3_DFP6_ACTIVE 0x00000040L
 #define ATOM_S3_DFP2_ACTIVE 0x00000080L
 #define ATOM_S3_CV_ACTIVE 0x00000100L
 #define ATOM_S3_DFP3_ACTIVE 0x00000200L
 #define ATOM_S3_DFP4_ACTIVE 0x00000400L
 #define ATOM_S3_DFP5_ACTIVE 0x00000800L

-#define ATOM_S3_DEVICE_ACTIVE_MASK 0x000003FFL
+#define ATOM_S3_DEVICE_ACTIVE_MASK 0x00000FFFL

 #define ATOM_S3_LCD_FULLEXPANSION_ACTIVE 0x00001000L
 #define ATOM_S3_LCD_EXPANSION_ASPEC_RATIO_ACTIVE 0x00002000L
@@ -3178,7 +4007,7 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO {
 #define ATOM_S3_DFP1_CRTC_ACTIVE 0x00080000L
 #define ATOM_S3_CRT2_CRTC_ACTIVE 0x00100000L
 #define ATOM_S3_LCD2_CRTC_ACTIVE 0x00200000L
-#define ATOM_S3_TV2_CRTC_ACTIVE 0x00400000L
+#define ATOM_S3_DFP6_CRTC_ACTIVE 0x00400000L
 #define ATOM_S3_DFP2_CRTC_ACTIVE 0x00800000L
 #define ATOM_S3_CV_CRTC_ACTIVE 0x01000000L
 #define ATOM_S3_DFP3_CRTC_ACTIVE 0x02000000L
@@ -3187,17 +4016,18 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO {

 #define ATOM_S3_DEVICE_CRTC_ACTIVE_MASK 0x0FFF0000L
 #define ATOM_S3_ASIC_GUI_ENGINE_HUNG 0x20000000L
+//Below two definitions are not supported in pplib, but in the old powerplay in DAL
 #define ATOM_S3_ALLOW_FAST_PWR_SWITCH 0x40000000L
 #define ATOM_S3_RQST_GPU_USE_MIN_PWR 0x80000000L

-/* Byte aligned definition for BIOS usage */
+//Byte aligned defintion for BIOS usage
 #define ATOM_S3_CRT1_ACTIVEb0 0x01
 #define ATOM_S3_LCD1_ACTIVEb0 0x02
 #define ATOM_S3_TV1_ACTIVEb0 0x04
 #define ATOM_S3_DFP1_ACTIVEb0 0x08
 #define ATOM_S3_CRT2_ACTIVEb0 0x10
 #define ATOM_S3_LCD2_ACTIVEb0 0x20
-#define ATOM_S3_TV2_ACTIVEb0 0x40
+#define ATOM_S3_DFP6_ACTIVEb0 0x40
 #define ATOM_S3_DFP2_ACTIVEb0 0x80
 #define ATOM_S3_CV_ACTIVEb1 0x01
 #define ATOM_S3_DFP3_ACTIVEb1 0x02
@@ -3212,7 +4042,7 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO {
 #define ATOM_S3_DFP1_CRTC_ACTIVEb2 0x08
 #define ATOM_S3_CRT2_CRTC_ACTIVEb2 0x10
 #define ATOM_S3_LCD2_CRTC_ACTIVEb2 0x20
-#define ATOM_S3_TV2_CRTC_ACTIVEb2 0x40
+#define ATOM_S3_DFP6_CRTC_ACTIVEb2 0x40
 #define ATOM_S3_DFP2_CRTC_ACTIVEb2 0x80
 #define ATOM_S3_CV_CRTC_ACTIVEb3 0x01
 #define ATOM_S3_DFP3_CRTC_ACTIVEb3 0x02
@@ -3221,35 +4051,31 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO {

 #define ATOM_S3_ACTIVE_CRTC2w1 0xFFF

-#define ATOM_S3_ASIC_GUI_ENGINE_HUNGb3 0x20
-#define ATOM_S3_ALLOW_FAST_PWR_SWITCHb3 0x40
-#define ATOM_S3_RQST_GPU_USE_MIN_PWRb3 0x80
-
-/* BIOS_4_SCRATCH Definition */
+// BIOS_4_SCRATCH Definition
 #define ATOM_S4_LCD1_PANEL_ID_MASK 0x000000FFL
 #define ATOM_S4_LCD1_REFRESH_MASK 0x0000FF00L
 #define ATOM_S4_LCD1_REFRESH_SHIFT 8

-/* Byte aligned definition for BIOS usage */
+//Byte aligned defintion for BIOS usage
 #define ATOM_S4_LCD1_PANEL_ID_MASKb0 0x0FF
 #define ATOM_S4_LCD1_REFRESH_MASKb1 ATOM_S4_LCD1_PANEL_ID_MASKb0
 #define ATOM_S4_VRAM_INFO_MASKb2 ATOM_S4_LCD1_PANEL_ID_MASKb0

-/* BIOS_5_SCRATCH Definition, BIOS_5_SCRATCH is used by Firmware only !!!! */
+// BIOS_5_SCRATCH Definition, BIOS_5_SCRATCH is used by Firmware only !!!!
 #define ATOM_S5_DOS_REQ_CRT1b0 0x01
 #define ATOM_S5_DOS_REQ_LCD1b0 0x02
 #define ATOM_S5_DOS_REQ_TV1b0 0x04
 #define ATOM_S5_DOS_REQ_DFP1b0 0x08
 #define ATOM_S5_DOS_REQ_CRT2b0 0x10
 #define ATOM_S5_DOS_REQ_LCD2b0 0x20
-#define ATOM_S5_DOS_REQ_TV2b0 0x40
+#define ATOM_S5_DOS_REQ_DFP6b0 0x40
 #define ATOM_S5_DOS_REQ_DFP2b0 0x80
 #define ATOM_S5_DOS_REQ_CVb1 0x01
 #define ATOM_S5_DOS_REQ_DFP3b1 0x02
 #define ATOM_S5_DOS_REQ_DFP4b1 0x04
 #define ATOM_S5_DOS_REQ_DFP5b1 0x08

-#define ATOM_S5_DOS_REQ_DEVICEw0 0x03FF
+#define ATOM_S5_DOS_REQ_DEVICEw0 0x0FFF

 #define ATOM_S5_DOS_REQ_CRT1 0x0001
 #define ATOM_S5_DOS_REQ_LCD1 0x0002
@@ -3257,22 +4083,21 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO {
 #define ATOM_S5_DOS_REQ_DFP1            0x0008
 #define ATOM_S5_DOS_REQ_CRT2            0x0010
 #define ATOM_S5_DOS_REQ_LCD2            0x0020
-#define ATOM_S5_DOS_REQ_TV2             0x0040
+#define ATOM_S5_DOS_REQ_DFP6            0x0040
 #define ATOM_S5_DOS_REQ_DFP2            0x0080
 #define ATOM_S5_DOS_REQ_CV              0x0100
 #define ATOM_S5_DOS_REQ_DFP3            0x0200
 #define ATOM_S5_DOS_REQ_DFP4            0x0400
 #define ATOM_S5_DOS_REQ_DFP5            0x0800
 
 #define ATOM_S5_DOS_FORCE_CRT1b2        ATOM_S5_DOS_REQ_CRT1b0
 #define ATOM_S5_DOS_FORCE_TV1b2         ATOM_S5_DOS_REQ_TV1b0
 #define ATOM_S5_DOS_FORCE_CRT2b2        ATOM_S5_DOS_REQ_CRT2b0
 #define ATOM_S5_DOS_FORCE_CVb3          ATOM_S5_DOS_REQ_CVb1
-#define ATOM_S5_DOS_FORCE_DEVICEw1 \
-	(ATOM_S5_DOS_FORCE_CRT1b2 + ATOM_S5_DOS_FORCE_TV1b2 + \
-	ATOM_S5_DOS_FORCE_CRT2b2 + (ATOM_S5_DOS_FORCE_CVb3 << 8))
+#define ATOM_S5_DOS_FORCE_DEVICEw1      (ATOM_S5_DOS_FORCE_CRT1b2+ATOM_S5_DOS_FORCE_TV1b2+ATOM_S5_DOS_FORCE_CRT2b2+\
+                                        (ATOM_S5_DOS_FORCE_CVb3<<8))
 
-/* BIOS_6_SCRATCH Definition */
+// BIOS_6_SCRATCH Definition
 #define ATOM_S6_DEVICE_CHANGE           0x00000001L
 #define ATOM_S6_SCALER_CHANGE           0x00000002L
 #define ATOM_S6_LID_CHANGE              0x00000004L
@@ -3285,11 +4110,11 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO {
 #define ATOM_S6_HW_I2C_BUSY_STATE       0x00000200L
 #define ATOM_S6_THERMAL_STATE_CHANGE    0x00000400L
 #define ATOM_S6_INTERRUPT_SET_BY_BIOS   0x00000800L
-#define ATOM_S6_REQ_LCD_EXPANSION_FULL  0x00001000L /* Normal expansion Request bit for LCD */
-#define ATOM_S6_REQ_LCD_EXPANSION_ASPEC_RATIO 0x00002000L /* Aspect ratio expansion Request bit for LCD */
+#define ATOM_S6_REQ_LCD_EXPANSION_FULL  0x00001000L //Normal expansion Request bit for LCD
+#define ATOM_S6_REQ_LCD_EXPANSION_ASPEC_RATIO 0x00002000L //Aspect ratio expansion Request bit for LCD
 
-#define ATOM_S6_DISPLAY_STATE_CHANGE    0x00004000L /* This bit is recycled when ATOM_BIOS_INFO_BIOS_SCRATCH6_SCL2_REDEFINE is set,previously it's SCL2_H_expansion */
-#define ATOM_S6_I2C_STATE_CHANGE        0x00008000L /* This bit is recycled,when ATOM_BIOS_INFO_BIOS_SCRATCH6_SCL2_REDEFINE is set,previously it's SCL2_V_expansion */
+#define ATOM_S6_DISPLAY_STATE_CHANGE    0x00004000L //This bit is recycled when ATOM_BIOS_INFO_BIOS_SCRATCH6_SCL2_REDEFINE is set, previously it's SCL2_H_expansion
+#define ATOM_S6_I2C_STATE_CHANGE        0x00008000L //This bit is recycled when ATOM_BIOS_INFO_BIOS_SCRATCH6_SCL2_REDEFINE is set, previously it's SCL2_V_expansion
 
 #define ATOM_S6_ACC_REQ_CRT1            0x00010000L
 #define ATOM_S6_ACC_REQ_LCD1            0x00020000L
@@ -3297,7 +4122,7 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO {
 #define ATOM_S6_ACC_REQ_DFP1            0x00080000L
 #define ATOM_S6_ACC_REQ_CRT2            0x00100000L
 #define ATOM_S6_ACC_REQ_LCD2            0x00200000L
-#define ATOM_S6_ACC_REQ_TV2             0x00400000L
+#define ATOM_S6_ACC_REQ_DFP6            0x00400000L
 #define ATOM_S6_ACC_REQ_DFP2            0x00800000L
 #define ATOM_S6_ACC_REQ_CV              0x01000000L
 #define ATOM_S6_ACC_REQ_DFP3            0x02000000L
@@ -3310,7 +4135,7 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO {
 #define ATOM_S6_VRI_BRIGHTNESS_CHANGE   0x40000000L
 #define ATOM_S6_CONFIG_DISPLAY_CHANGE_MASK 0x80000000L
 
-/* Byte aligned definition for BIOS usage */
+//Byte aligned definition for BIOS usage
 #define ATOM_S6_DEVICE_CHANGEb0         0x01
 #define ATOM_S6_SCALER_CHANGEb0         0x02
 #define ATOM_S6_LID_CHANGEb0            0x04
@@ -3320,11 +4145,11 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO {
 #define ATOM_S6_LID_STATEb0             0x40
 #define ATOM_S6_DOCK_STATEb0            0x80
 #define ATOM_S6_CRITICAL_STATEb1        0x01
 #define ATOM_S6_HW_I2C_BUSY_STATEb1     0x02
 #define ATOM_S6_THERMAL_STATE_CHANGEb1  0x04
 #define ATOM_S6_INTERRUPT_SET_BY_BIOSb1 0x08
 #define ATOM_S6_REQ_LCD_EXPANSION_FULLb1 0x10
 #define ATOM_S6_REQ_LCD_EXPANSION_ASPEC_RATIOb1 0x20
 
 #define ATOM_S6_ACC_REQ_CRT1b2          0x01
 #define ATOM_S6_ACC_REQ_LCD1b2          0x02
@@ -3332,12 +4157,12 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO {
 #define ATOM_S6_ACC_REQ_DFP1b2          0x08
 #define ATOM_S6_ACC_REQ_CRT2b2          0x10
 #define ATOM_S6_ACC_REQ_LCD2b2          0x20
-#define ATOM_S6_ACC_REQ_TV2b2           0x40
+#define ATOM_S6_ACC_REQ_DFP6b2          0x40
 #define ATOM_S6_ACC_REQ_DFP2b2          0x80
 #define ATOM_S6_ACC_REQ_CVb3            0x01
 #define ATOM_S6_ACC_REQ_DFP3b3          0x02
 #define ATOM_S6_ACC_REQ_DFP4b3          0x04
 #define ATOM_S6_ACC_REQ_DFP5b3          0x08
 
 #define ATOM_S6_ACC_REQ_DEVICEw1        ATOM_S5_DOS_REQ_DEVICEw0
 #define ATOM_S6_SYSTEM_POWER_MODE_CHANGEb3 0x10
@@ -3366,7 +4191,7 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO {
 #define ATOM_S6_VRI_BRIGHTNESS_CHANGE_SHIFT 30
 #define ATOM_S6_CONFIG_DISPLAY_CHANGE_SHIFT 31
 
-/* BIOS_7_SCRATCH Definition, BIOS_7_SCRATCH is used by Firmware only !!!! */
+// BIOS_7_SCRATCH Definition, BIOS_7_SCRATCH is used by Firmware only !!!!
 #define ATOM_S7_DOS_MODE_TYPEb0         0x03
 #define ATOM_S7_DOS_MODE_VGAb0          0x00
 #define ATOM_S7_DOS_MODE_VESAb0         0x01
@@ -3378,220 +4203,194 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO {
 
 #define ATOM_S7_DOS_8BIT_DAC_EN_SHIFT   8
 
-/* BIOS_8_SCRATCH Definition */
+// BIOS_8_SCRATCH Definition
 #define ATOM_S8_I2C_CHANNEL_BUSY_MASK   0x00000FFFF
 #define ATOM_S8_I2C_HW_ENGINE_BUSY_MASK 0x0FFFF0000
 
 #define ATOM_S8_I2C_CHANNEL_BUSY_SHIFT  0
 #define ATOM_S8_I2C_ENGINE_BUSY_SHIFT   16
 
-/* BIOS_9_SCRATCH Definition */
+// BIOS_9_SCRATCH Definition
 #ifndef ATOM_S9_I2C_CHANNEL_COMPLETED_MASK
 #define ATOM_S9_I2C_CHANNEL_COMPLETED_MASK   0x0000FFFF
 #endif
 #ifndef ATOM_S9_I2C_CHANNEL_ABORTED_MASK
 #define ATOM_S9_I2C_CHANNEL_ABORTED_MASK     0xFFFF0000
 #endif
 #ifndef ATOM_S9_I2C_CHANNEL_COMPLETED_SHIFT
 #define ATOM_S9_I2C_CHANNEL_COMPLETED_SHIFT  0
 #endif
 #ifndef ATOM_S9_I2C_CHANNEL_ABORTED_SHIFT
 #define ATOM_S9_I2C_CHANNEL_ABORTED_SHIFT    16
 #endif
 
+
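Editor's sketch (not part of the diff): the BIOS_8 layout above packs per-channel busy flags into the low word and per-engine busy flags into the high word, so decoding is a mask-and-shift; the scratch value itself would come from an MMIO read in the driver.

	static unsigned int atom_s8_engines_busy_sketch(unsigned int bios_8_scratch)
	{
		/* bits [15:0]: busy I2C channels; bits [31:16]: busy HW engines */
		return (bios_8_scratch & ATOM_S8_I2C_HW_ENGINE_BUSY_MASK) >>
			ATOM_S8_I2C_ENGINE_BUSY_SHIFT;
	}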
 #define ATOM_FLAG_SET                         0x20
 #define ATOM_FLAG_CLEAR                       0
-#define CLEAR_ATOM_S6_ACC_MODE \
-	((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
-	ATOM_S6_ACC_MODE_SHIFT | ATOM_FLAG_CLEAR)
-#define SET_ATOM_S6_DEVICE_CHANGE \
-	((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
-	ATOM_S6_DEVICE_CHANGE_SHIFT | ATOM_FLAG_SET)
-#define SET_ATOM_S6_VRI_BRIGHTNESS_CHANGE \
-	((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
-	ATOM_S6_VRI_BRIGHTNESS_CHANGE_SHIFT | ATOM_FLAG_SET)
-#define SET_ATOM_S6_SCALER_CHANGE \
-	((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
-	ATOM_S6_SCALER_CHANGE_SHIFT | ATOM_FLAG_SET)
-#define SET_ATOM_S6_LID_CHANGE \
-	((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
-	ATOM_S6_LID_CHANGE_SHIFT | ATOM_FLAG_SET)
-
-#define SET_ATOM_S6_LID_STATE \
-	((ATOM_ACC_CHANGE_INFO_DEF << 8) |\
-	ATOM_S6_LID_STATE_SHIFT | ATOM_FLAG_SET)
-#define CLEAR_ATOM_S6_LID_STATE \
-	((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
-	ATOM_S6_LID_STATE_SHIFT | ATOM_FLAG_CLEAR)
-
-#define SET_ATOM_S6_DOCK_CHANGE \
-	((ATOM_ACC_CHANGE_INFO_DEF << 8)| \
-	ATOM_S6_DOCKING_CHANGE_SHIFT | ATOM_FLAG_SET)
-#define SET_ATOM_S6_DOCK_STATE \
-	((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
-	ATOM_S6_DOCK_STATE_SHIFT | ATOM_FLAG_SET)
-#define CLEAR_ATOM_S6_DOCK_STATE \
-	((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
-	ATOM_S6_DOCK_STATE_SHIFT | ATOM_FLAG_CLEAR)
-
-#define SET_ATOM_S6_THERMAL_STATE_CHANGE \
-	((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
-	ATOM_S6_THERMAL_STATE_CHANGE_SHIFT | ATOM_FLAG_SET)
-#define SET_ATOM_S6_SYSTEM_POWER_MODE_CHANGE \
-	((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
-	ATOM_S6_SYSTEM_POWER_MODE_CHANGE_SHIFT | ATOM_FLAG_SET)
-#define SET_ATOM_S6_INTERRUPT_SET_BY_BIOS \
-	((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
-	ATOM_S6_INTERRUPT_SET_BY_BIOS_SHIFT | ATOM_FLAG_SET)
-
-#define SET_ATOM_S6_CRITICAL_STATE \
-	((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
-	ATOM_S6_CRITICAL_STATE_SHIFT | ATOM_FLAG_SET)
-#define CLEAR_ATOM_S6_CRITICAL_STATE \
-	((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
-	ATOM_S6_CRITICAL_STATE_SHIFT | ATOM_FLAG_CLEAR)
-
-#define SET_ATOM_S6_REQ_SCALER \
-	((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
-	ATOM_S6_REQ_SCALER_SHIFT | ATOM_FLAG_SET)
-#define CLEAR_ATOM_S6_REQ_SCALER \
-	((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
-	ATOM_S6_REQ_SCALER_SHIFT | ATOM_FLAG_CLEAR )
-
-#define SET_ATOM_S6_REQ_SCALER_ARATIO \
-	((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
-	ATOM_S6_REQ_SCALER_ARATIO_SHIFT | ATOM_FLAG_SET )
-#define CLEAR_ATOM_S6_REQ_SCALER_ARATIO \
-	((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
-	ATOM_S6_REQ_SCALER_ARATIO_SHIFT | ATOM_FLAG_CLEAR )
-
-#define SET_ATOM_S6_I2C_STATE_CHANGE \
-	((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
-	ATOM_S6_I2C_STATE_CHANGE_SHIFT | ATOM_FLAG_SET )
-
-#define SET_ATOM_S6_DISPLAY_STATE_CHANGE \
-	((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
-	ATOM_S6_DISPLAY_STATE_CHANGE_SHIFT | ATOM_FLAG_SET )
-
-#define SET_ATOM_S6_DEVICE_RECONFIG \
-	((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
-	ATOM_S6_CONFIG_DISPLAY_CHANGE_SHIFT | ATOM_FLAG_SET)
-#define CLEAR_ATOM_S0_LCD1 \
-	((ATOM_DEVICE_CONNECT_INFO_DEF << 8 ) | \
-	ATOM_S0_LCD1_SHIFT | ATOM_FLAG_CLEAR )
-#define SET_ATOM_S7_DOS_8BIT_DAC_EN \
-	((ATOM_DOS_MODE_INFO_DEF << 8) | \
-	ATOM_S7_DOS_8BIT_DAC_EN_SHIFT | ATOM_FLAG_SET )
-#define CLEAR_ATOM_S7_DOS_8BIT_DAC_EN \
-	((ATOM_DOS_MODE_INFO_DEF << 8) | \
-	ATOM_S7_DOS_8BIT_DAC_EN_SHIFT | ATOM_FLAG_CLEAR )
-
-/****************************************************************************/
-/* Portion II: Definitinos only used in Driver */
+#define CLEAR_ATOM_S6_ACC_MODE                ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_ACC_MODE_SHIFT | ATOM_FLAG_CLEAR)
+#define SET_ATOM_S6_DEVICE_CHANGE             ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_DEVICE_CHANGE_SHIFT | ATOM_FLAG_SET)
+#define SET_ATOM_S6_VRI_BRIGHTNESS_CHANGE     ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_VRI_BRIGHTNESS_CHANGE_SHIFT | ATOM_FLAG_SET)
+#define SET_ATOM_S6_SCALER_CHANGE             ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_SCALER_CHANGE_SHIFT | ATOM_FLAG_SET)
+#define SET_ATOM_S6_LID_CHANGE                ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_LID_CHANGE_SHIFT | ATOM_FLAG_SET)
+
+#define SET_ATOM_S6_LID_STATE                 ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_LID_STATE_SHIFT | ATOM_FLAG_SET)
+#define CLEAR_ATOM_S6_LID_STATE               ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_LID_STATE_SHIFT | ATOM_FLAG_CLEAR)
+
+#define SET_ATOM_S6_DOCK_CHANGE               ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_DOCKING_CHANGE_SHIFT | ATOM_FLAG_SET)
+#define SET_ATOM_S6_DOCK_STATE                ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_DOCK_STATE_SHIFT | ATOM_FLAG_SET)
+#define CLEAR_ATOM_S6_DOCK_STATE              ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_DOCK_STATE_SHIFT | ATOM_FLAG_CLEAR)
+
+#define SET_ATOM_S6_THERMAL_STATE_CHANGE      ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_THERMAL_STATE_CHANGE_SHIFT | ATOM_FLAG_SET)
+#define SET_ATOM_S6_SYSTEM_POWER_MODE_CHANGE  ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_SYSTEM_POWER_MODE_CHANGE_SHIFT | ATOM_FLAG_SET)
+#define SET_ATOM_S6_INTERRUPT_SET_BY_BIOS     ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_INTERRUPT_SET_BY_BIOS_SHIFT | ATOM_FLAG_SET)
+
+#define SET_ATOM_S6_CRITICAL_STATE            ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_CRITICAL_STATE_SHIFT | ATOM_FLAG_SET)
+#define CLEAR_ATOM_S6_CRITICAL_STATE          ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_CRITICAL_STATE_SHIFT | ATOM_FLAG_CLEAR)
+
+#define SET_ATOM_S6_REQ_SCALER                ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_REQ_SCALER_SHIFT | ATOM_FLAG_SET)
+#define CLEAR_ATOM_S6_REQ_SCALER              ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_REQ_SCALER_SHIFT | ATOM_FLAG_CLEAR )
+
+#define SET_ATOM_S6_REQ_SCALER_ARATIO         ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_REQ_SCALER_ARATIO_SHIFT | ATOM_FLAG_SET )
+#define CLEAR_ATOM_S6_REQ_SCALER_ARATIO       ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_REQ_SCALER_ARATIO_SHIFT | ATOM_FLAG_CLEAR )
+
+#define SET_ATOM_S6_I2C_STATE_CHANGE          ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_I2C_STATE_CHANGE_SHIFT | ATOM_FLAG_SET )
+
+#define SET_ATOM_S6_DISPLAY_STATE_CHANGE      ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_DISPLAY_STATE_CHANGE_SHIFT | ATOM_FLAG_SET )
+
+#define SET_ATOM_S6_DEVICE_RECONFIG           ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_CONFIG_DISPLAY_CHANGE_SHIFT | ATOM_FLAG_SET)
+#define CLEAR_ATOM_S0_LCD1                    ((ATOM_DEVICE_CONNECT_INFO_DEF << 8 )| ATOM_S0_LCD1_SHIFT | ATOM_FLAG_CLEAR )
+#define SET_ATOM_S7_DOS_8BIT_DAC_EN           ((ATOM_DOS_MODE_INFO_DEF << 8 )|ATOM_S7_DOS_8BIT_DAC_EN_SHIFT | ATOM_FLAG_SET )
+#define CLEAR_ATOM_S7_DOS_8BIT_DAC_EN         ((ATOM_DOS_MODE_INFO_DEF << 8 )|ATOM_S7_DOS_8BIT_DAC_EN_SHIFT | ATOM_FLAG_CLEAR )
+
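Editor's sketch (not part of the diff): each of these SET_/CLEAR_ constants packs three fields into one value, namely the scratch-info block index in bits [15:8], the ATOM_FLAG_SET/ATOM_FLAG_CLEAR marker in bit 5, and the bit position in the low five bits. ATOM_ACC_CHANGE_INFO_DEF and the *_SHIFT names are defined elsewhere in this header; the decomposition below is illustrative.

	static void atom_decode_scratch_request_sketch(void)
	{
		unsigned int req   = SET_ATOM_S6_DOCK_STATE;
		unsigned int block = req >> 8;            /* ATOM_ACC_CHANGE_INFO_DEF */
		unsigned int shift = req & 0x1f;          /* ATOM_S6_DOCK_STATE_SHIFT */
		unsigned int set   = req & ATOM_FLAG_SET; /* nonzero = set, zero = clear */

		(void)block; (void)shift; (void)set;
	}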
+/****************************************************************************/
+//Portion II: Definitions only used in Driver
 /****************************************************************************/
 
-/* Macros used by driver */
+// Macros used by driver
+#ifdef __cplusplus
+#define GetIndexIntoMasterTable(MasterOrData, FieldName) ((reinterpret_cast<char*>(&(static_cast<ATOM_MASTER_LIST_OF_##MasterOrData##_TABLES*>(0))->FieldName)-static_cast<char*>(0))/sizeof(USHORT))
 
-#define GetIndexIntoMasterTable(MasterOrData, FieldName) (((char *)(&((ATOM_MASTER_LIST_OF_##MasterOrData##_TABLES *)0)->FieldName)-(char *)0)/sizeof(USHORT))
+#define GET_COMMAND_TABLE_COMMANDSET_REVISION(TABLE_HEADER_OFFSET) (((static_cast<ATOM_COMMON_TABLE_HEADER*>(TABLE_HEADER_OFFSET))->ucTableFormatRevision )&0x3F)
+#define GET_COMMAND_TABLE_PARAMETER_REVISION(TABLE_HEADER_OFFSET)  (((static_cast<ATOM_COMMON_TABLE_HEADER*>(TABLE_HEADER_OFFSET))->ucTableContentRevision)&0x3F)
+#else // not __cplusplus
+#define GetIndexIntoMasterTable(MasterOrData, FieldName) (((char*)(&((ATOM_MASTER_LIST_OF_##MasterOrData##_TABLES*)0)->FieldName)-(char*)0)/sizeof(USHORT))
 
 #define GET_COMMAND_TABLE_COMMANDSET_REVISION(TABLE_HEADER_OFFSET) ((((ATOM_COMMON_TABLE_HEADER*)TABLE_HEADER_OFFSET)->ucTableFormatRevision)&0x3F)
 #define GET_COMMAND_TABLE_PARAMETER_REVISION(TABLE_HEADER_OFFSET)  ((((ATOM_COMMON_TABLE_HEADER*)TABLE_HEADER_OFFSET)->ucTableContentRevision)&0x3F)
+#endif // __cplusplus
 
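Editor's sketch (not part of the diff): GetIndexIntoMasterTable() turns a member name from the master command/data table lists into its USHORT slot index, and the revision macros pull the two 6-bit revision fields out of a table header. The atom_parse_data_header() call, the ctx handle, and the FirmwareInfo member below belong to the driver side and are used here purely for illustration.

	int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
	uint16_t data_offset;
	uint8_t frev, crev;

	if (atom_parse_data_header(ctx, index, NULL, &frev, &crev, &data_offset)) {
		/* frev/crev are the same 6-bit revisions that
		   GET_DATA_TABLE_MAJOR/MINOR_REVISION extract */
	}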
 #define GET_DATA_TABLE_MAJOR_REVISION GET_COMMAND_TABLE_COMMANDSET_REVISION
 #define GET_DATA_TABLE_MINOR_REVISION GET_COMMAND_TABLE_PARAMETER_REVISION
 
 /****************************************************************************/
-/* Portion III: Definitinos only used in VBIOS */
+//Portion III: Definitions only used in VBIOS
 /****************************************************************************/
 #define ATOM_DAC_SRC          0x80
 #define ATOM_SRC_DAC1         0
 #define ATOM_SRC_DAC2         0x80
 
-#ifdef UEFI_BUILD
-#define USHORT UTEMP
-#endif
-
-typedef struct _MEMORY_PLLINIT_PARAMETERS {
-	ULONG ulTargetMemoryClock;	/* In 10Khz unit */
-	UCHAR ucAction;			/* not define yet */
-	UCHAR ucFbDiv_Hi;		/* Fbdiv Hi byte */
-	UCHAR ucFbDiv;			/* FB value */
-	UCHAR ucPostDiv;		/* Post div */
-} MEMORY_PLLINIT_PARAMETERS;
+typedef struct _MEMORY_PLLINIT_PARAMETERS
+{
+  ULONG ulTargetMemoryClock;    //In 10Khz unit
+  UCHAR ucAction;               //not defined yet
+  UCHAR ucFbDiv_Hi;             //Fbdiv Hi byte
+  UCHAR ucFbDiv;                //FB value
+  UCHAR ucPostDiv;              //Post div
+}MEMORY_PLLINIT_PARAMETERS;
 
 #define MEMORY_PLLINIT_PS_ALLOCATION  MEMORY_PLLINIT_PARAMETERS
 
+
 #define GPIO_PIN_WRITE                0x01
 #define GPIO_PIN_READ                 0x00
3526 4306
-typedef struct _GPIO_PIN_CONTROL_PARAMETERS {
-	UCHAR ucGPIO_ID;	/* return value, read from GPIO pins */
-	UCHAR ucGPIOBitShift;	/* define which bit in uGPIOBitVal need to be update */
-	UCHAR ucGPIOBitVal;	/* Set/Reset corresponding bit defined in ucGPIOBitMask */
-	UCHAR ucAction;		/* =GPIO_PIN_WRITE: Read; =GPIO_PIN_READ: Write */
-} GPIO_PIN_CONTROL_PARAMETERS;
+typedef struct _GPIO_PIN_CONTROL_PARAMETERS
+{
+  UCHAR ucGPIO_ID;         //return value, read from GPIO pins
+  UCHAR ucGPIOBitShift;    //define which bit in uGPIOBitVal needs to be updated
+  UCHAR ucGPIOBitVal;      //Set/Reset corresponding bit defined in ucGPIOBitMask
+  UCHAR ucAction;          //=GPIO_PIN_WRITE: Read; =GPIO_PIN_READ: Write
+}GPIO_PIN_CONTROL_PARAMETERS;
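Editor's sketch (not part of the diff): a caller programs one pin by filling the struct and handing it to the GPIOPinControl command table; the gpio_block_id parameter and the table-executor call are assumptions, so the hand-off is left as a comment.

	static void atom_gpio_write_bit_sketch(UCHAR gpio_block_id)
	{
		GPIO_PIN_CONTROL_PARAMETERS args;

		args.ucGPIO_ID      = gpio_block_id; /* which pin block to touch */
		args.ucGPIOBitShift = 3;             /* operate on bit 3 */
		args.ucGPIOBitVal   = 1 << 3;        /* drive that bit high */
		args.ucAction       = GPIO_PIN_WRITE;
		/* hand 'args' to the GPIOPinControl command table here */
		(void)args;
	}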
-typedef struct _ENABLE_SCALER_PARAMETERS {
-	UCHAR ucScaler;		/* ATOM_SCALER1, ATOM_SCALER2 */
-	UCHAR ucEnable;		/* ATOM_SCALER_DISABLE or ATOM_SCALER_CENTER or ATOM_SCALER_EXPANSION */
-	UCHAR ucTVStandard;	/* */
-	UCHAR ucPadding[1];
-} ENABLE_SCALER_PARAMETERS;
+typedef struct _ENABLE_SCALER_PARAMETERS
+{
+  UCHAR ucScaler;            // ATOM_SCALER1, ATOM_SCALER2
+  UCHAR ucEnable;            // ATOM_SCALER_DISABLE or ATOM_SCALER_CENTER or ATOM_SCALER_EXPANSION
+  UCHAR ucTVStandard;        //
+  UCHAR ucPadding[1];
+}ENABLE_SCALER_PARAMETERS;
 #define ENABLE_SCALER_PS_ALLOCATION ENABLE_SCALER_PARAMETERS
 
-/* ucEnable: */
+//ucEnable:
 #define SCALER_BYPASS_AUTO_CENTER_NO_REPLICATION    0
 #define SCALER_BYPASS_AUTO_CENTER_AUTO_REPLICATION  1
 #define SCALER_ENABLE_2TAP_ALPHA_MODE               2
 #define SCALER_ENABLE_MULTITAP_MODE                 3
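Editor's sketch (not part of the diff): filling the struct above with one of the ucEnable codes. ATOM_SCALER1 is defined earlier in this header; treat the whole fragment as illustrative.

	static void atom_request_multitap_scaler_sketch(void)
	{
		ENABLE_SCALER_PARAMETERS sc;

		sc.ucScaler     = ATOM_SCALER1;                /* first scaler unit */
		sc.ucEnable     = SCALER_ENABLE_MULTITAP_MODE; /* one of the ucEnable codes above */
		sc.ucTVStandard = 0;
		sc.ucPadding[0] = 0;
		(void)sc;
	}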
3547 4329
-typedef struct _ENABLE_HARDWARE_ICON_CURSOR_PARAMETERS {
-	ULONG usHWIconHorzVertPosn;	/* Hardware Icon Vertical position */
-	UCHAR ucHWIconVertOffset;	/* Hardware Icon Vertical offset */
-	UCHAR ucHWIconHorzOffset;	/* Hardware Icon Horizontal offset */
-	UCHAR ucSelection;		/* ATOM_CURSOR1 or ATOM_ICON1 or ATOM_CURSOR2 or ATOM_ICON2 */
-	UCHAR ucEnable;			/* ATOM_ENABLE or ATOM_DISABLE */
-} ENABLE_HARDWARE_ICON_CURSOR_PARAMETERS;
-
-typedef struct _ENABLE_HARDWARE_ICON_CURSOR_PS_ALLOCATION {
-	ENABLE_HARDWARE_ICON_CURSOR_PARAMETERS sEnableIcon;
-	ENABLE_CRTC_PARAMETERS sReserved;
-} ENABLE_HARDWARE_ICON_CURSOR_PS_ALLOCATION;
-
-typedef struct _ENABLE_GRAPH_SURFACE_PARAMETERS {
-	USHORT usHight;		/* Image Hight */
-	USHORT usWidth;		/* Image Width */
-	UCHAR ucSurface;	/* Surface 1 or 2 */
-	UCHAR ucPadding[3];
-} ENABLE_GRAPH_SURFACE_PARAMETERS;
-
-typedef struct _ENABLE_GRAPH_SURFACE_PARAMETERS_V1_2 {
-	USHORT usHight;		/* Image Hight */
-	USHORT usWidth;		/* Image Width */
-	UCHAR ucSurface;	/* Surface 1 or 2 */
-	UCHAR ucEnable;		/* ATOM_ENABLE or ATOM_DISABLE */
-	UCHAR ucPadding[2];
-} ENABLE_GRAPH_SURFACE_PARAMETERS_V1_2;
-
-typedef struct _ENABLE_GRAPH_SURFACE_PS_ALLOCATION {
-	ENABLE_GRAPH_SURFACE_PARAMETERS sSetSurface;
-	ENABLE_YUV_PS_ALLOCATION sReserved;	/* Don't set this one */
-} ENABLE_GRAPH_SURFACE_PS_ALLOCATION;
-
-typedef struct _MEMORY_CLEAN_UP_PARAMETERS {
-	USHORT usMemoryStart;	/* in 8Kb boundry, offset from memory base address */
-	USHORT usMemorySize;	/* 8Kb blocks aligned */
-} MEMORY_CLEAN_UP_PARAMETERS;
+typedef struct _ENABLE_HARDWARE_ICON_CURSOR_PARAMETERS
+{
+  ULONG  usHWIconHorzVertPosn;        // Hardware Icon Vertical position
+  UCHAR  ucHWIconVertOffset;          // Hardware Icon Vertical offset
+  UCHAR  ucHWIconHorzOffset;          // Hardware Icon Horizontal offset
+  UCHAR  ucSelection;                 // ATOM_CURSOR1 or ATOM_ICON1 or ATOM_CURSOR2 or ATOM_ICON2
+  UCHAR  ucEnable;                    // ATOM_ENABLE or ATOM_DISABLE
+}ENABLE_HARDWARE_ICON_CURSOR_PARAMETERS;
+
+typedef struct _ENABLE_HARDWARE_ICON_CURSOR_PS_ALLOCATION
+{
+  ENABLE_HARDWARE_ICON_CURSOR_PARAMETERS  sEnableIcon;
+  ENABLE_CRTC_PARAMETERS                  sReserved;
+}ENABLE_HARDWARE_ICON_CURSOR_PS_ALLOCATION;
+
+typedef struct _ENABLE_GRAPH_SURFACE_PARAMETERS
+{
+  USHORT usHight;                     // Image Height
+  USHORT usWidth;                     // Image Width
+  UCHAR  ucSurface;                   // Surface 1 or 2
+  UCHAR  ucPadding[3];
+}ENABLE_GRAPH_SURFACE_PARAMETERS;
+
+typedef struct _ENABLE_GRAPH_SURFACE_PARAMETERS_V1_2
+{
+  USHORT usHight;                     // Image Height
+  USHORT usWidth;                     // Image Width
+  UCHAR  ucSurface;                   // Surface 1 or 2
+  UCHAR  ucEnable;                    // ATOM_ENABLE or ATOM_DISABLE
+  UCHAR  ucPadding[2];
+}ENABLE_GRAPH_SURFACE_PARAMETERS_V1_2;
+
+typedef struct _ENABLE_GRAPH_SURFACE_PARAMETERS_V1_3
+{
+  USHORT usHight;                     // Image Height
+  USHORT usWidth;                     // Image Width
+  UCHAR  ucSurface;                   // Surface 1 or 2
+  UCHAR  ucEnable;                    // ATOM_ENABLE or ATOM_DISABLE
+  USHORT usDeviceId;                  // Active Device Id for this surface. If no device, set to 0.
+}ENABLE_GRAPH_SURFACE_PARAMETERS_V1_3;
+
+typedef struct _ENABLE_GRAPH_SURFACE_PS_ALLOCATION
+{
+  ENABLE_GRAPH_SURFACE_PARAMETERS sSetSurface;
+  ENABLE_YUV_PS_ALLOCATION        sReserved;  // Don't set this one
+}ENABLE_GRAPH_SURFACE_PS_ALLOCATION;
+
+typedef struct _MEMORY_CLEAN_UP_PARAMETERS
+{
+  USHORT  usMemoryStart;              //in 8Kb boundary, offset from memory base address
+  USHORT  usMemorySize;               //8Kb blocks aligned
+}MEMORY_CLEAN_UP_PARAMETERS;
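Editor's sketch (not part of the diff): both MEMORY_CLEAN_UP_PARAMETERS fields are expressed in 8 Kb blocks, so byte quantities have to be scaled down before the call; byte_offset and byte_len are assumed inputs.

	static void atom_fill_memory_clean_up_sketch(ULONG byte_offset, ULONG byte_len)
	{
		MEMORY_CLEAN_UP_PARAMETERS mc;

		mc.usMemoryStart = (USHORT)(byte_offset >> 13); /* 8 Kb = 1 << 13 bytes */
		mc.usMemorySize  = (USHORT)(byte_len >> 13);    /* length in 8 Kb blocks */
		(void)mc;
	}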
 #define MEMORY_CLEAN_UP_PS_ALLOCATION MEMORY_CLEAN_UP_PARAMETERS
 
-typedef struct _GET_DISPLAY_SURFACE_SIZE_PARAMETERS {
-	USHORT usX_Size;	/* When use as input parameter, usX_Size indicates which CRTC */
-	USHORT usY_Size;
-} GET_DISPLAY_SURFACE_SIZE_PARAMETERS;
+typedef struct _GET_DISPLAY_SURFACE_SIZE_PARAMETERS
+{
+  USHORT  usX_Size;          //When used as an input parameter, usX_Size indicates which CRTC
+  USHORT  usY_Size;
+}GET_DISPLAY_SURFACE_SIZE_PARAMETERS;
 
-typedef struct _INDIRECT_IO_ACCESS {
-	ATOM_COMMON_TABLE_HEADER sHeader;
-	UCHAR IOAccessSequence[256];
-} INDIRECT_IO_ACCESS;
+typedef struct _INDIRECT_IO_ACCESS
+{
+  ATOM_COMMON_TABLE_HEADER sHeader;
+  UCHAR                    IOAccessSequence[256];
+} INDIRECT_IO_ACCESS;
 
 #define INDIRECT_READ              0x00
@@ -3615,93 +4414,108 @@ typedef struct _INDIRECT_IO_ACCESS {
 #define INDIRECT_IO_NBMISC_READ    INDIRECT_IO_NBMISC | INDIRECT_READ
 #define INDIRECT_IO_NBMISC_WRITE   INDIRECT_IO_NBMISC | INDIRECT_WRITE
 
3618typedef struct _ATOM_OEM_INFO { 4417typedef struct _ATOM_OEM_INFO
3619 ATOM_COMMON_TABLE_HEADER sHeader; 4418{
3620 ATOM_I2C_ID_CONFIG_ACCESS sucI2cId; 4419 ATOM_COMMON_TABLE_HEADER sHeader;
3621} ATOM_OEM_INFO; 4420 ATOM_I2C_ID_CONFIG_ACCESS sucI2cId;
3622 4421}ATOM_OEM_INFO;
3623typedef struct _ATOM_TV_MODE { 4422
3624 UCHAR ucVMode_Num; /* Video mode number */ 4423typedef struct _ATOM_TV_MODE
3625 UCHAR ucTV_Mode_Num; /* Internal TV mode number */ 4424{
3626} ATOM_TV_MODE; 4425 UCHAR ucVMode_Num; //Video mode number
3627 4426 UCHAR ucTV_Mode_Num; //Internal TV mode number
3628typedef struct _ATOM_BIOS_INT_TVSTD_MODE { 4427}ATOM_TV_MODE;
3629 ATOM_COMMON_TABLE_HEADER sHeader; 4428
3630 USHORT usTV_Mode_LUT_Offset; /* Pointer to standard to internal number conversion table */ 4429typedef struct _ATOM_BIOS_INT_TVSTD_MODE
3631 USHORT usTV_FIFO_Offset; /* Pointer to FIFO entry table */ 4430{
3632 USHORT usNTSC_Tbl_Offset; /* Pointer to SDTV_Mode_NTSC table */ 4431 ATOM_COMMON_TABLE_HEADER sHeader;
3633 USHORT usPAL_Tbl_Offset; /* Pointer to SDTV_Mode_PAL table */ 4432 USHORT usTV_Mode_LUT_Offset; // Pointer to standard to internal number conversion table
3634 USHORT usCV_Tbl_Offset; /* Pointer to SDTV_Mode_PAL table */ 4433 USHORT usTV_FIFO_Offset; // Pointer to FIFO entry table
3635} ATOM_BIOS_INT_TVSTD_MODE; 4434 USHORT usNTSC_Tbl_Offset; // Pointer to SDTV_Mode_NTSC table
3636 4435 USHORT usPAL_Tbl_Offset; // Pointer to SDTV_Mode_PAL table
3637typedef struct _ATOM_TV_MODE_SCALER_PTR { 4436 USHORT usCV_Tbl_Offset; // Pointer to SDTV_Mode_PAL table
3638 USHORT ucFilter0_Offset; /* Pointer to filter format 0 coefficients */ 4437}ATOM_BIOS_INT_TVSTD_MODE;
3639 USHORT usFilter1_Offset; /* Pointer to filter format 0 coefficients */ 4438
3640 UCHAR ucTV_Mode_Num; 4439
3641} ATOM_TV_MODE_SCALER_PTR; 4440typedef struct _ATOM_TV_MODE_SCALER_PTR
3642 4441{
3643typedef struct _ATOM_STANDARD_VESA_TIMING { 4442 USHORT ucFilter0_Offset; //Pointer to filter format 0 coefficients
3644 ATOM_COMMON_TABLE_HEADER sHeader; 4443 USHORT usFilter1_Offset; //Pointer to filter format 0 coefficients
3645 ATOM_DTD_FORMAT aModeTimings[16]; /* 16 is not the real array number, just for initial allocation */ 4444 UCHAR ucTV_Mode_Num;
3646} ATOM_STANDARD_VESA_TIMING; 4445}ATOM_TV_MODE_SCALER_PTR;
3647 4446
3648typedef struct _ATOM_STD_FORMAT { 4447typedef struct _ATOM_STANDARD_VESA_TIMING
3649 USHORT usSTD_HDisp; 4448{
3650 USHORT usSTD_VDisp; 4449 ATOM_COMMON_TABLE_HEADER sHeader;
3651 USHORT usSTD_RefreshRate; 4450 ATOM_DTD_FORMAT aModeTimings[16]; // 16 is not the real array number, just for initial allocation
3652 USHORT usReserved; 4451}ATOM_STANDARD_VESA_TIMING;
3653} ATOM_STD_FORMAT; 4452
3654 4453
3655typedef struct _ATOM_VESA_TO_EXTENDED_MODE { 4454typedef struct _ATOM_STD_FORMAT
3656 USHORT usVESA_ModeNumber; 4455{
3657 USHORT usExtendedModeNumber; 4456 USHORT usSTD_HDisp;
3658} ATOM_VESA_TO_EXTENDED_MODE; 4457 USHORT usSTD_VDisp;
3659 4458 USHORT usSTD_RefreshRate;
3660typedef struct _ATOM_VESA_TO_INTENAL_MODE_LUT { 4459 USHORT usReserved;
3661 ATOM_COMMON_TABLE_HEADER sHeader; 4460}ATOM_STD_FORMAT;
3662 ATOM_VESA_TO_EXTENDED_MODE asVESA_ToExtendedModeInfo[76]; 4461
3663} ATOM_VESA_TO_INTENAL_MODE_LUT; 4462typedef struct _ATOM_VESA_TO_EXTENDED_MODE
4463{
4464 USHORT usVESA_ModeNumber;
4465 USHORT usExtendedModeNumber;
4466}ATOM_VESA_TO_EXTENDED_MODE;
4467
4468typedef struct _ATOM_VESA_TO_INTENAL_MODE_LUT
4469{
4470 ATOM_COMMON_TABLE_HEADER sHeader;
4471 ATOM_VESA_TO_EXTENDED_MODE asVESA_ToExtendedModeInfo[76];
4472}ATOM_VESA_TO_INTENAL_MODE_LUT;
3664 4473
 /*************** ATOM Memory Related Data Structure ***********************/
-typedef struct _ATOM_MEMORY_VENDOR_BLOCK {
-	UCHAR ucMemoryType;
-	UCHAR ucMemoryVendor;
-	UCHAR ucAdjMCId;
-	UCHAR ucDynClkId;
-	ULONG ulDllResetClkRange;
-} ATOM_MEMORY_VENDOR_BLOCK;
+typedef struct _ATOM_MEMORY_VENDOR_BLOCK{
+  UCHAR  ucMemoryType;
+  UCHAR  ucMemoryVendor;
+  UCHAR  ucAdjMCId;
+  UCHAR  ucDynClkId;
+  ULONG  ulDllResetClkRange;
+}ATOM_MEMORY_VENDOR_BLOCK;
 
-typedef struct _ATOM_MEMORY_SETTING_ID_CONFIG {
+typedef struct _ATOM_MEMORY_SETTING_ID_CONFIG{
 #if ATOM_BIG_ENDIAN
   ULONG ucMemBlkId:8;
   ULONG ulMemClockRange:24;
 #else
   ULONG ulMemClockRange:24;
   ULONG ucMemBlkId:8;
 #endif
-} ATOM_MEMORY_SETTING_ID_CONFIG;
+}ATOM_MEMORY_SETTING_ID_CONFIG;
 
-typedef union _ATOM_MEMORY_SETTING_ID_CONFIG_ACCESS {
-	ATOM_MEMORY_SETTING_ID_CONFIG slAccess;
-	ULONG ulAccess;
-} ATOM_MEMORY_SETTING_ID_CONFIG_ACCESS;
+typedef union _ATOM_MEMORY_SETTING_ID_CONFIG_ACCESS
+{
+  ATOM_MEMORY_SETTING_ID_CONFIG slAccess;
+  ULONG                         ulAccess;
+}ATOM_MEMORY_SETTING_ID_CONFIG_ACCESS;
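Editor's sketch (not part of the diff): the union exists so the 24-bit clock range plus the 8-bit block ID can be addressed as one packed ULONG key; the concrete values below are illustrative.

	static ULONG atom_pack_memory_setting_id_sketch(void)
	{
		ATOM_MEMORY_SETTING_ID_CONFIG_ACCESS id;

		id.slAccess.ulMemClockRange = 80000; /* 800 MHz, in 10 kHz units */
		id.slAccess.ucMemBlkId      = 1;     /* block this setting belongs to */
		return id.ulAccess;                  /* both fields in one 32-bit value */
	}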
-typedef struct _ATOM_MEMORY_SETTING_DATA_BLOCK {
-	ATOM_MEMORY_SETTING_ID_CONFIG_ACCESS ulMemoryID;
-	ULONG aulMemData[1];
-} ATOM_MEMORY_SETTING_DATA_BLOCK;
-
-typedef struct _ATOM_INIT_REG_INDEX_FORMAT {
-	USHORT usRegIndex;		/* MC register index */
-	UCHAR ucPreRegDataLength;	/* offset in ATOM_INIT_REG_DATA_BLOCK.saRegDataBuf */
-} ATOM_INIT_REG_INDEX_FORMAT;
-
-typedef struct _ATOM_INIT_REG_BLOCK {
-	USHORT usRegIndexTblSize;	/* size of asRegIndexBuf */
-	USHORT usRegDataBlkSize;	/* size of ATOM_MEMORY_SETTING_DATA_BLOCK */
-	ATOM_INIT_REG_INDEX_FORMAT asRegIndexBuf[1];
-	ATOM_MEMORY_SETTING_DATA_BLOCK asRegDataBuf[1];
-} ATOM_INIT_REG_BLOCK;
+typedef struct _ATOM_MEMORY_SETTING_DATA_BLOCK{
+  ATOM_MEMORY_SETTING_ID_CONFIG_ACCESS  ulMemoryID;
+  ULONG                                 aulMemData[1];
+}ATOM_MEMORY_SETTING_DATA_BLOCK;
+
+typedef struct _ATOM_INIT_REG_INDEX_FORMAT{
+  USHORT usRegIndex;                   // MC register index
+  UCHAR  ucPreRegDataLength;           // offset in ATOM_INIT_REG_DATA_BLOCK.saRegDataBuf
+}ATOM_INIT_REG_INDEX_FORMAT;
+
+typedef struct _ATOM_INIT_REG_BLOCK{
+  USHORT                         usRegIndexTblSize;  //size of asRegIndexBuf
+  USHORT                         usRegDataBlkSize;   //size of ATOM_MEMORY_SETTING_DATA_BLOCK
+  ATOM_INIT_REG_INDEX_FORMAT     asRegIndexBuf[1];
+  ATOM_MEMORY_SETTING_DATA_BLOCK asRegDataBuf[1];
+}ATOM_INIT_REG_BLOCK;
 
 #define END_OF_REG_INDEX_BLOCK          0x0ffff
 #define END_OF_REG_DATA_BLOCK           0x00000000
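Editor's sketch (not part of the diff): the index list inside ATOM_INIT_REG_BLOCK is variable length and is closed by an END_OF_REG_INDEX_BLOCK entry, so consumers walk it until the terminator; the [1] array declarations are the usual trailing-array idiom.

	static int atom_count_reg_indices_sketch(ATOM_INIT_REG_BLOCK *blk)
	{
		int n = 0;

		/* scan until the END_OF_REG_INDEX_BLOCK sentinel entry */
		while (blk->asRegIndexBuf[n].usRegIndex != END_OF_REG_INDEX_BLOCK)
			n++;
		return n;
	}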
@@ -3716,16 +4530,19 @@ typedef struct _ATOM_INIT_REG_BLOCK {
 #define INDEX_ACCESS_RANGE_END          (INDEX_ACCESS_RANGE_BEGIN + 1)
 #define VALUE_INDEX_ACCESS_SINGLE       (INDEX_ACCESS_RANGE_END + 1)
 
-typedef struct _ATOM_MC_INIT_PARAM_TABLE {
-	ATOM_COMMON_TABLE_HEADER sHeader;
-	USHORT usAdjustARB_SEQDataOffset;
-	USHORT usMCInitMemTypeTblOffset;
-	USHORT usMCInitCommonTblOffset;
-	USHORT usMCInitPowerDownTblOffset;
-	ULONG ulARB_SEQDataBuf[32];
-	ATOM_INIT_REG_BLOCK asMCInitMemType;
-	ATOM_INIT_REG_BLOCK asMCInitCommon;
-} ATOM_MC_INIT_PARAM_TABLE;
+typedef struct _ATOM_MC_INIT_PARAM_TABLE
+{
+  ATOM_COMMON_TABLE_HEADER sHeader;
+  USHORT                   usAdjustARB_SEQDataOffset;
+  USHORT                   usMCInitMemTypeTblOffset;
+  USHORT                   usMCInitCommonTblOffset;
+  USHORT                   usMCInitPowerDownTblOffset;
+  ULONG                    ulARB_SEQDataBuf[32];
+  ATOM_INIT_REG_BLOCK      asMCInitMemType;
+  ATOM_INIT_REG_BLOCK      asMCInitCommon;
+}ATOM_MC_INIT_PARAM_TABLE;
+
 
 #define _4Mx16                          0x2
 #define _4Mx32                          0x3
@@ -3751,221 +4568,272 @@ typedef struct _ATOM_MC_INIT_PARAM_TABLE {
 
 #define QIMONDA                     INFINEON
 #define PROMOS                      MOSEL
+#define KRETON                      INFINEON
 
-/* ///////////Support for GDDR5 MC uCode to reside in upper 64K of ROM///////////// */
+/////////////Support for GDDR5 MC uCode to reside in upper 64K of ROM/////////////
 
 #define UCODE_ROM_START_ADDRESS     0x1c000
-#define UCODE_SIGNATURE             0x4375434d /* 'MCuC' - MC uCode */
+#define UCODE_SIGNATURE             0x4375434d // 'MCuC' - MC uCode
 
-/* uCode block header for reference */
+//uCode block header for reference
 
-typedef struct _MCuCodeHeader {
-	ULONG ulSignature;
-	UCHAR ucRevision;
-	UCHAR ucChecksum;
-	UCHAR ucReserved1;
-	UCHAR ucReserved2;
-	USHORT usParametersLength;
-	USHORT usUCodeLength;
-	USHORT usReserved1;
-	USHORT usReserved2;
-} MCuCodeHeader;
+typedef struct _MCuCodeHeader
+{
+  ULONG  ulSignature;
+  UCHAR  ucRevision;
+  UCHAR  ucChecksum;
+  UCHAR  ucReserved1;
+  UCHAR  ucReserved2;
+  USHORT usParametersLength;
+  USHORT usUCodeLength;
+  USHORT usReserved1;
+  USHORT usReserved2;
+} MCuCodeHeader;
 
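Editor's sketch (not part of the diff): the MC uCode block sits at a fixed offset in the upper 64K of the ROM, so locating it is a cast plus a signature check; the 'rom' pointer to the VBIOS image is an assumption.

	static const MCuCodeHeader *atom_find_mc_ucode_sketch(const UCHAR *rom)
	{
		const MCuCodeHeader *hdr =
			(const MCuCodeHeader *)(rom + UCODE_ROM_START_ADDRESS);

		/* usParametersLength bytes of parameters are followed by
		   usUCodeLength bytes of engine code */
		return (hdr->ulSignature == UCODE_SIGNATURE) ? hdr : (const MCuCodeHeader *)0;
	}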
-/* //////////////////////////////////////////////////////////////////////////////// */
+//////////////////////////////////////////////////////////////////////////////////
 
 #define ATOM_MAX_NUMBER_OF_VRAM_MODULE  16
 
 #define ATOM_VRAM_MODULE_MEMORY_VENDOR_ID_MASK  0xF
3779typedef struct _ATOM_VRAM_MODULE_V1 { 4598typedef struct _ATOM_VRAM_MODULE_V1
3780 ULONG ulReserved; 4599{
3781 USHORT usEMRSValue; 4600 ULONG ulReserved;
3782 USHORT usMRSValue; 4601 USHORT usEMRSValue;
3783 USHORT usReserved; 4602 USHORT usMRSValue;
3784 UCHAR ucExtMemoryID; /* An external indicator (by hardcode, callback or pin) to tell what is the current memory module */ 4603 USHORT usReserved;
3785 UCHAR ucMemoryType; /* [7:4]=0x1:DDR1;=0x2:DDR2;=0x3:DDR3;=0x4:DDR4;[3:0] reserved; */ 4604 UCHAR ucExtMemoryID; // An external indicator (by hardcode, callback or pin) to tell what is the current memory module
3786 UCHAR ucMemoryVenderID; /* Predefined,never change across designs or memory type/vender */ 4605 UCHAR ucMemoryType; // [7:4]=0x1:DDR1;=0x2:DDR2;=0x3:DDR3;=0x4:DDR4;[3:0] reserved;
3787 UCHAR ucMemoryDeviceCfg; /* [7:4]=0x0:4M;=0x1:8M;=0x2:16M;0x3:32M....[3:0]=0x0:x4;=0x1:x8;=0x2:x16;=0x3:x32... */ 4606 UCHAR ucMemoryVenderID; // Predefined,never change across designs or memory type/vender
3788 UCHAR ucRow; /* Number of Row,in power of 2; */ 4607 UCHAR ucMemoryDeviceCfg; // [7:4]=0x0:4M;=0x1:8M;=0x2:16M;0x3:32M....[3:0]=0x0:x4;=0x1:x8;=0x2:x16;=0x3:x32...
3789 UCHAR ucColumn; /* Number of Column,in power of 2; */ 4608 UCHAR ucRow; // Number of Row,in power of 2;
3790 UCHAR ucBank; /* Nunber of Bank; */ 4609 UCHAR ucColumn; // Number of Column,in power of 2;
3791 UCHAR ucRank; /* Number of Rank, in power of 2 */ 4610 UCHAR ucBank; // Nunber of Bank;
3792 UCHAR ucChannelNum; /* Number of channel; */ 4611 UCHAR ucRank; // Number of Rank, in power of 2
3793 UCHAR ucChannelConfig; /* [3:0]=Indication of what channel combination;[4:7]=Channel bit width, in number of 2 */ 4612 UCHAR ucChannelNum; // Number of channel;
3794 UCHAR ucDefaultMVDDQ_ID; /* Default MVDDQ setting for this memory block, ID linking to MVDDQ info table to find real set-up data; */ 4613 UCHAR ucChannelConfig; // [3:0]=Indication of what channel combination;[4:7]=Channel bit width, in number of 2
3795 UCHAR ucDefaultMVDDC_ID; /* Default MVDDC setting for this memory block, ID linking to MVDDC info table to find real set-up data; */ 4614 UCHAR ucDefaultMVDDQ_ID; // Default MVDDQ setting for this memory block, ID linking to MVDDQ info table to find real set-up data;
3796 UCHAR ucReserved[2]; 4615 UCHAR ucDefaultMVDDC_ID; // Default MVDDC setting for this memory block, ID linking to MVDDC info table to find real set-up data;
3797} ATOM_VRAM_MODULE_V1; 4616 UCHAR ucReserved[2];
3798 4617}ATOM_VRAM_MODULE_V1;
3799typedef struct _ATOM_VRAM_MODULE_V2 { 4618
3800 ULONG ulReserved; 4619
3801 ULONG ulFlags; /* To enable/disable functionalities based on memory type */ 4620typedef struct _ATOM_VRAM_MODULE_V2
3802 ULONG ulEngineClock; /* Override of default engine clock for particular memory type */ 4621{
3803 ULONG ulMemoryClock; /* Override of default memory clock for particular memory type */ 4622 ULONG ulReserved;
3804 USHORT usEMRS2Value; /* EMRS2 Value is used for GDDR2 and GDDR4 memory type */ 4623 ULONG ulFlags; // To enable/disable functionalities based on memory type
3805 USHORT usEMRS3Value; /* EMRS3 Value is used for GDDR2 and GDDR4 memory type */ 4624 ULONG ulEngineClock; // Override of default engine clock for particular memory type
3806 USHORT usEMRSValue; 4625 ULONG ulMemoryClock; // Override of default memory clock for particular memory type
3807 USHORT usMRSValue; 4626 USHORT usEMRS2Value; // EMRS2 Value is used for GDDR2 and GDDR4 memory type
3808 USHORT usReserved; 4627 USHORT usEMRS3Value; // EMRS3 Value is used for GDDR2 and GDDR4 memory type
3809 UCHAR ucExtMemoryID; /* An external indicator (by hardcode, callback or pin) to tell what is the current memory module */ 4628 USHORT usEMRSValue;
3810 UCHAR ucMemoryType; /* [7:4]=0x1:DDR1;=0x2:DDR2;=0x3:DDR3;=0x4:DDR4;[3:0] - must not be used for now; */ 4629 USHORT usMRSValue;
3811 UCHAR ucMemoryVenderID; /* Predefined,never change across designs or memory type/vender. If not predefined, vendor detection table gets executed */ 4630 USHORT usReserved;
3812 UCHAR ucMemoryDeviceCfg; /* [7:4]=0x0:4M;=0x1:8M;=0x2:16M;0x3:32M....[3:0]=0x0:x4;=0x1:x8;=0x2:x16;=0x3:x32... */ 4631 UCHAR ucExtMemoryID; // An external indicator (by hardcode, callback or pin) to tell what is the current memory module
3813 UCHAR ucRow; /* Number of Row,in power of 2; */ 4632 UCHAR ucMemoryType; // [7:4]=0x1:DDR1;=0x2:DDR2;=0x3:DDR3;=0x4:DDR4;[3:0] - must not be used for now;
3814 UCHAR ucColumn; /* Number of Column,in power of 2; */ 4633 UCHAR ucMemoryVenderID; // Predefined,never change across designs or memory type/vender. If not predefined, vendor detection table gets executed
3815 UCHAR ucBank; /* Nunber of Bank; */ 4634 UCHAR ucMemoryDeviceCfg; // [7:4]=0x0:4M;=0x1:8M;=0x2:16M;0x3:32M....[3:0]=0x0:x4;=0x1:x8;=0x2:x16;=0x3:x32...
3816 UCHAR ucRank; /* Number of Rank, in power of 2 */ 4635 UCHAR ucRow; // Number of Row,in power of 2;
3817 UCHAR ucChannelNum; /* Number of channel; */ 4636 UCHAR ucColumn; // Number of Column,in power of 2;
3818 UCHAR ucChannelConfig; /* [3:0]=Indication of what channel combination;[4:7]=Channel bit width, in number of 2 */ 4637 UCHAR ucBank; // Nunber of Bank;
3819 UCHAR ucDefaultMVDDQ_ID; /* Default MVDDQ setting for this memory block, ID linking to MVDDQ info table to find real set-up data; */ 4638 UCHAR ucRank; // Number of Rank, in power of 2
3820 UCHAR ucDefaultMVDDC_ID; /* Default MVDDC setting for this memory block, ID linking to MVDDC info table to find real set-up data; */ 4639 UCHAR ucChannelNum; // Number of channel;
3821 UCHAR ucRefreshRateFactor; 4640 UCHAR ucChannelConfig; // [3:0]=Indication of what channel combination;[4:7]=Channel bit width, in number of 2
3822 UCHAR ucReserved[3]; 4641 UCHAR ucDefaultMVDDQ_ID; // Default MVDDQ setting for this memory block, ID linking to MVDDQ info table to find real set-up data;
3823} ATOM_VRAM_MODULE_V2; 4642 UCHAR ucDefaultMVDDC_ID; // Default MVDDC setting for this memory block, ID linking to MVDDC info table to find real set-up data;
3824 4643 UCHAR ucRefreshRateFactor;
3825typedef struct _ATOM_MEMORY_TIMING_FORMAT { 4644 UCHAR ucReserved[3];
3826 ULONG ulClkRange; /* memory clock in 10kHz unit, when target memory clock is below this clock, use this memory timing */ 4645}ATOM_VRAM_MODULE_V2;
3827 union { 4646
3828 USHORT usMRS; /* mode register */ 4647
3829 USHORT usDDR3_MR0; 4648typedef struct _ATOM_MEMORY_TIMING_FORMAT
3830 }; 4649{
3831 union { 4650 ULONG ulClkRange; // memory clock in 10kHz unit, when target memory clock is below this clock, use this memory timing
3832 USHORT usEMRS; /* extended mode register */ 4651 union{
3833 USHORT usDDR3_MR1; 4652 USHORT usMRS; // mode register
3834 }; 4653 USHORT usDDR3_MR0;
3835 UCHAR ucCL; /* CAS latency */ 4654 };
3836 UCHAR ucWL; /* WRITE Latency */ 4655 union{
3837 UCHAR uctRAS; /* tRAS */ 4656 USHORT usEMRS; // extended mode register
3838 UCHAR uctRC; /* tRC */ 4657 USHORT usDDR3_MR1;
3839 UCHAR uctRFC; /* tRFC */ 4658 };
3840 UCHAR uctRCDR; /* tRCDR */ 4659 UCHAR ucCL; // CAS latency
3841 UCHAR uctRCDW; /* tRCDW */ 4660 UCHAR ucWL; // WRITE Latency
3842 UCHAR uctRP; /* tRP */ 4661 UCHAR uctRAS; // tRAS
3843 UCHAR uctRRD; /* tRRD */ 4662 UCHAR uctRC; // tRC
3844 UCHAR uctWR; /* tWR */ 4663 UCHAR uctRFC; // tRFC
3845 UCHAR uctWTR; /* tWTR */ 4664 UCHAR uctRCDR; // tRCDR
3846 UCHAR uctPDIX; /* tPDIX */ 4665 UCHAR uctRCDW; // tRCDW
3847 UCHAR uctFAW; /* tFAW */ 4666 UCHAR uctRP; // tRP
3848 UCHAR uctAOND; /* tAOND */ 4667 UCHAR uctRRD; // tRRD
3849 union { 4668 UCHAR uctWR; // tWR
3850 struct { 4669 UCHAR uctWTR; // tWTR
3851 UCHAR ucflag; /* flag to control memory timing calculation. bit0= control EMRS2 Infineon */ 4670 UCHAR uctPDIX; // tPDIX
3852 UCHAR ucReserved; 4671 UCHAR uctFAW; // tFAW
3853 }; 4672 UCHAR uctAOND; // tAOND
3854 USHORT usDDR3_MR2; 4673 union
3855 }; 4674 {
3856} ATOM_MEMORY_TIMING_FORMAT; 4675 struct {
3857 4676 UCHAR ucflag; // flag to control memory timing calculation. bit0= control EMRS2 Infineon
3858typedef struct _ATOM_MEMORY_TIMING_FORMAT_V1 { 4677 UCHAR ucReserved;
3859 ULONG ulClkRange; /* memory clock in 10kHz unit, when target memory clock is below this clock, use this memory timing */ 4678 };
3860 USHORT usMRS; /* mode register */ 4679 USHORT usDDR3_MR2;
3861 USHORT usEMRS; /* extended mode register */ 4680 };
3862 UCHAR ucCL; /* CAS latency */ 4681}ATOM_MEMORY_TIMING_FORMAT;
3863 UCHAR ucWL; /* WRITE Latency */ 4682
3864 UCHAR uctRAS; /* tRAS */ 4683
3865 UCHAR uctRC; /* tRC */ 4684typedef struct _ATOM_MEMORY_TIMING_FORMAT_V1
3866 UCHAR uctRFC; /* tRFC */ 4685{
3867 UCHAR uctRCDR; /* tRCDR */ 4686 ULONG ulClkRange; // memory clock in 10kHz unit, when target memory clock is below this clock, use this memory timing
3868 UCHAR uctRCDW; /* tRCDW */ 4687 USHORT usMRS; // mode register
3869 UCHAR uctRP; /* tRP */ 4688 USHORT usEMRS; // extended mode register
3870 UCHAR uctRRD; /* tRRD */ 4689 UCHAR ucCL; // CAS latency
3871 UCHAR uctWR; /* tWR */ 4690 UCHAR ucWL; // WRITE Latency
3872 UCHAR uctWTR; /* tWTR */ 4691 UCHAR uctRAS; // tRAS
3873 UCHAR uctPDIX; /* tPDIX */ 4692 UCHAR uctRC; // tRC
3874 UCHAR uctFAW; /* tFAW */ 4693 UCHAR uctRFC; // tRFC
3875 UCHAR uctAOND; /* tAOND */ 4694 UCHAR uctRCDR; // tRCDR
3876 UCHAR ucflag; /* flag to control memory timing calculation. bit0= control EMRS2 Infineon */ 4695 UCHAR uctRCDW; // tRCDW
3877/* ///////////////////////GDDR parameters/////////////////////////////////// */ 4696 UCHAR uctRP; // tRP
3878 UCHAR uctCCDL; /* */ 4697 UCHAR uctRRD; // tRRD
3879 UCHAR uctCRCRL; /* */ 4698 UCHAR uctWR; // tWR
3880 UCHAR uctCRCWL; /* */ 4699 UCHAR uctWTR; // tWTR
3881 UCHAR uctCKE; /* */ 4700 UCHAR uctPDIX; // tPDIX
3882 UCHAR uctCKRSE; /* */ 4701 UCHAR uctFAW; // tFAW
3883 UCHAR uctCKRSX; /* */ 4702 UCHAR uctAOND; // tAOND
3884 UCHAR uctFAW32; /* */ 4703 UCHAR ucflag; // flag to control memory timing calculation. bit0= control EMRS2 Infineon
3885 UCHAR ucReserved1; /* */ 4704////////////////////////////////////GDDR parameters///////////////////////////////////
3886 UCHAR ucReserved2; /* */ 4705 UCHAR uctCCDL; //
3887 UCHAR ucTerminator; 4706 UCHAR uctCRCRL; //
3888} ATOM_MEMORY_TIMING_FORMAT_V1; 4707 UCHAR uctCRCWL; //
3889 4708 UCHAR uctCKE; //
3890typedef struct _ATOM_MEMORY_FORMAT { 4709 UCHAR uctCKRSE; //
3891 ULONG ulDllDisClock; /* memory DLL will be disable when target memory clock is below this clock */ 4710 UCHAR uctCKRSX; //
3892 union { 4711 UCHAR uctFAW32; //
3893 USHORT usEMRS2Value; /* EMRS2 Value is used for GDDR2 and GDDR4 memory type */ 4712 UCHAR ucMR5lo; //
3894 USHORT usDDR3_Reserved; /* Not used for DDR3 memory */ 4713 UCHAR ucMR5hi; //
3895 }; 4714 UCHAR ucTerminator;
3896 union { 4715}ATOM_MEMORY_TIMING_FORMAT_V1;
3897 USHORT usEMRS3Value; /* EMRS3 Value is used for GDDR2 and GDDR4 memory type */ 4716
3898 USHORT usDDR3_MR3; /* Used for DDR3 memory */ 4717typedef struct _ATOM_MEMORY_TIMING_FORMAT_V2
3899 }; 4718{
3900 UCHAR ucMemoryType; /* [7:4]=0x1:DDR1;=0x2:DDR2;=0x3:DDR3;=0x4:DDR4;[3:0] - must not be used for now; */ 4719 ULONG ulClkRange; // memory clock in 10kHz unit, when target memory clock is below this clock, use this memory timing
3901 UCHAR ucMemoryVenderID; /* Predefined,never change across designs or memory type/vender. If not predefined, vendor detection table gets executed */ 4720 USHORT usMRS; // mode register
3902 UCHAR ucRow; /* Number of Row,in power of 2; */ 4721 USHORT usEMRS; // extended mode register
3903 UCHAR ucColumn; /* Number of Column,in power of 2; */ 4722 UCHAR ucCL; // CAS latency
3904 UCHAR ucBank; /* Nunber of Bank; */ 4723 UCHAR ucWL; // WRITE Latency
3905 UCHAR ucRank; /* Number of Rank, in power of 2 */ 4724 UCHAR uctRAS; // tRAS
3906 UCHAR ucBurstSize; /* burst size, 0= burst size=4 1= burst size=8 */ 4725 UCHAR uctRC; // tRC
3907 UCHAR ucDllDisBit; /* position of DLL Enable/Disable bit in EMRS ( Extended Mode Register ) */ 4726 UCHAR uctRFC; // tRFC
3908 UCHAR ucRefreshRateFactor; /* memory refresh rate in unit of ms */ 4727 UCHAR uctRCDR; // tRCDR
3909 UCHAR ucDensity; /* _8Mx32, _16Mx32, _16Mx16, _32Mx16 */ 4728 UCHAR uctRCDW; // tRCDW
3910 UCHAR ucPreamble; /* [7:4] Write Preamble, [3:0] Read Preamble */ 4729 UCHAR uctRP; // tRP
3911 UCHAR ucMemAttrib; /* Memory Device Addribute, like RDBI/WDBI etc */ 4730 UCHAR uctRRD; // tRRD
3912 ATOM_MEMORY_TIMING_FORMAT asMemTiming[5]; /* Memory Timing block sort from lower clock to higher clock */ 4731 UCHAR uctWR; // tWR
3913} ATOM_MEMORY_FORMAT; 4732 UCHAR uctWTR; // tWTR
3914 4733 UCHAR uctPDIX; // tPDIX
3915typedef struct _ATOM_VRAM_MODULE_V3 { 4734 UCHAR uctFAW; // tFAW
3916 ULONG ulChannelMapCfg; /* board dependent paramenter:Channel combination */ 4735 UCHAR uctAOND; // tAOND
3917 USHORT usSize; /* size of ATOM_VRAM_MODULE_V3 */ 4736 UCHAR ucflag; // flag to control memory timing calculation. bit0= control EMRS2 Infineon
3918 USHORT usDefaultMVDDQ; /* board dependent parameter:Default Memory Core Voltage */ 4737////////////////////////////////////GDDR parameters///////////////////////////////////
3919 USHORT usDefaultMVDDC; /* board dependent parameter:Default Memory IO Voltage */ 4738 UCHAR uctCCDL; //
3920 UCHAR ucExtMemoryID; /* An external indicator (by hardcode, callback or pin) to tell what is the current memory module */ 4739 UCHAR uctCRCRL; //
3921 UCHAR ucChannelNum; /* board dependent parameter:Number of channel; */ 4740 UCHAR uctCRCWL; //
3922 UCHAR ucChannelSize; /* board dependent parameter:32bit or 64bit */ 4741 UCHAR uctCKE; //
3923 UCHAR ucVREFI; /* board dependnt parameter: EXT or INT +160mv to -140mv */ 4742 UCHAR uctCKRSE; //
3924 UCHAR ucNPL_RT; /* board dependent parameter:NPL round trip delay, used for calculate memory timing parameters */ 4743 UCHAR uctCKRSX; //
3925 UCHAR ucFlag; /* To enable/disable functionalities based on memory type */ 4744 UCHAR uctFAW32; //
3926 ATOM_MEMORY_FORMAT asMemory; /* describ all of video memory parameters from memory spec */ 4745 UCHAR ucMR4lo; //
3927} ATOM_VRAM_MODULE_V3; 4746 UCHAR ucMR4hi; //
3928 4747 UCHAR ucMR5lo; //
3929/* ATOM_VRAM_MODULE_V3.ucNPL_RT */ 4748 UCHAR ucMR5hi; //
4749 UCHAR ucTerminator;
4750 UCHAR ucReserved;
4751}ATOM_MEMORY_TIMING_FORMAT_V2;
4752
4753typedef struct _ATOM_MEMORY_FORMAT
4754{
4755 ULONG ulDllDisClock; // memory DLL will be disable when target memory clock is below this clock
4756 union{
4757 USHORT usEMRS2Value; // EMRS2 Value is used for GDDR2 and GDDR4 memory type
4758 USHORT usDDR3_Reserved; // Not used for DDR3 memory
4759 };
4760 union{
4761 USHORT usEMRS3Value; // EMRS3 Value is used for GDDR2 and GDDR4 memory type
4762 USHORT usDDR3_MR3; // Used for DDR3 memory
4763 };
4764 UCHAR ucMemoryType; // [7:4]=0x1:DDR1;=0x2:DDR2;=0x3:DDR3;=0x4:DDR4;[3:0] - must not be used for now;
4765 UCHAR ucMemoryVenderID; // Predefined,never change across designs or memory type/vender. If not predefined, vendor detection table gets executed
4766 UCHAR ucRow; // Number of Row,in power of 2;
4767 UCHAR ucColumn; // Number of Column,in power of 2;
4768 UCHAR ucBank; // Nunber of Bank;
4769 UCHAR ucRank; // Number of Rank, in power of 2
4770 UCHAR ucBurstSize; // burst size, 0= burst size=4 1= burst size=8
4771 UCHAR ucDllDisBit; // position of DLL Enable/Disable bit in EMRS ( Extended Mode Register )
4772 UCHAR ucRefreshRateFactor; // memory refresh rate in unit of ms
4773 UCHAR ucDensity; // _8Mx32, _16Mx32, _16Mx16, _32Mx16
4774 UCHAR ucPreamble; //[7:4] Write Preamble, [3:0] Read Preamble
4775 UCHAR ucMemAttrib; // Memory Device Addribute, like RDBI/WDBI etc
4776 ATOM_MEMORY_TIMING_FORMAT asMemTiming[5]; //Memory Timing block sort from lower clock to higher clock
4777}ATOM_MEMORY_FORMAT;
4778
4779
4780typedef struct _ATOM_VRAM_MODULE_V3
4781{
4782 ULONG ulChannelMapCfg; // board dependent paramenter:Channel combination
4783 USHORT usSize; // size of ATOM_VRAM_MODULE_V3
4784 USHORT usDefaultMVDDQ; // board dependent parameter:Default Memory Core Voltage
4785 USHORT usDefaultMVDDC; // board dependent parameter:Default Memory IO Voltage
4786 UCHAR ucExtMemoryID; // An external indicator (by hardcode, callback or pin) to tell what is the current memory module
4787 UCHAR ucChannelNum; // board dependent parameter:Number of channel;
4788 UCHAR ucChannelSize; // board dependent parameter:32bit or 64bit
4789 UCHAR ucVREFI; // board dependnt parameter: EXT or INT +160mv to -140mv
4790 UCHAR ucNPL_RT; // board dependent parameter:NPL round trip delay, used for calculate memory timing parameters
4791 UCHAR ucFlag; // To enable/disable functionalities based on memory type
4792 ATOM_MEMORY_FORMAT asMemory; // describ all of video memory parameters from memory spec
4793}ATOM_VRAM_MODULE_V3;
4794
4795
4796//ATOM_VRAM_MODULE_V3.ucNPL_RT
3930#define NPL_RT_MASK 0x0f 4797#define NPL_RT_MASK 0x0f
3931#define BATTERY_ODT_MASK 0xc0 4798#define BATTERY_ODT_MASK 0xc0
3932 4799
3933#define ATOM_VRAM_MODULE ATOM_VRAM_MODULE_V3 4800#define ATOM_VRAM_MODULE ATOM_VRAM_MODULE_V3
3934 4801
3935typedef struct _ATOM_VRAM_MODULE_V4 { 4802typedef struct _ATOM_VRAM_MODULE_V4
3936 ULONG ulChannelMapCfg; /* board dependent parameter: Channel combination */ 4803{
3937 USHORT usModuleSize; /* size of ATOM_VRAM_MODULE_V4, make it easy for VBIOS to look for next entry of VRAM_MODULE */ 4804 ULONG ulChannelMapCfg; // board dependent parameter: Channel combination
3938 USHORT usPrivateReserved; /* BIOS internal reserved space to optimize code size, updated by the compiler, shouldn't be modified manually!! */ 4805 USHORT usModuleSize; // size of ATOM_VRAM_MODULE_V4, make it easy for VBIOS to look for next entry of VRAM_MODULE
  USHORT usPrivateReserved;    // BIOS internal reserved space to optimize code size; updated by the compiler, shouldn't be modified manually!!
                               // MC_ARB_RAMCFG (includes NOOFBANK,NOOFRANKS,NOOFROWS,NOOFCOLS)
  USHORT usReserved;
  UCHAR  ucExtMemoryID;        // An external indicator (by hardcode, callback or pin) to tell what the current memory module is
  UCHAR  ucMemoryType;         // [7:4]=0x1:DDR1;=0x2:DDR2;=0x3:DDR3;=0x4:DDR4;=0x5:DDR5; [3:0] - must be 0x0 for now
  UCHAR  ucChannelNum;         // Number of channels present in this module config
  UCHAR  ucChannelWidth;       // 0 - 32 bits; 1 - 64 bits
  UCHAR  ucDensity;            // _8Mx32, _16Mx32, _16Mx16, _32Mx16
  UCHAR  ucFlag;               // To enable/disable functionalities based on memory type
  UCHAR  ucMisc;               // bit0: 0 - single rank; 1 - dual rank;  bit2: 0 - burst length 4, 1 - burst length 8
  UCHAR  ucVREFI;              // board dependent parameter
  UCHAR  ucNPL_RT;             // board dependent parameter: NPL round trip delay, used to calculate memory timing parameters
  UCHAR  ucPreamble;           // [7:4] Write Preamble, [3:0] Read Preamble
  UCHAR  ucMemorySize;         // BIOS internal reserved space to optimize code size; updated by the compiler, shouldn't be modified manually!!
                               // Total memory size in units of 16MB for CONFIG_MEMSIZE - bit[23:0] zeros
  UCHAR  ucReserved[3];

// Compared with V3, we flatten the struct by merging ATOM_MEMORY_FORMAT (as is) into V4 at the same level.
  union {
    USHORT usEMRS2Value;       // EMRS2 value, used for GDDR2 and GDDR4 memory types
    USHORT usDDR3_Reserved;
  };
  union {
    USHORT usEMRS3Value;       // EMRS3 value, used for GDDR2 and GDDR4 memory types
    USHORT usDDR3_MR3;         // Used for DDR3 memory
  };
  UCHAR  ucMemoryVenderID;     // Predefined; if not predefined, the vendor detection table gets executed
  UCHAR  ucRefreshRateFactor;  // [1:0]=RefreshFactor (00=8ms, 01=16ms, 10=32ms, 11=64ms)
  UCHAR  ucReserved2[2];
  ATOM_MEMORY_TIMING_FORMAT asMemTiming[5]; // Memory timing blocks, sorted from lower clock to higher clock
}ATOM_VRAM_MODULE_V4;
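
For orientation, a minimal sketch of how a consumer could decode the packed fields documented above; the helper functions are hypothetical, only the bit layouts come from the struct comments (the VRAM_MODULE_V4_MISC_* masks are defined just below):

    /* Sketch: decode ATOM_VRAM_MODULE_V4 packed fields (helpers are ours). */
    static unsigned int vram_module_mem_type(const ATOM_VRAM_MODULE_V4 *m)
    {
            return (m->ucMemoryType >> 4) & 0xF;    /* [7:4]: 1=DDR1 ... 5=DDR5 */
    }

    static int vram_module_is_dual_rank(const ATOM_VRAM_MODULE_V4 *m)
    {
            /* bit0 of ucMisc: 0 = single rank, 1 = dual rank */
            return (m->ucMisc & VRAM_MODULE_V4_MISC_RANK_MASK) ==
                    VRAM_MODULE_V4_MISC_DUAL_RANK;
    }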

#define VRAM_MODULE_V4_MISC_RANK_MASK       0x3
#define VRAM_MODULE_V4_MISC_DUAL_RANK       0x1
@@ -3973,96 +4841,139 @@ typedef struct _ATOM_VRAM_MODULE_V4 {
#define VRAM_MODULE_V4_MISC_BL8             0x4
#define VRAM_MODULE_V4_MISC_DUAL_CS         0x10

typedef struct _ATOM_VRAM_MODULE_V5
{
  ULONG  ulChannelMapCfg;      // board dependent parameter: channel combination
  USHORT usModuleSize;         // size of ATOM_VRAM_MODULE_V4, to make it easy for VBIOS to look for the next VRAM_MODULE entry
  USHORT usPrivateReserved;    // BIOS internal reserved space to optimize code size; updated by the compiler, shouldn't be modified manually!!
                               // MC_ARB_RAMCFG (includes NOOFBANK,NOOFRANKS,NOOFROWS,NOOFCOLS)
  USHORT usReserved;
  UCHAR  ucExtMemoryID;        // An external indicator (by hardcode, callback or pin) to tell what the current memory module is
  UCHAR  ucMemoryType;         // [7:4]=0x1:DDR1;=0x2:DDR2;=0x3:DDR3;=0x4:DDR4;=0x5:DDR5; [3:0] - must be 0x0 for now
  UCHAR  ucChannelNum;         // Number of channels present in this module config
  UCHAR  ucChannelWidth;       // 0 - 32 bits; 1 - 64 bits
  UCHAR  ucDensity;            // _8Mx32, _16Mx32, _16Mx16, _32Mx16
  UCHAR  ucFlag;               // To enable/disable functionalities based on memory type
  UCHAR  ucMisc;               // bit0: 0 - single rank; 1 - dual rank;  bit2: 0 - burst length 4, 1 - burst length 8
  UCHAR  ucVREFI;              // board dependent parameter
  UCHAR  ucNPL_RT;             // board dependent parameter: NPL round trip delay, used to calculate memory timing parameters
  UCHAR  ucPreamble;           // [7:4] Write Preamble, [3:0] Read Preamble
  UCHAR  ucMemorySize;         // BIOS internal reserved space to optimize code size; updated by the compiler, shouldn't be modified manually!!
                               // Total memory size in units of 16MB for CONFIG_MEMSIZE - bit[23:0] zeros
  UCHAR  ucReserved[3];

// Compared with V3, we flatten the struct by merging ATOM_MEMORY_FORMAT (as is) into V4 at the same level.
  USHORT usEMRS2Value;         // EMRS2 value, used for GDDR2 and GDDR4 memory types
  USHORT usEMRS3Value;         // EMRS3 value, used for GDDR2 and GDDR4 memory types
  UCHAR  ucMemoryVenderID;     // Predefined; if not predefined, the vendor detection table gets executed
  UCHAR  ucRefreshRateFactor;  // [1:0]=RefreshFactor (00=8ms, 01=16ms, 10=32ms, 11=64ms)
  UCHAR  ucFIFODepth;          // FIFO depth is supposed to be detected during vendor detection, but if we don't do vendor detection we have to hardcode it
  UCHAR  ucCDR_Bandwidth;      // [0:3]=Read CDR bandwidth, [4:7]=Write CDR bandwidth
  ATOM_MEMORY_TIMING_FORMAT_V1 asMemTiming[5]; // Memory timing blocks, sorted from lower clock to higher clock
}ATOM_VRAM_MODULE_V5;

typedef struct _ATOM_VRAM_MODULE_V6
{
  ULONG  ulChannelMapCfg;      // board dependent parameter: channel combination
  USHORT usModuleSize;         // size of ATOM_VRAM_MODULE_V4, to make it easy for VBIOS to look for the next VRAM_MODULE entry
  USHORT usPrivateReserved;    // BIOS internal reserved space to optimize code size; updated by the compiler, shouldn't be modified manually!!
                               // MC_ARB_RAMCFG (includes NOOFBANK,NOOFRANKS,NOOFROWS,NOOFCOLS)
  USHORT usReserved;
  UCHAR  ucExtMemoryID;        // An external indicator (by hardcode, callback or pin) to tell what the current memory module is
  UCHAR  ucMemoryType;         // [7:4]=0x1:DDR1;=0x2:DDR2;=0x3:DDR3;=0x4:DDR4;=0x5:DDR5; [3:0] - must be 0x0 for now
  UCHAR  ucChannelNum;         // Number of channels present in this module config
  UCHAR  ucChannelWidth;       // 0 - 32 bits; 1 - 64 bits
  UCHAR  ucDensity;            // _8Mx32, _16Mx32, _16Mx16, _32Mx16
  UCHAR  ucFlag;               // To enable/disable functionalities based on memory type
  UCHAR  ucMisc;               // bit0: 0 - single rank; 1 - dual rank;  bit2: 0 - burst length 4, 1 - burst length 8
  UCHAR  ucVREFI;              // board dependent parameter
  UCHAR  ucNPL_RT;             // board dependent parameter: NPL round trip delay, used to calculate memory timing parameters
  UCHAR  ucPreamble;           // [7:4] Write Preamble, [3:0] Read Preamble
  UCHAR  ucMemorySize;         // BIOS internal reserved space to optimize code size; updated by the compiler, shouldn't be modified manually!!
                               // Total memory size in units of 16MB for CONFIG_MEMSIZE - bit[23:0] zeros
  UCHAR  ucReserved[3];

// Compared with V3, we flatten the struct by merging ATOM_MEMORY_FORMAT (as is) into V4 at the same level.
  USHORT usEMRS2Value;         // EMRS2 value, used for GDDR2 and GDDR4 memory types
  USHORT usEMRS3Value;         // EMRS3 value, used for GDDR2 and GDDR4 memory types
  UCHAR  ucMemoryVenderID;     // Predefined; if not predefined, the vendor detection table gets executed
  UCHAR  ucRefreshRateFactor;  // [1:0]=RefreshFactor (00=8ms, 01=16ms, 10=32ms, 11=64ms)
  UCHAR  ucFIFODepth;          // FIFO depth is supposed to be detected during vendor detection, but if we don't do vendor detection we have to hardcode it
  UCHAR  ucCDR_Bandwidth;      // [0:3]=Read CDR bandwidth, [4:7]=Write CDR bandwidth
  ATOM_MEMORY_TIMING_FORMAT_V2 asMemTiming[5]; // Memory timing blocks, sorted from lower clock to higher clock
}ATOM_VRAM_MODULE_V6;


typedef struct _ATOM_VRAM_INFO_V2
{
  ATOM_COMMON_TABLE_HEADER sHeader;
  UCHAR                    ucNumOfVRAMModule;
  ATOM_VRAM_MODULE         aVramInfo[ATOM_MAX_NUMBER_OF_VRAM_MODULE]; // just for allocation; the real number of blocks is in ucNumOfVRAMModule
}ATOM_VRAM_INFO_V2;
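
Since the aVramInfo array is declared at its maximum size "just for allocation", a consumer is expected to honor ucNumOfVRAMModule and each entry's usModuleSize; a minimal walking sketch under that reading, using the V4 module layout defined above (the helper and callback are hypothetical):

    /* Sketch: walk a variable-sized VRAM module list. Each entry records
     * its own byte size in usModuleSize, so advance by that stride. */
    static void for_each_vram_module(const UCHAR *first_module, UCHAR count,
                                     void (*fn)(const ATOM_VRAM_MODULE_V4 *))
    {
            const UCHAR *p = first_module;
            UCHAR i;

            for (i = 0; i < count; i++) {
                    const ATOM_VRAM_MODULE_V4 *m =
                            (const ATOM_VRAM_MODULE_V4 *)p;

                    fn(m);
                    p += m->usModuleSize;   /* next entry starts right after */
            }
    }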

typedef struct _ATOM_VRAM_INFO_V3
{
  ATOM_COMMON_TABLE_HEADER sHeader;
  USHORT                   usMemAdjustTblOffset;   // offset of an ATOM_INIT_REG_BLOCK structure for memory-vendor-specific MC adjust settings
  USHORT                   usMemClkPatchTblOffset; // offset of an ATOM_INIT_REG_BLOCK structure for memory-clock-specific MC settings
  USHORT                   usRerseved;
  UCHAR                    aVID_PinsShift[9];      // 8-bit strap maximum + terminator
  UCHAR                    ucNumOfVRAMModule;
  ATOM_VRAM_MODULE         aVramInfo[ATOM_MAX_NUMBER_OF_VRAM_MODULE]; // just for allocation; the real number of blocks is in ucNumOfVRAMModule
  ATOM_INIT_REG_BLOCK      asMemPatch;             // for allocation
                                                   // ATOM_INIT_REG_BLOCK aMemAdjust;
}ATOM_VRAM_INFO_V3;

#define ATOM_VRAM_INFO_LAST          ATOM_VRAM_INFO_V3

typedef struct _ATOM_VRAM_INFO_V4
{
  ATOM_COMMON_TABLE_HEADER sHeader;
  USHORT                   usMemAdjustTblOffset;   // offset of an ATOM_INIT_REG_BLOCK structure for memory-vendor-specific MC adjust settings
  USHORT                   usMemClkPatchTblOffset; // offset of an ATOM_INIT_REG_BLOCK structure for memory-clock-specific MC settings
  USHORT                   usRerseved;
  UCHAR                    ucMemDQ7_0ByteRemap;    // DQ line byte remap: =0: memory data line BYTE0, =1: BYTE1, =2: BYTE2, =3: BYTE3
  ULONG                    ulMemDQ7_0BitRemap;     // each DQ line (7~0) uses 3 bits, i.e. DQ0=Bit[2:0], DQ1=Bit[5:3], ... DQ7=Bit[23:21]
  UCHAR                    ucReservde[4];
  UCHAR                    ucNumOfVRAMModule;
  ATOM_VRAM_MODULE_V4      aVramInfo[ATOM_MAX_NUMBER_OF_VRAM_MODULE]; // just for allocation; the real number of blocks is in ucNumOfVRAMModule
  ATOM_INIT_REG_BLOCK      asMemPatch;             // for allocation
                                                   // ATOM_INIT_REG_BLOCK aMemAdjust;
}ATOM_VRAM_INFO_V4;
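
The 3-bits-per-lane packing of ulMemDQ7_0BitRemap unpacks with a shift and a mask; a small sketch (the helper name is ours, the layout is from the comment above):

    /* Sketch: which physical bit does logical DQ lane `dq` (0..7) map to? */
    static unsigned int vram_dq_bit_remap(const ATOM_VRAM_INFO_V4 *info,
                                          unsigned int dq)
    {
            return (info->ulMemDQ7_0BitRemap >> (3 * dq)) & 0x7;
    }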

typedef struct _ATOM_VRAM_GPIO_DETECTION_INFO
{
  ATOM_COMMON_TABLE_HEADER sHeader;
  UCHAR                    aVID_PinsShift[9]; // 8-bit strap maximum + terminator
}ATOM_VRAM_GPIO_DETECTION_INFO;


typedef struct _ATOM_MEMORY_TRAINING_INFO
{
  ATOM_COMMON_TABLE_HEADER sHeader;
  UCHAR                    ucTrainingLoop;
  UCHAR                    ucReserved[3];
  ATOM_INIT_REG_BLOCK      asMemTrainingSetting;
}ATOM_MEMORY_TRAINING_INFO;


typedef struct SW_I2C_CNTL_DATA_PARAMETERS
{
  UCHAR ucControl;
  UCHAR ucData;
  UCHAR ucSatus;
  UCHAR ucTemp;
} SW_I2C_CNTL_DATA_PARAMETERS;

#define SW_I2C_CNTL_DATA_PS_ALLOCATION  SW_I2C_CNTL_DATA_PARAMETERS

typedef struct _SW_I2C_IO_DATA_PARAMETERS
{
  USHORT GPIO_Info;
  UCHAR  ucAct;
  UCHAR  ucData;
} SW_I2C_IO_DATA_PARAMETERS;

#define SW_I2C_IO_DATA_PS_ALLOCATION  SW_I2C_IO_DATA_PARAMETERS

@@ -4087,127 +4998,136 @@ typedef struct _SW_I2C_IO_DATA_PARAMETERS {
#define SW_I2C_CNTL_CLOSE     5
#define SW_I2C_CNTL_WRITE1BIT 6

//==============================VESA definition Portion===============================
#define VESA_OEM_PRODUCT_REV                 '01.00'
#define VESA_MODE_ATTRIBUTE_MODE_SUPPORT     0xBB  // refer to the VBE spec p.32; no TTY support
#define VESA_MODE_WIN_ATTRIBUTE              7
#define VESA_WIN_SIZE                        64

typedef struct _PTR_32_BIT_STRUCTURE
{
  USHORT Offset16;
  USHORT Segment16;
} PTR_32_BIT_STRUCTURE;

typedef union _PTR_32_BIT_UNION
{
  PTR_32_BIT_STRUCTURE SegmentOffset;
  ULONG                Ptr32_Bit;
} PTR_32_BIT_UNION;
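
The VBE structures below store real-mode far pointers in this union; the usual conversion to a linear address is (Segment16 << 4) + Offset16. A sketch (the helper name is ours):

    /* Sketch: convert a real-mode segment:offset far pointer, as stored in
     * PTR_32_BIT_UNION, into a 20-bit linear address. */
    static ULONG ptr32_to_linear(PTR_32_BIT_UNION p)
    {
            return ((ULONG)p.SegmentOffset.Segment16 << 4) +
                    p.SegmentOffset.Offset16;
    }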

typedef struct _VBE_1_2_INFO_BLOCK_UPDATABLE
{
  UCHAR            VbeSignature[4];
  USHORT           VbeVersion;
  PTR_32_BIT_UNION OemStringPtr;
  UCHAR            Capabilities[4];
  PTR_32_BIT_UNION VideoModePtr;
  USHORT           TotalMemory;
} VBE_1_2_INFO_BLOCK_UPDATABLE;


typedef struct _VBE_2_0_INFO_BLOCK_UPDATABLE
{
  VBE_1_2_INFO_BLOCK_UPDATABLE CommonBlock;
  USHORT           OemSoftRev;
  PTR_32_BIT_UNION OemVendorNamePtr;
  PTR_32_BIT_UNION OemProductNamePtr;
  PTR_32_BIT_UNION OemProductRevPtr;
} VBE_2_0_INFO_BLOCK_UPDATABLE;

typedef union _VBE_VERSION_UNION
{
  VBE_2_0_INFO_BLOCK_UPDATABLE VBE_2_0_InfoBlock;
  VBE_1_2_INFO_BLOCK_UPDATABLE VBE_1_2_InfoBlock;
} VBE_VERSION_UNION;

typedef struct _VBE_INFO_BLOCK
{
  VBE_VERSION_UNION UpdatableVBE_Info;
  UCHAR             Reserved[222];
  UCHAR             OemData[256];
} VBE_INFO_BLOCK;

typedef struct _VBE_FP_INFO
{
  USHORT HSize;
  USHORT VSize;
  USHORT FPType;
  UCHAR  RedBPP;
  UCHAR  GreenBPP;
  UCHAR  BlueBPP;
  UCHAR  ReservedBPP;
  ULONG  RsvdOffScrnMemSize;
  ULONG  RsvdOffScrnMEmPtr;
  UCHAR  Reserved[14];
} VBE_FP_INFO;

typedef struct _VESA_MODE_INFO_BLOCK
{
// Mandatory information for all VBE revisions
  USHORT ModeAttributes;          // dw ? ; mode attributes
  UCHAR  WinAAttributes;          // db ? ; window A attributes
  UCHAR  WinBAttributes;          // db ? ; window B attributes
  USHORT WinGranularity;          // dw ? ; window granularity
  USHORT WinSize;                 // dw ? ; window size
  USHORT WinASegment;             // dw ? ; window A start segment
  USHORT WinBSegment;             // dw ? ; window B start segment
  ULONG  WinFuncPtr;              // dd ? ; real mode pointer to window function
  USHORT BytesPerScanLine;        // dw ? ; bytes per scan line

//; Mandatory information for VBE 1.2 and above
  USHORT XResolution;             // dw ? ; horizontal resolution in pixels or characters
  USHORT YResolution;             // dw ? ; vertical resolution in pixels or characters
  UCHAR  XCharSize;               // db ? ; character cell width in pixels
  UCHAR  YCharSize;               // db ? ; character cell height in pixels
  UCHAR  NumberOfPlanes;          // db ? ; number of memory planes
  UCHAR  BitsPerPixel;            // db ? ; bits per pixel
  UCHAR  NumberOfBanks;           // db ? ; number of banks
  UCHAR  MemoryModel;             // db ? ; memory model type
  UCHAR  BankSize;                // db ? ; bank size in KB
  UCHAR  NumberOfImagePages;      // db ? ; number of images
  UCHAR  ReservedForPageFunction; // db 1 ; reserved for page function

//; Direct Color fields (required for direct/6 and YUV/7 memory models)
  UCHAR  RedMaskSize;             // db ? ; size of direct color red mask in bits
  UCHAR  RedFieldPosition;        // db ? ; bit position of lsb of red mask
  UCHAR  GreenMaskSize;           // db ? ; size of direct color green mask in bits
  UCHAR  GreenFieldPosition;      // db ? ; bit position of lsb of green mask
  UCHAR  BlueMaskSize;            // db ? ; size of direct color blue mask in bits
  UCHAR  BlueFieldPosition;       // db ? ; bit position of lsb of blue mask
  UCHAR  RsvdMaskSize;            // db ? ; size of direct color reserved mask in bits
  UCHAR  RsvdFieldPosition;       // db ? ; bit position of lsb of reserved mask
  UCHAR  DirectColorModeInfo;     // db ? ; direct color mode attributes

//; Mandatory information for VBE 2.0 and above
  ULONG  PhysBasePtr;             // dd ? ; physical address of the flat memory frame buffer
  ULONG  Reserved_1;              // dd 0 ; reserved - always set to 0
  USHORT Reserved_2;              // dw 0 ; reserved - always set to 0

//; Mandatory information for VBE 3.0 and above
  USHORT LinBytesPerScanLine;     // dw ? ; bytes per scan line for linear modes
  UCHAR  BnkNumberOfImagePages;   // db ? ; number of images for banked modes
  UCHAR  LinNumberOfImagPages;    // db ? ; number of images for linear modes
  UCHAR  LinRedMaskSize;          // db ? ; size of direct color red mask (linear modes)
  UCHAR  LinRedFieldPosition;     // db ? ; bit position of lsb of red mask (linear modes)
  UCHAR  LinGreenMaskSize;        // db ? ; size of direct color green mask (linear modes)
  UCHAR  LinGreenFieldPosition;   // db ? ; bit position of lsb of green mask (linear modes)
  UCHAR  LinBlueMaskSize;         // db ? ; size of direct color blue mask (linear modes)
  UCHAR  LinBlueFieldPosition;    // db ? ; bit position of lsb of blue mask (linear modes)
  UCHAR  LinRsvdMaskSize;         // db ? ; size of direct color reserved mask (linear modes)
  UCHAR  LinRsvdFieldPosition;    // db ? ; bit position of lsb of reserved mask (linear modes)
  ULONG  MaxPixelClock;           // dd ? ; maximum pixel clock (in Hz) for graphics mode
  UCHAR  Reserved;                // db 190 dup (0)
} VESA_MODE_INFO_BLOCK;
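
For VBE 3.0 linear modes, a pixel's byte offset in the flat frame buffer follows from LinBytesPerScanLine and BitsPerPixel; a sketch assuming a packed-pixel mode with a whole number of bytes per pixel (the helper is ours):

    /* Sketch: byte offset of pixel (x, y) in a VBE 3.0 linear frame buffer,
     * assuming BitsPerPixel is a multiple of 8. Add to PhysBasePtr. */
    static ULONG vesa_linear_pixel_offset(const VESA_MODE_INFO_BLOCK *mi,
                                          USHORT x, USHORT y)
    {
            return (ULONG)y * mi->LinBytesPerScanLine +
                    (ULONG)x * (mi->BitsPerPixel / 8);
    }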

// BIOS function CALLS
#define ATOM_BIOS_EXTENDED_FUNCTION_CODE        0xA0  // ATI Extended Function code
#define ATOM_BIOS_FUNCTION_COP_MODE             0x00
#define ATOM_BIOS_FUNCTION_SHORT_QUERY1         0x04
#define ATOM_BIOS_FUNCTION_SHORT_QUERY2         0x05
#define ATOM_BIOS_FUNCTION_SHORT_QUERY3         0x06
#define ATOM_BIOS_FUNCTION_GET_DDC              0x0B
#define ATOM_BIOS_FUNCTION_ASIC_DSTATE          0x0E
#define ATOM_BIOS_FUNCTION_DEBUG_PLAY           0x0F
#define ATOM_BIOS_FUNCTION_STV_STD              0x16
@@ -4217,100 +5137,135 @@ typedef struct _VESA_MODE_INFO_BLOCK {
#define ATOM_BIOS_FUNCTION_PANEL_CONTROL        0x82
#define ATOM_BIOS_FUNCTION_OLD_DEVICE_DET       0x83
#define ATOM_BIOS_FUNCTION_OLD_DEVICE_SWITCH    0x84
#define ATOM_BIOS_FUNCTION_HW_ICON              0x8A
#define ATOM_BIOS_FUNCTION_SET_CMOS             0x8B
#define SUB_FUNCTION_UPDATE_DISPLAY_INFO        0x8000  // Sub function 80
#define SUB_FUNCTION_UPDATE_EXPANSION_INFO      0x8100  // Sub function 81

#define ATOM_BIOS_FUNCTION_DISPLAY_INFO         0x8D
#define ATOM_BIOS_FUNCTION_DEVICE_ON_OFF        0x8E
#define ATOM_BIOS_FUNCTION_VIDEO_STATE          0x8F
#define ATOM_SUB_FUNCTION_GET_CRITICAL_STATE    0x0300  // Sub function 03
#define ATOM_SUB_FUNCTION_GET_LIDSTATE          0x0700  // Sub function 7
#define ATOM_SUB_FUNCTION_THERMAL_STATE_NOTICE  0x1400  // Notify the caller of the current thermal state
#define ATOM_SUB_FUNCTION_CRITICAL_STATE_NOTICE 0x8300  // Notify the caller of the current critical state
#define ATOM_SUB_FUNCTION_SET_LIDSTATE          0x8500  // Sub function 85
#define ATOM_SUB_FUNCTION_GET_REQ_DISPLAY_FROM_SBIOS_MODE 0x8900  // Sub function 89
#define ATOM_SUB_FUNCTION_INFORM_ADC_SUPPORT    0x9400  // Notify the caller that ADC is supported


#define ATOM_BIOS_FUNCTION_VESA_DPMS            0x4F10  // Set DPMS
#define ATOM_SUB_FUNCTION_SET_DPMS              0x0001  // BL: Sub function 01
#define ATOM_SUB_FUNCTION_GET_DPMS              0x0002  // BL: Sub function 02
#define ATOM_PARAMETER_VESA_DPMS_ON             0x0000  // BH parameter for DPMS ON
#define ATOM_PARAMETER_VESA_DPMS_STANDBY        0x0100  // BH parameter for DPMS STANDBY
#define ATOM_PARAMETER_VESA_DPMS_SUSPEND        0x0200  // BH parameter for DPMS SUSPEND
#define ATOM_PARAMETER_VESA_DPMS_OFF            0x0400  // BH parameter for DPMS OFF
#define ATOM_PARAMETER_VESA_DPMS_REDUCE_ON      0x0800  // BH parameter for DPMS REDUCE ON (NOT SUPPORTED)

#define ATOM_BIOS_RETURN_CODE_MASK              0x0000FF00L
#define ATOM_BIOS_REG_HIGH_MASK                 0x0000FF00L
#define ATOM_BIOS_REG_LOW_MASK                  0x000000FFL

// structures used by VBIOS only

//DispOutInfoTable
typedef struct _ASIC_TRANSMITTER_INFO
{
  USHORT usTransmitterObjId;
  USHORT usSupportDevice;
  UCHAR  ucTransmitterCmdTblId;
  UCHAR  ucConfig;
  UCHAR  ucEncoderID;          // available 1st encoder (default)
  UCHAR  ucOptionEncoderID;    // available 2nd encoder (optional)
  UCHAR  uc2ndEncoderID;
  UCHAR  ucReserved;
}ASIC_TRANSMITTER_INFO;

typedef struct _ASIC_ENCODER_INFO
{
  UCHAR  ucEncoderID;
  UCHAR  ucEncoderConfig;
  USHORT usEncoderCmdTblId;
}ASIC_ENCODER_INFO;

typedef struct _ATOM_DISP_OUT_INFO
{
  ATOM_COMMON_TABLE_HEADER sHeader;
  USHORT                   ptrTransmitterInfo;
  USHORT                   ptrEncoderInfo;
  ASIC_TRANSMITTER_INFO    asTransmitterInfo[1];
  ASIC_ENCODER_INFO        asEncoderInfo[1];
}ATOM_DISP_OUT_INFO;

typedef struct _ATOM_DISP_OUT_INFO_V2
{
  ATOM_COMMON_TABLE_HEADER sHeader;
  USHORT                   ptrTransmitterInfo;
  USHORT                   ptrEncoderInfo;
  USHORT                   ptrMainCallParserFar; // direct address of the main parser call in the VBIOS binary
  ASIC_TRANSMITTER_INFO    asTransmitterInfo[1];
  ASIC_ENCODER_INFO        asEncoderInfo[1];
}ATOM_DISP_OUT_INFO_V2;
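
A sketch of how a consumer might locate the variable-length arrays; it assumes — the header does not state this — that ptrTransmitterInfo and ptrEncoderInfo are byte offsets from the start of the table, so treat the helper as hypothetical:

    /* Sketch: resolve the transmitter array inside a DispOutInfo table,
     * assuming the ptr* members are offsets from the table start. */
    static const ASIC_TRANSMITTER_INFO *
    disp_out_transmitter_info(const ATOM_DISP_OUT_INFO *tbl)
    {
            return (const ASIC_TRANSMITTER_INFO *)
                    ((const UCHAR *)tbl + tbl->ptrTransmitterInfo);
    }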

// DispDevicePriorityInfo
typedef struct _ATOM_DISPLAY_DEVICE_PRIORITY_INFO
{
  ATOM_COMMON_TABLE_HEADER sHeader;
  USHORT                   asDevicePriority[16];
}ATOM_DISPLAY_DEVICE_PRIORITY_INFO;

//ProcessAuxChannelTransactionTable
typedef struct _PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS
{
  USHORT lpAuxRequest;
  USHORT lpDataOut;
  UCHAR  ucChannelID;
  union
  {
    UCHAR ucReplyStatus;
    UCHAR ucDelay;
  };
  UCHAR  ucDataOutLen;
  UCHAR  ucReserved;
}PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS;

//ProcessAuxChannelTransactionTable
typedef struct _PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS_V2
{
  USHORT lpAuxRequest;
  USHORT lpDataOut;
  UCHAR  ucChannelID;
  union
  {
    UCHAR ucReplyStatus;
    UCHAR ucDelay;
  };
  UCHAR  ucDataOutLen;
  UCHAR  ucHPD_ID;    // =0: HPD1, =1: HPD2, =2: HPD3, =3: HPD4, =4: HPD5, =5: HPD6
}PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS_V2;

#define PROCESS_AUX_CHANNEL_TRANSACTION_PS_ALLOCATION  PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS

//GetSinkType

typedef struct _DP_ENCODER_SERVICE_PARAMETERS
{
  USHORT ucLinkClock;
  union
  {
    UCHAR ucConfig;  // for the DP training command
    UCHAR ucI2cId;   // used for the GET_SINK_TYPE command
  };
  UCHAR ucAction;
  UCHAR ucStatus;
  UCHAR ucLaneNum;
  UCHAR ucReserved[2];
}DP_ENCODER_SERVICE_PARAMETERS;

// ucAction
#define ATOM_DP_ACTION_GET_SINK_TYPE            0x01
/* obsolete */
#define ATOM_DP_ACTION_TRAINING_START           0x02
#define ATOM_DP_ACTION_TRAINING_COMPLETE        0x03
#define ATOM_DP_ACTION_TRAINING_PATTERN_SEL     0x04
@@ -4318,7 +5273,7 @@ typedef struct _DP_ENCODER_SERVICE_PARAMETERS {
#define ATOM_DP_ACTION_GET_VSWING_PREEMP        0x06
#define ATOM_DP_ACTION_BLANKING                 0x07

// ucConfig
#define ATOM_DP_CONFIG_ENCODER_SEL_MASK         0x03
#define ATOM_DP_CONFIG_DIG1_ENCODER             0x00
#define ATOM_DP_CONFIG_DIG2_ENCODER             0x01
@@ -4326,14 +5281,14 @@ typedef struct _DP_ENCODER_SERVICE_PARAMETERS {
#define ATOM_DP_CONFIG_LINK_SEL_MASK            0x04
#define ATOM_DP_CONFIG_LINK_A                   0x00
#define ATOM_DP_CONFIG_LINK_B                   0x04
/* /obsolete */
#define DP_ENCODER_SERVICE_PS_ALLOCATION        WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS

// DP_TRAINING_TABLE
#define DPCD_SET_LINKRATE_LANENUM_PATTERN1_TBL_ADDR   ATOM_DP_TRAINING_TBL_ADDR
#define DPCD_SET_SS_CNTL_TBL_ADDR                     (ATOM_DP_TRAINING_TBL_ADDR + 8)
#define DPCD_SET_LANE_VSWING_PREEMP_TBL_ADDR          (ATOM_DP_TRAINING_TBL_ADDR + 16)
#define DPCD_SET_TRAINING_PATTERN0_TBL_ADDR           (ATOM_DP_TRAINING_TBL_ADDR + 24)
#define DPCD_SET_TRAINING_PATTERN2_TBL_ADDR           (ATOM_DP_TRAINING_TBL_ADDR + 32)
#define DPCD_GET_LINKRATE_LANENUM_SS_TBL_ADDR         (ATOM_DP_TRAINING_TBL_ADDR + 40)
#define DPCD_GET_LANE_STATUS_ADJUST_TBL_ADDR          (ATOM_DP_TRAINING_TBL_ADDR + 48)
@@ -4341,183 +5296,241 @@ typedef struct _DP_ENCODER_SERVICE_PARAMETERS {
#define DP_I2C_AUX_DDC_WRITE_TBL_ADDR                 (ATOM_DP_TRAINING_TBL_ADDR + 64)
#define DP_I2C_AUX_DDC_READ_START_TBL_ADDR            (ATOM_DP_TRAINING_TBL_ADDR + 72)
#define DP_I2C_AUX_DDC_READ_TBL_ADDR                  (ATOM_DP_TRAINING_TBL_ADDR + 76)
#define DP_I2C_AUX_DDC_WRITE_END_TBL_ADDR             (ATOM_DP_TRAINING_TBL_ADDR + 80)
#define DP_I2C_AUX_DDC_READ_END_TBL_ADDR              (ATOM_DP_TRAINING_TBL_ADDR + 84)

typedef struct _PROCESS_I2C_CHANNEL_TRANSACTION_PARAMETERS
{
  UCHAR ucI2CSpeed;
  union
  {
    UCHAR ucRegIndex;
    UCHAR ucStatus;
  };
  USHORT lpI2CDataOut;
  UCHAR  ucFlag;
  UCHAR  ucTransBytes;
  UCHAR  ucSlaveAddr;
  UCHAR  ucLineNumber;
}PROCESS_I2C_CHANNEL_TRANSACTION_PARAMETERS;

#define PROCESS_I2C_CHANNEL_TRANSACTION_PS_ALLOCATION  PROCESS_I2C_CHANNEL_TRANSACTION_PARAMETERS

//ucFlag
#define HW_I2C_WRITE        1
#define HW_I2C_READ         0
#define I2C_2BYTE_ADDR      0x02

typedef struct _SET_HWBLOCK_INSTANCE_PARAMETER_V2
{
  UCHAR ucHWBlkInst;           // HW block instance: 0, 1, 2, ...
  UCHAR ucReserved[3];
}SET_HWBLOCK_INSTANCE_PARAMETER_V2;

#define HWBLKINST_INSTANCE_MASK 0x07
#define HWBLKINST_HWBLK_MASK    0xF0
#define HWBLKINST_HWBLK_SHIFT   0x04

//ucHWBlock
#define SELECT_DISP_ENGINE        0
#define SELECT_DISP_PLL           1
#define SELECT_DCIO_UNIPHY_LINK0  2
#define SELECT_DCIO_UNIPHY_LINK1  3
#define SELECT_DCIO_IMPCAL        4
#define SELECT_DCIO_DIG           6
#define SELECT_CRTC_PIXEL_RATE    7

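Judging by the mask and shift values, ucHWBlkInst packs a block selector in bits [7:4] and an instance number in the low bits; a sketch under that assumption (the helper is ours):

    /* Sketch: pack a hardware-block selector and instance index into
     * ucHWBlkInst, assuming block in bits [7:4] and instance in [2:0]. */
    static UCHAR hwblk_instance(UCHAR block, UCHAR instance)
    {
            return (UCHAR)(((block << HWBLKINST_HWBLK_SHIFT) &
                            HWBLKINST_HWBLK_MASK) |
                           (instance & HWBLKINST_INSTANCE_MASK));
    }

    /* e.g. hwblk_instance(SELECT_DCIO_UNIPHY_LINK0, 1) */
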
/****************************************************************************/
// Portion VI: Definitions for VBIOS MC scratch registers that the driver uses
/****************************************************************************/

#define MC_MISC0__MEMORY_TYPE_MASK    0xF0000000
#define MC_MISC0__MEMORY_TYPE__GDDR1  0x10000000
#define MC_MISC0__MEMORY_TYPE__DDR2   0x20000000
#define MC_MISC0__MEMORY_TYPE__GDDR3  0x30000000
#define MC_MISC0__MEMORY_TYPE__GDDR4  0x40000000
#define MC_MISC0__MEMORY_TYPE__GDDR5  0x50000000
#define MC_MISC0__MEMORY_TYPE__DDR3   0xB0000000

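A sketch of decoding the memory type from an MC_MISC0 scratch value with these masks (the helper and the name strings are ours):

    /* Sketch: map the MC_MISC0 memory-type field to a printable name. */
    static const char *mc_misc0_memory_type_name(ULONG mc_misc0)
    {
            switch (mc_misc0 & MC_MISC0__MEMORY_TYPE_MASK) {
            case MC_MISC0__MEMORY_TYPE__GDDR1: return "GDDR1";
            case MC_MISC0__MEMORY_TYPE__DDR2:  return "DDR2";
            case MC_MISC0__MEMORY_TYPE__GDDR3: return "GDDR3";
            case MC_MISC0__MEMORY_TYPE__GDDR4: return "GDDR4";
            case MC_MISC0__MEMORY_TYPE__GDDR5: return "GDDR5";
            case MC_MISC0__MEMORY_TYPE__DDR3:  return "DDR3";
            default:                           return "unknown";
            }
    }
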
/****************************************************************************/
// Portion VI: Definitions being obsoleted
/****************************************************************************/

//==========================================================================================
// Remove the definitions below when the driver is ready!
typedef struct _ATOM_DAC_INFO
{
  ATOM_COMMON_TABLE_HEADER sHeader;
  USHORT                   usMaxFrequency; // in 10 kHz units
  USHORT                   usReserved;
}ATOM_DAC_INFO;


typedef struct _COMPASSIONATE_DATA
{
  ATOM_COMMON_TABLE_HEADER sHeader;

  //============================== DAC1 portion
  UCHAR  ucDAC1_BG_Adjustment;
  UCHAR  ucDAC1_DAC_Adjustment;
  USHORT usDAC1_FORCE_Data;
  //============================== DAC2 portion
  UCHAR  ucDAC2_CRT2_BG_Adjustment;
  UCHAR  ucDAC2_CRT2_DAC_Adjustment;
  USHORT usDAC2_CRT2_FORCE_Data;
  USHORT usDAC2_CRT2_MUX_RegisterIndex;
  UCHAR  ucDAC2_CRT2_MUX_RegisterInfo; // Bit[4:0]=bit position; Bit[7]=1: active high, =0: active low
  UCHAR  ucDAC2_NTSC_BG_Adjustment;
  UCHAR  ucDAC2_NTSC_DAC_Adjustment;
  USHORT usDAC2_TV1_FORCE_Data;
  USHORT usDAC2_TV1_MUX_RegisterIndex;
  UCHAR  ucDAC2_TV1_MUX_RegisterInfo;  // Bit[4:0]=bit position; Bit[7]=1: active high, =0: active low
  UCHAR  ucDAC2_CV_BG_Adjustment;
  UCHAR  ucDAC2_CV_DAC_Adjustment;
  USHORT usDAC2_CV_FORCE_Data;
  USHORT usDAC2_CV_MUX_RegisterIndex;
  UCHAR  ucDAC2_CV_MUX_RegisterInfo;   // Bit[4:0]=bit position; Bit[7]=1: active high, =0: active low
  UCHAR  ucDAC2_PAL_BG_Adjustment;
  UCHAR  ucDAC2_PAL_DAC_Adjustment;
  USHORT usDAC2_TV2_FORCE_Data;
}COMPASSIONATE_DATA;

/****************************Supported Device Info Table Definitions**********************/
// ucConnectInfo:
//   [7:4] - connector type
//     = 1   - VGA connector
//     = 2   - DVI-I
//     = 3   - DVI-D
//     = 4   - DVI-A
//     = 5   - SVIDEO
//     = 6   - COMPOSITE
//     = 7   - LVDS
//     = 8   - DIGITAL LINK
//     = 9   - SCART
//     = 0xA - HDMI_type A
//     = 0xB - HDMI_type B
//     = 0xE - Special case1 (DVI+DIN)
//     Others = TBD
//   [3:0] - DAC associated
//     = 0 - no DAC
//     = 1 - DACA
//     = 2 - DACB
//     = 3 - external DAC
//     Others = TBD
//

typedef struct _ATOM_CONNECTOR_INFO
{
#if ATOM_BIG_ENDIAN
  UCHAR bfConnectorType:4;
  UCHAR bfAssociatedDAC:4;
#else
  UCHAR bfAssociatedDAC:4;
  UCHAR bfConnectorType:4;
#endif
}ATOM_CONNECTOR_INFO;

typedef union _ATOM_CONNECTOR_INFO_ACCESS
{
  ATOM_CONNECTOR_INFO sbfAccess;
  UCHAR               ucAccess;
}ATOM_CONNECTOR_INFO_ACCESS;

typedef struct _ATOM_CONNECTOR_INFO_I2C
{
  ATOM_CONNECTOR_INFO_ACCESS sucConnectorInfo;
  ATOM_I2C_ID_CONFIG_ACCESS  sucI2cId;
}ATOM_CONNECTOR_INFO_I2C;


typedef struct _ATOM_SUPPORTED_DEVICES_INFO
{
  ATOM_COMMON_TABLE_HEADER sHeader;
  USHORT                   usDeviceSupport;
  ATOM_CONNECTOR_INFO_I2C  asConnInfo[ATOM_MAX_SUPPORTED_DEVICE_INFO];
}ATOM_SUPPORTED_DEVICES_INFO;

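The ATOM_CONNECTOR_INFO_ACCESS union exists so that one raw byte read from the table can be viewed through the endian-aware bitfields; a sketch (the helper is ours):

    /* Sketch: view a raw ucConnectInfo byte through the bitfield union to
     * pull out the connector type ([7:4]) and associated DAC ([3:0]). */
    static void decode_connect_info(UCHAR raw, UCHAR *type, UCHAR *dac)
    {
            ATOM_CONNECTOR_INFO_ACCESS a;

            a.ucAccess = raw;
            *type = a.sbfAccess.bfConnectorType;
            *dac  = a.sbfAccess.bfAssociatedDAC;
    }
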
#define NO_INT_SRC_MAPPED       0xFF

typedef struct _ATOM_CONNECTOR_INC_SRC_BITMAP
{
  UCHAR ucIntSrcBitmap;
}ATOM_CONNECTOR_INC_SRC_BITMAP;

typedef struct _ATOM_SUPPORTED_DEVICES_INFO_2
{
  ATOM_COMMON_TABLE_HEADER      sHeader;
  USHORT                        usDeviceSupport;
  ATOM_CONNECTOR_INFO_I2C       asConnInfo[ATOM_MAX_SUPPORTED_DEVICE_INFO_2];
  ATOM_CONNECTOR_INC_SRC_BITMAP asIntSrcInfo[ATOM_MAX_SUPPORTED_DEVICE_INFO_2];
}ATOM_SUPPORTED_DEVICES_INFO_2;

typedef struct _ATOM_SUPPORTED_DEVICES_INFO_2d1
{
  ATOM_COMMON_TABLE_HEADER      sHeader;
  USHORT                        usDeviceSupport;
  ATOM_CONNECTOR_INFO_I2C       asConnInfo[ATOM_MAX_SUPPORTED_DEVICE];
  ATOM_CONNECTOR_INC_SRC_BITMAP asIntSrcInfo[ATOM_MAX_SUPPORTED_DEVICE];
}ATOM_SUPPORTED_DEVICES_INFO_2d1;

#define ATOM_SUPPORTED_DEVICES_INFO_LAST ATOM_SUPPORTED_DEVICES_INFO_2d1


typedef struct _ATOM_MISC_CONTROL_INFO
{
  USHORT usFrequency;
  UCHAR  ucPLL_ChargePump;   // PLL charge-pump gain control
  UCHAR  ucPLL_DutyCycle;    // PLL duty cycle control
  UCHAR  ucPLL_VCO_Gain;     // PLL VCO gain control
  UCHAR  ucPLL_VoltageSwing; // PLL driver voltage swing control
}ATOM_MISC_CONTROL_INFO;

#define ATOM_MAX_MISC_INFO       4

typedef struct _ATOM_TMDS_INFO
{
  ATOM_COMMON_TABLE_HEADER sHeader;
  USHORT                   usMaxFrequency; // in 10 kHz units
  ATOM_MISC_CONTROL_INFO   asMiscInfo[ATOM_MAX_MISC_INFO];
}ATOM_TMDS_INFO;
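
Each asMiscInfo entry pairs a frequency with a set of PLL controls; a plausible lookup — assuming, which the header does not state, that entries are sorted by ascending usFrequency and the first entry at or above the pixel clock applies (the helper is ours):

    /* Sketch: pick the PLL control entry for a given clock (in 10 kHz
     * units), assuming asMiscInfo is sorted by ascending usFrequency. */
    static const ATOM_MISC_CONTROL_INFO *
    tmds_misc_for_clock(const ATOM_TMDS_INFO *tmds, USHORT clock_10khz)
    {
            int i;

            for (i = 0; i < ATOM_MAX_MISC_INFO; i++) {
                    if (clock_10khz <= tmds->asMiscInfo[i].usFrequency)
                            return &tmds->asMiscInfo[i];
            }
            return NULL;    /* above usMaxFrequency */
    }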


typedef struct _ATOM_ENCODER_ANALOG_ATTRIBUTE
{
  UCHAR ucTVStandard;  // same as the TV standards defined above
  UCHAR ucPadding[1];
}ATOM_ENCODER_ANALOG_ATTRIBUTE;

typedef struct _ATOM_ENCODER_DIGITAL_ATTRIBUTE
{
  UCHAR ucAttribute;   // same as the other digital encoder attributes defined above
  UCHAR ucPadding[1];
}ATOM_ENCODER_DIGITAL_ATTRIBUTE;

typedef union _ATOM_ENCODER_ATTRIBUTE
{
  ATOM_ENCODER_ANALOG_ATTRIBUTE  sAlgAttrib;
  ATOM_ENCODER_DIGITAL_ATTRIBUTE sDigAttrib;
}ATOM_ENCODER_ATTRIBUTE;

typedef struct _DVO_ENCODER_CONTROL_PARAMETERS
{
  USHORT usPixelClock;
  USHORT usEncoderID;
  UCHAR  ucDeviceType;  // use the ATOM_DEVICE_xxx1_Index to indicate the device type only
  UCHAR  ucAction;      // ATOM_ENABLE/ATOM_DISABLE/ATOM_HPD_INIT
  ATOM_ENCODER_ATTRIBUTE usDevAttr;
}DVO_ENCODER_CONTROL_PARAMETERS;

typedef struct _DVO_ENCODER_CONTROL_PS_ALLOCATION
{
  DVO_ENCODER_CONTROL_PARAMETERS           sDVOEncoder;
  WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved; // the caller doesn't need to init this portion
}DVO_ENCODER_CONTROL_PS_ALLOCATION;

#define ATOM_XTMDS_ASIC_SI164_ID            1
#define ATOM_XTMDS_ASIC_SI178_ID            2
@@ -4526,27 +5539,30 @@ typedef struct _DVO_ENCODER_CONTROL_PS_ALLOCATION {
#define ATOM_XTMDS_SUPPORTED_DUALLINK       0x00000002
#define ATOM_XTMDS_MVPU_FPGA                0x00000004

typedef struct _ATOM_XTMDS_INFO
{
  ATOM_COMMON_TABLE_HEADER  sHeader;
  USHORT                    usSingleLinkMaxFrequency;
  ATOM_I2C_ID_CONFIG_ACCESS sucI2cId;         // points to the ID on which I2C is used to control the external chip
  UCHAR                     ucXtransimitterID;
  UCHAR                     ucSupportedLink;  // bit field: bit0=1, single link supported; bit1=1, dual link supported
  UCHAR                     ucSequnceAlterID; // Even with the same external TMDS asic, the programming sequence may alter
                                              // due to design. This ID alerts the driver that the sequence is not "standard"!
  UCHAR                     ucMasterAddress;  // address used to control the master xTMDS chip
  UCHAR                     ucSlaveAddress;   // address used to control the slave xTMDS chip
}ATOM_XTMDS_INFO;

typedef struct _DFP_DPMS_STATUS_CHANGE_PARAMETERS
{
  UCHAR ucEnable;     // ATOM_ENABLE=On or ATOM_DISABLE=Off
  UCHAR ucDevice;     // ATOM_DEVICE_DFP1_INDEX....
  UCHAR ucPadding[2];
}DFP_DPMS_STATUS_CHANGE_PARAMETERS;

/****************************Legacy Power Play Table Definitions **********************/

// Definitions for ulPowerPlayMiscInfo
#define ATOM_PM_MISCINFO_SPLIT_CLOCK                   0x00000000L
#define ATOM_PM_MISCINFO_USING_MCLK_SRC                0x00000001L
#define ATOM_PM_MISCINFO_USING_SCLK_SRC                0x00000002L
@@ -4558,8 +5574,8 @@ typedef struct _DFP_DPMS_STATUS_CHANGE_PARAMETERS {

#define ATOM_PM_MISCINFO_ENGINE_CLOCK_CONTRL_EN        0x00000020L
#define ATOM_PM_MISCINFO_MEMORY_CLOCK_CONTRL_EN        0x00000040L
#define ATOM_PM_MISCINFO_PROGRAM_VOLTAGE               0x00000080L  // When this bit is set, ucVoltageDropIndex is not an index into the GPIO pin table but a voltage ID that SW needs to program

#define ATOM_PM_MISCINFO_ASIC_REDUCED_SPEED_SCLK_EN    0x00000100L
#define ATOM_PM_MISCINFO_ASIC_DYNAMIC_VOLTAGE_EN       0x00000200L
#define ATOM_PM_MISCINFO_ASIC_SLEEP_MODE_EN            0x00000400L
@@ -4569,22 +5585,22 @@ typedef struct _DFP_DPMS_STATUS_CHANGE_PARAMETERS {
#define ATOM_PM_MISCINFO_LOW_LCD_REFRESH_RATE          0x00004000L

#define ATOM_PM_MISCINFO_DRIVER_DEFAULT_MODE           0x00008000L
#define ATOM_PM_MISCINFO_OVER_CLOCK_MODE               0x00010000L
#define ATOM_PM_MISCINFO_OVER_DRIVE_MODE               0x00020000L
#define ATOM_PM_MISCINFO_POWER_SAVING_MODE             0x00040000L
#define ATOM_PM_MISCINFO_THERMAL_DIODE_MODE            0x00080000L

#define ATOM_PM_MISCINFO_FRAME_MODULATION_MASK         0x00300000L  // 0 - FM disabled, 1 - 2-level FM, 2 - 4-level FM, 3 - reserved
#define ATOM_PM_MISCINFO_FRAME_MODULATION_SHIFT        20

#define ATOM_PM_MISCINFO_DYN_CLK_3D_IDLE               0x00400000L
#define ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_2    0x00800000L
#define ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_4    0x01000000L
#define ATOM_PM_MISCINFO_DYNAMIC_HDP_BLOCK_EN          0x02000000L  // When set, dynamic
#define ATOM_PM_MISCINFO_DYNAMIC_MC_HOST_BLOCK_EN      0x04000000L  // When set, dynamic
#define ATOM_PM_MISCINFO_3D_ACCELERATION_EN            0x08000000L  // When set, this mode is for accelerated 3D

#define ATOM_PM_MISCINFO_POWERPLAY_SETTINGS_GROUP_MASK 0x70000000L  // 1 - Optimal Battery Life Group, 2 - High Battery, 3 - Balanced, 4 - High Performance, 5 - Optimal Performance (default state with default clocks)
#define ATOM_PM_MISCINFO_POWERPLAY_SETTINGS_GROUP_SHIFT 28
#define ATOM_PM_MISCINFO_ENABLE_BACK_BIAS              0x80000000L

@@ -4594,55 +5610,59 @@ typedef struct _DFP_DPMS_STATUS_CHANGE_PARAMETERS {
 #define ATOM_PM_MISCINFO2_FS3D_OVERDRIVE_INFO            0x00000008L
 #define ATOM_PM_MISCINFO2_FORCEDLOWPWR_MODE              0x00000010L
 #define ATOM_PM_MISCINFO2_VDDCI_DYNAMIC_VOLTAGE_EN       0x00000020L
-#define ATOM_PM_MISCINFO2_VIDEO_PLAYBACK_CAPABLE         0x00000040L	/* If this bit is set in multi-pp mode, then driver will pack up one with the minior power consumption. */
-									/* If it's not set in any pp mode, driver will use its default logic to pick a pp mode in video playback */
+#define ATOM_PM_MISCINFO2_VIDEO_PLAYBACK_CAPABLE         0x00000040L	//If this bit is set in multi-pp mode, then driver will pack up one with the minior power consumption.
+                                                                    	//If it's not set in any pp mode, driver will use its default logic to pick a pp mode in video playback
 #define ATOM_PM_MISCINFO2_NOT_VALID_ON_DC                0x00000080L
 #define ATOM_PM_MISCINFO2_STUTTER_MODE_EN                0x00000100L
 #define ATOM_PM_MISCINFO2_UVD_SUPPORT_MODE               0x00000200L
 
-/* ucTableFormatRevision=1 */
-/* ucTableContentRevision=1 */
-typedef struct _ATOM_POWERMODE_INFO {
-	ULONG ulMiscInfo;	/* The power level should be arranged in ascending order */
-	ULONG ulReserved1;	/* must set to 0 */
-	ULONG ulReserved2;	/* must set to 0 */
-	USHORT usEngineClock;
-	USHORT usMemoryClock;
-	UCHAR ucVoltageDropIndex;	/* index to GPIO table */
-	UCHAR ucSelectedPanel_RefreshRate;	/* panel refresh rate */
-	UCHAR ucMinTemperature;
-	UCHAR ucMaxTemperature;
-	UCHAR ucNumPciELanes;	/* number of PCIE lanes */
-} ATOM_POWERMODE_INFO;
-
-/* ucTableFormatRevision=2 */
-/* ucTableContentRevision=1 */
-typedef struct _ATOM_POWERMODE_INFO_V2 {
-	ULONG ulMiscInfo;	/* The power level should be arranged in ascending order */
-	ULONG ulMiscInfo2;
-	ULONG ulEngineClock;
-	ULONG ulMemoryClock;
-	UCHAR ucVoltageDropIndex;	/* index to GPIO table */
-	UCHAR ucSelectedPanel_RefreshRate;	/* panel refresh rate */
-	UCHAR ucMinTemperature;
-	UCHAR ucMaxTemperature;
-	UCHAR ucNumPciELanes;	/* number of PCIE lanes */
-} ATOM_POWERMODE_INFO_V2;
-
-/* ucTableFormatRevision=2 */
-/* ucTableContentRevision=2 */
-typedef struct _ATOM_POWERMODE_INFO_V3 {
-	ULONG ulMiscInfo;	/* The power level should be arranged in ascending order */
-	ULONG ulMiscInfo2;
-	ULONG ulEngineClock;
-	ULONG ulMemoryClock;
-	UCHAR ucVoltageDropIndex;	/* index to Core (VDDC) votage table */
-	UCHAR ucSelectedPanel_RefreshRate;	/* panel refresh rate */
-	UCHAR ucMinTemperature;
-	UCHAR ucMaxTemperature;
-	UCHAR ucNumPciELanes;	/* number of PCIE lanes */
-	UCHAR ucVDDCI_VoltageDropIndex;	/* index to VDDCI votage table */
-} ATOM_POWERMODE_INFO_V3;
+//ucTableFormatRevision=1
+//ucTableContentRevision=1
+typedef struct _ATOM_POWERMODE_INFO
+{
+  ULONG     ulMiscInfo;                 //The power level should be arranged in ascending order
+  ULONG     ulReserved1;                // must set to 0
+  ULONG     ulReserved2;                // must set to 0
+  USHORT    usEngineClock;
+  USHORT    usMemoryClock;
+  UCHAR     ucVoltageDropIndex;         // index to GPIO table
+  UCHAR     ucSelectedPanel_RefreshRate;// panel refresh rate
+  UCHAR     ucMinTemperature;
+  UCHAR     ucMaxTemperature;
+  UCHAR     ucNumPciELanes;             // number of PCIE lanes
+}ATOM_POWERMODE_INFO;
+
+//ucTableFormatRevision=2
+//ucTableContentRevision=1
+typedef struct _ATOM_POWERMODE_INFO_V2
+{
+  ULONG     ulMiscInfo;                 //The power level should be arranged in ascending order
+  ULONG     ulMiscInfo2;
+  ULONG     ulEngineClock;
+  ULONG     ulMemoryClock;
+  UCHAR     ucVoltageDropIndex;         // index to GPIO table
+  UCHAR     ucSelectedPanel_RefreshRate;// panel refresh rate
+  UCHAR     ucMinTemperature;
+  UCHAR     ucMaxTemperature;
+  UCHAR     ucNumPciELanes;             // number of PCIE lanes
+}ATOM_POWERMODE_INFO_V2;
+
+//ucTableFormatRevision=2
+//ucTableContentRevision=2
+typedef struct _ATOM_POWERMODE_INFO_V3
+{
+  ULONG     ulMiscInfo;                 //The power level should be arranged in ascending order
+  ULONG     ulMiscInfo2;
+  ULONG     ulEngineClock;
+  ULONG     ulMemoryClock;
+  UCHAR     ucVoltageDropIndex;         // index to Core (VDDC) votage table
+  UCHAR     ucSelectedPanel_RefreshRate;// panel refresh rate
+  UCHAR     ucMinTemperature;
+  UCHAR     ucMaxTemperature;
+  UCHAR     ucNumPciELanes;             // number of PCIE lanes
+  UCHAR     ucVDDCI_VoltageDropIndex;   // index to VDDCI votage table
+}ATOM_POWERMODE_INFO_V3;
+
 
 #define ATOM_MAX_NUMBEROF_POWER_BLOCK  8
 
@@ -4655,40 +5675,44 @@ typedef struct _ATOM_POWERMODE_INFO_V3 {
 #define ATOM_PP_OVERDRIVE_THERMALCONTROLLER_MUA6649     0x04
 #define ATOM_PP_OVERDRIVE_THERMALCONTROLLER_LM64        0x05
 #define ATOM_PP_OVERDRIVE_THERMALCONTROLLER_F75375      0x06
-#define ATOM_PP_OVERDRIVE_THERMALCONTROLLER_ASC7512     0x07	/* Andigilog */
+#define ATOM_PP_OVERDRIVE_THERMALCONTROLLER_ASC7512     0x07	// Andigilog
 
-typedef struct _ATOM_POWERPLAY_INFO {
-	ATOM_COMMON_TABLE_HEADER sHeader;
-	UCHAR ucOverdriveThermalController;
-	UCHAR ucOverdriveI2cLine;
-	UCHAR ucOverdriveIntBitmap;
-	UCHAR ucOverdriveControllerAddress;
-	UCHAR ucSizeOfPowerModeEntry;
-	UCHAR ucNumOfPowerModeEntries;
-	ATOM_POWERMODE_INFO asPowerPlayInfo[ATOM_MAX_NUMBEROF_POWER_BLOCK];
-} ATOM_POWERPLAY_INFO;
-
-typedef struct _ATOM_POWERPLAY_INFO_V2 {
-	ATOM_COMMON_TABLE_HEADER sHeader;
-	UCHAR ucOverdriveThermalController;
-	UCHAR ucOverdriveI2cLine;
-	UCHAR ucOverdriveIntBitmap;
-	UCHAR ucOverdriveControllerAddress;
-	UCHAR ucSizeOfPowerModeEntry;
-	UCHAR ucNumOfPowerModeEntries;
-	ATOM_POWERMODE_INFO_V2 asPowerPlayInfo[ATOM_MAX_NUMBEROF_POWER_BLOCK];
-} ATOM_POWERPLAY_INFO_V2;
-
-typedef struct _ATOM_POWERPLAY_INFO_V3 {
-	ATOM_COMMON_TABLE_HEADER sHeader;
-	UCHAR ucOverdriveThermalController;
-	UCHAR ucOverdriveI2cLine;
-	UCHAR ucOverdriveIntBitmap;
-	UCHAR ucOverdriveControllerAddress;
-	UCHAR ucSizeOfPowerModeEntry;
-	UCHAR ucNumOfPowerModeEntries;
-	ATOM_POWERMODE_INFO_V3 asPowerPlayInfo[ATOM_MAX_NUMBEROF_POWER_BLOCK];
-} ATOM_POWERPLAY_INFO_V3;
+
+typedef struct _ATOM_POWERPLAY_INFO
+{
+  ATOM_COMMON_TABLE_HEADER	sHeader;
+  UCHAR    ucOverdriveThermalController;
+  UCHAR    ucOverdriveI2cLine;
+  UCHAR    ucOverdriveIntBitmap;
+  UCHAR    ucOverdriveControllerAddress;
+  UCHAR    ucSizeOfPowerModeEntry;
+  UCHAR    ucNumOfPowerModeEntries;
+  ATOM_POWERMODE_INFO asPowerPlayInfo[ATOM_MAX_NUMBEROF_POWER_BLOCK];
+}ATOM_POWERPLAY_INFO;
+
+typedef struct _ATOM_POWERPLAY_INFO_V2
+{
+  ATOM_COMMON_TABLE_HEADER	sHeader;
+  UCHAR    ucOverdriveThermalController;
+  UCHAR    ucOverdriveI2cLine;
+  UCHAR    ucOverdriveIntBitmap;
+  UCHAR    ucOverdriveControllerAddress;
+  UCHAR    ucSizeOfPowerModeEntry;
+  UCHAR    ucNumOfPowerModeEntries;
+  ATOM_POWERMODE_INFO_V2 asPowerPlayInfo[ATOM_MAX_NUMBEROF_POWER_BLOCK];
+}ATOM_POWERPLAY_INFO_V2;
+
+typedef struct _ATOM_POWERPLAY_INFO_V3
+{
+  ATOM_COMMON_TABLE_HEADER	sHeader;
+  UCHAR    ucOverdriveThermalController;
+  UCHAR    ucOverdriveI2cLine;
+  UCHAR    ucOverdriveIntBitmap;
+  UCHAR    ucOverdriveControllerAddress;
+  UCHAR    ucSizeOfPowerModeEntry;
+  UCHAR    ucNumOfPowerModeEntries;
+  ATOM_POWERMODE_INFO_V3 asPowerPlayInfo[ATOM_MAX_NUMBEROF_POWER_BLOCK];
+}ATOM_POWERPLAY_INFO_V3;
 
 /* New PPlib */
 /**************************************************************************/
@@ -4873,40 +5897,42 @@ typedef struct _ATOM_PPLIB_RS780_CLOCK_INFO
 	UCHAR	ucMaxHTLinkWidth;	// From SBIOS - {2, 4, 8, 16}
 	UCHAR	ucMinHTLinkWidth;	// From SBIOS - {2, 4, 8, 16}. Effective only if CDLW enabled. Minimum down stream width could be bigger as display BW requriement.
 	USHORT	usHTLinkFreq;		// See definition ATOM_PPLIB_RS780_HTLINKFREQ_xxx or in MHz(>=200).
 	ULONG	ulFlags;
 } ATOM_PPLIB_RS780_CLOCK_INFO;
 
 #define ATOM_PPLIB_RS780_VOLTAGE_NONE       0
 #define ATOM_PPLIB_RS780_VOLTAGE_LOW        1
 #define ATOM_PPLIB_RS780_VOLTAGE_HIGH       2
 #define ATOM_PPLIB_RS780_VOLTAGE_VARIABLE   3
 
 #define ATOM_PPLIB_RS780_SPMCLK_NONE        0	// We cannot change the side port memory clock, leave it as it is.
 #define ATOM_PPLIB_RS780_SPMCLK_LOW         1
 #define ATOM_PPLIB_RS780_SPMCLK_HIGH        2
 
 #define ATOM_PPLIB_RS780_HTLINKFREQ_NONE       0
 #define ATOM_PPLIB_RS780_HTLINKFREQ_LOW        1
 #define ATOM_PPLIB_RS780_HTLINKFREQ_HIGH       2
 
 /**************************************************************************/
 
-/* Following definitions are for compatiblity issue in different SW components. */
+
+// Following definitions are for compatiblity issue in different SW components.
 #define ATOM_MASTER_DATA_TABLE_REVISION   0x01
 #define Object_Info                       Object_Header
 #define AdjustARB_SEQ                     MC_InitParameter
 #define VRAM_GPIO_DetectionInfo           VoltageObjectInfo
 #define ASIC_VDDCI_Info                   ASIC_ProfilingInfo
 #define ASIC_MVDDQ_Info                   MemoryTrainingInfo
 #define SS_Info                           PPLL_SS_Info
 #define ASIC_MVDDC_Info                   ASIC_InternalSS_Info
 #define DispDevicePriorityInfo            SaveRestoreInfo
 #define DispOutInfo                       TV_VideoMode
 
+
 #define ATOM_ENCODER_OBJECT_TABLE         ATOM_OBJECT_TABLE
 #define ATOM_CONNECTOR_OBJECT_TABLE       ATOM_OBJECT_TABLE
 
-/* New device naming, remove them when both DAL/VBIOS is ready */
+//New device naming, remove them when both DAL/VBIOS is ready
 #define DFP2I_OUTPUT_CONTROL_PARAMETERS    CRT1_OUTPUT_CONTROL_PARAMETERS
 #define DFP2I_OUTPUT_CONTROL_PS_ALLOCATION DFP2I_OUTPUT_CONTROL_PARAMETERS
 
@@ -4921,7 +5947,7 @@ typedef struct _ATOM_PPLIB_RS780_CLOCK_INFO
 
 #define ATOM_DEVICE_DFP1I_INDEX            ATOM_DEVICE_DFP1_INDEX
 #define ATOM_DEVICE_DFP1X_INDEX            ATOM_DEVICE_DFP2_INDEX
 
 #define ATOM_DEVICE_DFP2I_INDEX            0x00000009
 #define ATOM_DEVICE_DFP2I_SUPPORT          (0x1L << ATOM_DEVICE_DFP2I_INDEX)
 
@@ -4939,7 +5965,7 @@ typedef struct _ATOM_PPLIB_RS780_CLOCK_INFO
 
 #define ATOM_S3_DFP2I_ACTIVEb1             0x02
 
 #define ATOM_S3_DFP1I_ACTIVE               ATOM_S3_DFP1_ACTIVE
 #define ATOM_S3_DFP1X_ACTIVE               ATOM_S3_DFP2_ACTIVE
 
 #define ATOM_S3_DFP2I_ACTIVE               0x00000200L
@@ -4958,14 +5984,14 @@ typedef struct _ATOM_PPLIB_RS780_CLOCK_INFO
 #define ATOM_S6_ACC_REQ_DFP2Ib3            0x02
 #define ATOM_S6_ACC_REQ_DFP2I              0x02000000L
 
 #define TMDS1XEncoderControl               DVOEncoderControl
 #define DFP1XOutputControl                 DVOOutputControl
 
 #define ExternalDFPOutputControl           DFP1XOutputControl
 #define EnableExternalTMDS_Encoder         TMDS1XEncoderControl
 
 #define DFP1IOutputControl                 TMDSAOutputControl
 #define DFP2IOutputControl                 LVTMAOutputControl
 
 #define DAC1_ENCODER_CONTROL_PARAMETERS    DAC_ENCODER_CONTROL_PARAMETERS
 #define DAC1_ENCODER_CONTROL_PS_ALLOCATION DAC_ENCODER_CONTROL_PS_ALLOCATION
@@ -4974,7 +6000,7 @@ typedef struct _ATOM_PPLIB_RS780_CLOCK_INFO
 #define DAC2_ENCODER_CONTROL_PS_ALLOCATION DAC_ENCODER_CONTROL_PS_ALLOCATION
 
 #define ucDac1Standard                     ucDacStandard
 #define ucDac2Standard                     ucDacStandard
 
 #define TMDS1EncoderControl                TMDSAEncoderControl
 #define TMDS2EncoderControl                LVTMAEncoderControl
@@ -4984,12 +6010,56 @@ typedef struct _ATOM_PPLIB_RS780_CLOCK_INFO
 #define CRT1OutputControl                  DAC1OutputControl
 #define CRT2OutputControl                  DAC2OutputControl
 
-/* These two lines will be removed for sure in a few days, will follow up with Michael V. */
+//These two lines will be removed for sure in a few days, will follow up with Michael V.
 #define EnableLVDS_SS                      EnableSpreadSpectrumOnPPLL
 #define ENABLE_LVDS_SS_PARAMETERS_V3       ENABLE_SPREAD_SPECTRUM_ON_PPLL
+
+//#define ATOM_S2_CRT1_DPMS_STATE          0x00010000L
+//#define ATOM_S2_LCD1_DPMS_STATE          ATOM_S2_CRT1_DPMS_STATE
+//#define ATOM_S2_TV1_DPMS_STATE           ATOM_S2_CRT1_DPMS_STATE
+//#define ATOM_S2_DFP1_DPMS_STATE          ATOM_S2_CRT1_DPMS_STATE
+//#define ATOM_S2_CRT2_DPMS_STATE          ATOM_S2_CRT1_DPMS_STATE
+
+#define ATOM_S6_ACC_REQ_TV2                0x00400000L
+#define ATOM_DEVICE_TV2_INDEX              0x00000006
+#define ATOM_DEVICE_TV2_SUPPORT            (0x1L << ATOM_DEVICE_TV2_INDEX)
+#define ATOM_S0_TV2                        0x00100000L
+#define ATOM_S3_TV2_ACTIVE                 ATOM_S3_DFP6_ACTIVE
+#define ATOM_S3_TV2_CRTC_ACTIVE            ATOM_S3_DFP6_CRTC_ACTIVE
+
+//
+#define ATOM_S2_CRT1_DPMS_STATE            0x00010000L
+#define ATOM_S2_LCD1_DPMS_STATE            0x00020000L
+#define ATOM_S2_TV1_DPMS_STATE             0x00040000L
+#define ATOM_S2_DFP1_DPMS_STATE            0x00080000L
+#define ATOM_S2_CRT2_DPMS_STATE            0x00100000L
+#define ATOM_S2_LCD2_DPMS_STATE            0x00200000L
+#define ATOM_S2_TV2_DPMS_STATE             0x00400000L
+#define ATOM_S2_DFP2_DPMS_STATE            0x00800000L
+#define ATOM_S2_CV_DPMS_STATE              0x01000000L
+#define ATOM_S2_DFP3_DPMS_STATE            0x02000000L
+#define ATOM_S2_DFP4_DPMS_STATE            0x04000000L
+#define ATOM_S2_DFP5_DPMS_STATE            0x08000000L
+
+#define ATOM_S2_CRT1_DPMS_STATEb2          0x01
+#define ATOM_S2_LCD1_DPMS_STATEb2          0x02
+#define ATOM_S2_TV1_DPMS_STATEb2           0x04
+#define ATOM_S2_DFP1_DPMS_STATEb2          0x08
+#define ATOM_S2_CRT2_DPMS_STATEb2          0x10
+#define ATOM_S2_LCD2_DPMS_STATEb2          0x20
+#define ATOM_S2_TV2_DPMS_STATEb2           0x40
+#define ATOM_S2_DFP2_DPMS_STATEb2          0x80
+#define ATOM_S2_CV_DPMS_STATEb3            0x01
+#define ATOM_S2_DFP3_DPMS_STATEb3          0x02
+#define ATOM_S2_DFP4_DPMS_STATEb3          0x04
+#define ATOM_S2_DFP5_DPMS_STATEb3          0x08
+
+#define ATOM_S3_ASIC_GUI_ENGINE_HUNGb3     0x20
+#define ATOM_S3_ALLOW_FAST_PWR_SWITCHb3    0x40
+#define ATOM_S3_RQST_GPU_USE_MIN_PWRb3     0x80
 
 /*********************************************************************************/
 
-#pragma pack()			/* BIOS data must use byte aligment */
+#pragma pack() // BIOS data must use byte aligment
 
 #endif /* _ATOMBIOS_H */
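
Aside: the ATOM_PM_MISCINFO_*_MASK/_SHIFT pairs above are meant to be used
together to pull a multi-bit field out of ulMiscInfo. A minimal sketch of the
intended decode, using only defines from this header (the helper itself is
hypothetical, not part of the patch):

	/* Returns the PowerPlay settings group (1-Optimal Battery Life ...
	 * 5-Optimal Performance), decoded from ATOM_POWERMODE_INFO.ulMiscInfo. */
	static inline unsigned int atom_pm_settings_group(unsigned long misc_info)
	{
		return (misc_info & ATOM_PM_MISCINFO_POWERPLAY_SETTINGS_GROUP_MASK) >>
			ATOM_PM_MISCINFO_POWERPLAY_SETTINGS_GROUP_SHIFT;
	}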
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index af464e351fbd..dd9fdf560611 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -245,21 +245,25 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
 
 	switch (mode) {
 	case DRM_MODE_DPMS_ON:
-		atombios_enable_crtc(crtc, 1);
+		atombios_enable_crtc(crtc, ATOM_ENABLE);
 		if (ASIC_IS_DCE3(rdev))
-			atombios_enable_crtc_memreq(crtc, 1);
-		atombios_blank_crtc(crtc, 0);
-		drm_vblank_post_modeset(dev, radeon_crtc->crtc_id);
+			atombios_enable_crtc_memreq(crtc, ATOM_ENABLE);
+		atombios_blank_crtc(crtc, ATOM_DISABLE);
+		/* XXX re-enable when interrupt support is added */
+		if (!ASIC_IS_DCE4(rdev))
+			drm_vblank_post_modeset(dev, radeon_crtc->crtc_id);
 		radeon_crtc_load_lut(crtc);
 		break;
 	case DRM_MODE_DPMS_STANDBY:
 	case DRM_MODE_DPMS_SUSPEND:
 	case DRM_MODE_DPMS_OFF:
-		drm_vblank_pre_modeset(dev, radeon_crtc->crtc_id);
-		atombios_blank_crtc(crtc, 1);
+		/* XXX re-enable when interrupt support is added */
+		if (!ASIC_IS_DCE4(rdev))
+			drm_vblank_pre_modeset(dev, radeon_crtc->crtc_id);
+		atombios_blank_crtc(crtc, ATOM_ENABLE);
 		if (ASIC_IS_DCE3(rdev))
-			atombios_enable_crtc_memreq(crtc, 0);
-		atombios_enable_crtc(crtc, 0);
+			atombios_enable_crtc_memreq(crtc, ATOM_DISABLE);
+		atombios_enable_crtc(crtc, ATOM_DISABLE);
 		break;
 	}
 }
@@ -349,6 +353,11 @@ static void atombios_crtc_set_timing(struct drm_crtc *crtc,
 	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
 }
 
+union atom_enable_ss {
+	ENABLE_LVDS_SS_PARAMETERS legacy;
+	ENABLE_SPREAD_SPECTRUM_ON_PPLL_PS_ALLOCATION v1;
+};
+
 static void atombios_set_ss(struct drm_crtc *crtc, int enable)
 {
 	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
@@ -358,11 +367,14 @@ static void atombios_set_ss(struct drm_crtc *crtc, int enable)
 	struct radeon_encoder *radeon_encoder = NULL;
 	struct radeon_encoder_atom_dig *dig = NULL;
 	int index = GetIndexIntoMasterTable(COMMAND, EnableSpreadSpectrumOnPPLL);
-	ENABLE_SPREAD_SPECTRUM_ON_PPLL_PS_ALLOCATION args;
-	ENABLE_LVDS_SS_PARAMETERS legacy_args;
+	union atom_enable_ss args;
 	uint16_t percentage = 0;
 	uint8_t type = 0, step = 0, delay = 0, range = 0;
 
+	/* XXX add ss support for DCE4 */
+	if (ASIC_IS_DCE4(rdev))
+		return;
+
 	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
 		if (encoder->crtc == crtc) {
 			radeon_encoder = to_radeon_encoder(encoder);
@@ -386,29 +398,28 @@ static void atombios_set_ss(struct drm_crtc *crtc, int enable)
 	if (!radeon_encoder)
 		return;
 
+	memset(&args, 0, sizeof(args));
 	if (ASIC_IS_AVIVO(rdev)) {
-		memset(&args, 0, sizeof(args));
-		args.usSpreadSpectrumPercentage = cpu_to_le16(percentage);
-		args.ucSpreadSpectrumType = type;
-		args.ucSpreadSpectrumStep = step;
-		args.ucSpreadSpectrumDelay = delay;
-		args.ucSpreadSpectrumRange = range;
-		args.ucPpll = radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1;
-		args.ucEnable = enable;
-		atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+		args.v1.usSpreadSpectrumPercentage = cpu_to_le16(percentage);
+		args.v1.ucSpreadSpectrumType = type;
+		args.v1.ucSpreadSpectrumStep = step;
+		args.v1.ucSpreadSpectrumDelay = delay;
+		args.v1.ucSpreadSpectrumRange = range;
+		args.v1.ucPpll = radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1;
+		args.v1.ucEnable = enable;
 	} else {
-		memset(&legacy_args, 0, sizeof(legacy_args));
-		legacy_args.usSpreadSpectrumPercentage = cpu_to_le16(percentage);
-		legacy_args.ucSpreadSpectrumType = type;
-		legacy_args.ucSpreadSpectrumStepSize_Delay = (step & 3) << 2;
-		legacy_args.ucSpreadSpectrumStepSize_Delay |= (delay & 7) << 4;
-		legacy_args.ucEnable = enable;
-		atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&legacy_args);
+		args.legacy.usSpreadSpectrumPercentage = cpu_to_le16(percentage);
+		args.legacy.ucSpreadSpectrumType = type;
+		args.legacy.ucSpreadSpectrumStepSize_Delay = (step & 3) << 2;
+		args.legacy.ucSpreadSpectrumStepSize_Delay |= (delay & 7) << 4;
+		args.legacy.ucEnable = enable;
 	}
+	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
 }
 
 union adjust_pixel_clock {
 	ADJUST_DISPLAY_PLL_PS_ALLOCATION v1;
+	ADJUST_DISPLAY_PLL_PS_ALLOCATION_V3 v3;
 };
 
 static u32 atombios_adjust_pll(struct drm_crtc *crtc,
@@ -420,10 +431,24 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
 	struct drm_encoder *encoder = NULL;
 	struct radeon_encoder *radeon_encoder = NULL;
 	u32 adjusted_clock = mode->clock;
+	int encoder_mode = 0;
 
 	/* reset the pll flags */
 	pll->flags = 0;
 
+	/* select the PLL algo */
+	if (ASIC_IS_AVIVO(rdev)) {
+		if (radeon_new_pll == 0)
+			pll->algo = PLL_ALGO_LEGACY;
+		else
+			pll->algo = PLL_ALGO_NEW;
+	} else {
+		if (radeon_new_pll == 1)
+			pll->algo = PLL_ALGO_NEW;
+		else
+			pll->algo = PLL_ALGO_LEGACY;
+	}
+
 	if (ASIC_IS_AVIVO(rdev)) {
 		if ((rdev->family == CHIP_RS600) ||
 		    (rdev->family == CHIP_RS690) ||
@@ -448,10 +473,16 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
 	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
 		if (encoder->crtc == crtc) {
 			radeon_encoder = to_radeon_encoder(encoder);
+			encoder_mode = atombios_get_encoder_mode(encoder);
 			if (ASIC_IS_AVIVO(rdev)) {
 				/* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */
 				if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1)
 					adjusted_clock = mode->clock * 2;
+				/* LVDS PLL quirks */
+				if (encoder->encoder_type == DRM_MODE_ENCODER_LVDS) {
+					struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+					pll->algo = dig->pll_algo;
+				}
 			} else {
 				if (encoder->encoder_type != DRM_MODE_ENCODER_DAC)
 					pll->flags |= RADEON_PLL_NO_ODD_POST_DIV;
@@ -468,14 +499,9 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
 	 */
 	if (ASIC_IS_DCE3(rdev)) {
 		union adjust_pixel_clock args;
-		struct radeon_encoder_atom_dig *dig;
 		u8 frev, crev;
 		int index;
 
-		if (!radeon_encoder->enc_priv)
-			return adjusted_clock;
-		dig = radeon_encoder->enc_priv;
-
 		index = GetIndexIntoMasterTable(COMMAND, AdjustDisplayPll);
 		atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev,
 				      &crev);
@@ -489,12 +515,51 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
 		case 2:
 			args.v1.usPixelClock = cpu_to_le16(mode->clock / 10);
 			args.v1.ucTransmitterID = radeon_encoder->encoder_id;
-			args.v1.ucEncodeMode = atombios_get_encoder_mode(encoder);
+			args.v1.ucEncodeMode = encoder_mode;
 
 			atom_execute_table(rdev->mode_info.atom_context,
 					   index, (uint32_t *)&args);
 			adjusted_clock = le16_to_cpu(args.v1.usPixelClock) * 10;
 			break;
+		case 3:
+			args.v3.sInput.usPixelClock = cpu_to_le16(mode->clock / 10);
+			args.v3.sInput.ucTransmitterID = radeon_encoder->encoder_id;
+			args.v3.sInput.ucEncodeMode = encoder_mode;
+			args.v3.sInput.ucDispPllConfig = 0;
+			if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
+				struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+
+				if (encoder_mode == ATOM_ENCODER_MODE_DP)
+					args.v3.sInput.ucDispPllConfig |=
+						DISPPLL_CONFIG_COHERENT_MODE;
+				else {
+					if (dig->coherent_mode)
+						args.v3.sInput.ucDispPllConfig |=
+							DISPPLL_CONFIG_COHERENT_MODE;
+					if (mode->clock > 165000)
+						args.v3.sInput.ucDispPllConfig |=
+							DISPPLL_CONFIG_DUAL_LINK;
+				}
+			} else if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
+				/* may want to enable SS on DP/eDP eventually */
+				args.v3.sInput.ucDispPllConfig |=
+					DISPPLL_CONFIG_SS_ENABLE;
+				if (mode->clock > 165000)
+					args.v3.sInput.ucDispPllConfig |=
+						DISPPLL_CONFIG_DUAL_LINK;
+			}
+			atom_execute_table(rdev->mode_info.atom_context,
+					   index, (uint32_t *)&args);
+			adjusted_clock = le32_to_cpu(args.v3.sOutput.ulDispPllFreq) * 10;
+			if (args.v3.sOutput.ucRefDiv) {
+				pll->flags |= RADEON_PLL_USE_REF_DIV;
+				pll->reference_div = args.v3.sOutput.ucRefDiv;
+			}
+			if (args.v3.sOutput.ucPostDiv) {
+				pll->flags |= RADEON_PLL_USE_POST_DIV;
+				pll->post_div = args.v3.sOutput.ucPostDiv;
+			}
+			break;
 		default:
 			DRM_ERROR("Unknown table version %d %d\n", frev, crev);
 			return adjusted_clock;
@@ -513,9 +578,47 @@ union set_pixel_clock {
 	PIXEL_CLOCK_PARAMETERS v1;
 	PIXEL_CLOCK_PARAMETERS_V2 v2;
 	PIXEL_CLOCK_PARAMETERS_V3 v3;
+	PIXEL_CLOCK_PARAMETERS_V5 v5;
 };
 
-void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
+static void atombios_crtc_set_dcpll(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	u8 frev, crev;
+	int index;
+	union set_pixel_clock args;
+
+	memset(&args, 0, sizeof(args));
+
+	index = GetIndexIntoMasterTable(COMMAND, SetPixelClock);
+	atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev,
+			      &crev);
+
+	switch (frev) {
+	case 1:
+		switch (crev) {
+		case 5:
+			/* if the default dcpll clock is specified,
+			 * SetPixelClock provides the dividers
+			 */
+			args.v5.ucCRTC = ATOM_CRTC_INVALID;
+			args.v5.usPixelClock = rdev->clock.default_dispclk;
+			args.v5.ucPpll = ATOM_DCPLL;
+			break;
+		default:
+			DRM_ERROR("Unknown table version %d %d\n", frev, crev);
+			return;
+		}
+		break;
+	default:
+		DRM_ERROR("Unknown table version %d %d\n", frev, crev);
+		return;
+	}
+	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
+static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
 {
 	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
 	struct drm_device *dev = crtc->dev;
@@ -529,12 +632,14 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
 	u32 ref_div = 0, fb_div = 0, frac_fb_div = 0, post_div = 0;
 	struct radeon_pll *pll;
 	u32 adjusted_clock;
+	int encoder_mode = 0;
 
 	memset(&args, 0, sizeof(args));
 
 	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
 		if (encoder->crtc == crtc) {
 			radeon_encoder = to_radeon_encoder(encoder);
+			encoder_mode = atombios_get_encoder_mode(encoder);
 			break;
 		}
 	}
@@ -542,26 +647,24 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
 	if (!radeon_encoder)
 		return;
 
-	if (radeon_crtc->crtc_id == 0)
+	switch (radeon_crtc->pll_id) {
+	case ATOM_PPLL1:
 		pll = &rdev->clock.p1pll;
-	else
+		break;
+	case ATOM_PPLL2:
 		pll = &rdev->clock.p2pll;
+		break;
+	case ATOM_DCPLL:
+	case ATOM_PPLL_INVALID:
+		pll = &rdev->clock.dcpll;
+		break;
+	}
 
 	/* adjust pixel clock as needed */
 	adjusted_clock = atombios_adjust_pll(crtc, mode, pll);
 
-	if (ASIC_IS_AVIVO(rdev)) {
-		if (radeon_new_pll)
-			radeon_compute_pll_avivo(pll, adjusted_clock, &pll_clock,
-						 &fb_div, &frac_fb_div,
-						 &ref_div, &post_div);
-		else
-			radeon_compute_pll(pll, adjusted_clock, &pll_clock,
-					   &fb_div, &frac_fb_div,
-					   &ref_div, &post_div);
-	} else
-		radeon_compute_pll(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div,
-				   &ref_div, &post_div);
+	radeon_compute_pll(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div,
+			   &ref_div, &post_div);
 
 	index = GetIndexIntoMasterTable(COMMAND, SetPixelClock);
 	atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev,
@@ -576,8 +679,7 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
 			args.v1.usFbDiv = cpu_to_le16(fb_div);
 			args.v1.ucFracFbDiv = frac_fb_div;
 			args.v1.ucPostDiv = post_div;
-			args.v1.ucPpll =
-			    radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1;
+			args.v1.ucPpll = radeon_crtc->pll_id;
 			args.v1.ucCRTC = radeon_crtc->crtc_id;
 			args.v1.ucRefDivSrc = 1;
 			break;
@@ -587,8 +689,7 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
 			args.v2.usFbDiv = cpu_to_le16(fb_div);
 			args.v2.ucFracFbDiv = frac_fb_div;
 			args.v2.ucPostDiv = post_div;
-			args.v2.ucPpll =
-			    radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1;
+			args.v2.ucPpll = radeon_crtc->pll_id;
 			args.v2.ucCRTC = radeon_crtc->crtc_id;
 			args.v2.ucRefDivSrc = 1;
 			break;
@@ -598,12 +699,22 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
 			args.v3.usFbDiv = cpu_to_le16(fb_div);
 			args.v3.ucFracFbDiv = frac_fb_div;
 			args.v3.ucPostDiv = post_div;
-			args.v3.ucPpll =
-			    radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1;
-			args.v3.ucMiscInfo = (radeon_crtc->crtc_id << 2);
+			args.v3.ucPpll = radeon_crtc->pll_id;
+			args.v3.ucMiscInfo = (radeon_crtc->pll_id << 2);
 			args.v3.ucTransmitterId = radeon_encoder->encoder_id;
-			args.v3.ucEncoderMode =
-			    atombios_get_encoder_mode(encoder);
+			args.v3.ucEncoderMode = encoder_mode;
+			break;
+		case 5:
+			args.v5.ucCRTC = radeon_crtc->crtc_id;
+			args.v5.usPixelClock = cpu_to_le16(mode->clock / 10);
+			args.v5.ucRefDiv = ref_div;
+			args.v5.usFbDiv = cpu_to_le16(fb_div);
+			args.v5.ulFbDivDecFrac = cpu_to_le32(frac_fb_div * 100000);
+			args.v5.ucPostDiv = post_div;
+			args.v5.ucMiscInfo = 0; /* HDMI depth, etc. */
+			args.v5.ucTransmitterID = radeon_encoder->encoder_id;
+			args.v5.ucEncoderMode = encoder_mode;
+			args.v5.ucPpll = radeon_crtc->pll_id;
 			break;
 		default:
 			DRM_ERROR("Unknown table version %d %d\n", frev, crev);
@@ -618,6 +729,140 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
 	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
 }
 
+static int evergreen_crtc_set_base(struct drm_crtc *crtc, int x, int y,
+				   struct drm_framebuffer *old_fb)
+{
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_framebuffer *radeon_fb;
+	struct drm_gem_object *obj;
+	struct radeon_bo *rbo;
+	uint64_t fb_location;
+	uint32_t fb_format, fb_pitch_pixels, tiling_flags;
+	int r;
+
+	/* no fb bound */
+	if (!crtc->fb) {
+		DRM_DEBUG("No FB bound\n");
+		return 0;
+	}
+
+	radeon_fb = to_radeon_framebuffer(crtc->fb);
+
+	/* Pin framebuffer & get tilling informations */
+	obj = radeon_fb->obj;
+	rbo = obj->driver_private;
+	r = radeon_bo_reserve(rbo, false);
+	if (unlikely(r != 0))
+		return r;
+	r = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, &fb_location);
+	if (unlikely(r != 0)) {
+		radeon_bo_unreserve(rbo);
+		return -EINVAL;
+	}
+	radeon_bo_get_tiling_flags(rbo, &tiling_flags, NULL);
+	radeon_bo_unreserve(rbo);
+
+	switch (crtc->fb->bits_per_pixel) {
+	case 8:
+		fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_8BPP) |
+			     EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_INDEXED));
+		break;
+	case 15:
+		fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) |
+			     EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB1555));
+		break;
+	case 16:
+		fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) |
+			     EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB565));
+		break;
+	case 24:
+	case 32:
+		fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_32BPP) |
+			     EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB8888));
+		break;
+	default:
+		DRM_ERROR("Unsupported screen depth %d\n",
+			  crtc->fb->bits_per_pixel);
+		return -EINVAL;
+	}
+
+	switch (radeon_crtc->crtc_id) {
+	case 0:
+		WREG32(AVIVO_D1VGA_CONTROL, 0);
+		break;
+	case 1:
+		WREG32(AVIVO_D2VGA_CONTROL, 0);
+		break;
+	case 2:
+		WREG32(EVERGREEN_D3VGA_CONTROL, 0);
+		break;
+	case 3:
+		WREG32(EVERGREEN_D4VGA_CONTROL, 0);
+		break;
+	case 4:
+		WREG32(EVERGREEN_D5VGA_CONTROL, 0);
+		break;
+	case 5:
+		WREG32(EVERGREEN_D6VGA_CONTROL, 0);
+		break;
+	default:
+		break;
+	}
+
+	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
+	       upper_32_bits(fb_location));
+	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
+	       upper_32_bits(fb_location));
+	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
+	       (u32)fb_location & EVERGREEN_GRPH_SURFACE_ADDRESS_MASK);
+	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
+	       (u32) fb_location & EVERGREEN_GRPH_SURFACE_ADDRESS_MASK);
+	WREG32(EVERGREEN_GRPH_CONTROL + radeon_crtc->crtc_offset, fb_format);
+
+	WREG32(EVERGREEN_GRPH_SURFACE_OFFSET_X + radeon_crtc->crtc_offset, 0);
+	WREG32(EVERGREEN_GRPH_SURFACE_OFFSET_Y + radeon_crtc->crtc_offset, 0);
+	WREG32(EVERGREEN_GRPH_X_START + radeon_crtc->crtc_offset, 0);
+	WREG32(EVERGREEN_GRPH_Y_START + radeon_crtc->crtc_offset, 0);
+	WREG32(EVERGREEN_GRPH_X_END + radeon_crtc->crtc_offset, crtc->fb->width);
+	WREG32(EVERGREEN_GRPH_Y_END + radeon_crtc->crtc_offset, crtc->fb->height);
+
+	fb_pitch_pixels = crtc->fb->pitch / (crtc->fb->bits_per_pixel / 8);
+	WREG32(EVERGREEN_GRPH_PITCH + radeon_crtc->crtc_offset, fb_pitch_pixels);
+	WREG32(EVERGREEN_GRPH_ENABLE + radeon_crtc->crtc_offset, 1);
+
+	WREG32(EVERGREEN_DESKTOP_HEIGHT + radeon_crtc->crtc_offset,
+	       crtc->mode.vdisplay);
+	x &= ~3;
+	y &= ~1;
+	WREG32(EVERGREEN_VIEWPORT_START + radeon_crtc->crtc_offset,
+	       (x << 16) | y);
+	WREG32(EVERGREEN_VIEWPORT_SIZE + radeon_crtc->crtc_offset,
+	       (crtc->mode.hdisplay << 16) | crtc->mode.vdisplay);
+
+	if (crtc->mode.flags & DRM_MODE_FLAG_INTERLACE)
+		WREG32(EVERGREEN_DATA_FORMAT + radeon_crtc->crtc_offset,
+		       EVERGREEN_INTERLEAVE_EN);
+	else
+		WREG32(EVERGREEN_DATA_FORMAT + radeon_crtc->crtc_offset, 0);
+
+	if (old_fb && old_fb != crtc->fb) {
+		radeon_fb = to_radeon_framebuffer(old_fb);
+		rbo = radeon_fb->obj->driver_private;
+		r = radeon_bo_reserve(rbo, false);
+		if (unlikely(r != 0))
+			return r;
+		radeon_bo_unpin(rbo);
+		radeon_bo_unreserve(rbo);
+	}
+
+	/* Bytes per pixel may have changed */
+	radeon_bandwidth_update(rdev);
+
+	return 0;
+}
+
 static int avivo_crtc_set_base(struct drm_crtc *crtc, int x, int y,
 			       struct drm_framebuffer *old_fb)
 {
@@ -755,7 +1000,9 @@ int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y,
 	struct drm_device *dev = crtc->dev;
 	struct radeon_device *rdev = dev->dev_private;
 
-	if (ASIC_IS_AVIVO(rdev))
+	if (ASIC_IS_DCE4(rdev))
+		return evergreen_crtc_set_base(crtc, x, y, old_fb);
+	else if (ASIC_IS_AVIVO(rdev))
 		return avivo_crtc_set_base(crtc, x, y, old_fb);
 	else
 		return radeon_crtc_set_base(crtc, x, y, old_fb);
@@ -785,6 +1032,46 @@ static void radeon_legacy_atom_fixup(struct drm_crtc *crtc)
 	}
 }
 
+static int radeon_atom_pick_pll(struct drm_crtc *crtc)
+{
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct drm_encoder *test_encoder;
+	struct drm_crtc *test_crtc;
+	uint32_t pll_in_use = 0;
+
+	if (ASIC_IS_DCE4(rdev)) {
+		/* if crtc is driving DP and we have an ext clock, use that */
+		list_for_each_entry(test_encoder, &dev->mode_config.encoder_list, head) {
+			if (test_encoder->crtc && (test_encoder->crtc == crtc)) {
+				if (atombios_get_encoder_mode(test_encoder) == ATOM_ENCODER_MODE_DP) {
+					if (rdev->clock.dp_extclk)
+						return ATOM_PPLL_INVALID;
+				}
+			}
+		}
+
+		/* otherwise, pick one of the plls */
+		list_for_each_entry(test_crtc, &dev->mode_config.crtc_list, head) {
+			struct radeon_crtc *radeon_test_crtc;
+
+			if (crtc == test_crtc)
+				continue;
+
+			radeon_test_crtc = to_radeon_crtc(test_crtc);
+			if ((radeon_test_crtc->pll_id >= ATOM_PPLL1) &&
+			    (radeon_test_crtc->pll_id <= ATOM_PPLL2))
+				pll_in_use |= (1 << radeon_test_crtc->pll_id);
+		}
+		if (!(pll_in_use & 1))
+			return ATOM_PPLL1;
+		return ATOM_PPLL2;
+	} else
+		return radeon_crtc->crtc_id;
+
+}
+
 int atombios_crtc_mode_set(struct drm_crtc *crtc,
 			   struct drm_display_mode *mode,
 			   struct drm_display_mode *adjusted_mode,
@@ -796,19 +1083,27 @@ int atombios_crtc_mode_set(struct drm_crtc *crtc,
 
 	/* TODO color tiling */
 
+	/* pick pll */
+	radeon_crtc->pll_id = radeon_atom_pick_pll(crtc);
+
 	atombios_set_ss(crtc, 0);
+	/* always set DCPLL */
+	if (ASIC_IS_DCE4(rdev))
+		atombios_crtc_set_dcpll(crtc);
 	atombios_crtc_set_pll(crtc, adjusted_mode);
 	atombios_set_ss(crtc, 1);
-	atombios_crtc_set_timing(crtc, adjusted_mode);
 
-	if (ASIC_IS_AVIVO(rdev))
-		atombios_crtc_set_base(crtc, x, y, old_fb);
+	if (ASIC_IS_DCE4(rdev))
+		atombios_set_crtc_dtd_timing(crtc, adjusted_mode);
+	else if (ASIC_IS_AVIVO(rdev))
+		atombios_crtc_set_timing(crtc, adjusted_mode);
 	else {
+		atombios_crtc_set_timing(crtc, adjusted_mode);
 		if (radeon_crtc->crtc_id == 0)
 			atombios_set_crtc_dtd_timing(crtc, adjusted_mode);
-		atombios_crtc_set_base(crtc, x, y, old_fb);
 		radeon_legacy_atom_fixup(crtc);
 	}
+	atombios_crtc_set_base(crtc, x, y, old_fb);
 	atombios_overscan_setup(crtc, mode, adjusted_mode);
 	atombios_scaler_setup(crtc);
 	return 0;
@@ -825,14 +1120,14 @@ static bool atombios_crtc_mode_fixup(struct drm_crtc *crtc,
 
 static void atombios_crtc_prepare(struct drm_crtc *crtc)
 {
-	atombios_lock_crtc(crtc, 1);
+	atombios_lock_crtc(crtc, ATOM_ENABLE);
 	atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
 }
 
 static void atombios_crtc_commit(struct drm_crtc *crtc)
 {
 	atombios_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
-	atombios_lock_crtc(crtc, 0);
+	atombios_lock_crtc(crtc, ATOM_DISABLE);
 }
 
 static const struct drm_crtc_helper_funcs atombios_helper_funcs = {
@@ -848,8 +1143,37 @@ static const struct drm_crtc_helper_funcs atombios_helper_funcs = {
 void radeon_atombios_init_crtc(struct drm_device *dev,
 			       struct radeon_crtc *radeon_crtc)
 {
-	if (radeon_crtc->crtc_id == 1)
-		radeon_crtc->crtc_offset =
-		    AVIVO_D2CRTC_H_TOTAL - AVIVO_D1CRTC_H_TOTAL;
+	struct radeon_device *rdev = dev->dev_private;
+
+	if (ASIC_IS_DCE4(rdev)) {
+		switch (radeon_crtc->crtc_id) {
+		case 0:
+		default:
+			radeon_crtc->crtc_offset = EVERGREEN_CRTC0_REGISTER_OFFSET;
+			break;
+		case 1:
+			radeon_crtc->crtc_offset = EVERGREEN_CRTC1_REGISTER_OFFSET;
+			break;
+		case 2:
+			radeon_crtc->crtc_offset = EVERGREEN_CRTC2_REGISTER_OFFSET;
+			break;
+		case 3:
+			radeon_crtc->crtc_offset = EVERGREEN_CRTC3_REGISTER_OFFSET;
+			break;
+		case 4:
+			radeon_crtc->crtc_offset = EVERGREEN_CRTC4_REGISTER_OFFSET;
+			break;
+		case 5:
+			radeon_crtc->crtc_offset = EVERGREEN_CRTC5_REGISTER_OFFSET;
+			break;
+		}
+	} else {
+		if (radeon_crtc->crtc_id == 1)
+			radeon_crtc->crtc_offset =
+				AVIVO_D2CRTC_H_TOTAL - AVIVO_D1CRTC_H_TOTAL;
+		else
+			radeon_crtc->crtc_offset = 0;
+	}
+	radeon_crtc->pll_id = -1;
 	drm_crtc_helper_add(&radeon_crtc->base, &atombios_helper_funcs);
 }
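
Aside: the unions added in this file (atom_enable_ss, adjust_pixel_clock,
set_pixel_clock) all follow one pattern: overlay every supported AtomBIOS
parameter layout in a single buffer, ask the BIOS which table revision it
implements, then fill the matching member. A minimal sketch of that pattern,
with hypothetical FOO_PARAMETERS* layouts standing in for the real ones:

	union foo_params {
		FOO_PARAMETERS v1;	/* hypothetical rev-1 layout */
		FOO_PARAMETERS_V2 v2;	/* hypothetical rev-2 layout */
	};

	static void atombios_execute_foo(struct radeon_device *rdev, int index)
	{
		union foo_params args;
		u8 frev, crev;

		memset(&args, 0, sizeof(args));
		atom_parse_cmd_header(rdev->mode_info.atom_context, index,
				      &frev, &crev);
		switch (crev) {
		case 1:
			/* fill args.v1 fields here */
			break;
		case 2:
			/* fill args.v2 fields here */
			break;
		default:
			DRM_ERROR("Unknown table version %d %d\n", frev, crev);
			return;
		}
		atom_execute_table(rdev->mode_info.atom_context, index,
				   (uint32_t *)&args);
	}

Because the union is as large as the biggest revision, one memset and one
atom_execute_table() call cover every layout.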
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index 99915a682d59..8a133bda00a2 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -321,6 +321,10 @@ static void dp_get_adjust_train(u8 link_status[DP_LINK_STATUS_SIZE],
 		train_set[lane] = v | p;
 }
 
+union aux_channel_transaction {
+	PROCESS_AUX_CHANNEL_TRANSACTION_PS_ALLOCATION v1;
+	PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS_V2 v2;
+};
 
 /* radeon aux chan functions */
 bool radeon_process_aux_ch(struct radeon_i2c_chan *chan, u8 *req_bytes,
@@ -329,7 +333,7 @@ bool radeon_process_aux_ch(struct radeon_i2c_chan *chan, u8 *req_bytes,
 {
 	struct drm_device *dev = chan->dev;
 	struct radeon_device *rdev = dev->dev_private;
-	PROCESS_AUX_CHANNEL_TRANSACTION_PS_ALLOCATION args;
+	union aux_channel_transaction args;
 	int index = GetIndexIntoMasterTable(COMMAND, ProcessAuxChannelTransaction);
 	unsigned char *base;
 	int retry_count = 0;
@@ -341,31 +345,33 @@ bool radeon_process_aux_ch(struct radeon_i2c_chan *chan, u8 *req_bytes,
 retry:
 	memcpy(base, req_bytes, num_bytes);
 
-	args.lpAuxRequest = 0;
-	args.lpDataOut = 16;
-	args.ucDataOutLen = 0;
-	args.ucChannelID = chan->rec.i2c_id;
-	args.ucDelay = delay / 10;
+	args.v1.lpAuxRequest = 0;
+	args.v1.lpDataOut = 16;
+	args.v1.ucDataOutLen = 0;
+	args.v1.ucChannelID = chan->rec.i2c_id;
+	args.v1.ucDelay = delay / 10;
+	if (ASIC_IS_DCE4(rdev))
+		args.v2.ucHPD_ID = chan->rec.hpd_id;
 
 	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
 
-	if (args.ucReplyStatus && !args.ucDataOutLen) {
-		if (args.ucReplyStatus == 0x20 && retry_count++ < 10)
+	if (args.v1.ucReplyStatus && !args.v1.ucDataOutLen) {
+		if (args.v1.ucReplyStatus == 0x20 && retry_count++ < 10)
 			goto retry;
 		DRM_DEBUG("failed to get auxch %02x%02x %02x %02x 0x%02x %02x after %d retries\n",
 			  req_bytes[1], req_bytes[0], req_bytes[2], req_bytes[3],
-			  chan->rec.i2c_id, args.ucReplyStatus, retry_count);
+			  chan->rec.i2c_id, args.v1.ucReplyStatus, retry_count);
 		return false;
 	}
 
-	if (args.ucDataOutLen && read_byte && read_buf_len) {
-		if (read_buf_len < args.ucDataOutLen) {
+	if (args.v1.ucDataOutLen && read_byte && read_buf_len) {
+		if (read_buf_len < args.v1.ucDataOutLen) {
 			DRM_ERROR("Buffer to small for return answer %d %d\n",
-				  read_buf_len, args.ucDataOutLen);
+				  read_buf_len, args.v1.ucDataOutLen);
 			return false;
 		}
 		{
-			int len = min(read_buf_len, args.ucDataOutLen);
+			int len = min(read_buf_len, args.v1.ucDataOutLen);
 			memcpy(read_byte, base + 16, len);
 		}
 	}
@@ -626,12 +632,19 @@ void dp_link_train(struct drm_encoder *encoder,
 	dp_set_link_bw_lanes(radeon_connector, link_configuration);
 	/* disable downspread on the sink */
 	dp_set_downspread(radeon_connector, 0);
-	/* start training on the source */
-	radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_START,
-				  dig_connector->dp_clock, enc_id, 0);
-	/* set training pattern 1 on the source */
-	radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_PATTERN_SEL,
-				  dig_connector->dp_clock, enc_id, 0);
+	if (ASIC_IS_DCE4(rdev)) {
+		/* start training on the source */
+		atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_LINK_TRAINING_START);
+		/* set training pattern 1 on the source */
+		atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN1);
+	} else {
+		/* start training on the source */
+		radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_START,
+					  dig_connector->dp_clock, enc_id, 0);
+		/* set training pattern 1 on the source */
+		radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_PATTERN_SEL,
+					  dig_connector->dp_clock, enc_id, 0);
+	}
 
 	/* set initial vs/emph */
 	memset(train_set, 0, 4);
@@ -691,8 +704,11 @@ void dp_link_train(struct drm_encoder *encoder,
 	/* set training pattern 2 on the sink */
 	dp_set_training(radeon_connector, DP_TRAINING_PATTERN_2);
 	/* set training pattern 2 on the source */
-	radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_PATTERN_SEL,
-				  dig_connector->dp_clock, enc_id, 1);
+	if (ASIC_IS_DCE4(rdev))
+		atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN2);
+	else
+		radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_PATTERN_SEL,
+					  dig_connector->dp_clock, enc_id, 1);
 
 	/* channel equalization loop */
 	tries = 0;
@@ -729,7 +745,11 @@ void dp_link_train(struct drm_encoder *encoder,
 					 >> DP_TRAIN_PRE_EMPHASIS_SHIFT);
 
 	/* disable the training pattern on the sink */
-	dp_set_training(radeon_connector, DP_TRAINING_PATTERN_DISABLE);
+	if (ASIC_IS_DCE4(rdev))
+		atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_LINK_TRAINING_COMPLETE);
+	else
+		radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_COMPLETE,
+					  dig_connector->dp_clock, enc_id, 0);
 
 	radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_COMPLETE,
 				  dig_connector->dp_clock, enc_id, 0);
diff --git a/drivers/gpu/drm/radeon/avivod.h b/drivers/gpu/drm/radeon/avivod.h
index d4e6e6e4a938..3c391e7e9fd4 100644
--- a/drivers/gpu/drm/radeon/avivod.h
+++ b/drivers/gpu/drm/radeon/avivod.h
@@ -30,11 +30,13 @@
 
 #define	D1CRTC_CONTROL					0x6080
 #define		CRTC_EN					(1 << 0)
+#define	D1CRTC_STATUS					0x609c
 #define	D1CRTC_UPDATE_LOCK				0x60E8
 #define	D1GRPH_PRIMARY_SURFACE_ADDRESS			0x6110
 #define	D1GRPH_SECONDARY_SURFACE_ADDRESS		0x6118
 
 #define	D2CRTC_CONTROL					0x6880
+#define	D2CRTC_STATUS					0x689c
 #define	D2CRTC_UPDATE_LOCK				0x68E8
 #define	D2GRPH_PRIMARY_SURFACE_ADDRESS			0x6910
 #define	D2GRPH_SECONDARY_SURFACE_ADDRESS		0x6918
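
Aside: the D1/D2 register pairs in this header sit a constant 0x800 apart
(0x6880 - 0x6080), which is why per-CRTC code can program either display head
from one path by adding an offset to the D1 register (the same idea as
radeon_crtc->crtc_offset in atombios_crtc.c above). A minimal illustration
using only the defines from this header (reg_for_crtc is hypothetical):

	static inline u32 reg_for_crtc(u32 d1_reg, int crtc_id)
	{
		/* head-to-head register stride is D2 base minus D1 base */
		return d1_reg + crtc_id * (D2CRTC_CONTROL - D1CRTC_CONTROL);
	}
	/* reg_for_crtc(D1CRTC_STATUS, 1) == D2CRTC_STATUS (0x689c) */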
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
new file mode 100644
index 000000000000..bd2e7aa85c1d
--- /dev/null
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -0,0 +1,767 @@
1/*
2 * Copyright 2010 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Alex Deucher
23 */
24#include <linux/firmware.h>
25#include <linux/platform_device.h>
26#include "drmP.h"
27#include "radeon.h"
28#include "radeon_drm.h"
29#include "rv770d.h"
30#include "atom.h"
31#include "avivod.h"
32#include "evergreen_reg.h"
33
34static void evergreen_gpu_init(struct radeon_device *rdev);
35void evergreen_fini(struct radeon_device *rdev);
36
37bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
38{
39 bool connected = false;
40 /* XXX */
41 return connected;
42}
43
44void evergreen_hpd_set_polarity(struct radeon_device *rdev,
45 enum radeon_hpd_id hpd)
46{
47 /* XXX */
48}
49
50void evergreen_hpd_init(struct radeon_device *rdev)
51{
52 /* XXX */
53}
54
55
56void evergreen_bandwidth_update(struct radeon_device *rdev)
57{
58 /* XXX */
59}
60
61void evergreen_hpd_fini(struct radeon_device *rdev)
62{
63 /* XXX */
64}
65
66static int evergreen_mc_wait_for_idle(struct radeon_device *rdev)
67{
68 unsigned i;
69 u32 tmp;
70
71 for (i = 0; i < rdev->usec_timeout; i++) {
72 /* read MC_STATUS */
73 tmp = RREG32(SRBM_STATUS) & 0x1F00;
74 if (!tmp)
75 return 0;
76 udelay(1);
77 }
78 return -1;
79}
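
evergreen_mc_wait_for_idle above is the usual bounded-poll idiom: read a status register, test the busy bits (mask 0x1F00 in SRBM_STATUS), and give up after rdev->usec_timeout iterations of udelay(1). A userspace sketch of the same pattern, with the register read stubbed out:

#include <stdint.h>
#include <stdio.h>

/* Stand-in for RREG32(SRBM_STATUS): reports busy a few times, then idle. */
static uint32_t read_status(void)
{
	static int calls;
	return (++calls < 4) ? 0x0100 : 0;
}

/* Poll until the busy bits clear or the timeout expires; returns 0 on
 * idle and -1 on timeout, matching the driver's convention. */
static int wait_for_idle(unsigned int usec_timeout, uint32_t busy_mask)
{
	unsigned int i;

	for (i = 0; i < usec_timeout; i++) {
		if (!(read_status() & busy_mask))
			return 0;
		/* the driver does udelay(1) here; omitted in userspace */
	}
	return -1;
}

int main(void)
{
	printf("result: %d\n", wait_for_idle(100000, 0x1F00));
	return 0;
}
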
80
81/*
82 * GART
83 */
84int evergreen_pcie_gart_enable(struct radeon_device *rdev)
85{
86 u32 tmp;
87 int r, i;
88
89 if (rdev->gart.table.vram.robj == NULL) {
90 dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
91 return -EINVAL;
92 }
93 r = radeon_gart_table_vram_pin(rdev);
94 if (r)
95 return r;
96 radeon_gart_restore(rdev);
97 /* Setup L2 cache */
98 WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
99 ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
100 EFFECTIVE_L2_QUEUE_SIZE(7));
101 WREG32(VM_L2_CNTL2, 0);
102 WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
103 /* Setup TLB control */
104 tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
105 SYSTEM_ACCESS_MODE_NOT_IN_SYS |
106 SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU |
107 EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
108 WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
109 WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
110 WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
111 WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
112 WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
113 WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
114 WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
115 WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
116 WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
117 WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
118 WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
119 RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
120 WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
121 (u32)(rdev->dummy_page.addr >> 12));
122 for (i = 1; i < 7; i++)
123 WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
124
125 r600_pcie_gart_tlb_flush(rdev);
126 rdev->gart.ready = true;
127 return 0;
128}
129
130void evergreen_pcie_gart_disable(struct radeon_device *rdev)
131{
132 u32 tmp;
133 int i, r;
134
135 /* Disable all tables */
136 for (i = 0; i < 7; i++)
137 WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
138
139 /* Setup L2 cache */
140 WREG32(VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING |
141 EFFECTIVE_L2_QUEUE_SIZE(7));
142 WREG32(VM_L2_CNTL2, 0);
143 WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
144 /* Setup TLB control */
145 tmp = EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
146 WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
147 WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
148 WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
149 WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
150 WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
151 WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
152 WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
153 if (rdev->gart.table.vram.robj) {
154 r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
155 if (likely(r == 0)) {
156 radeon_bo_kunmap(rdev->gart.table.vram.robj);
157 radeon_bo_unpin(rdev->gart.table.vram.robj);
158 radeon_bo_unreserve(rdev->gart.table.vram.robj);
159 }
160 }
161}
162
163void evergreen_pcie_gart_fini(struct radeon_device *rdev)
164{
165 evergreen_pcie_gart_disable(rdev);
166 radeon_gart_table_vram_free(rdev);
167 radeon_gart_fini(rdev);
168}
169
170
171void evergreen_agp_enable(struct radeon_device *rdev)
172{
173 u32 tmp;
174 int i;
175
176 /* Setup L2 cache */
177 WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
178 ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
179 EFFECTIVE_L2_QUEUE_SIZE(7));
180 WREG32(VM_L2_CNTL2, 0);
181 WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
182 /* Setup TLB control */
183 tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
184 SYSTEM_ACCESS_MODE_NOT_IN_SYS |
185 SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU |
186 EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
187 WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
188 WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
189 WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
190 WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
191 WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
192 WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
193 WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
194 for (i = 0; i < 7; i++)
195 WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
196}
197
198static void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save)
199{
200 save->vga_control[0] = RREG32(D1VGA_CONTROL);
201 save->vga_control[1] = RREG32(D2VGA_CONTROL);
202 save->vga_control[2] = RREG32(EVERGREEN_D3VGA_CONTROL);
203 save->vga_control[3] = RREG32(EVERGREEN_D4VGA_CONTROL);
204 save->vga_control[4] = RREG32(EVERGREEN_D5VGA_CONTROL);
205 save->vga_control[5] = RREG32(EVERGREEN_D6VGA_CONTROL);
206 save->vga_render_control = RREG32(VGA_RENDER_CONTROL);
207 save->vga_hdp_control = RREG32(VGA_HDP_CONTROL);
208 save->crtc_control[0] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET);
209 save->crtc_control[1] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
210 save->crtc_control[2] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET);
211 save->crtc_control[3] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET);
212 save->crtc_control[4] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET);
213 save->crtc_control[5] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
214
215 /* Stop all video */
216 WREG32(VGA_RENDER_CONTROL, 0);
217 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 1);
218 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 1);
219 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 1);
220 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 1);
221 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 1);
222 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 1);
223 WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
224 WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
225 WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
226 WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
227 WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
228 WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
229 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
230 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
231 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
232 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
233 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
234 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
235
236 WREG32(D1VGA_CONTROL, 0);
237 WREG32(D2VGA_CONTROL, 0);
238 WREG32(EVERGREEN_D3VGA_CONTROL, 0);
239 WREG32(EVERGREEN_D4VGA_CONTROL, 0);
240 WREG32(EVERGREEN_D5VGA_CONTROL, 0);
241 WREG32(EVERGREEN_D6VGA_CONTROL, 0);
242}
243
244static void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save)
245{
246 WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC0_REGISTER_OFFSET,
247 upper_32_bits(rdev->mc.vram_start));
248 WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC0_REGISTER_OFFSET,
249 upper_32_bits(rdev->mc.vram_start));
250 WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC0_REGISTER_OFFSET,
251 (u32)rdev->mc.vram_start);
252 WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC0_REGISTER_OFFSET,
253 (u32)rdev->mc.vram_start);
254
255 WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC1_REGISTER_OFFSET,
256 upper_32_bits(rdev->mc.vram_start));
257 WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC1_REGISTER_OFFSET,
258 upper_32_bits(rdev->mc.vram_start));
259 WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC1_REGISTER_OFFSET,
260 (u32)rdev->mc.vram_start);
261 WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC1_REGISTER_OFFSET,
262 (u32)rdev->mc.vram_start);
263
264 WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC2_REGISTER_OFFSET,
265 upper_32_bits(rdev->mc.vram_start));
266 WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC2_REGISTER_OFFSET,
267 upper_32_bits(rdev->mc.vram_start));
268 WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC2_REGISTER_OFFSET,
269 (u32)rdev->mc.vram_start);
270 WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC2_REGISTER_OFFSET,
271 (u32)rdev->mc.vram_start);
272
273 WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC3_REGISTER_OFFSET,
274 upper_32_bits(rdev->mc.vram_start));
275 WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC3_REGISTER_OFFSET,
276 upper_32_bits(rdev->mc.vram_start));
277 WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC3_REGISTER_OFFSET,
278 (u32)rdev->mc.vram_start);
279 WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC3_REGISTER_OFFSET,
280 (u32)rdev->mc.vram_start);
281
282 WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC4_REGISTER_OFFSET,
283 upper_32_bits(rdev->mc.vram_start));
284 WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC4_REGISTER_OFFSET,
285 upper_32_bits(rdev->mc.vram_start));
286 WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC4_REGISTER_OFFSET,
287 (u32)rdev->mc.vram_start);
288 WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC4_REGISTER_OFFSET,
289 (u32)rdev->mc.vram_start);
290
291 WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC5_REGISTER_OFFSET,
292 upper_32_bits(rdev->mc.vram_start));
293 WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC5_REGISTER_OFFSET,
294 upper_32_bits(rdev->mc.vram_start));
295 WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC5_REGISTER_OFFSET,
296 (u32)rdev->mc.vram_start);
297 WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC5_REGISTER_OFFSET,
298 (u32)rdev->mc.vram_start);
299
300 WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(rdev->mc.vram_start));
301 WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS, (u32)rdev->mc.vram_start);
302 /* Unlock host access */
303 WREG32(VGA_HDP_CONTROL, save->vga_hdp_control);
304 mdelay(1);
305 /* Restore video state */
306 WREG32(D1VGA_CONTROL, save->vga_control[0]);
307 WREG32(D2VGA_CONTROL, save->vga_control[1]);
308 WREG32(EVERGREEN_D3VGA_CONTROL, save->vga_control[2]);
309 WREG32(EVERGREEN_D4VGA_CONTROL, save->vga_control[3]);
310 WREG32(EVERGREEN_D5VGA_CONTROL, save->vga_control[4]);
311 WREG32(EVERGREEN_D6VGA_CONTROL, save->vga_control[5]);
312 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 1);
313 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 1);
314 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 1);
315 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 1);
316 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 1);
317 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 1);
318 WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, save->crtc_control[0]);
319 WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, save->crtc_control[1]);
320 WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, save->crtc_control[2]);
321 WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, save->crtc_control[3]);
322 WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, save->crtc_control[4]);
323 WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, save->crtc_control[5]);
324 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
325 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
326 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
327 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
328 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
329 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
330 WREG32(VGA_RENDER_CONTROL, save->vga_render_control);
331}
332
333static void evergreen_mc_program(struct radeon_device *rdev)
334{
335 struct evergreen_mc_save save;
336 u32 tmp;
337 int i, j;
338
339 /* Initialize HDP */
340 for (i = 0, j = 0; i < 32; i++, j += 0x18) {
341 WREG32((0x2c14 + j), 0x00000000);
342 WREG32((0x2c18 + j), 0x00000000);
343 WREG32((0x2c1c + j), 0x00000000);
344 WREG32((0x2c20 + j), 0x00000000);
345 WREG32((0x2c24 + j), 0x00000000);
346 }
347 WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);
348
349 evergreen_mc_stop(rdev, &save);
350 if (evergreen_mc_wait_for_idle(rdev)) {
351 		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
352 }
353 	/* Lock out access through the VGA aperture */
354 WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
355 /* Update configuration */
356 if (rdev->flags & RADEON_IS_AGP) {
357 if (rdev->mc.vram_start < rdev->mc.gtt_start) {
358 /* VRAM before AGP */
359 WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
360 rdev->mc.vram_start >> 12);
361 WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
362 rdev->mc.gtt_end >> 12);
363 } else {
364 /* VRAM after AGP */
365 WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
366 rdev->mc.gtt_start >> 12);
367 WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
368 rdev->mc.vram_end >> 12);
369 }
370 } else {
371 WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
372 rdev->mc.vram_start >> 12);
373 WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
374 rdev->mc.vram_end >> 12);
375 }
376 WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0);
377 tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
378 tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
379 WREG32(MC_VM_FB_LOCATION, tmp);
380 WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
381 WREG32(HDP_NONSURFACE_INFO, (2 << 7));
382 WREG32(HDP_NONSURFACE_SIZE, (rdev->mc.mc_vram_size - 1) | 0x3FF);
383 if (rdev->flags & RADEON_IS_AGP) {
384 WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 16);
385 WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 16);
386 WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22);
387 } else {
388 WREG32(MC_VM_AGP_BASE, 0);
389 WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
390 WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
391 }
392 if (evergreen_mc_wait_for_idle(rdev)) {
393 		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
394 }
395 evergreen_mc_resume(rdev, &save);
396 	/* we need to own VRAM, so turn off the VGA renderer here
397 	 * to stop it from overwriting our objects */
398 rv515_vga_render_disable(rdev);
399}
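
evergreen_mc_program packs the framebuffer range into MC_VM_FB_LOCATION as two 16-bit fields in 16 MB units: the end address in the high half, the start address in the low half. A small standalone illustration of that packing (the helper name is made up for the example):

#include <stdint.h>
#include <stdio.h>

/* Pack a VRAM range into the MC_VM_FB_LOCATION layout used above:
 * bits 31:16 hold end >> 24, bits 15:0 hold start >> 24, i.e. both
 * bounds are expressed in 16 MB units. */
static uint32_t pack_fb_location(uint64_t vram_start, uint64_t vram_end)
{
	uint32_t tmp = ((vram_end >> 24) & 0xFFFF) << 16;

	tmp |= (vram_start >> 24) & 0xFFFF;
	return tmp;
}

int main(void)
{
	/* e.g. 512 MB of VRAM based at address 0 */
	uint64_t end = 512ULL * 1024 * 1024 - 1;

	printf("MC_VM_FB_LOCATION = 0x%08x\n",
	       (unsigned int)pack_fb_location(0, end));
	return 0;
}
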
400
401#if 0
402/*
403 * CP.
404 */
405static void evergreen_cp_stop(struct radeon_device *rdev)
406{
407 /* XXX */
408}
409
410
411static int evergreen_cp_load_microcode(struct radeon_device *rdev)
412{
413 /* XXX */
414
415 return 0;
416}
417
418
419/*
420 * Core functions
421 */
422static u32 evergreen_get_tile_pipe_to_backend_map(u32 num_tile_pipes,
423 u32 num_backends,
424 u32 backend_disable_mask)
425{
426 u32 backend_map = 0;
427
428 return backend_map;
429}
430#endif
431
432static void evergreen_gpu_init(struct radeon_device *rdev)
433{
434 /* XXX */
435}
436
437int evergreen_mc_init(struct radeon_device *rdev)
438{
439 fixed20_12 a;
440 u32 tmp;
441 int chansize, numchan;
442
443 	/* Get VRAM information */
444 rdev->mc.vram_is_ddr = true;
445 tmp = RREG32(MC_ARB_RAMCFG);
446 if (tmp & CHANSIZE_OVERRIDE) {
447 chansize = 16;
448 } else if (tmp & CHANSIZE_MASK) {
449 chansize = 64;
450 } else {
451 chansize = 32;
452 }
453 tmp = RREG32(MC_SHARED_CHMAP);
454 switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
455 case 0:
456 default:
457 numchan = 1;
458 break;
459 case 1:
460 numchan = 2;
461 break;
462 case 2:
463 numchan = 4;
464 break;
465 case 3:
466 numchan = 8;
467 break;
468 }
469 rdev->mc.vram_width = numchan * chansize;
470 	/* Could the aperture size report 0? */
471 rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
472 rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
473 /* Setup GPU memory space */
474 /* size in MB on evergreen */
475 rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
476 rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
477 rdev->mc.visible_vram_size = rdev->mc.aper_size;
478 /* FIXME remove this once we support unmappable VRAM */
479 if (rdev->mc.mc_vram_size > rdev->mc.aper_size) {
480 rdev->mc.mc_vram_size = rdev->mc.aper_size;
481 rdev->mc.real_vram_size = rdev->mc.aper_size;
482 }
483 r600_vram_gtt_location(rdev, &rdev->mc);
484 /* FIXME: we should enforce default clock in case GPU is not in
485 * default setup
486 */
487 a.full = rfixed_const(100);
488 rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk);
489 rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
490 return 0;
491}
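
evergreen_mc_init derives the memory bus width as channel count times channel size, with the channel count decoded from the NOOFCHAN field of MC_SHARED_CHMAP. A compact standalone version of that decode, assuming the field layout shown in the switch above:

#include <stdint.h>
#include <stdio.h>

/* Map the 2-bit NOOFCHAN field to a channel count as in the switch
 * above, then multiply by the per-channel width in bits. */
static int vram_width_bits(uint32_t noofchan_field, int chansize)
{
	static const int numchan[4] = { 1, 2, 4, 8 };

	return numchan[noofchan_field & 0x3] * chansize;
}

int main(void)
{
	/* field value 2 means 4 channels; with 32-bit channels -> 128 bits */
	printf("%d-bit bus\n", vram_width_bits(2, 32));
	return 0;
}
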
492
493int evergreen_gpu_reset(struct radeon_device *rdev)
494{
495 /* FIXME: implement for evergreen */
496 return 0;
497}
498
499static int evergreen_startup(struct radeon_device *rdev)
500{
501#if 0
502 int r;
503
504 if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
505 r = r600_init_microcode(rdev);
506 if (r) {
507 DRM_ERROR("Failed to load firmware!\n");
508 return r;
509 }
510 }
511#endif
512 evergreen_mc_program(rdev);
513#if 0
514 if (rdev->flags & RADEON_IS_AGP) {
515 		evergreen_agp_enable(rdev);
516 } else {
517 r = evergreen_pcie_gart_enable(rdev);
518 if (r)
519 return r;
520 }
521#endif
522 evergreen_gpu_init(rdev);
523#if 0
524 if (!rdev->r600_blit.shader_obj) {
525 r = r600_blit_init(rdev);
526 if (r) {
527 DRM_ERROR("radeon: failed blitter (%d).\n", r);
528 return r;
529 }
530 }
531
532 r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
533 if (unlikely(r != 0))
534 return r;
535 r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
536 &rdev->r600_blit.shader_gpu_addr);
537 radeon_bo_unreserve(rdev->r600_blit.shader_obj);
538 if (r) {
539 DRM_ERROR("failed to pin blit object %d\n", r);
540 return r;
541 }
542
543 /* Enable IRQ */
544 r = r600_irq_init(rdev);
545 if (r) {
546 DRM_ERROR("radeon: IH init failed (%d).\n", r);
547 radeon_irq_kms_fini(rdev);
548 return r;
549 }
550 r600_irq_set(rdev);
551
552 r = radeon_ring_init(rdev, rdev->cp.ring_size);
553 if (r)
554 return r;
555 r = evergreen_cp_load_microcode(rdev);
556 if (r)
557 return r;
558 r = r600_cp_resume(rdev);
559 if (r)
560 return r;
561 	/* write-back buffers are not vital, so don't worry about failure */
562 r600_wb_enable(rdev);
563#endif
564 return 0;
565}
566
567int evergreen_resume(struct radeon_device *rdev)
568{
569 int r;
570
571 	/* Do not reset the GPU before posting; on rv770 hw, unlike r500 hw,
572 	 * posting performs the tasks needed to bring the GPU back into good
573 	 * shape.
574 	 */
575 /* post card */
576 atom_asic_init(rdev->mode_info.atom_context);
577 /* Initialize clocks */
578 r = radeon_clocks_init(rdev);
579 if (r) {
580 return r;
581 }
582
583 r = evergreen_startup(rdev);
584 if (r) {
585 		DRM_ERROR("evergreen startup failed on resume\n");
586 return r;
587 }
588#if 0
589 r = r600_ib_test(rdev);
590 if (r) {
591 		DRM_ERROR("radeon: failed testing IB (%d).\n", r);
592 return r;
593 }
594#endif
595 return r;
596
597}
598
599int evergreen_suspend(struct radeon_device *rdev)
600{
601#if 0
602 int r;
603
604 /* FIXME: we should wait for ring to be empty */
605 r700_cp_stop(rdev);
606 rdev->cp.ready = false;
607 r600_wb_disable(rdev);
608 evergreen_pcie_gart_disable(rdev);
609 /* unpin shaders bo */
610 r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
611 if (likely(r == 0)) {
612 radeon_bo_unpin(rdev->r600_blit.shader_obj);
613 radeon_bo_unreserve(rdev->r600_blit.shader_obj);
614 }
615#endif
616 return 0;
617}
618
619static bool evergreen_card_posted(struct radeon_device *rdev)
620{
621 u32 reg;
622
623 /* first check CRTCs */
624 reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
625 RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET) |
626 RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) |
627 RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET) |
628 RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) |
629 RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
630 if (reg & EVERGREEN_CRTC_MASTER_EN)
631 return true;
632
633 /* then check MEM_SIZE, in case the crtcs are off */
634 if (RREG32(CONFIG_MEMSIZE))
635 return true;
636
637 return false;
638}
639
640/* The plan is to move initialization into this function and use
641 * helper functions so that radeon_device_init does little more
642 * than call ASIC-specific functions. This should also allow
643 * us to remove a bunch of callback functions
644 * like vram_info.
645 */
646int evergreen_init(struct radeon_device *rdev)
647{
648 int r;
649
650 r = radeon_dummy_page_init(rdev);
651 if (r)
652 return r;
653 	/* This doesn't do much */
654 r = radeon_gem_init(rdev);
655 if (r)
656 return r;
657 /* Read BIOS */
658 if (!radeon_get_bios(rdev)) {
659 if (ASIC_IS_AVIVO(rdev))
660 return -EINVAL;
661 }
662 /* Must be an ATOMBIOS */
663 if (!rdev->is_atom_bios) {
664 		dev_err(rdev->dev, "Expecting atombios for evergreen GPU\n");
665 return -EINVAL;
666 }
667 r = radeon_atombios_init(rdev);
668 if (r)
669 return r;
670 /* Post card if necessary */
671 if (!evergreen_card_posted(rdev)) {
672 if (!rdev->bios) {
673 dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
674 return -EINVAL;
675 }
676 		DRM_INFO("GPU not posted. Posting now...\n");
677 atom_asic_init(rdev->mode_info.atom_context);
678 }
679 /* Initialize scratch registers */
680 r600_scratch_init(rdev);
681 /* Initialize surface registers */
682 radeon_surface_init(rdev);
683 /* Initialize clocks */
684 radeon_get_clock_info(rdev->ddev);
685 r = radeon_clocks_init(rdev);
686 if (r)
687 return r;
688 /* Initialize power management */
689 radeon_pm_init(rdev);
690 /* Fence driver */
691 r = radeon_fence_driver_init(rdev);
692 if (r)
693 return r;
694 /* initialize AGP */
695 if (rdev->flags & RADEON_IS_AGP) {
696 r = radeon_agp_init(rdev);
697 if (r)
698 radeon_agp_disable(rdev);
699 }
700 /* initialize memory controller */
701 r = evergreen_mc_init(rdev);
702 if (r)
703 return r;
704 /* Memory manager */
705 r = radeon_bo_init(rdev);
706 if (r)
707 return r;
708#if 0
709 r = radeon_irq_kms_init(rdev);
710 if (r)
711 return r;
712
713 rdev->cp.ring_obj = NULL;
714 r600_ring_init(rdev, 1024 * 1024);
715
716 rdev->ih.ring_obj = NULL;
717 r600_ih_ring_init(rdev, 64 * 1024);
718
719 r = r600_pcie_gart_init(rdev);
720 if (r)
721 return r;
722#endif
723 rdev->accel_working = false;
724 r = evergreen_startup(rdev);
725 if (r) {
726 evergreen_suspend(rdev);
727 /*r600_wb_fini(rdev);*/
728 /*radeon_ring_fini(rdev);*/
729 /*evergreen_pcie_gart_fini(rdev);*/
730 rdev->accel_working = false;
731 }
732 if (rdev->accel_working) {
733 r = radeon_ib_pool_init(rdev);
734 if (r) {
735 DRM_ERROR("radeon: failed initializing IB pool (%d).\n", r);
736 rdev->accel_working = false;
737 }
738 r = r600_ib_test(rdev);
739 if (r) {
740 DRM_ERROR("radeon: failed testing IB (%d).\n", r);
741 rdev->accel_working = false;
742 }
743 }
744 return 0;
745}
746
747void evergreen_fini(struct radeon_device *rdev)
748{
749 evergreen_suspend(rdev);
750#if 0
751 r600_blit_fini(rdev);
752 r600_irq_fini(rdev);
753 radeon_irq_kms_fini(rdev);
754 radeon_ring_fini(rdev);
755 r600_wb_fini(rdev);
756 evergreen_pcie_gart_fini(rdev);
757#endif
758 radeon_gem_fini(rdev);
759 radeon_fence_driver_fini(rdev);
760 radeon_clocks_fini(rdev);
761 radeon_agp_fini(rdev);
762 radeon_bo_fini(rdev);
763 radeon_atombios_fini(rdev);
764 kfree(rdev->bios);
765 rdev->bios = NULL;
766 radeon_dummy_page_fini(rdev);
767}
diff --git a/drivers/gpu/drm/radeon/evergreen_reg.h b/drivers/gpu/drm/radeon/evergreen_reg.h
new file mode 100644
index 000000000000..f7c7c9643433
--- /dev/null
+++ b/drivers/gpu/drm/radeon/evergreen_reg.h
@@ -0,0 +1,176 @@
1/*
2 * Copyright 2010 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Alex Deucher
23 */
24#ifndef __EVERGREEN_REG_H__
25#define __EVERGREEN_REG_H__
26
27/* evergreen */
28#define EVERGREEN_VGA_MEMORY_BASE_ADDRESS 0x310
29#define EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH 0x324
30#define EVERGREEN_D3VGA_CONTROL 0x3e0
31#define EVERGREEN_D4VGA_CONTROL 0x3e4
32#define EVERGREEN_D5VGA_CONTROL 0x3e8
33#define EVERGREEN_D6VGA_CONTROL 0x3ec
34
35#define EVERGREEN_P1PLL_SS_CNTL 0x414
36#define EVERGREEN_P2PLL_SS_CNTL 0x454
37# define EVERGREEN_PxPLL_SS_EN (1 << 12)
38/* GRPH blocks at 0x6800, 0x7400, 0x10000, 0x10c00, 0x11800, 0x12400 */
39#define EVERGREEN_GRPH_ENABLE 0x6800
40#define EVERGREEN_GRPH_CONTROL 0x6804
41# define EVERGREEN_GRPH_DEPTH(x) (((x) & 0x3) << 0)
42# define EVERGREEN_GRPH_DEPTH_8BPP 0
43# define EVERGREEN_GRPH_DEPTH_16BPP 1
44# define EVERGREEN_GRPH_DEPTH_32BPP 2
45# define EVERGREEN_GRPH_FORMAT(x) (((x) & 0x7) << 8)
46/* 8 BPP */
47# define EVERGREEN_GRPH_FORMAT_INDEXED 0
48/* 16 BPP */
49# define EVERGREEN_GRPH_FORMAT_ARGB1555 0
50# define EVERGREEN_GRPH_FORMAT_ARGB565 1
51# define EVERGREEN_GRPH_FORMAT_ARGB4444 2
52# define EVERGREEN_GRPH_FORMAT_AI88 3
53# define EVERGREEN_GRPH_FORMAT_MONO16 4
54# define EVERGREEN_GRPH_FORMAT_BGRA5551 5
55/* 32 BPP */
56# define EVERGREEN_GRPH_FORMAT_ARGB8888 0
57# define EVERGREEN_GRPH_FORMAT_ARGB2101010 1
58# define EVERGREEN_GRPH_FORMAT_32BPP_DIG 2
59# define EVERGREEN_GRPH_FORMAT_8B_ARGB2101010 3
60# define EVERGREEN_GRPH_FORMAT_BGRA1010102 4
61# define EVERGREEN_GRPH_FORMAT_8B_BGRA1010102 5
62# define EVERGREEN_GRPH_FORMAT_RGB111110 6
63# define EVERGREEN_GRPH_FORMAT_BGR101111 7
64#define EVERGREEN_GRPH_SWAP_CONTROL 0x680c
65# define EVERGREEN_GRPH_ENDIAN_SWAP(x) (((x) & 0x3) << 0)
66# define EVERGREEN_GRPH_ENDIAN_NONE 0
67# define EVERGREEN_GRPH_ENDIAN_8IN16 1
68# define EVERGREEN_GRPH_ENDIAN_8IN32 2
69# define EVERGREEN_GRPH_ENDIAN_8IN64 3
70# define EVERGREEN_GRPH_RED_CROSSBAR(x) (((x) & 0x3) << 4)
71# define EVERGREEN_GRPH_RED_SEL_R 0
72# define EVERGREEN_GRPH_RED_SEL_G 1
73# define EVERGREEN_GRPH_RED_SEL_B 2
74# define EVERGREEN_GRPH_RED_SEL_A 3
75# define EVERGREEN_GRPH_GREEN_CROSSBAR(x) (((x) & 0x3) << 6)
76# define EVERGREEN_GRPH_GREEN_SEL_G 0
77# define EVERGREEN_GRPH_GREEN_SEL_B 1
78# define EVERGREEN_GRPH_GREEN_SEL_A 2
79# define EVERGREEN_GRPH_GREEN_SEL_R 3
80# define EVERGREEN_GRPH_BLUE_CROSSBAR(x) (((x) & 0x3) << 8)
81# define EVERGREEN_GRPH_BLUE_SEL_B 0
82# define EVERGREEN_GRPH_BLUE_SEL_A 1
83# define EVERGREEN_GRPH_BLUE_SEL_R 2
84# define EVERGREEN_GRPH_BLUE_SEL_G 3
85# define EVERGREEN_GRPH_ALPHA_CROSSBAR(x) (((x) & 0x3) << 10)
86# define EVERGREEN_GRPH_ALPHA_SEL_A 0
87# define EVERGREEN_GRPH_ALPHA_SEL_R 1
88# define EVERGREEN_GRPH_ALPHA_SEL_G 2
89# define EVERGREEN_GRPH_ALPHA_SEL_B 3
90#define EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS 0x6810
91#define EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS 0x6814
92# define EVERGREEN_GRPH_DFQ_ENABLE (1 << 0)
93# define EVERGREEN_GRPH_SURFACE_ADDRESS_MASK 0xffffff00
94#define EVERGREEN_GRPH_PITCH 0x6818
95#define EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x681c
96#define EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH 0x6820
97#define EVERGREEN_GRPH_SURFACE_OFFSET_X 0x6824
98#define EVERGREEN_GRPH_SURFACE_OFFSET_Y 0x6828
99#define EVERGREEN_GRPH_X_START 0x682c
100#define EVERGREEN_GRPH_Y_START 0x6830
101#define EVERGREEN_GRPH_X_END 0x6834
102#define EVERGREEN_GRPH_Y_END 0x6838
103
104/* CUR blocks at 0x6998, 0x7598, 0x10198, 0x10d98, 0x11998, 0x12598 */
105#define EVERGREEN_CUR_CONTROL 0x6998
106# define EVERGREEN_CURSOR_EN (1 << 0)
107# define EVERGREEN_CURSOR_MODE(x) (((x) & 0x3) << 8)
108# define EVERGREEN_CURSOR_MONO 0
109# define EVERGREEN_CURSOR_24_1 1
110# define EVERGREEN_CURSOR_24_8_PRE_MULT 2
111# define EVERGREEN_CURSOR_24_8_UNPRE_MULT 3
112# define EVERGREEN_CURSOR_2X_MAGNIFY (1 << 16)
113# define EVERGREEN_CURSOR_FORCE_MC_ON (1 << 20)
114# define EVERGREEN_CURSOR_URGENT_CONTROL(x) (((x) & 0x7) << 24)
115# define EVERGREEN_CURSOR_URGENT_ALWAYS 0
116# define EVERGREEN_CURSOR_URGENT_1_8 1
117# define EVERGREEN_CURSOR_URGENT_1_4 2
118# define EVERGREEN_CURSOR_URGENT_3_8 3
119# define EVERGREEN_CURSOR_URGENT_1_2 4
120#define EVERGREEN_CUR_SURFACE_ADDRESS 0x699c
121# define EVERGREEN_CUR_SURFACE_ADDRESS_MASK 0xfffff000
122#define EVERGREEN_CUR_SIZE 0x69a0
123#define EVERGREEN_CUR_SURFACE_ADDRESS_HIGH 0x69a4
124#define EVERGREEN_CUR_POSITION 0x69a8
125#define EVERGREEN_CUR_HOT_SPOT 0x69ac
126#define EVERGREEN_CUR_COLOR1 0x69b0
127#define EVERGREEN_CUR_COLOR2 0x69b4
128#define EVERGREEN_CUR_UPDATE 0x69b8
129# define EVERGREEN_CURSOR_UPDATE_PENDING (1 << 0)
130# define EVERGREEN_CURSOR_UPDATE_TAKEN (1 << 1)
131# define EVERGREEN_CURSOR_UPDATE_LOCK (1 << 16)
132# define EVERGREEN_CURSOR_DISABLE_MULTIPLE_UPDATE (1 << 24)
133
134/* LUT blocks at 0x69e0, 0x75e0, 0x101e0, 0x10de0, 0x119e0, 0x125e0 */
135#define EVERGREEN_DC_LUT_RW_MODE 0x69e0
136#define EVERGREEN_DC_LUT_RW_INDEX 0x69e4
137#define EVERGREEN_DC_LUT_SEQ_COLOR 0x69e8
138#define EVERGREEN_DC_LUT_PWL_DATA 0x69ec
139#define EVERGREEN_DC_LUT_30_COLOR 0x69f0
140#define EVERGREEN_DC_LUT_VGA_ACCESS_ENABLE 0x69f4
141#define EVERGREEN_DC_LUT_WRITE_EN_MASK 0x69f8
142#define EVERGREEN_DC_LUT_AUTOFILL 0x69fc
143#define EVERGREEN_DC_LUT_CONTROL 0x6a00
144#define EVERGREEN_DC_LUT_BLACK_OFFSET_BLUE 0x6a04
145#define EVERGREEN_DC_LUT_BLACK_OFFSET_GREEN 0x6a08
146#define EVERGREEN_DC_LUT_BLACK_OFFSET_RED 0x6a0c
147#define EVERGREEN_DC_LUT_WHITE_OFFSET_BLUE 0x6a10
148#define EVERGREEN_DC_LUT_WHITE_OFFSET_GREEN 0x6a14
149#define EVERGREEN_DC_LUT_WHITE_OFFSET_RED 0x6a18
150
151#define EVERGREEN_DATA_FORMAT 0x6b00
152# define EVERGREEN_INTERLEAVE_EN (1 << 0)
153#define EVERGREEN_DESKTOP_HEIGHT 0x6b04
154
155#define EVERGREEN_VIEWPORT_START 0x6d70
156#define EVERGREEN_VIEWPORT_SIZE 0x6d74
157
158/* display controller offsets used for crtc/cur/lut/grph/viewport/etc. */
159#define EVERGREEN_CRTC0_REGISTER_OFFSET (0x6df0 - 0x6df0)
160#define EVERGREEN_CRTC1_REGISTER_OFFSET (0x79f0 - 0x6df0)
161#define EVERGREEN_CRTC2_REGISTER_OFFSET (0x105f0 - 0x6df0)
162#define EVERGREEN_CRTC3_REGISTER_OFFSET (0x111f0 - 0x6df0)
163#define EVERGREEN_CRTC4_REGISTER_OFFSET (0x11df0 - 0x6df0)
164#define EVERGREEN_CRTC5_REGISTER_OFFSET (0x129f0 - 0x6df0)
165
166/* CRTC blocks at 0x6df0, 0x79f0, 0x105f0, 0x111f0, 0x11df0, 0x129f0 */
167#define EVERGREEN_CRTC_CONTROL 0x6e70
168# define EVERGREEN_CRTC_MASTER_EN (1 << 0)
169#define EVERGREEN_CRTC_UPDATE_LOCK 0x6ed4
170
171#define EVERGREEN_DC_GPIO_HPD_MASK 0x64b0
172#define EVERGREEN_DC_GPIO_HPD_A 0x64b4
173#define EVERGREEN_DC_GPIO_HPD_EN 0x64b8
174#define EVERGREEN_DC_GPIO_HPD_Y 0x64bc
175
176#endif
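
The per-CRTC offsets at the end of this header are deltas from the CRTC0 block at 0x6df0, so any display register can be addressed as its CRTC0 address plus a block offset. A short illustration of that addressing scheme, using only values defined above:

#include <stdio.h>

#define EVERGREEN_CRTC_CONTROL 0x6e70

/* Block offsets relative to the CRTC0 block at 0x6df0, per the header. */
static const unsigned int crtc_offsets[6] = {
	0x6df0 - 0x6df0,
	0x79f0 - 0x6df0,
	0x105f0 - 0x6df0,
	0x111f0 - 0x6df0,
	0x11df0 - 0x6df0,
	0x129f0 - 0x6df0,
};

int main(void)
{
	int i;

	/* The driver does WREG32(EVERGREEN_CRTC_CONTROL + offset, ...);
	 * here we only print the resulting absolute addresses. */
	for (i = 0; i < 6; i++)
		printf("CRTC%d control at 0x%05x\n", i,
		       EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
	return 0;
}
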
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index c0d4650cdb79..91eb762eb3f9 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -197,13 +197,13 @@ int r100_pci_gart_enable(struct radeon_device *rdev)
197{ 197{
198 uint32_t tmp; 198 uint32_t tmp;
199 199
200 radeon_gart_restore(rdev);
200 /* discard memory request outside of configured range */ 201 /* discard memory request outside of configured range */
201 tmp = RREG32(RADEON_AIC_CNTL) | RADEON_DIS_OUT_OF_PCI_GART_ACCESS; 202 tmp = RREG32(RADEON_AIC_CNTL) | RADEON_DIS_OUT_OF_PCI_GART_ACCESS;
202 WREG32(RADEON_AIC_CNTL, tmp); 203 WREG32(RADEON_AIC_CNTL, tmp);
203 /* set address range for PCI address translate */ 204 /* set address range for PCI address translate */
204 WREG32(RADEON_AIC_LO_ADDR, rdev->mc.gtt_location); 205 WREG32(RADEON_AIC_LO_ADDR, rdev->mc.gtt_start);
205 tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1; 206 WREG32(RADEON_AIC_HI_ADDR, rdev->mc.gtt_end);
206 WREG32(RADEON_AIC_HI_ADDR, tmp);
207 /* set PCI GART page-table base address */ 207 /* set PCI GART page-table base address */
208 WREG32(RADEON_AIC_PT_BASE, rdev->gart.table_addr); 208 WREG32(RADEON_AIC_PT_BASE, rdev->gart.table_addr);
209 tmp = RREG32(RADEON_AIC_CNTL) | RADEON_PCIGART_TRANSLATE_EN; 209 tmp = RREG32(RADEON_AIC_CNTL) | RADEON_PCIGART_TRANSLATE_EN;
@@ -312,9 +312,11 @@ int r100_irq_process(struct radeon_device *rdev)
312 /* Vertical blank interrupts */ 312 /* Vertical blank interrupts */
313 if (status & RADEON_CRTC_VBLANK_STAT) { 313 if (status & RADEON_CRTC_VBLANK_STAT) {
314 drm_handle_vblank(rdev->ddev, 0); 314 drm_handle_vblank(rdev->ddev, 0);
315 wake_up(&rdev->irq.vblank_queue);
315 } 316 }
316 if (status & RADEON_CRTC2_VBLANK_STAT) { 317 if (status & RADEON_CRTC2_VBLANK_STAT) {
317 drm_handle_vblank(rdev->ddev, 1); 318 drm_handle_vblank(rdev->ddev, 1);
319 wake_up(&rdev->irq.vblank_queue);
318 } 320 }
319 if (status & RADEON_FP_DETECT_STAT) { 321 if (status & RADEON_FP_DETECT_STAT) {
320 queue_hotplug = true; 322 queue_hotplug = true;
@@ -366,8 +368,8 @@ void r100_fence_ring_emit(struct radeon_device *rdev,
366 radeon_ring_write(rdev, PACKET0(RADEON_RB3D_ZCACHE_CTLSTAT, 0)); 368 radeon_ring_write(rdev, PACKET0(RADEON_RB3D_ZCACHE_CTLSTAT, 0));
367 radeon_ring_write(rdev, RADEON_RB3D_ZC_FLUSH_ALL); 369 radeon_ring_write(rdev, RADEON_RB3D_ZC_FLUSH_ALL);
368 /* Wait until IDLE & CLEAN */ 370 /* Wait until IDLE & CLEAN */
369 radeon_ring_write(rdev, PACKET0(0x1720, 0)); 371 radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
370 radeon_ring_write(rdev, (1 << 16) | (1 << 17)); 372 radeon_ring_write(rdev, RADEON_WAIT_2D_IDLECLEAN | RADEON_WAIT_3D_IDLECLEAN);
371 radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0)); 373 radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
372 radeon_ring_write(rdev, rdev->config.r100.hdp_cntl | 374 radeon_ring_write(rdev, rdev->config.r100.hdp_cntl |
373 RADEON_HDP_READ_BUFFER_INVALIDATE); 375 RADEON_HDP_READ_BUFFER_INVALIDATE);
@@ -1701,7 +1703,7 @@ int r100_gui_wait_for_idle(struct radeon_device *rdev)
1701 } 1703 }
1702 for (i = 0; i < rdev->usec_timeout; i++) { 1704 for (i = 0; i < rdev->usec_timeout; i++) {
1703 tmp = RREG32(RADEON_RBBM_STATUS); 1705 tmp = RREG32(RADEON_RBBM_STATUS);
1704 if (!(tmp & (1 << 31))) { 1706 if (!(tmp & RADEON_RBBM_ACTIVE)) {
1705 return 0; 1707 return 0;
1706 } 1708 }
1707 DRM_UDELAY(1); 1709 DRM_UDELAY(1);
@@ -1716,8 +1718,8 @@ int r100_mc_wait_for_idle(struct radeon_device *rdev)
1716 1718
1717 for (i = 0; i < rdev->usec_timeout; i++) { 1719 for (i = 0; i < rdev->usec_timeout; i++) {
1718 /* read MC_STATUS */ 1720 /* read MC_STATUS */
1719 tmp = RREG32(0x0150); 1721 tmp = RREG32(RADEON_MC_STATUS);
1720 if (tmp & (1 << 2)) { 1722 if (tmp & RADEON_MC_IDLE) {
1721 return 0; 1723 return 0;
1722 } 1724 }
1723 DRM_UDELAY(1); 1725 DRM_UDELAY(1);
@@ -1790,7 +1792,7 @@ int r100_gpu_reset(struct radeon_device *rdev)
1790 } 1792 }
1791 /* Check if GPU is idle */ 1793 /* Check if GPU is idle */
1792 status = RREG32(RADEON_RBBM_STATUS); 1794 status = RREG32(RADEON_RBBM_STATUS);
1793 if (status & (1 << 31)) { 1795 if (status & RADEON_RBBM_ACTIVE) {
1794 DRM_ERROR("Failed to reset GPU (RBBM_STATUS=0x%08X)\n", status); 1796 DRM_ERROR("Failed to reset GPU (RBBM_STATUS=0x%08X)\n", status);
1795 return -1; 1797 return -1;
1796 } 1798 }
@@ -1800,6 +1802,9 @@ int r100_gpu_reset(struct radeon_device *rdev)
1800 1802
1801void r100_set_common_regs(struct radeon_device *rdev) 1803void r100_set_common_regs(struct radeon_device *rdev)
1802{ 1804{
1805 struct drm_device *dev = rdev->ddev;
1806 bool force_dac2 = false;
1807
1803 /* set these so they don't interfere with anything */ 1808 /* set these so they don't interfere with anything */
1804 WREG32(RADEON_OV0_SCALE_CNTL, 0); 1809 WREG32(RADEON_OV0_SCALE_CNTL, 0);
1805 WREG32(RADEON_SUBPIC_CNTL, 0); 1810 WREG32(RADEON_SUBPIC_CNTL, 0);
@@ -1808,6 +1813,68 @@ void r100_set_common_regs(struct radeon_device *rdev)
1808 WREG32(RADEON_DVI_I2C_CNTL_1, 0); 1813 WREG32(RADEON_DVI_I2C_CNTL_1, 0);
1809 WREG32(RADEON_CAP0_TRIG_CNTL, 0); 1814 WREG32(RADEON_CAP0_TRIG_CNTL, 0);
1810 WREG32(RADEON_CAP1_TRIG_CNTL, 0); 1815 WREG32(RADEON_CAP1_TRIG_CNTL, 0);
1816
1817 /* always set up dac2 on rn50 and some rv100 as lots
1818 * of servers seem to wire it up to a VGA port but
1819 * don't report it in the bios connector
1820 * table.
1821 */
1822 switch (dev->pdev->device) {
1823 /* RN50 */
1824 case 0x515e:
1825 case 0x5969:
1826 force_dac2 = true;
1827 break;
1828 /* RV100*/
1829 case 0x5159:
1830 case 0x515a:
1831 /* DELL triple head servers */
1832 if ((dev->pdev->subsystem_vendor == 0x1028 /* DELL */) &&
1833 ((dev->pdev->subsystem_device == 0x016c) ||
1834 (dev->pdev->subsystem_device == 0x016d) ||
1835 (dev->pdev->subsystem_device == 0x016e) ||
1836 (dev->pdev->subsystem_device == 0x016f) ||
1837 (dev->pdev->subsystem_device == 0x0170) ||
1838 (dev->pdev->subsystem_device == 0x017d) ||
1839 (dev->pdev->subsystem_device == 0x017e) ||
1840 (dev->pdev->subsystem_device == 0x0183) ||
1841 (dev->pdev->subsystem_device == 0x018a) ||
1842 (dev->pdev->subsystem_device == 0x019a)))
1843 force_dac2 = true;
1844 break;
1845 }
1846
1847 if (force_dac2) {
1848 u32 disp_hw_debug = RREG32(RADEON_DISP_HW_DEBUG);
1849 u32 tv_dac_cntl = RREG32(RADEON_TV_DAC_CNTL);
1850 u32 dac2_cntl = RREG32(RADEON_DAC_CNTL2);
1851
1852 		/* For CRT on DAC2, don't turn it on if the BIOS didn't
1853 		 * enable it, even if it's detected.
1854 		 */
1855
1856 /* force it to crtc0 */
1857 dac2_cntl &= ~RADEON_DAC2_DAC_CLK_SEL;
1858 dac2_cntl |= RADEON_DAC2_DAC2_CLK_SEL;
1859 disp_hw_debug |= RADEON_CRT2_DISP1_SEL;
1860
1861 /* set up the TV DAC */
1862 tv_dac_cntl &= ~(RADEON_TV_DAC_PEDESTAL |
1863 RADEON_TV_DAC_STD_MASK |
1864 RADEON_TV_DAC_RDACPD |
1865 RADEON_TV_DAC_GDACPD |
1866 RADEON_TV_DAC_BDACPD |
1867 RADEON_TV_DAC_BGADJ_MASK |
1868 RADEON_TV_DAC_DACADJ_MASK);
1869 tv_dac_cntl |= (RADEON_TV_DAC_NBLANK |
1870 RADEON_TV_DAC_NHOLD |
1871 RADEON_TV_DAC_STD_PS2 |
1872 (0x58 << 16));
1873
1874 WREG32(RADEON_TV_DAC_CNTL, tv_dac_cntl);
1875 WREG32(RADEON_DISP_HW_DEBUG, disp_hw_debug);
1876 WREG32(RADEON_DAC_CNTL2, dac2_cntl);
1877 }
1811} 1878}
1812 1879
1813/* 1880/*
@@ -1889,17 +1956,20 @@ static u32 r100_get_accessible_vram(struct radeon_device *rdev)
1889void r100_vram_init_sizes(struct radeon_device *rdev) 1956void r100_vram_init_sizes(struct radeon_device *rdev)
1890{ 1957{
1891 u64 config_aper_size; 1958 u64 config_aper_size;
1892 u32 accessible;
1893 1959
1960 /* work out accessible VRAM */
1961 rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
1962 rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
1963 rdev->mc.visible_vram_size = r100_get_accessible_vram(rdev);
1964 /* FIXME we don't use the second aperture yet when we could use it */
1965 if (rdev->mc.visible_vram_size > rdev->mc.aper_size)
1966 rdev->mc.visible_vram_size = rdev->mc.aper_size;
1894 config_aper_size = RREG32(RADEON_CONFIG_APER_SIZE); 1967 config_aper_size = RREG32(RADEON_CONFIG_APER_SIZE);
1895
1896 if (rdev->flags & RADEON_IS_IGP) { 1968 if (rdev->flags & RADEON_IS_IGP) {
1897 uint32_t tom; 1969 uint32_t tom;
1898 /* read NB_TOM to get the amount of ram stolen for the GPU */ 1970 /* read NB_TOM to get the amount of ram stolen for the GPU */
1899 tom = RREG32(RADEON_NB_TOM); 1971 tom = RREG32(RADEON_NB_TOM);
1900 rdev->mc.real_vram_size = (((tom >> 16) - (tom & 0xffff) + 1) << 16); 1972 rdev->mc.real_vram_size = (((tom >> 16) - (tom & 0xffff) + 1) << 16);
1901 /* for IGPs we need to keep VRAM where it was put by the BIOS */
1902 rdev->mc.vram_location = (tom & 0xffff) << 16;
1903 WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size); 1973 WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size);
1904 rdev->mc.mc_vram_size = rdev->mc.real_vram_size; 1974 rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
1905 } else { 1975 } else {
@@ -1911,30 +1981,19 @@ void r100_vram_init_sizes(struct radeon_device *rdev)
1911 rdev->mc.real_vram_size = 8192 * 1024; 1981 rdev->mc.real_vram_size = 8192 * 1024;
1912 WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size); 1982 WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size);
1913 } 1983 }
1914 /* let driver place VRAM */ 1984 /* Fix for RN50, M6, M7 with 8/16/32(??) MBs of VRAM -
1915 rdev->mc.vram_location = 0xFFFFFFFFUL; 1985 * Novell bug 204882 + along with lots of ubuntu ones
1916 /* Fix for RN50, M6, M7 with 8/16/32(??) MBs of VRAM - 1986 */
1917 * Novell bug 204882 + along with lots of ubuntu ones */
1918 if (config_aper_size > rdev->mc.real_vram_size) 1987 if (config_aper_size > rdev->mc.real_vram_size)
1919 rdev->mc.mc_vram_size = config_aper_size; 1988 rdev->mc.mc_vram_size = config_aper_size;
1920 else 1989 else
1921 rdev->mc.mc_vram_size = rdev->mc.real_vram_size; 1990 rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
1922 } 1991 }
1923 1992 /* FIXME remove this once we support unmappable VRAM */
1924 /* work out accessible VRAM */ 1993 if (rdev->mc.mc_vram_size > rdev->mc.aper_size) {
1925 accessible = r100_get_accessible_vram(rdev);
1926
1927 rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
1928 rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
1929
1930 if (accessible > rdev->mc.aper_size)
1931 accessible = rdev->mc.aper_size;
1932
1933 if (rdev->mc.mc_vram_size > rdev->mc.aper_size)
1934 rdev->mc.mc_vram_size = rdev->mc.aper_size; 1994 rdev->mc.mc_vram_size = rdev->mc.aper_size;
1935
1936 if (rdev->mc.real_vram_size > rdev->mc.aper_size)
1937 rdev->mc.real_vram_size = rdev->mc.aper_size; 1995 rdev->mc.real_vram_size = rdev->mc.aper_size;
1996 }
1938} 1997}
1939 1998
1940void r100_vga_set_state(struct radeon_device *rdev, bool state) 1999void r100_vga_set_state(struct radeon_device *rdev, bool state)
@@ -1951,11 +2010,18 @@ void r100_vga_set_state(struct radeon_device *rdev, bool state)
1951 WREG32(RADEON_CONFIG_CNTL, temp); 2010 WREG32(RADEON_CONFIG_CNTL, temp);
1952} 2011}
1953 2012
1954void r100_vram_info(struct radeon_device *rdev) 2013void r100_mc_init(struct radeon_device *rdev)
1955{ 2014{
1956 r100_vram_get_type(rdev); 2015 u64 base;
1957 2016
2017 r100_vram_get_type(rdev);
1958 r100_vram_init_sizes(rdev); 2018 r100_vram_init_sizes(rdev);
2019 base = rdev->mc.aper_base;
2020 if (rdev->flags & RADEON_IS_IGP)
2021 base = (RREG32(RADEON_NB_TOM) & 0xffff) << 16;
2022 radeon_vram_location(rdev, &rdev->mc, base);
2023 if (!(rdev->flags & RADEON_IS_AGP))
2024 radeon_gtt_location(rdev, &rdev->mc);
1959} 2025}
1960 2026
1961 2027
@@ -3226,10 +3292,9 @@ void r100_mc_stop(struct radeon_device *rdev, struct r100_mc_save *save)
3226void r100_mc_resume(struct radeon_device *rdev, struct r100_mc_save *save) 3292void r100_mc_resume(struct radeon_device *rdev, struct r100_mc_save *save)
3227{ 3293{
3228 /* Update base address for crtc */ 3294 /* Update base address for crtc */
3229 WREG32(R_00023C_DISPLAY_BASE_ADDR, rdev->mc.vram_location); 3295 WREG32(R_00023C_DISPLAY_BASE_ADDR, rdev->mc.vram_start);
3230 if (!(rdev->flags & RADEON_SINGLE_CRTC)) { 3296 if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
3231 WREG32(R_00033C_CRTC2_DISPLAY_BASE_ADDR, 3297 WREG32(R_00033C_CRTC2_DISPLAY_BASE_ADDR, rdev->mc.vram_start);
3232 rdev->mc.vram_location);
3233 } 3298 }
3234 /* Restore CRTC registers */ 3299 /* Restore CRTC registers */
3235 WREG8(R_0003C2_GENMO_WT, save->GENMO_WT); 3300 WREG8(R_0003C2_GENMO_WT, save->GENMO_WT);
@@ -3390,32 +3455,6 @@ void r100_fini(struct radeon_device *rdev)
3390 rdev->bios = NULL; 3455 rdev->bios = NULL;
3391} 3456}
3392 3457
3393int r100_mc_init(struct radeon_device *rdev)
3394{
3395 int r;
3396 u32 tmp;
3397
3398 /* Setup GPU memory space */
3399 rdev->mc.vram_location = 0xFFFFFFFFUL;
3400 rdev->mc.gtt_location = 0xFFFFFFFFUL;
3401 if (rdev->flags & RADEON_IS_IGP) {
3402 tmp = G_00015C_MC_FB_START(RREG32(R_00015C_NB_TOM));
3403 rdev->mc.vram_location = tmp << 16;
3404 }
3405 if (rdev->flags & RADEON_IS_AGP) {
3406 r = radeon_agp_init(rdev);
3407 if (r) {
3408 radeon_agp_disable(rdev);
3409 } else {
3410 rdev->mc.gtt_location = rdev->mc.agp_base;
3411 }
3412 }
3413 r = radeon_mc_setup(rdev);
3414 if (r)
3415 return r;
3416 return 0;
3417}
3418
3419int r100_init(struct radeon_device *rdev) 3458int r100_init(struct radeon_device *rdev)
3420{ 3459{
3421 int r; 3460 int r;
@@ -3458,12 +3497,15 @@ int r100_init(struct radeon_device *rdev)
3458 radeon_get_clock_info(rdev->ddev); 3497 radeon_get_clock_info(rdev->ddev);
3459 /* Initialize power management */ 3498 /* Initialize power management */
3460 radeon_pm_init(rdev); 3499 radeon_pm_init(rdev);
3461 /* Get vram informations */ 3500 /* initialize AGP */
3462 r100_vram_info(rdev); 3501 if (rdev->flags & RADEON_IS_AGP) {
3463 /* Initialize memory controller (also test AGP) */ 3502 r = radeon_agp_init(rdev);
3464 r = r100_mc_init(rdev); 3503 if (r) {
3465 if (r) 3504 radeon_agp_disable(rdev);
3466 return r; 3505 }
3506 }
3507 /* initialize VRAM */
3508 r100_mc_init(rdev);
3467 /* Fence driver */ 3509 /* Fence driver */
3468 r = radeon_fence_driver_init(rdev); 3510 r = radeon_fence_driver_init(rdev);
3469 if (r) 3511 if (r)
diff --git a/drivers/gpu/drm/radeon/r200.c b/drivers/gpu/drm/radeon/r200.c
index ff1e0cd608bf..1146c9909c2c 100644
--- a/drivers/gpu/drm/radeon/r200.c
+++ b/drivers/gpu/drm/radeon/r200.c
@@ -31,6 +31,7 @@
31#include "radeon_reg.h" 31#include "radeon_reg.h"
32#include "radeon.h" 32#include "radeon.h"
33 33
34#include "r100d.h"
34#include "r200_reg_safe.h" 35#include "r200_reg_safe.h"
35 36
36#include "r100_track.h" 37#include "r100_track.h"
@@ -79,6 +80,51 @@ static int r200_get_vtx_size_0(uint32_t vtx_fmt_0)
79 return vtx_size; 80 return vtx_size;
80} 81}
81 82
83int r200_copy_dma(struct radeon_device *rdev,
84 uint64_t src_offset,
85 uint64_t dst_offset,
86 unsigned num_pages,
87 struct radeon_fence *fence)
88{
89 uint32_t size;
90 uint32_t cur_size;
91 int i, num_loops;
92 int r = 0;
93
94 /* radeon pitch is /64 */
95 size = num_pages << PAGE_SHIFT;
96 num_loops = DIV_ROUND_UP(size, 0x1FFFFF);
97 r = radeon_ring_lock(rdev, num_loops * 4 + 64);
98 if (r) {
99 DRM_ERROR("radeon: moving bo (%d).\n", r);
100 return r;
101 }
102 /* Must wait for 2D idle & clean before DMA or hangs might happen */
103 radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
104 radeon_ring_write(rdev, (1 << 16));
105 for (i = 0; i < num_loops; i++) {
106 cur_size = size;
107 if (cur_size > 0x1FFFFF) {
108 cur_size = 0x1FFFFF;
109 }
110 size -= cur_size;
111 radeon_ring_write(rdev, PACKET0(0x720, 2));
112 radeon_ring_write(rdev, src_offset);
113 radeon_ring_write(rdev, dst_offset);
114 radeon_ring_write(rdev, cur_size | (1 << 31) | (1 << 30));
115 src_offset += cur_size;
116 dst_offset += cur_size;
117 }
118 radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
119 radeon_ring_write(rdev, RADEON_WAIT_DMA_GUI_IDLE);
120 if (fence) {
121 r = radeon_fence_emit(rdev, fence);
122 }
123 radeon_ring_unlock_commit(rdev);
124 return r;
125}
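
r200_copy_dma splits a copy into packets of at most 0x1FFFFF bytes and reserves ring space up front as num_loops * 4 + 64 dwords. A standalone sketch of just that chunking arithmetic, assuming 4 KB pages:

#include <stdio.h>

#define MAX_DMA_CHUNK 0x1FFFFF	/* per-packet copy limit used above */
#define PAGE_SHIFT 12		/* assuming 4 KB pages for the example */

/* Number of DMA packets needed to copy num_pages pages; equivalent to
 * DIV_ROUND_UP(size, 0x1FFFFF) in the driver. */
static unsigned int num_dma_loops(unsigned int num_pages)
{
	unsigned long size = (unsigned long)num_pages << PAGE_SHIFT;

	return (unsigned int)((size + MAX_DMA_CHUNK - 1) / MAX_DMA_CHUNK);
}

int main(void)
{
	unsigned int loops = num_dma_loops(1024);	/* a 4 MB copy */

	printf("loops=%u, ring dwords reserved=%u\n", loops, loops * 4 + 64);
	return 0;
}
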
126
127
82static int r200_get_vtx_size_1(uint32_t vtx_fmt_1) 128static int r200_get_vtx_size_1(uint32_t vtx_fmt_1)
83{ 129{
84 int vtx_size, i, tex_size; 130 int vtx_size, i, tex_size;
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 43b55a030b4d..4cef90cd74e5 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -117,18 +117,19 @@ int rv370_pcie_gart_enable(struct radeon_device *rdev)
117 r = radeon_gart_table_vram_pin(rdev); 117 r = radeon_gart_table_vram_pin(rdev);
118 if (r) 118 if (r)
119 return r; 119 return r;
120 radeon_gart_restore(rdev);
120 /* discard memory request outside of configured range */ 121 /* discard memory request outside of configured range */
121 tmp = RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD; 122 tmp = RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
122 WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp); 123 WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
123 WREG32_PCIE(RADEON_PCIE_TX_GART_START_LO, rdev->mc.gtt_location); 124 WREG32_PCIE(RADEON_PCIE_TX_GART_START_LO, rdev->mc.gtt_start);
124 tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - RADEON_GPU_PAGE_SIZE; 125 tmp = rdev->mc.gtt_end & ~RADEON_GPU_PAGE_MASK;
125 WREG32_PCIE(RADEON_PCIE_TX_GART_END_LO, tmp); 126 WREG32_PCIE(RADEON_PCIE_TX_GART_END_LO, tmp);
126 WREG32_PCIE(RADEON_PCIE_TX_GART_START_HI, 0); 127 WREG32_PCIE(RADEON_PCIE_TX_GART_START_HI, 0);
127 WREG32_PCIE(RADEON_PCIE_TX_GART_END_HI, 0); 128 WREG32_PCIE(RADEON_PCIE_TX_GART_END_HI, 0);
128 table_addr = rdev->gart.table_addr; 129 table_addr = rdev->gart.table_addr;
129 WREG32_PCIE(RADEON_PCIE_TX_GART_BASE, table_addr); 130 WREG32_PCIE(RADEON_PCIE_TX_GART_BASE, table_addr);
130 /* FIXME: setup default page */ 131 /* FIXME: setup default page */
131 WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_LO, rdev->mc.vram_location); 132 WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_LO, rdev->mc.vram_start);
132 WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_HI, 0); 133 WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_HI, 0);
133 /* Clear error */ 134 /* Clear error */
134 WREG32_PCIE(0x18, 0); 135 WREG32_PCIE(0x18, 0);
@@ -174,18 +175,20 @@ void r300_fence_ring_emit(struct radeon_device *rdev,
174 	/* Whoever calls radeon_fence_emit should call ring_lock and ask 175 	/* Whoever calls radeon_fence_emit should call ring_lock and ask
175 	 * for enough space (today the callers are IB scheduling and buffer moves) */ 176 	 * for enough space (today the callers are IB scheduling and buffer moves) */
176 /* Write SC register so SC & US assert idle */ 177 /* Write SC register so SC & US assert idle */
177 radeon_ring_write(rdev, PACKET0(0x43E0, 0)); 178 radeon_ring_write(rdev, PACKET0(R300_RE_SCISSORS_TL, 0));
178 radeon_ring_write(rdev, 0); 179 radeon_ring_write(rdev, 0);
179 radeon_ring_write(rdev, PACKET0(0x43E4, 0)); 180 radeon_ring_write(rdev, PACKET0(R300_RE_SCISSORS_BR, 0));
180 radeon_ring_write(rdev, 0); 181 radeon_ring_write(rdev, 0);
181 /* Flush 3D cache */ 182 /* Flush 3D cache */
182 radeon_ring_write(rdev, PACKET0(0x4E4C, 0)); 183 radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
183 radeon_ring_write(rdev, (2 << 0)); 184 radeon_ring_write(rdev, R300_RB3D_DC_FLUSH);
184 radeon_ring_write(rdev, PACKET0(0x4F18, 0)); 185 radeon_ring_write(rdev, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
185 radeon_ring_write(rdev, (1 << 0)); 186 radeon_ring_write(rdev, R300_ZC_FLUSH);
186 /* Wait until IDLE & CLEAN */ 187 /* Wait until IDLE & CLEAN */
187 radeon_ring_write(rdev, PACKET0(0x1720, 0)); 188 radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
188 radeon_ring_write(rdev, (1 << 17) | (1 << 16) | (1 << 9)); 189 radeon_ring_write(rdev, (RADEON_WAIT_3D_IDLECLEAN |
190 RADEON_WAIT_2D_IDLECLEAN |
191 RADEON_WAIT_DMA_GUI_IDLE));
189 radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0)); 192 radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
190 radeon_ring_write(rdev, rdev->config.r300.hdp_cntl | 193 radeon_ring_write(rdev, rdev->config.r300.hdp_cntl |
191 RADEON_HDP_READ_BUFFER_INVALIDATE); 194 RADEON_HDP_READ_BUFFER_INVALIDATE);
@@ -198,50 +201,6 @@ void r300_fence_ring_emit(struct radeon_device *rdev,
198 radeon_ring_write(rdev, RADEON_SW_INT_FIRE); 201 radeon_ring_write(rdev, RADEON_SW_INT_FIRE);
199} 202}
200 203
201int r300_copy_dma(struct radeon_device *rdev,
202 uint64_t src_offset,
203 uint64_t dst_offset,
204 unsigned num_pages,
205 struct radeon_fence *fence)
206{
207 uint32_t size;
208 uint32_t cur_size;
209 int i, num_loops;
210 int r = 0;
211
212 /* radeon pitch is /64 */
213 size = num_pages << PAGE_SHIFT;
214 num_loops = DIV_ROUND_UP(size, 0x1FFFFF);
215 r = radeon_ring_lock(rdev, num_loops * 4 + 64);
216 if (r) {
217 DRM_ERROR("radeon: moving bo (%d).\n", r);
218 return r;
219 }
220 /* Must wait for 2D idle & clean before DMA or hangs might happen */
221 radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0 ));
222 radeon_ring_write(rdev, (1 << 16));
223 for (i = 0; i < num_loops; i++) {
224 cur_size = size;
225 if (cur_size > 0x1FFFFF) {
226 cur_size = 0x1FFFFF;
227 }
228 size -= cur_size;
229 radeon_ring_write(rdev, PACKET0(0x720, 2));
230 radeon_ring_write(rdev, src_offset);
231 radeon_ring_write(rdev, dst_offset);
232 radeon_ring_write(rdev, cur_size | (1 << 31) | (1 << 30));
233 src_offset += cur_size;
234 dst_offset += cur_size;
235 }
236 radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
237 radeon_ring_write(rdev, RADEON_WAIT_DMA_GUI_IDLE);
238 if (fence) {
239 r = radeon_fence_emit(rdev, fence);
240 }
241 radeon_ring_unlock_commit(rdev);
242 return r;
243}
244
245void r300_ring_start(struct radeon_device *rdev) 204void r300_ring_start(struct radeon_device *rdev)
246{ 205{
247 unsigned gb_tile_config; 206 unsigned gb_tile_config;
@@ -281,8 +240,8 @@ void r300_ring_start(struct radeon_device *rdev)
281 radeon_ring_write(rdev, 240 radeon_ring_write(rdev,
282 RADEON_WAIT_2D_IDLECLEAN | 241 RADEON_WAIT_2D_IDLECLEAN |
283 RADEON_WAIT_3D_IDLECLEAN); 242 RADEON_WAIT_3D_IDLECLEAN);
284 radeon_ring_write(rdev, PACKET0(0x170C, 0)); 243 radeon_ring_write(rdev, PACKET0(R300_DST_PIPE_CONFIG, 0));
285 radeon_ring_write(rdev, 1 << 31); 244 radeon_ring_write(rdev, R300_PIPE_AUTO_CONFIG);
286 radeon_ring_write(rdev, PACKET0(R300_GB_SELECT, 0)); 245 radeon_ring_write(rdev, PACKET0(R300_GB_SELECT, 0));
287 radeon_ring_write(rdev, 0); 246 radeon_ring_write(rdev, 0);
288 radeon_ring_write(rdev, PACKET0(R300_GB_ENABLE, 0)); 247 radeon_ring_write(rdev, PACKET0(R300_GB_ENABLE, 0));
@@ -349,8 +308,8 @@ int r300_mc_wait_for_idle(struct radeon_device *rdev)
349 308
350 for (i = 0; i < rdev->usec_timeout; i++) { 309 for (i = 0; i < rdev->usec_timeout; i++) {
351 /* read MC_STATUS */ 310 /* read MC_STATUS */
352 tmp = RREG32(0x0150); 311 tmp = RREG32(RADEON_MC_STATUS);
353 if (tmp & (1 << 4)) { 312 if (tmp & R300_MC_IDLE) {
354 return 0; 313 return 0;
355 } 314 }
356 DRM_UDELAY(1); 315 DRM_UDELAY(1);
@@ -395,8 +354,8 @@ void r300_gpu_init(struct radeon_device *rdev)
395 "programming pipes. Bad things might happen.\n"); 354 "programming pipes. Bad things might happen.\n");
396 } 355 }
397 356
398 tmp = RREG32(0x170C); 357 tmp = RREG32(R300_DST_PIPE_CONFIG);
399 WREG32(0x170C, tmp | (1 << 31)); 358 WREG32(R300_DST_PIPE_CONFIG, tmp | R300_PIPE_AUTO_CONFIG);
400 359
401 WREG32(R300_RB2D_DSTCACHE_MODE, 360 WREG32(R300_RB2D_DSTCACHE_MODE,
402 R300_DC_AUTOFLUSH_ENABLE | 361 R300_DC_AUTOFLUSH_ENABLE |
@@ -437,8 +396,8 @@ int r300_ga_reset(struct radeon_device *rdev)
437 /* GA still busy soft reset it */ 396 /* GA still busy soft reset it */
438 WREG32(0x429C, 0x200); 397 WREG32(0x429C, 0x200);
439 WREG32(R300_VAP_PVS_STATE_FLUSH_REG, 0); 398 WREG32(R300_VAP_PVS_STATE_FLUSH_REG, 0);
440 WREG32(0x43E0, 0); 399 WREG32(R300_RE_SCISSORS_TL, 0);
441 WREG32(0x43E4, 0); 400 WREG32(R300_RE_SCISSORS_BR, 0);
442 WREG32(0x24AC, 0); 401 WREG32(0x24AC, 0);
443 } 402 }
444 /* Wait to prevent race in RBBM_STATUS */ 403 /* Wait to prevent race in RBBM_STATUS */
@@ -488,7 +447,7 @@ int r300_gpu_reset(struct radeon_device *rdev)
 	}
 	/* Check if GPU is idle */
 	status = RREG32(RADEON_RBBM_STATUS);
-	if (status & (1 << 31)) {
+	if (status & RADEON_RBBM_ACTIVE) {
 		DRM_ERROR("Failed to reset GPU (RBBM_STATUS=0x%08X)\n", status);
 		return -1;
 	}
@@ -500,13 +459,13 @@ int r300_gpu_reset(struct radeon_device *rdev)
 /*
  * r300,r350,rv350,rv380 VRAM info
  */
-void r300_vram_info(struct radeon_device *rdev)
+void r300_mc_init(struct radeon_device *rdev)
 {
-	uint32_t tmp;
+	u64 base;
+	u32 tmp;

 	/* DDR for all card after R300 & IGP */
 	rdev->mc.vram_is_ddr = true;
-
 	tmp = RREG32(RADEON_MEM_CNTL);
 	tmp &= R300_MEM_NUM_CHANNELS_MASK;
 	switch (tmp) {
@@ -515,8 +474,13 @@ void r300_vram_info(struct radeon_device *rdev)
 	case 2: rdev->mc.vram_width = 256; break;
 	default: rdev->mc.vram_width = 128; break;
 	}
-
 	r100_vram_init_sizes(rdev);
+	base = rdev->mc.aper_base;
+	if (rdev->flags & RADEON_IS_IGP)
+		base = (RREG32(RADEON_NB_TOM) & 0xffff) << 16;
+	radeon_vram_location(rdev, &rdev->mc, base);
+	if (!(rdev->flags & RADEON_IS_AGP))
+		radeon_gtt_location(rdev, &rdev->mc);
 }

 void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes)
@@ -578,6 +542,40 @@ void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes)

 }

+int rv370_get_pcie_lanes(struct radeon_device *rdev)
+{
+	u32 link_width_cntl;
+
+	if (rdev->flags & RADEON_IS_IGP)
+		return 0;
+
+	if (!(rdev->flags & RADEON_IS_PCIE))
+		return 0;
+
+	/* FIXME wait for idle */
+
+	if (rdev->family < CHIP_R600)
+		link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
+	else
+		link_width_cntl = RREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
+
+	switch ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) >> RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT) {
+	case RADEON_PCIE_LC_LINK_WIDTH_X0:
+		return 0;
+	case RADEON_PCIE_LC_LINK_WIDTH_X1:
+		return 1;
+	case RADEON_PCIE_LC_LINK_WIDTH_X2:
+		return 2;
+	case RADEON_PCIE_LC_LINK_WIDTH_X4:
+		return 4;
+	case RADEON_PCIE_LC_LINK_WIDTH_X8:
+		return 8;
+	case RADEON_PCIE_LC_LINK_WIDTH_X16:
+	default:
+		return 16;
+	}
+}
+
 #if defined(CONFIG_DEBUG_FS)
 static int rv370_debugfs_pcie_gart_info(struct seq_file *m, void *data)
 {
@@ -707,6 +705,8 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
 			tile_flags |= R300_TXO_MACRO_TILE;
 		if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
 			tile_flags |= R300_TXO_MICRO_TILE;
+		else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE)
+			tile_flags |= R300_TXO_MICRO_TILE_SQUARE;

 		tmp = idx_value + ((u32)reloc->lobj.gpu_offset);
 		tmp |= tile_flags;
@@ -757,6 +757,8 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
 			tile_flags |= R300_COLOR_TILE_ENABLE;
 		if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
 			tile_flags |= R300_COLOR_MICROTILE_ENABLE;
+		else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE)
+			tile_flags |= R300_COLOR_MICROTILE_SQUARE_ENABLE;

 		tmp = idx_value & ~(0x7 << 16);
 		tmp |= tile_flags;
@@ -828,7 +830,9 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
 		if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
 			tile_flags |= R300_DEPTHMACROTILE_ENABLE;
 		if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
-			tile_flags |= R300_DEPTHMICROTILE_TILED;;
+			tile_flags |= R300_DEPTHMICROTILE_TILED;
+		else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE)
+			tile_flags |= R300_DEPTHMICROTILE_TILED_SQUARE;

 		tmp = idx_value & ~(0x7 << 16);
 		tmp |= tile_flags;
@@ -1387,12 +1391,15 @@ int r300_init(struct radeon_device *rdev)
 	radeon_get_clock_info(rdev->ddev);
 	/* Initialize power management */
 	radeon_pm_init(rdev);
-	/* Get vram informations */
-	r300_vram_info(rdev);
-	/* Initialize memory controller (also test AGP) */
-	r = r420_mc_init(rdev);
-	if (r)
-		return r;
+	/* initialize AGP */
+	if (rdev->flags & RADEON_IS_AGP) {
+		r = radeon_agp_init(rdev);
+		if (r) {
+			radeon_agp_disable(rdev);
+		}
+	}
+	/* initialize memory controller */
+	r300_mc_init(rdev);
 	/* Fence driver */
 	r = radeon_fence_driver_init(rdev);
 	if (r)
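
The r300_mc_init() rework above derives the IGP VRAM base from RADEON_NB_TOM. As a rough illustration (not part of the patch; the register layout is an assumption inferred from the (tom & 0xffff) << 16 computation shown), NB_TOM appears to pack the start and end of the stolen memory range in 64 KiB units:

	/* Hedged sketch: decode a hypothetical NB_TOM value the way the
	 * patch does.  Assumed layout: bits 15:0 = start and bits 31:16 =
	 * end of the stolen memory range, both in 64 KiB units. */
	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t tom = 0x03ff0300;	/* hypothetical register value */
		uint64_t base = (uint64_t)(tom & 0xffff) << 16;	/* start, bytes */
		uint64_t size = (((uint64_t)(tom >> 16) - (tom & 0xffff)) + 1) << 16;

		printf("IGP VRAM base 0x%llx, size %llu MiB\n",
		       (unsigned long long)base,
		       (unsigned long long)(size >> 20));	/* 0x3000000, 16 MiB */
		return 0;
	}
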
diff --git a/drivers/gpu/drm/radeon/r300_cmdbuf.c b/drivers/gpu/drm/radeon/r300_cmdbuf.c
index 34bffa0e4b73..ea46d558e8f3 100644
--- a/drivers/gpu/drm/radeon/r300_cmdbuf.c
+++ b/drivers/gpu/drm/radeon/r300_cmdbuf.c
@@ -33,6 +33,7 @@
33 33
34#include "drmP.h" 34#include "drmP.h"
35#include "drm.h" 35#include "drm.h"
36#include "drm_buffer.h"
36#include "radeon_drm.h" 37#include "radeon_drm.h"
37#include "radeon_drv.h" 38#include "radeon_drv.h"
38#include "r300_reg.h" 39#include "r300_reg.h"
@@ -299,46 +300,42 @@ static __inline__ int r300_emit_carefully_checked_packet0(drm_radeon_private_t *
 	int reg;
 	int sz;
 	int i;
-	int values[64];
+	u32 *value;
 	RING_LOCALS;

 	sz = header.packet0.count;
 	reg = (header.packet0.reghi << 8) | header.packet0.reglo;

 	if ((sz > 64) || (sz < 0)) {
-		DRM_ERROR
-		    ("Cannot emit more than 64 values at a time (reg=%04x sz=%d)\n",
-		     reg, sz);
+		DRM_ERROR("Cannot emit more than 64 values at a time (reg=%04x sz=%d)\n",
+			  reg, sz);
 		return -EINVAL;
 	}
+
 	for (i = 0; i < sz; i++) {
-		values[i] = ((int *)cmdbuf->buf)[i];
 		switch (r300_reg_flags[(reg >> 2) + i]) {
 		case MARK_SAFE:
 			break;
 		case MARK_CHECK_OFFSET:
-			if (!radeon_check_offset(dev_priv, (u32) values[i])) {
-				DRM_ERROR
-				    ("Offset failed range check (reg=%04x sz=%d)\n",
-				     reg, sz);
+			value = drm_buffer_pointer_to_dword(cmdbuf->buffer, i);
+			if (!radeon_check_offset(dev_priv, *value)) {
+				DRM_ERROR("Offset failed range check (reg=%04x sz=%d)\n",
+					  reg, sz);
 				return -EINVAL;
 			}
 			break;
 		default:
			DRM_ERROR("Register %04x failed check as flag=%02x\n",
 				  reg + i * 4, r300_reg_flags[(reg >> 2) + i]);
 			return -EINVAL;
 		}
 	}

 	BEGIN_RING(1 + sz);
 	OUT_RING(CP_PACKET0(reg, sz - 1));
-	OUT_RING_TABLE(values, sz);
+	OUT_RING_DRM_BUFFER(cmdbuf->buffer, sz);
 	ADVANCE_RING();

-	cmdbuf->buf += sz * 4;
-	cmdbuf->bufsz -= sz * 4;
-
 	return 0;
 }
@@ -362,7 +359,7 @@ static __inline__ int r300_emit_packet0(drm_radeon_private_t *dev_priv,
 	if (!sz)
 		return 0;

-	if (sz * 4 > cmdbuf->bufsz)
+	if (sz * 4 > drm_buffer_unprocessed(cmdbuf->buffer))
 		return -EINVAL;

 	if (reg + sz * 4 >= 0x10000) {
@@ -380,12 +377,9 @@ static __inline__ int r300_emit_packet0(drm_radeon_private_t *dev_priv,

 	BEGIN_RING(1 + sz);
 	OUT_RING(CP_PACKET0(reg, sz - 1));
-	OUT_RING_TABLE((int *)cmdbuf->buf, sz);
+	OUT_RING_DRM_BUFFER(cmdbuf->buffer, sz);
 	ADVANCE_RING();

-	cmdbuf->buf += sz * 4;
-	cmdbuf->bufsz -= sz * 4;
-
 	return 0;
 }
@@ -407,7 +401,7 @@ static __inline__ int r300_emit_vpu(drm_radeon_private_t *dev_priv,

 	if (!sz)
 		return 0;
-	if (sz * 16 > cmdbuf->bufsz)
+	if (sz * 16 > drm_buffer_unprocessed(cmdbuf->buffer))
 		return -EINVAL;

 	/* VAP is very sensitive so we purge cache before we program it
@@ -426,7 +420,7 @@ static __inline__ int r300_emit_vpu(drm_radeon_private_t *dev_priv,
 	BEGIN_RING(3 + sz * 4);
 	OUT_RING_REG(R300_VAP_PVS_UPLOAD_ADDRESS, addr);
 	OUT_RING(CP_PACKET0_TABLE(R300_VAP_PVS_UPLOAD_DATA, sz * 4 - 1));
-	OUT_RING_TABLE((int *)cmdbuf->buf, sz * 4);
+	OUT_RING_DRM_BUFFER(cmdbuf->buffer, sz * 4);
 	ADVANCE_RING();

 	BEGIN_RING(2);
@@ -434,9 +428,6 @@ static __inline__ int r300_emit_vpu(drm_radeon_private_t *dev_priv,
 	OUT_RING(0);
 	ADVANCE_RING();

-	cmdbuf->buf += sz * 16;
-	cmdbuf->bufsz -= sz * 16;
-
 	return 0;
 }
@@ -449,14 +440,14 @@ static __inline__ int r300_emit_clear(drm_radeon_private_t *dev_priv,
 {
 	RING_LOCALS;

-	if (8 * 4 > cmdbuf->bufsz)
+	if (8 * 4 > drm_buffer_unprocessed(cmdbuf->buffer))
 		return -EINVAL;

 	BEGIN_RING(10);
 	OUT_RING(CP_PACKET3(R200_3D_DRAW_IMMD_2, 8));
 	OUT_RING(R300_PRIM_TYPE_POINT | R300_PRIM_WALK_RING |
 		 (1 << R300_PRIM_NUM_VERTICES_SHIFT));
-	OUT_RING_TABLE((int *)cmdbuf->buf, 8);
+	OUT_RING_DRM_BUFFER(cmdbuf->buffer, 8);
 	ADVANCE_RING();

 	BEGIN_RING(4);
@@ -468,9 +459,6 @@ static __inline__ int r300_emit_clear(drm_radeon_private_t *dev_priv,
 	/* set flush flag */
 	dev_priv->track_flush |= RADEON_FLUSH_EMITED;

-	cmdbuf->buf += 8 * 4;
-	cmdbuf->bufsz -= 8 * 4;
-
 	return 0;
 }
@@ -480,28 +468,29 @@ static __inline__ int r300_emit_3d_load_vbpntr(drm_radeon_private_t *dev_priv,
 {
 	int count, i, k;
 #define MAX_ARRAY_PACKET  64
-	u32 payload[MAX_ARRAY_PACKET];
+	u32 *data;
 	u32 narrays;
 	RING_LOCALS;

-	count = (header >> 16) & 0x3fff;
+	count = (header & RADEON_CP_PACKET_COUNT_MASK) >> 16;

 	if ((count + 1) > MAX_ARRAY_PACKET) {
 		DRM_ERROR("Too large payload in 3D_LOAD_VBPNTR (count=%d)\n",
 			  count);
 		return -EINVAL;
 	}
-	memset(payload, 0, MAX_ARRAY_PACKET * 4);
-	memcpy(payload, cmdbuf->buf + 4, (count + 1) * 4);
-
 	/* carefully check packet contents */

-	narrays = payload[0];
+	/* We have already read the header so advance the buffer. */
+	drm_buffer_advance(cmdbuf->buffer, 4);
+
+	narrays = *(u32 *)drm_buffer_pointer_to_dword(cmdbuf->buffer, 0);
 	k = 0;
 	i = 1;
 	while ((k < narrays) && (i < (count + 1))) {
 		i++;	/* skip attribute field */
-		if (!radeon_check_offset(dev_priv, payload[i])) {
+		data = drm_buffer_pointer_to_dword(cmdbuf->buffer, i);
+		if (!radeon_check_offset(dev_priv, *data)) {
 			DRM_ERROR
 			    ("Offset failed range check (k=%d i=%d) while processing 3D_LOAD_VBPNTR packet.\n",
 			     k, i);
@@ -512,7 +501,8 @@ static __inline__ int r300_emit_3d_load_vbpntr(drm_radeon_private_t *dev_priv,
 		if (k == narrays)
 			break;
 		/* have one more to process, they come in pairs */
-		if (!radeon_check_offset(dev_priv, payload[i])) {
+		data = drm_buffer_pointer_to_dword(cmdbuf->buffer, i);
+		if (!radeon_check_offset(dev_priv, *data)) {
 			DRM_ERROR
 			    ("Offset failed range check (k=%d i=%d) while processing 3D_LOAD_VBPNTR packet.\n",
 			     k, i);
@@ -533,30 +523,30 @@ static __inline__ int r300_emit_3d_load_vbpntr(drm_radeon_private_t *dev_priv,

 	BEGIN_RING(count + 2);
 	OUT_RING(header);
-	OUT_RING_TABLE(payload, count + 1);
+	OUT_RING_DRM_BUFFER(cmdbuf->buffer, count + 1);
 	ADVANCE_RING();

-	cmdbuf->buf += (count + 2) * 4;
-	cmdbuf->bufsz -= (count + 2) * 4;
-
 	return 0;
 }

 static __inline__ int r300_emit_bitblt_multi(drm_radeon_private_t *dev_priv,
 					     drm_radeon_kcmd_buffer_t *cmdbuf)
 {
-	u32 *cmd = (u32 *) cmdbuf->buf;
+	u32 *cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 0);
 	int count, ret;
 	RING_LOCALS;

-	count=(cmd[0]>>16) & 0x3fff;
-
-	if (cmd[0] & 0x8000) {
-		u32 offset;
+	count = (*cmd & RADEON_CP_PACKET_COUNT_MASK) >> 16;

-		if (cmd[1] & (RADEON_GMC_SRC_PITCH_OFFSET_CNTL
+	if (*cmd & 0x8000) {
+		u32 offset;
+		u32 *cmd1 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 1);
+		if (*cmd1 & (RADEON_GMC_SRC_PITCH_OFFSET_CNTL
 			      | RADEON_GMC_DST_PITCH_OFFSET_CNTL)) {
-			offset = cmd[2] << 10;
+
+			u32 *cmd2 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 2);
+			offset = *cmd2 << 10;
 			ret = !radeon_check_offset(dev_priv, offset);
 			if (ret) {
 				DRM_ERROR("Invalid bitblt first offset is %08X\n", offset);
@@ -564,9 +554,10 @@ static __inline__ int r300_emit_bitblt_multi(drm_radeon_private_t *dev_priv,
 			}
 		}

-		if ((cmd[1] & RADEON_GMC_SRC_PITCH_OFFSET_CNTL) &&
-		    (cmd[1] & RADEON_GMC_DST_PITCH_OFFSET_CNTL)) {
-			offset = cmd[3] << 10;
+		if ((*cmd1 & RADEON_GMC_SRC_PITCH_OFFSET_CNTL) &&
+		    (*cmd1 & RADEON_GMC_DST_PITCH_OFFSET_CNTL)) {
+			u32 *cmd3 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 3);
+			offset = *cmd3 << 10;
 			ret = !radeon_check_offset(dev_priv, offset);
 			if (ret) {
 				DRM_ERROR("Invalid bitblt second offset is %08X\n", offset);
@@ -577,28 +568,25 @@ static __inline__ int r300_emit_bitblt_multi(drm_radeon_private_t *dev_priv,
 	}

 	BEGIN_RING(count+2);
-	OUT_RING(cmd[0]);
-	OUT_RING_TABLE((int *)(cmdbuf->buf + 4), count + 1);
+	OUT_RING_DRM_BUFFER(cmdbuf->buffer, count + 2);
 	ADVANCE_RING();

-	cmdbuf->buf += (count+2)*4;
-	cmdbuf->bufsz -= (count+2)*4;
-
 	return 0;
 }

 static __inline__ int r300_emit_draw_indx_2(drm_radeon_private_t *dev_priv,
 					    drm_radeon_kcmd_buffer_t *cmdbuf)
 {
-	u32 *cmd;
+	u32 *cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 0);
+	u32 *cmd1 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 1);
 	int count;
 	int expected_count;
 	RING_LOCALS;

-	cmd = (u32 *) cmdbuf->buf;
-	count = (cmd[0]>>16) & 0x3fff;
-	expected_count = cmd[1] >> 16;
-	if (!(cmd[1] & R300_VAP_VF_CNTL__INDEX_SIZE_32bit))
+	count = (*cmd & RADEON_CP_PACKET_COUNT_MASK) >> 16;
+
+	expected_count = *cmd1 >> 16;
+	if (!(*cmd1 & R300_VAP_VF_CNTL__INDEX_SIZE_32bit))
 		expected_count = (expected_count+1)/2;

 	if (count && count != expected_count) {
@@ -608,55 +596,53 @@ static __inline__ int r300_emit_draw_indx_2(drm_radeon_private_t *dev_priv,
 	}

 	BEGIN_RING(count+2);
-	OUT_RING(cmd[0]);
-	OUT_RING_TABLE((int *)(cmdbuf->buf + 4), count + 1);
+	OUT_RING_DRM_BUFFER(cmdbuf->buffer, count + 2);
 	ADVANCE_RING();

-	cmdbuf->buf += (count+2)*4;
-	cmdbuf->bufsz -= (count+2)*4;
-
 	if (!count) {
-		drm_r300_cmd_header_t header;
+		drm_r300_cmd_header_t stack_header, *header;
+		u32 *cmd1, *cmd2, *cmd3;

-		if (cmdbuf->bufsz < 4*4 + sizeof(header)) {
+		if (drm_buffer_unprocessed(cmdbuf->buffer)
+				< 4*4 + sizeof(stack_header)) {
 			DRM_ERROR("3D_DRAW_INDX_2: expect subsequent INDX_BUFFER, but stream is too short.\n");
 			return -EINVAL;
 		}

-		header.u = *(unsigned int *)cmdbuf->buf;
+		header = drm_buffer_read_object(cmdbuf->buffer,
+				sizeof(stack_header), &stack_header);

-		cmdbuf->buf += sizeof(header);
-		cmdbuf->bufsz -= sizeof(header);
-		cmd = (u32 *) cmdbuf->buf;
+		cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 0);
+		cmd1 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 1);
+		cmd2 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 2);
+		cmd3 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 3);

-		if (header.header.cmd_type != R300_CMD_PACKET3 ||
-		    header.packet3.packet != R300_CMD_PACKET3_RAW ||
-		    cmd[0] != CP_PACKET3(RADEON_CP_INDX_BUFFER, 2)) {
+		if (header->header.cmd_type != R300_CMD_PACKET3 ||
+		    header->packet3.packet != R300_CMD_PACKET3_RAW ||
+		    *cmd != CP_PACKET3(RADEON_CP_INDX_BUFFER, 2)) {
 			DRM_ERROR("3D_DRAW_INDX_2: expect subsequent INDX_BUFFER.\n");
 			return -EINVAL;
 		}

-		if ((cmd[1] & 0x8000ffff) != 0x80000810) {
-			DRM_ERROR("Invalid indx_buffer reg address %08X\n", cmd[1]);
+		if ((*cmd1 & 0x8000ffff) != 0x80000810) {
+			DRM_ERROR("Invalid indx_buffer reg address %08X\n",
+				  *cmd1);
 			return -EINVAL;
 		}
-		if (!radeon_check_offset(dev_priv, cmd[2])) {
-			DRM_ERROR("Invalid indx_buffer offset is %08X\n", cmd[2]);
+		if (!radeon_check_offset(dev_priv, *cmd2)) {
+			DRM_ERROR("Invalid indx_buffer offset is %08X\n",
+				  *cmd2);
 			return -EINVAL;
 		}
-		if (cmd[3] != expected_count) {
+		if (*cmd3 != expected_count) {
 			DRM_ERROR("INDX_BUFFER: buffer size %i, expected %i\n",
-				  cmd[3], expected_count);
+				  *cmd3, expected_count);
 			return -EINVAL;
 		}

 		BEGIN_RING(4);
-		OUT_RING(cmd[0]);
-		OUT_RING_TABLE((int *)(cmdbuf->buf + 4), 3);
+		OUT_RING_DRM_BUFFER(cmdbuf->buffer, 4);
 		ADVANCE_RING();
-
-		cmdbuf->buf += 4*4;
-		cmdbuf->bufsz -= 4*4;
 	}

 	return 0;
@@ -665,39 +651,39 @@ static __inline__ int r300_emit_draw_indx_2(drm_radeon_private_t *dev_priv,
 static __inline__ int r300_emit_raw_packet3(drm_radeon_private_t *dev_priv,
 					    drm_radeon_kcmd_buffer_t *cmdbuf)
 {
-	u32 header;
+	u32 *header;
 	int count;
 	RING_LOCALS;

-	if (4 > cmdbuf->bufsz)
+	if (4 > drm_buffer_unprocessed(cmdbuf->buffer))
 		return -EINVAL;

 	/* Fixme !! This simply emits a packet without much checking.
 	   We need to be smarter. */

 	/* obtain first word - actual packet3 header */
-	header = *(u32 *) cmdbuf->buf;
+	header = drm_buffer_pointer_to_dword(cmdbuf->buffer, 0);

 	/* Is it packet 3 ? */
-	if ((header >> 30) != 0x3) {
-		DRM_ERROR("Not a packet3 header (0x%08x)\n", header);
+	if ((*header >> 30) != 0x3) {
+		DRM_ERROR("Not a packet3 header (0x%08x)\n", *header);
 		return -EINVAL;
 	}

-	count = (header >> 16) & 0x3fff;
+	count = (*header >> 16) & 0x3fff;

 	/* Check again now that we know how much data to expect */
-	if ((count + 2) * 4 > cmdbuf->bufsz) {
+	if ((count + 2) * 4 > drm_buffer_unprocessed(cmdbuf->buffer)) {
 		DRM_ERROR
 		    ("Expected packet3 of length %d but have only %d bytes left\n",
-		     (count + 2) * 4, cmdbuf->bufsz);
+		     (count + 2) * 4, drm_buffer_unprocessed(cmdbuf->buffer));
 		return -EINVAL;
 	}

 	/* Is it a packet type we know about ? */
-	switch (header & 0xff00) {
+	switch (*header & 0xff00) {
 	case RADEON_3D_LOAD_VBPNTR:	/* load vertex array pointers */
-		return r300_emit_3d_load_vbpntr(dev_priv, cmdbuf, header);
+		return r300_emit_3d_load_vbpntr(dev_priv, cmdbuf, *header);

 	case RADEON_CNTL_BITBLT_MULTI:
 		return r300_emit_bitblt_multi(dev_priv, cmdbuf);
@@ -723,18 +709,14 @@ static __inline__ int r300_emit_raw_packet3(drm_radeon_private_t *dev_priv,
 		/* these packets are safe */
 		break;
 	default:
-		DRM_ERROR("Unknown packet3 header (0x%08x)\n", header);
+		DRM_ERROR("Unknown packet3 header (0x%08x)\n", *header);
 		return -EINVAL;
 	}

 	BEGIN_RING(count + 2);
-	OUT_RING(header);
-	OUT_RING_TABLE((int *)(cmdbuf->buf + 4), count + 1);
+	OUT_RING_DRM_BUFFER(cmdbuf->buffer, count + 2);
 	ADVANCE_RING();

-	cmdbuf->buf += (count + 2) * 4;
-	cmdbuf->bufsz -= (count + 2) * 4;
-
 	return 0;
 }
@@ -748,8 +730,7 @@ static __inline__ int r300_emit_packet3(drm_radeon_private_t *dev_priv,
 {
 	int n;
 	int ret;
-	char *orig_buf = cmdbuf->buf;
-	int orig_bufsz = cmdbuf->bufsz;
+	int orig_iter = cmdbuf->buffer->iterator;

 	/* This is a do-while-loop so that we run the interior at least once,
 	 * even if cmdbuf->nbox is 0. Compare r300_emit_cliprects for rationale.
@@ -761,8 +742,7 @@ static __inline__ int r300_emit_packet3(drm_radeon_private_t *dev_priv,
 			if (ret)
 				return ret;

-			cmdbuf->buf = orig_buf;
-			cmdbuf->bufsz = orig_bufsz;
+			cmdbuf->buffer->iterator = orig_iter;
 		}

 	switch (header.packet3.packet) {
@@ -785,9 +765,9 @@ static __inline__ int r300_emit_packet3(drm_radeon_private_t *dev_priv,
 		break;

 	default:
-		DRM_ERROR("bad packet3 type %i at %p\n",
+		DRM_ERROR("bad packet3 type %i at byte %d\n",
 			  header.packet3.packet,
-			  cmdbuf->buf - sizeof(header));
+			  cmdbuf->buffer->iterator - (int)sizeof(header));
 		return -EINVAL;
 	}

@@ -923,12 +903,13 @@ static int r300_scratch(drm_radeon_private_t *dev_priv,
 			drm_r300_cmd_header_t header)
 {
 	u32 *ref_age_base;
-	u32 i, buf_idx, h_pending;
-	u64 ptr_addr;
+	u32 i, *buf_idx, h_pending;
+	u64 *ptr_addr;
+	u64 stack_ptr_addr;
 	RING_LOCALS;

-	if (cmdbuf->bufsz <
-	    (sizeof(u64) + header.scratch.n_bufs * sizeof(buf_idx))) {
+	if (drm_buffer_unprocessed(cmdbuf->buffer) <
+	    (sizeof(u64) + header.scratch.n_bufs * sizeof(*buf_idx))) {
 		return -EINVAL;
 	}

@@ -938,36 +919,35 @@ static int r300_scratch(drm_radeon_private_t *dev_priv,

 	dev_priv->scratch_ages[header.scratch.reg]++;

-	ptr_addr = get_unaligned((u64 *)cmdbuf->buf);
-	ref_age_base = (u32 *)(unsigned long)ptr_addr;
-
-	cmdbuf->buf += sizeof(u64);
-	cmdbuf->bufsz -= sizeof(u64);
+	ptr_addr = drm_buffer_read_object(cmdbuf->buffer,
+			sizeof(stack_ptr_addr), &stack_ptr_addr);
+	ref_age_base = (u32 *)(unsigned long)*ptr_addr;

 	for (i=0; i < header.scratch.n_bufs; i++) {
-		buf_idx = *(u32 *)cmdbuf->buf;
-		buf_idx *= 2; /* 8 bytes per buf */
+		buf_idx = drm_buffer_pointer_to_dword(cmdbuf->buffer, 0);
+		*buf_idx *= 2; /* 8 bytes per buf */

-		if (DRM_COPY_TO_USER(ref_age_base + buf_idx, &dev_priv->scratch_ages[header.scratch.reg], sizeof(u32))) {
+		if (DRM_COPY_TO_USER(ref_age_base + *buf_idx,
+				&dev_priv->scratch_ages[header.scratch.reg],
+				sizeof(u32)))
 			return -EINVAL;
-		}

-		if (DRM_COPY_FROM_USER(&h_pending, ref_age_base + buf_idx + 1, sizeof(u32))) {
+		if (DRM_COPY_FROM_USER(&h_pending,
+				ref_age_base + *buf_idx + 1,
+				sizeof(u32)))
 			return -EINVAL;
-		}

-		if (h_pending == 0) {
+		if (h_pending == 0)
 			return -EINVAL;
-		}

 		h_pending--;

-		if (DRM_COPY_TO_USER(ref_age_base + buf_idx + 1, &h_pending, sizeof(u32))) {
+		if (DRM_COPY_TO_USER(ref_age_base + *buf_idx + 1,
+				&h_pending,
+				sizeof(u32)))
 			return -EINVAL;
-		}

-		cmdbuf->buf += sizeof(buf_idx);
-		cmdbuf->bufsz -= sizeof(buf_idx);
+		drm_buffer_advance(cmdbuf->buffer, sizeof(*buf_idx));
 	}

 	BEGIN_RING(2);
@@ -1009,19 +989,16 @@ static inline int r300_emit_r500fp(drm_radeon_private_t *dev_priv,
 	DRM_DEBUG("r500fp %d %d type: %d\n", sz, addr, type);
 	if (!sz)
 		return 0;
-	if (sz * stride * 4 > cmdbuf->bufsz)
+	if (sz * stride * 4 > drm_buffer_unprocessed(cmdbuf->buffer))
 		return -EINVAL;

 	BEGIN_RING(3 + sz * stride);
 	OUT_RING_REG(R500_GA_US_VECTOR_INDEX, addr);
 	OUT_RING(CP_PACKET0_TABLE(R500_GA_US_VECTOR_DATA, sz * stride - 1));
-	OUT_RING_TABLE((int *)cmdbuf->buf, sz * stride);
+	OUT_RING_DRM_BUFFER(cmdbuf->buffer, sz * stride);

 	ADVANCE_RING();

-	cmdbuf->buf += sz * stride * 4;
-	cmdbuf->bufsz -= sz * stride * 4;
-
 	return 0;
 }
@@ -1053,19 +1030,18 @@ int r300_do_cp_cmdbuf(struct drm_device *dev,
 		goto cleanup;
 	}

-	while (cmdbuf->bufsz >= sizeof(drm_r300_cmd_header_t)) {
+	while (drm_buffer_unprocessed(cmdbuf->buffer)
+			>= sizeof(drm_r300_cmd_header_t)) {
 		int idx;
-		drm_r300_cmd_header_t header;
-
-		header.u = *(unsigned int *)cmdbuf->buf;
+		drm_r300_cmd_header_t *header, stack_header;

-		cmdbuf->buf += sizeof(header);
-		cmdbuf->bufsz -= sizeof(header);
+		header = drm_buffer_read_object(cmdbuf->buffer,
+				sizeof(stack_header), &stack_header);

-		switch (header.header.cmd_type) {
+		switch (header->header.cmd_type) {
 		case R300_CMD_PACKET0:
 			DRM_DEBUG("R300_CMD_PACKET0\n");
-			ret = r300_emit_packet0(dev_priv, cmdbuf, header);
+			ret = r300_emit_packet0(dev_priv, cmdbuf, *header);
 			if (ret) {
 				DRM_ERROR("r300_emit_packet0 failed\n");
 				goto cleanup;
@@ -1074,7 +1050,7 @@ int r300_do_cp_cmdbuf(struct drm_device *dev,

 		case R300_CMD_VPU:
 			DRM_DEBUG("R300_CMD_VPU\n");
-			ret = r300_emit_vpu(dev_priv, cmdbuf, header);
+			ret = r300_emit_vpu(dev_priv, cmdbuf, *header);
 			if (ret) {
 				DRM_ERROR("r300_emit_vpu failed\n");
 				goto cleanup;
1083 1059
1084 case R300_CMD_PACKET3: 1060 case R300_CMD_PACKET3:
1085 DRM_DEBUG("R300_CMD_PACKET3\n"); 1061 DRM_DEBUG("R300_CMD_PACKET3\n");
1086 ret = r300_emit_packet3(dev_priv, cmdbuf, header); 1062 ret = r300_emit_packet3(dev_priv, cmdbuf, *header);
1087 if (ret) { 1063 if (ret) {
1088 DRM_ERROR("r300_emit_packet3 failed\n"); 1064 DRM_ERROR("r300_emit_packet3 failed\n");
1089 goto cleanup; 1065 goto cleanup;
@@ -1117,8 +1093,8 @@ int r300_do_cp_cmdbuf(struct drm_device *dev,
 				int i;
 				RING_LOCALS;

-				BEGIN_RING(header.delay.count);
-				for (i = 0; i < header.delay.count; i++)
+				BEGIN_RING(header->delay.count);
+				for (i = 0; i < header->delay.count; i++)
 					OUT_RING(RADEON_CP_PACKET2);
 				ADVANCE_RING();
 			}
@@ -1126,7 +1102,7 @@ int r300_do_cp_cmdbuf(struct drm_device *dev,

 		case R300_CMD_DMA_DISCARD:
 			DRM_DEBUG("RADEON_CMD_DMA_DISCARD\n");
-			idx = header.dma.buf_idx;
+			idx = header->dma.buf_idx;
 			if (idx < 0 || idx >= dma->buf_count) {
 				DRM_ERROR("buffer index %d (of %d max)\n",
 					  idx, dma->buf_count - 1);
@@ -1149,12 +1125,12 @@ int r300_do_cp_cmdbuf(struct drm_device *dev,

 		case R300_CMD_WAIT:
 			DRM_DEBUG("R300_CMD_WAIT\n");
-			r300_cmd_wait(dev_priv, header);
+			r300_cmd_wait(dev_priv, *header);
 			break;

 		case R300_CMD_SCRATCH:
 			DRM_DEBUG("R300_CMD_SCRATCH\n");
-			ret = r300_scratch(dev_priv, cmdbuf, header);
+			ret = r300_scratch(dev_priv, cmdbuf, *header);
 			if (ret) {
 				DRM_ERROR("r300_scratch failed\n");
 				goto cleanup;
@@ -1168,16 +1144,16 @@ int r300_do_cp_cmdbuf(struct drm_device *dev,
 				goto cleanup;
 			}
 			DRM_DEBUG("R300_CMD_R500FP\n");
-			ret = r300_emit_r500fp(dev_priv, cmdbuf, header);
+			ret = r300_emit_r500fp(dev_priv, cmdbuf, *header);
 			if (ret) {
 				DRM_ERROR("r300_emit_r500fp failed\n");
 				goto cleanup;
 			}
 			break;
 		default:
-			DRM_ERROR("bad cmd_type %i at %p\n",
-				  header.header.cmd_type,
-				  cmdbuf->buf - sizeof(header));
+			DRM_ERROR("bad cmd_type %i at byte %d\n",
+				  header->header.cmd_type,
+				  cmdbuf->buffer->iterator - (int)sizeof(*header));
 			ret = -EINVAL;
 			goto cleanup;
 		}
diff --git a/drivers/gpu/drm/radeon/r300_reg.h b/drivers/gpu/drm/radeon/r300_reg.h
index 1735a2b69580..1a0d5362cd79 100644
--- a/drivers/gpu/drm/radeon/r300_reg.h
+++ b/drivers/gpu/drm/radeon/r300_reg.h
@@ -952,6 +952,7 @@
 #	define R300_TXO_ENDIAN_HALFDW_SWAP	(3 << 0)
 #	define R300_TXO_MACRO_TILE		(1 << 2)
 #	define R300_TXO_MICRO_TILE		(1 << 3)
+#	define R300_TXO_MICRO_TILE_SQUARE	(2 << 3)
 #	define R300_TXO_OFFSET_MASK		0xffffffe0
 #	define R300_TXO_OFFSET_SHIFT		5
 	/* END: Guess from R200 */
@@ -1360,6 +1361,7 @@
 #	define R300_COLORPITCH_MASK		0x00001FF8 /* GUESS */
 #	define R300_COLOR_TILE_ENABLE		(1 << 16) /* GUESS */
 #	define R300_COLOR_MICROTILE_ENABLE	(1 << 17) /* GUESS */
+#	define R300_COLOR_MICROTILE_SQUARE_ENABLE (2 << 17)
 #	define R300_COLOR_ENDIAN_NO_SWAP	(0 << 18) /* GUESS */
 #	define R300_COLOR_ENDIAN_WORD_SWAP	(1 << 18) /* GUESS */
 #	define R300_COLOR_ENDIAN_DWORD_SWAP	(2 << 18) /* GUESS */
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c
index d9373246c97f..c7593b8f58ee 100644
--- a/drivers/gpu/drm/radeon/r420.c
+++ b/drivers/gpu/drm/radeon/r420.c
@@ -40,28 +40,6 @@ static void r420_set_reg_safe(struct radeon_device *rdev)
 	rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(r420_reg_safe_bm);
 }

-int r420_mc_init(struct radeon_device *rdev)
-{
-	int r;
-
-	/* Setup GPU memory space */
-	rdev->mc.vram_location = 0xFFFFFFFFUL;
-	rdev->mc.gtt_location = 0xFFFFFFFFUL;
-	if (rdev->flags & RADEON_IS_AGP) {
-		r = radeon_agp_init(rdev);
-		if (r) {
-			radeon_agp_disable(rdev);
-		} else {
-			rdev->mc.gtt_location = rdev->mc.agp_base;
-		}
-	}
-	r = radeon_mc_setup(rdev);
-	if (r) {
-		return r;
-	}
-	return 0;
-}
-
 void r420_pipes_init(struct radeon_device *rdev)
 {
 	unsigned tmp;
@@ -69,7 +47,8 @@ void r420_pipes_init(struct radeon_device *rdev)
 	unsigned num_pipes;

 	/* GA_ENHANCE workaround TCL deadlock issue */
-	WREG32(0x4274, (1 << 0) | (1 << 1) | (1 << 2) | (1 << 3));
+	WREG32(R300_GA_ENHANCE, R300_GA_DEADLOCK_CNTL | R300_GA_FASTSYNC_CNTL |
+	       (1 << 2) | (1 << 3));
 	/* add idle wait as per freedesktop.org bug 24041 */
 	if (r100_gui_wait_for_idle(rdev)) {
 		printk(KERN_WARNING "Failed to wait GUI idle while "
@@ -97,17 +76,17 @@ void r420_pipes_init(struct radeon_device *rdev)
 		tmp = (7 << 1);
 		break;
 	}
-	WREG32(0x42C8, (1 << num_pipes) - 1);
+	WREG32(R500_SU_REG_DEST, (1 << num_pipes) - 1);
 	/* Sub pixel 1/12 so we can have 4K rendering according to doc */
-	tmp |= (1 << 4) | (1 << 0);
-	WREG32(0x4018, tmp);
+	tmp |= R300_TILE_SIZE_16 | R300_ENABLE_TILING;
+	WREG32(R300_GB_TILE_CONFIG, tmp);
 	if (r100_gui_wait_for_idle(rdev)) {
 		printk(KERN_WARNING "Failed to wait GUI idle while "
 		       "programming pipes. Bad things might happen.\n");
 	}

-	tmp = RREG32(0x170C);
-	WREG32(0x170C, tmp | (1 << 31));
+	tmp = RREG32(R300_DST_PIPE_CONFIG);
+	WREG32(R300_DST_PIPE_CONFIG, tmp | R300_PIPE_AUTO_CONFIG);

 	WREG32(R300_RB2D_DSTCACHE_MODE,
 	       RREG32(R300_RB2D_DSTCACHE_MODE) |
@@ -348,13 +327,15 @@ int r420_init(struct radeon_device *rdev)
 	radeon_get_clock_info(rdev->ddev);
 	/* Initialize power management */
 	radeon_pm_init(rdev);
-	/* Get vram informations */
-	r300_vram_info(rdev);
-	/* Initialize memory controller (also test AGP) */
-	r = r420_mc_init(rdev);
-	if (r) {
-		return r;
-	}
+	/* initialize AGP */
+	if (rdev->flags & RADEON_IS_AGP) {
+		r = radeon_agp_init(rdev);
+		if (r) {
+			radeon_agp_disable(rdev);
+		}
+	}
+	/* initialize memory controller */
+	r300_mc_init(rdev);
 	r420_debugfs(rdev);
 	/* Fence driver */
 	r = radeon_fence_driver_init(rdev);
diff --git a/drivers/gpu/drm/radeon/r500_reg.h b/drivers/gpu/drm/radeon/r500_reg.h
index 74ad89bdf2b5..0cf2ad2a5585 100644
--- a/drivers/gpu/drm/radeon/r500_reg.h
+++ b/drivers/gpu/drm/radeon/r500_reg.h
@@ -717,54 +717,62 @@
 #define AVIVO_DVOA_BIT_DEPTH_CONTROL		0x7988

 #define AVIVO_DC_GPIO_HPD_A			0x7e94
-
-#define AVIVO_GPIO_0				0x7e30
-#define AVIVO_GPIO_1				0x7e40
-#define AVIVO_GPIO_2				0x7e50
-#define AVIVO_GPIO_3				0x7e60
-
 #define AVIVO_DC_GPIO_HPD_Y			0x7e9c

-#define AVIVO_I2C_STATUS			0x7d30
-# define AVIVO_I2C_STATUS_DONE			(1 << 0)
-# define AVIVO_I2C_STATUS_NACK			(1 << 1)
-# define AVIVO_I2C_STATUS_HALT			(1 << 2)
-# define AVIVO_I2C_STATUS_GO			(1 << 3)
-# define AVIVO_I2C_STATUS_MASK			0x7
-/* If radeon_mm_i2c is to be believed, this is HALT, NACK, and maybe
- * DONE? */
-# define AVIVO_I2C_STATUS_CMD_RESET		0x7
-# define AVIVO_I2C_STATUS_CMD_WAIT		(1 << 3)
-#define AVIVO_I2C_STOP				0x7d34
-#define AVIVO_I2C_START_CNTL			0x7d38
-# define AVIVO_I2C_START			(1 << 8)
-# define AVIVO_I2C_CONNECTOR0			(0 << 16)
-# define AVIVO_I2C_CONNECTOR1			(1 << 16)
-#define R520_I2C_START				(1<<0)
-#define R520_I2C_STOP				(1<<1)
-#define R520_I2C_RX				(1<<2)
-#define R520_I2C_EN				(1<<8)
-#define R520_I2C_DDC1				(0<<16)
-#define R520_I2C_DDC2				(1<<16)
-#define R520_I2C_DDC3				(2<<16)
-#define R520_I2C_DDC_MASK			(3<<16)
-#define AVIVO_I2C_CONTROL2			0x7d3c
-# define AVIVO_I2C_7D3C_SIZE_SHIFT		8
-# define AVIVO_I2C_7D3C_SIZE_MASK		(0xf << 8)
-#define AVIVO_I2C_CONTROL3			0x7d40
-/* Reading is done 4 bytes at a time: read the bottom 8 bits from
- * 7d44, four times in a row.
- * Writing is a little more complex.  First write DATA with
- * 0xnnnnnnzz, then 0xnnnnnnyy, where nnnnnn is some non-deterministic
- * magic number, zz is, I think, the slave address, and yy is the byte
- * you want to write. */
-#define AVIVO_I2C_DATA				0x7d44
-#define R520_I2C_ADDR_COUNT_MASK		(0x7)
-#define R520_I2C_DATA_COUNT_SHIFT		(8)
-#define R520_I2C_DATA_COUNT_MASK		(0xF00)
-#define AVIVO_I2C_CNTL				0x7d50
-# define AVIVO_I2C_EN				(1 << 0)
-# define AVIVO_I2C_RESET			(1 << 8)
+#define AVIVO_DC_I2C_STATUS1			0x7d30
+# define AVIVO_DC_I2C_DONE			(1 << 0)
+# define AVIVO_DC_I2C_NACK			(1 << 1)
+# define AVIVO_DC_I2C_HALT			(1 << 2)
+# define AVIVO_DC_I2C_GO			(1 << 3)
+#define AVIVO_DC_I2C_RESET			0x7d34
+# define AVIVO_DC_I2C_SOFT_RESET		(1 << 0)
+# define AVIVO_DC_I2C_ABORT			(1 << 8)
+#define AVIVO_DC_I2C_CONTROL1			0x7d38
+# define AVIVO_DC_I2C_START			(1 << 0)
+# define AVIVO_DC_I2C_STOP			(1 << 1)
+# define AVIVO_DC_I2C_RECEIVE			(1 << 2)
+# define AVIVO_DC_I2C_EN			(1 << 8)
+# define AVIVO_DC_I2C_PIN_SELECT(x)		((x) << 16)
+# define AVIVO_SEL_DDC1				0
+# define AVIVO_SEL_DDC2				1
+# define AVIVO_SEL_DDC3				2
+#define AVIVO_DC_I2C_CONTROL2			0x7d3c
+# define AVIVO_DC_I2C_ADDR_COUNT(x)		((x) << 0)
+# define AVIVO_DC_I2C_DATA_COUNT(x)		((x) << 8)
+#define AVIVO_DC_I2C_CONTROL3			0x7d40
+# define AVIVO_DC_I2C_DATA_DRIVE_EN		(1 << 0)
+# define AVIVO_DC_I2C_DATA_DRIVE_SEL		(1 << 1)
+# define AVIVO_DC_I2C_CLK_DRIVE_EN		(1 << 7)
+# define AVIVO_DC_I2C_RD_INTRA_BYTE_DELAY(x)	((x) << 8)
+# define AVIVO_DC_I2C_WR_INTRA_BYTE_DELAY(x)	((x) << 16)
+# define AVIVO_DC_I2C_TIME_LIMIT(x)		((x) << 24)
+#define AVIVO_DC_I2C_DATA			0x7d44
+#define AVIVO_DC_I2C_INTERRUPT_CONTROL		0x7d48
+# define AVIVO_DC_I2C_INTERRUPT_STATUS		(1 << 0)
+# define AVIVO_DC_I2C_INTERRUPT_AK		(1 << 8)
+# define AVIVO_DC_I2C_INTERRUPT_ENABLE		(1 << 16)
+#define AVIVO_DC_I2C_ARBITRATION		0x7d50
+# define AVIVO_DC_I2C_SW_WANTS_TO_USE_I2C	(1 << 0)
+# define AVIVO_DC_I2C_SW_CAN_USE_I2C		(1 << 1)
+# define AVIVO_DC_I2C_SW_DONE_USING_I2C		(1 << 8)
+# define AVIVO_DC_I2C_HW_NEEDS_I2C		(1 << 9)
+# define AVIVO_DC_I2C_ABORT_HDCP_I2C		(1 << 16)
+# define AVIVO_DC_I2C_HW_USING_I2C		(1 << 17)
+
+#define AVIVO_DC_GPIO_DDC1_MASK			0x7e40
+#define AVIVO_DC_GPIO_DDC1_A			0x7e44
+#define AVIVO_DC_GPIO_DDC1_EN			0x7e48
+#define AVIVO_DC_GPIO_DDC1_Y			0x7e4c
+
+#define AVIVO_DC_GPIO_DDC2_MASK			0x7e50
+#define AVIVO_DC_GPIO_DDC2_A			0x7e54
+#define AVIVO_DC_GPIO_DDC2_EN			0x7e58
+#define AVIVO_DC_GPIO_DDC2_Y			0x7e5c
+
+#define AVIVO_DC_GPIO_DDC3_MASK			0x7e60
+#define AVIVO_DC_GPIO_DDC3_A			0x7e64
+#define AVIVO_DC_GPIO_DDC3_EN			0x7e68
+#define AVIVO_DC_GPIO_DDC3_Y			0x7e6c

 #define AVIVO_DISP_INTERRUPT_STATUS		0x7edc
 # define AVIVO_D1_VBLANK_INTERRUPT		(1 << 4)
diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c
index ddf5731eba0d..2b8a5dd13516 100644
--- a/drivers/gpu/drm/radeon/r520.c
+++ b/drivers/gpu/drm/radeon/r520.c
@@ -119,13 +119,15 @@ static void r520_vram_get_type(struct radeon_device *rdev)
 		rdev->mc.vram_width *= 2;
 }

-void r520_vram_info(struct radeon_device *rdev)
+void r520_mc_init(struct radeon_device *rdev)
 {
 	fixed20_12 a;

 	r520_vram_get_type(rdev);
-
 	r100_vram_init_sizes(rdev);
+	radeon_vram_location(rdev, &rdev->mc, 0);
+	if (!(rdev->flags & RADEON_IS_AGP))
+		radeon_gtt_location(rdev, &rdev->mc);
 	/* FIXME: we should enforce default clock in case GPU is not in
 	 * default setup
 	 */
@@ -267,12 +269,15 @@ int r520_init(struct radeon_device *rdev)
 	radeon_get_clock_info(rdev->ddev);
 	/* Initialize power management */
 	radeon_pm_init(rdev);
-	/* Get vram informations */
-	r520_vram_info(rdev);
-	/* Initialize memory controller (also test AGP) */
-	r = r420_mc_init(rdev);
-	if (r)
-		return r;
+	/* initialize AGP */
+	if (rdev->flags & RADEON_IS_AGP) {
+		r = radeon_agp_init(rdev);
+		if (r) {
+			radeon_agp_disable(rdev);
+		}
+	}
+	/* initialize memory controller */
+	r520_mc_init(rdev);
 	rv515_debugfs(rdev);
 	/* Fence driver */
 	r = radeon_fence_driver_init(rdev);
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 2ffcf5a03551..c52290197292 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -353,23 +353,14 @@ void r600_hpd_fini(struct radeon_device *rdev)
 /*
  * R600 PCIE GART
  */
-int r600_gart_clear_page(struct radeon_device *rdev, int i)
-{
-	void __iomem *ptr = (void *)rdev->gart.table.vram.ptr;
-	u64 pte;
-
-	if (i < 0 || i > rdev->gart.num_gpu_pages)
-		return -EINVAL;
-	pte = 0;
-	writeq(pte, ((void __iomem *)ptr) + (i * 8));
-	return 0;
-}
-
 void r600_pcie_gart_tlb_flush(struct radeon_device *rdev)
 {
 	unsigned i;
 	u32 tmp;

+	/* flush hdp cache so updates hit vram */
+	WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
+
 	WREG32(VM_CONTEXT0_INVALIDATION_LOW_ADDR, rdev->mc.gtt_start >> 12);
 	WREG32(VM_CONTEXT0_INVALIDATION_HIGH_ADDR, (rdev->mc.gtt_end - 1) >> 12);
 	WREG32(VM_CONTEXT0_REQUEST_RESPONSE, REQUEST_TYPE(1));
@@ -416,6 +407,7 @@ int r600_pcie_gart_enable(struct radeon_device *rdev)
 	r = radeon_gart_table_vram_pin(rdev);
 	if (r)
 		return r;
+	radeon_gart_restore(rdev);

 	/* Setup L2 cache */
 	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
@@ -619,6 +611,68 @@ static void r600_mc_program(struct radeon_device *rdev)
 	rv515_vga_render_disable(rdev);
 }

+/**
+ * r600_vram_gtt_location - try to find VRAM & GTT location
+ * @rdev: radeon device structure holding all necessary information
+ * @mc: memory controller structure holding memory information
+ *
+ * This function tries to place VRAM at the same address as in the CPU
+ * (PCI) address space, as some GPUs seem to have issues when it is
+ * reprogrammed to a different address space.
+ *
+ * If there is not enough space to fit the invisible VRAM after the
+ * aperture, the VRAM size is limited to the aperture.
+ *
+ * If AGP is in use, VRAM is placed adjacent to the AGP aperture; the GPU
+ * needs to see them as one range so that it can be programmed to catch
+ * accesses outside both (weird GPU policy, see ??).
+ *
+ * This function never fails; in the worst case VRAM or GTT is limited.
+ *
+ * Note: GTT start, end and size should be initialized before calling this
+ * function on an AGP platform.
+ */
+void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
+{
+	u64 size_bf, size_af;
+
+	if (mc->mc_vram_size > 0xE0000000) {
+		/* leave room for at least 512M GTT */
+		dev_warn(rdev->dev, "limiting VRAM\n");
+		mc->real_vram_size = 0xE0000000;
+		mc->mc_vram_size = 0xE0000000;
+	}
+	if (rdev->flags & RADEON_IS_AGP) {
+		size_bf = mc->gtt_start;
+		size_af = 0xFFFFFFFF - mc->gtt_end + 1;
+		if (size_bf > size_af) {
+			if (mc->mc_vram_size > size_bf) {
+				dev_warn(rdev->dev, "limiting VRAM\n");
+				mc->real_vram_size = size_bf;
+				mc->mc_vram_size = size_bf;
+			}
+			mc->vram_start = mc->gtt_start - mc->mc_vram_size;
+		} else {
+			if (mc->mc_vram_size > size_af) {
+				dev_warn(rdev->dev, "limiting VRAM\n");
+				mc->real_vram_size = size_af;
+				mc->mc_vram_size = size_af;
+			}
+			mc->vram_start = mc->gtt_end;
+		}
+		mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
+		dev_info(rdev->dev, "VRAM: %lluM 0x%08llX - 0x%08llX (%lluM used)\n",
+				mc->mc_vram_size >> 20, mc->vram_start,
+				mc->vram_end, mc->real_vram_size >> 20);
+	} else {
+		u64 base = 0;
+		if (rdev->flags & RADEON_IS_IGP)
+			base = (RREG32(MC_VM_FB_LOCATION) & 0xFFFF) << 24;
+		radeon_vram_location(rdev, &rdev->mc, base);
+		radeon_gtt_location(rdev, mc);
+	}
+}
+
 int r600_mc_init(struct radeon_device *rdev)
 {
 	fixed20_12 a;
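
To make the AGP branch of r600_vram_gtt_location() concrete, here is a worked example with invented numbers: with the GTT aperture at 0xD0000000-0xDFFFFFFF there are roughly 3.25 GiB free below it and only 512 MiB above, so a 1 GiB VRAM block lands immediately below the aperture.

	/* Worked example of the size_bf/size_af decision above.
	 * All values are invented for illustration. */
	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t gtt_start = 0xD0000000ULL;
		uint64_t gtt_end = 0xDFFFFFFFULL;	/* inclusive */
		uint64_t vram_size = 0x40000000ULL;	/* 1 GiB */
		uint64_t size_bf = gtt_start;			/* space before */
		uint64_t size_af = 0xFFFFFFFFULL - gtt_end + 1;	/* space after */
		uint64_t vram_start;

		/* Mirrors the patch: prefer whichever side has more room. */
		vram_start = (size_bf > size_af) ? gtt_start - vram_size
						 : gtt_end;
		printf("VRAM placed at 0x%llx (%s the GTT aperture)\n",
		       (unsigned long long)vram_start,
		       size_bf > size_af ? "below" : "after");
		return 0;	/* prints 0x90000000, below */
	}
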
@@ -658,75 +712,21 @@ int r600_mc_init(struct radeon_device *rdev)
 	/* Setup GPU memory space */
 	rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
 	rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
-
-	if (rdev->mc.mc_vram_size > rdev->mc.aper_size)
+	rdev->mc.visible_vram_size = rdev->mc.aper_size;
+	/* FIXME remove this once we support unmappable VRAM */
+	if (rdev->mc.mc_vram_size > rdev->mc.aper_size) {
 		rdev->mc.mc_vram_size = rdev->mc.aper_size;
-
-	if (rdev->mc.real_vram_size > rdev->mc.aper_size)
 		rdev->mc.real_vram_size = rdev->mc.aper_size;
-
-	if (rdev->flags & RADEON_IS_AGP) {
-		/* gtt_size is setup by radeon_agp_init */
-		rdev->mc.gtt_location = rdev->mc.agp_base;
-		tmp = 0xFFFFFFFFUL - rdev->mc.agp_base - rdev->mc.gtt_size;
-		/* Try to put vram before or after AGP because we
-		 * we want SYSTEM_APERTURE to cover both VRAM and
-		 * AGP so that GPU can catch out of VRAM/AGP access
-		 */
-		if (rdev->mc.gtt_location > rdev->mc.mc_vram_size) {
-			/* Enough place before */
-			rdev->mc.vram_location = rdev->mc.gtt_location -
-							rdev->mc.mc_vram_size;
-		} else if (tmp > rdev->mc.mc_vram_size) {
-			/* Enough place after */
-			rdev->mc.vram_location = rdev->mc.gtt_location +
-							rdev->mc.gtt_size;
-		} else {
-			/* Try to setup VRAM then AGP might not
-			 * not work on some card
-			 */
-			rdev->mc.vram_location = 0x00000000UL;
-			rdev->mc.gtt_location = rdev->mc.mc_vram_size;
-		}
-	} else {
-		rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
-		rdev->mc.vram_location = (RREG32(MC_VM_FB_LOCATION) &
-							0xFFFF) << 24;
-		tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size;
-		if ((0xFFFFFFFFUL - tmp) >= rdev->mc.gtt_size) {
-			/* Enough place after vram */
-			rdev->mc.gtt_location = tmp;
-		} else if (rdev->mc.vram_location >= rdev->mc.gtt_size) {
-			/* Enough place before vram */
-			rdev->mc.gtt_location = 0;
-		} else {
-			/* Not enough place after or before shrink
-			 * gart size
-			 */
-			if (rdev->mc.vram_location > (0xFFFFFFFFUL - tmp)) {
-				rdev->mc.gtt_location = 0;
-				rdev->mc.gtt_size = rdev->mc.vram_location;
-			} else {
-				rdev->mc.gtt_location = tmp;
-				rdev->mc.gtt_size = 0xFFFFFFFFUL - tmp;
-			}
-		}
-		rdev->mc.gtt_location = rdev->mc.mc_vram_size;
 	}
-	rdev->mc.vram_start = rdev->mc.vram_location;
-	rdev->mc.vram_end = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1;
-	rdev->mc.gtt_start = rdev->mc.gtt_location;
-	rdev->mc.gtt_end = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
+	r600_vram_gtt_location(rdev, &rdev->mc);
 	/* FIXME: we should enforce default clock in case GPU is not in
 	 * default setup
 	 */
 	a.full = rfixed_const(100);
 	rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk);
 	rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
-
 	if (rdev->flags & RADEON_IS_IGP)
 		rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
-
 	return 0;
 }

@@ -981,6 +981,9 @@ void r600_gpu_init(struct radeon_device *rdev)
 {
 	u32 tiling_config;
 	u32 ramcfg;
+	u32 backend_map;
+	u32 cc_rb_backend_disable;
+	u32 cc_gc_shader_pipe_config;
 	u32 tmp;
 	int i, j;
 	u32 sq_config;
@@ -1090,8 +1093,11 @@ void r600_gpu_init(struct radeon_device *rdev)
 	default:
 		break;
 	}
+	rdev->config.r600.tiling_npipes = rdev->config.r600.max_tile_pipes;
+	rdev->config.r600.tiling_nbanks = 4 << ((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
 	tiling_config |= BANK_TILING((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
 	tiling_config |= GROUP_SIZE(0);
+	rdev->config.r600.tiling_group_size = 256;
 	tmp = (ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT;
 	if (tmp > 3) {
 		tiling_config |= ROW_TILING(3);
@@ -1101,24 +1107,33 @@ void r600_gpu_init(struct radeon_device *rdev)
1101 tiling_config |= SAMPLE_SPLIT(tmp); 1107 tiling_config |= SAMPLE_SPLIT(tmp);
1102 } 1108 }
1103 tiling_config |= BANK_SWAPS(1); 1109 tiling_config |= BANK_SWAPS(1);
1104 tmp = r600_get_tile_pipe_to_backend_map(rdev->config.r600.max_tile_pipes, 1110
1105 rdev->config.r600.max_backends, 1111 cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000;
1106 (0xff << rdev->config.r600.max_backends) & 0xff); 1112 cc_rb_backend_disable |=
1107 tiling_config |= BACKEND_MAP(tmp); 1113 BACKEND_DISABLE((R6XX_MAX_BACKENDS_MASK << rdev->config.r600.max_backends) & R6XX_MAX_BACKENDS_MASK);
1114
1115 cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffffff00;
1116 cc_gc_shader_pipe_config |=
1117 INACTIVE_QD_PIPES((R6XX_MAX_PIPES_MASK << rdev->config.r600.max_pipes) & R6XX_MAX_PIPES_MASK);
1118 cc_gc_shader_pipe_config |=
1119 INACTIVE_SIMDS((R6XX_MAX_SIMDS_MASK << rdev->config.r600.max_simds) & R6XX_MAX_SIMDS_MASK);
1120
1121 backend_map = r600_get_tile_pipe_to_backend_map(rdev->config.r600.max_tile_pipes,
1122 (R6XX_MAX_BACKENDS -
1123 r600_count_pipe_bits((cc_rb_backend_disable &
1124 R6XX_MAX_BACKENDS_MASK) >> 16)),
1125 (cc_rb_backend_disable >> 16));
1126
1127 tiling_config |= BACKEND_MAP(backend_map);
1108 WREG32(GB_TILING_CONFIG, tiling_config); 1128 WREG32(GB_TILING_CONFIG, tiling_config);
1109 WREG32(DCP_TILING_CONFIG, tiling_config & 0xffff); 1129 WREG32(DCP_TILING_CONFIG, tiling_config & 0xffff);
1110 WREG32(HDP_TILING_CONFIG, tiling_config & 0xffff); 1130 WREG32(HDP_TILING_CONFIG, tiling_config & 0xffff);
1111 1131
1112 tmp = BACKEND_DISABLE((R6XX_MAX_BACKENDS_MASK << rdev->config.r600.max_backends) & R6XX_MAX_BACKENDS_MASK);
1113 WREG32(CC_RB_BACKEND_DISABLE, tmp);
1114
1115 /* Setup pipes */ 1132 /* Setup pipes */
1116 tmp = INACTIVE_QD_PIPES((R6XX_MAX_PIPES_MASK << rdev->config.r600.max_pipes) & R6XX_MAX_PIPES_MASK); 1133 WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
1117 tmp |= INACTIVE_SIMDS((R6XX_MAX_SIMDS_MASK << rdev->config.r600.max_simds) & R6XX_MAX_SIMDS_MASK); 1134 WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
1118 WREG32(CC_GC_SHADER_PIPE_CONFIG, tmp);
1119 WREG32(GC_USER_SHADER_PIPE_CONFIG, tmp);
1120 1135
1121 tmp = R6XX_MAX_BACKENDS - r600_count_pipe_bits(tmp & INACTIVE_QD_PIPES_MASK); 1136 tmp = R6XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);
1122 WREG32(VGT_OUT_DEALLOC_CNTL, (tmp * 4) & DEALLOC_DIST_MASK); 1137 WREG32(VGT_OUT_DEALLOC_CNTL, (tmp * 4) & DEALLOC_DIST_MASK);
1123 WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((tmp * 4) - 2) & VTX_REUSE_DEPTH_MASK); 1138 WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((tmp * 4) - 2) & VTX_REUSE_DEPTH_MASK);
1124 1139
@@ -1783,12 +1798,17 @@ void r600_fence_ring_emit(struct radeon_device *rdev,
1783 struct radeon_fence *fence) 1798 struct radeon_fence *fence)
1784{ 1799{
1785 /* Also consider EVENT_WRITE_EOP. it handles the interrupts + timestamps + events */ 1800 /* Also consider EVENT_WRITE_EOP. it handles the interrupts + timestamps + events */
1801
1802 radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE, 0));
1803 radeon_ring_write(rdev, CACHE_FLUSH_AND_INV_EVENT);
1804 /* wait for 3D idle clean */
1805 radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
1806 radeon_ring_write(rdev, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
1807 radeon_ring_write(rdev, WAIT_3D_IDLE_bit | WAIT_3D_IDLECLEAN_bit);
1786 /* Emit fence sequence & fire IRQ */ 1808 /* Emit fence sequence & fire IRQ */
1787 radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1)); 1809 radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
1788 radeon_ring_write(rdev, ((rdev->fence_drv.scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2)); 1810 radeon_ring_write(rdev, ((rdev->fence_drv.scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
1789 radeon_ring_write(rdev, fence->seq); 1811 radeon_ring_write(rdev, fence->seq);
1790 radeon_ring_write(rdev, PACKET0(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0));
1791 radeon_ring_write(rdev, 1);
1792 /* CP_INTERRUPT packet 3 no longer exists, use packet 0 */ 1812 /* CP_INTERRUPT packet 3 no longer exists, use packet 0 */
1793 radeon_ring_write(rdev, PACKET0(CP_INT_STATUS, 0)); 1813 radeon_ring_write(rdev, PACKET0(CP_INT_STATUS, 0));
1794 radeon_ring_write(rdev, RB_INT_STAT); 1814 radeon_ring_write(rdev, RB_INT_STAT);
@@ -2745,6 +2765,7 @@ restart_ih:
2745 case 0: /* D1 vblank */ 2765 case 0: /* D1 vblank */
2746 if (disp_int & LB_D1_VBLANK_INTERRUPT) { 2766 if (disp_int & LB_D1_VBLANK_INTERRUPT) {
2747 drm_handle_vblank(rdev->ddev, 0); 2767 drm_handle_vblank(rdev->ddev, 0);
2768 wake_up(&rdev->irq.vblank_queue);
2748 disp_int &= ~LB_D1_VBLANK_INTERRUPT; 2769 disp_int &= ~LB_D1_VBLANK_INTERRUPT;
2749 DRM_DEBUG("IH: D1 vblank\n"); 2770 DRM_DEBUG("IH: D1 vblank\n");
2750 } 2771 }
@@ -2765,6 +2786,7 @@ restart_ih:
2765 case 0: /* D2 vblank */ 2786 case 0: /* D2 vblank */
2766 if (disp_int & LB_D2_VBLANK_INTERRUPT) { 2787 if (disp_int & LB_D2_VBLANK_INTERRUPT) {
2767 drm_handle_vblank(rdev->ddev, 1); 2788 drm_handle_vblank(rdev->ddev, 1);
2789 wake_up(&rdev->irq.vblank_queue);
2768 disp_int &= ~LB_D2_VBLANK_INTERRUPT; 2790 disp_int &= ~LB_D2_VBLANK_INTERRUPT;
2769 DRM_DEBUG("IH: D2 vblank\n"); 2791 DRM_DEBUG("IH: D2 vblank\n");
2770 } 2792 }
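The wake_up() calls added to the D1/D2 vblank paths complete an ordinary waitqueue handshake: some consumer sleeps on rdev->irq.vblank_queue until the interrupt handler signals it. A hedged kernel-style sketch of the waiting side (vbl_seen() is a hypothetical condition for illustration only; the real consumer lives elsewhere in the driver):

/* waiting side, sketched in kernel style (not from this patch) */
#include <linux/wait.h>
#include <linux/jiffies.h>

static bool vbl_seen(void);     /* hypothetical flag set on the IRQ path */

static void wait_for_vblank_example(wait_queue_head_t *vblank_queue)
{
        /* sleeps until the IH handler's wake_up(vblank_queue) fires,
         * or until roughly one frame worth of jiffies has elapsed */
        wait_event_timeout(*vblank_queue, vbl_seen(), HZ / 60);
}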
diff --git a/drivers/gpu/drm/radeon/r600_audio.c b/drivers/gpu/drm/radeon/r600_audio.c
index 0dcb6904c4ff..db928016d034 100644
--- a/drivers/gpu/drm/radeon/r600_audio.c
+++ b/drivers/gpu/drm/radeon/r600_audio.c
@@ -35,7 +35,7 @@
35 */ 35 */
36static int r600_audio_chipset_supported(struct radeon_device *rdev) 36static int r600_audio_chipset_supported(struct radeon_device *rdev)
37{ 37{
38 return (rdev->family >= CHIP_R600 && rdev->family < CHIP_RV710) 38 return rdev->family >= CHIP_R600
39 || rdev->family == CHIP_RS600 39 || rdev->family == CHIP_RS600
40 || rdev->family == CHIP_RS690 40 || rdev->family == CHIP_RS690
41 || rdev->family == CHIP_RS740; 41 || rdev->family == CHIP_RS740;
@@ -147,15 +147,23 @@ static void r600_audio_update_hdmi(unsigned long param)
147} 147}
148 148
149/* 149/*
150 * turn on/off audio engine
151 */
152static void r600_audio_engine_enable(struct radeon_device *rdev, bool enable)
153{
 154 DRM_INFO("%s audio support\n", enable ? "Enabling" : "Disabling");
155 WREG32_P(R600_AUDIO_ENABLE, enable ? 0x81000000 : 0x0, ~0x81000000);
156}
157
158/*
150 * initialize the audio vars and register the update timer 159 * initialize the audio vars and register the update timer
151 */ 160 */
152int r600_audio_init(struct radeon_device *rdev) 161int r600_audio_init(struct radeon_device *rdev)
153{ 162{
154 if (!r600_audio_chipset_supported(rdev)) 163 if (!radeon_audio || !r600_audio_chipset_supported(rdev))
155 return 0; 164 return 0;
156 165
157 DRM_INFO("%s audio support", radeon_audio ? "Enabling" : "Disabling"); 166 r600_audio_engine_enable(rdev, true);
158 WREG32_P(R600_AUDIO_ENABLE, radeon_audio ? 0x81000000 : 0x0, ~0x81000000);
159 167
160 rdev->audio_channels = -1; 168 rdev->audio_channels = -1;
161 rdev->audio_rate = -1; 169 rdev->audio_rate = -1;
@@ -258,9 +266,10 @@ void r600_audio_set_clock(struct drm_encoder *encoder, int clock)
258 */ 266 */
259void r600_audio_fini(struct radeon_device *rdev) 267void r600_audio_fini(struct radeon_device *rdev)
260{ 268{
261 if (!r600_audio_chipset_supported(rdev)) 269 if (!radeon_audio || !r600_audio_chipset_supported(rdev))
262 return; 270 return;
263 271
264 del_timer(&rdev->audio_timer); 272 del_timer(&rdev->audio_timer);
265 WREG32_P(R600_AUDIO_ENABLE, 0x0, ~0x81000000); 273
274 r600_audio_engine_enable(rdev, false);
266} 275}
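Both the old open-coded writes and the new r600_audio_engine_enable() helper rely on WREG32_P(), radeon's masked read-modify-write: bits selected by the mask are preserved, the rest are replaced. A minimal sketch of that effect (rmw_masked mirrors the assumed WREG32_P semantics; the register contents are made up):

#include <stdint.h>
#include <stdio.h>

/* assumed behaviour of WREG32_P(reg, val, mask): keep the bits selected
 * by mask, replace the rest with val */
static uint32_t rmw_masked(uint32_t old, uint32_t val, uint32_t mask)
{
        return (old & mask) | (val & ~mask);
}

int main(void)
{
        uint32_t audio_enable = 0x00c00001;     /* hypothetical register contents */

        /* enable: sets bits 31 and 24, leaves everything else untouched */
        audio_enable = rmw_masked(audio_enable, 0x81000000, ~0x81000000u);
        printf("0x%08x\n", audio_enable);       /* prints 0x81c00001 */
        return 0;
}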
diff --git a/drivers/gpu/drm/radeon/r600_blit.c b/drivers/gpu/drm/radeon/r600_blit.c
index 5ea432347589..f4fb88ece2bb 100644
--- a/drivers/gpu/drm/radeon/r600_blit.c
+++ b/drivers/gpu/drm/radeon/r600_blit.c
@@ -49,7 +49,7 @@ set_render_target(drm_radeon_private_t *dev_priv, int format, int w, int h, u64
49 RING_LOCALS; 49 RING_LOCALS;
50 DRM_DEBUG("\n"); 50 DRM_DEBUG("\n");
51 51
52 h = (h + 7) & ~7; 52 h = ALIGN(h, 8);
53 if (h < 8) 53 if (h < 8)
54 h = 8; 54 h = 8;
55 55
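The (h + 7) & ~7 to ALIGN(h, 8) change is purely cosmetic: for a power-of-two boundary the kernel's ALIGN() expands to the same mask expression. A quick standalone check:

#include <stdint.h>
#include <assert.h>

/* kernel definition for power-of-two a: round x up to a multiple of a */
#define ALIGN_POW2(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
        for (uint32_t h = 0; h < 1024; h++)
                assert(ALIGN_POW2(h, 8) == ((h + 7) & ~7u));
        return 0;
}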
diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c
index 446b765ac72a..f6c6c77db7e0 100644
--- a/drivers/gpu/drm/radeon/r600_blit_kms.c
+++ b/drivers/gpu/drm/radeon/r600_blit_kms.c
@@ -25,7 +25,7 @@ set_render_target(struct radeon_device *rdev, int format,
25 u32 cb_color_info; 25 u32 cb_color_info;
26 int pitch, slice; 26 int pitch, slice;
27 27
28 h = (h + 7) & ~7; 28 h = ALIGN(h, 8);
29 if (h < 8) 29 if (h < 8)
30 h = 8; 30 h = 8;
31 31
@@ -396,15 +396,13 @@ set_default_state(struct radeon_device *rdev)
396 NUM_ES_STACK_ENTRIES(num_es_stack_entries)); 396 NUM_ES_STACK_ENTRIES(num_es_stack_entries));
397 397
398 /* emit an IB pointing at default state */ 398 /* emit an IB pointing at default state */
399 dwords = (rdev->r600_blit.state_len + 0xf) & ~0xf; 399 dwords = ALIGN(rdev->r600_blit.state_len, 0x10);
400 gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.state_offset; 400 gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.state_offset;
401 radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2)); 401 radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
402 radeon_ring_write(rdev, gpu_addr & 0xFFFFFFFC); 402 radeon_ring_write(rdev, gpu_addr & 0xFFFFFFFC);
403 radeon_ring_write(rdev, upper_32_bits(gpu_addr) & 0xFF); 403 radeon_ring_write(rdev, upper_32_bits(gpu_addr) & 0xFF);
404 radeon_ring_write(rdev, dwords); 404 radeon_ring_write(rdev, dwords);
405 405
406 radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE, 0));
407 radeon_ring_write(rdev, CACHE_FLUSH_AND_INV_EVENT);
408 /* SQ config */ 406 /* SQ config */
409 radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 6)); 407 radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 6));
410 radeon_ring_write(rdev, (SQ_CONFIG - PACKET3_SET_CONFIG_REG_OFFSET) >> 2); 408 radeon_ring_write(rdev, (SQ_CONFIG - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
@@ -578,9 +576,9 @@ int r600_blit_prepare_copy(struct radeon_device *rdev, int size_bytes)
578 ring_size = num_loops * dwords_per_loop; 576 ring_size = num_loops * dwords_per_loop;
579 /* set default + shaders */ 577 /* set default + shaders */
580 ring_size += 40; /* shaders + def state */ 578 ring_size += 40; /* shaders + def state */
581 ring_size += 7; /* fence emit for VB IB */ 579 ring_size += 10; /* fence emit for VB IB */
582 ring_size += 5; /* done copy */ 580 ring_size += 5; /* done copy */
583 ring_size += 7; /* fence emit for done copy */ 581 ring_size += 10; /* fence emit for done copy */
584 r = radeon_ring_lock(rdev, ring_size); 582 r = radeon_ring_lock(rdev, ring_size);
585 if (r) 583 if (r)
586 return r; 584 return r;
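The 7 -> 10 dword bump matches the reworked r600_fence_ring_emit() earlier in this diff: a PACKET3 with count n occupies n + 2 dwords and a single-register PACKET0 write occupies 2, so the old emit was 3 (scratch reg) + 2 (HDP flush) + 2 (CP interrupt) = 7, and the new one is 2 (EVENT_WRITE) + 3 (WAIT_UNTIL) + 3 (scratch reg) + 2 (CP interrupt) = 10. The same arithmetic as a compile-time check:

/* dword counts: a PACKET3 with count n occupies n + 2 dwords; a PACKET0
 * write of a single register occupies 2 dwords */
enum {
        OLD_FENCE_DWORDS = 3 /* scratch reg */ + 2 /* HDP flush */ + 2 /* CP int */,
        NEW_FENCE_DWORDS = 2 /* EVENT_WRITE */ + 3 /* WAIT_UNTIL */
                         + 3 /* scratch reg */ + 2 /* CP int */,
};

_Static_assert(OLD_FENCE_DWORDS == 7, "old fence emit was 7 dwords");
_Static_assert(NEW_FENCE_DWORDS == 10, "new fence emit is 10 dwords");

int main(void) { return 0; }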
@@ -594,13 +592,6 @@ void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence)
594{ 592{
595 int r; 593 int r;
596 594
597 radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE, 0));
598 radeon_ring_write(rdev, CACHE_FLUSH_AND_INV_EVENT);
599 /* wait for 3D idle clean */
600 radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
601 radeon_ring_write(rdev, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
602 radeon_ring_write(rdev, WAIT_3D_IDLE_bit | WAIT_3D_IDLECLEAN_bit);
603
604 if (rdev->r600_blit.vb_ib) 595 if (rdev->r600_blit.vb_ib)
605 r600_vb_ib_put(rdev); 596 r600_vb_ib_put(rdev);
606 597
diff --git a/drivers/gpu/drm/radeon/r600_blit_shaders.c b/drivers/gpu/drm/radeon/r600_blit_shaders.c
index d745e815c2e8..a112c59f9d82 100644
--- a/drivers/gpu/drm/radeon/r600_blit_shaders.c
+++ b/drivers/gpu/drm/radeon/r600_blit_shaders.c
@@ -9,11 +9,6 @@ const u32 r6xx_default_state[] =
9 0xc0012800, 9 0xc0012800,
10 0x80000000, 10 0x80000000,
11 0x80000000, 11 0x80000000,
12 0xc0004600,
13 0x00000016,
14 0xc0016800,
15 0x00000010,
16 0x00028000,
17 0xc0016800, 12 0xc0016800,
18 0x00000010, 13 0x00000010,
19 0x00008000, 14 0x00008000,
@@ -531,11 +526,6 @@ const u32 r7xx_default_state[] =
531 0xc0012800, 526 0xc0012800,
532 0x80000000, 527 0x80000000,
533 0x80000000, 528 0x80000000,
534 0xc0004600,
535 0x00000016,
536 0xc0016800,
537 0x00000010,
538 0x00028000,
539 0xc0016800, 529 0xc0016800,
540 0x00000010, 530 0x00000010,
541 0x00008000, 531 0x00008000,
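The five dwords dropped from each default-state block are the flush/wait pair that now travels with every fence: 0xc0004600 0x00000016 is a type-3 EVENT_WRITE of CACHE_FLUSH_AND_INV_EVENT, and the 0xc0016800 triple is a SET_CONFIG_REG of WAIT_UNTIL, matching the PACKET3 pairs removed from set_default_state above. A small decoder for the r600 type-3 header layout (type in bits 31:30, count in bits 29:16, opcode in bits 15:8):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t hdr = 0xc0004600;
        uint32_t type   = hdr >> 30;            /* 3 = PACKET3 */
        uint32_t count  = (hdr >> 16) & 0x3fff; /* n: n + 1 data dwords follow */
        uint32_t opcode = (hdr >> 8) & 0xff;    /* 0x46 = EVENT_WRITE */

        printf("type %u, opcode 0x%02x, count %u\n", type, opcode, count);
        return 0;
}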
diff --git a/drivers/gpu/drm/radeon/r600_cp.c b/drivers/gpu/drm/radeon/r600_cp.c
index 75bcf35a0931..40416c068d9f 100644
--- a/drivers/gpu/drm/radeon/r600_cp.c
+++ b/drivers/gpu/drm/radeon/r600_cp.c
@@ -734,8 +734,8 @@ static void r600_gfx_init(struct drm_device *dev,
734 u32 hdp_host_path_cntl; 734 u32 hdp_host_path_cntl;
735 u32 backend_map; 735 u32 backend_map;
736 u32 gb_tiling_config = 0; 736 u32 gb_tiling_config = 0;
737 u32 cc_rb_backend_disable = 0; 737 u32 cc_rb_backend_disable;
738 u32 cc_gc_shader_pipe_config = 0; 738 u32 cc_gc_shader_pipe_config;
739 u32 ramcfg; 739 u32 ramcfg;
740 740
741 /* setup chip specs */ 741 /* setup chip specs */
@@ -857,29 +857,44 @@ static void r600_gfx_init(struct drm_device *dev,
857 857
858 gb_tiling_config |= R600_BANK_SWAPS(1); 858 gb_tiling_config |= R600_BANK_SWAPS(1);
859 859
860 backend_map = r600_get_tile_pipe_to_backend_map(dev_priv->r600_max_tile_pipes, 860 cc_rb_backend_disable = RADEON_READ(R600_CC_RB_BACKEND_DISABLE) & 0x00ff0000;
861 dev_priv->r600_max_backends, 861 cc_rb_backend_disable |=
862 (0xff << dev_priv->r600_max_backends) & 0xff); 862 R600_BACKEND_DISABLE((R6XX_MAX_BACKENDS_MASK << dev_priv->r600_max_backends) & R6XX_MAX_BACKENDS_MASK);
863 gb_tiling_config |= R600_BACKEND_MAP(backend_map);
864 863
865 cc_gc_shader_pipe_config = 864 cc_gc_shader_pipe_config = RADEON_READ(R600_CC_GC_SHADER_PIPE_CONFIG) & 0xffffff00;
865 cc_gc_shader_pipe_config |=
866 R600_INACTIVE_QD_PIPES((R6XX_MAX_PIPES_MASK << dev_priv->r600_max_pipes) & R6XX_MAX_PIPES_MASK); 866 R600_INACTIVE_QD_PIPES((R6XX_MAX_PIPES_MASK << dev_priv->r600_max_pipes) & R6XX_MAX_PIPES_MASK);
867 cc_gc_shader_pipe_config |= 867 cc_gc_shader_pipe_config |=
868 R600_INACTIVE_SIMDS((R6XX_MAX_SIMDS_MASK << dev_priv->r600_max_simds) & R6XX_MAX_SIMDS_MASK); 868 R600_INACTIVE_SIMDS((R6XX_MAX_SIMDS_MASK << dev_priv->r600_max_simds) & R6XX_MAX_SIMDS_MASK);
869 869
870 cc_rb_backend_disable = 870 backend_map = r600_get_tile_pipe_to_backend_map(dev_priv->r600_max_tile_pipes,
871 R600_BACKEND_DISABLE((R6XX_MAX_BACKENDS_MASK << dev_priv->r600_max_backends) & R6XX_MAX_BACKENDS_MASK); 871 (R6XX_MAX_BACKENDS -
872 r600_count_pipe_bits((cc_rb_backend_disable &
873 R6XX_MAX_BACKENDS_MASK) >> 16)),
874 (cc_rb_backend_disable >> 16));
875 gb_tiling_config |= R600_BACKEND_MAP(backend_map);
872 876
873 RADEON_WRITE(R600_GB_TILING_CONFIG, gb_tiling_config); 877 RADEON_WRITE(R600_GB_TILING_CONFIG, gb_tiling_config);
874 RADEON_WRITE(R600_DCP_TILING_CONFIG, (gb_tiling_config & 0xffff)); 878 RADEON_WRITE(R600_DCP_TILING_CONFIG, (gb_tiling_config & 0xffff));
875 RADEON_WRITE(R600_HDP_TILING_CONFIG, (gb_tiling_config & 0xffff)); 879 RADEON_WRITE(R600_HDP_TILING_CONFIG, (gb_tiling_config & 0xffff));
880 if (gb_tiling_config & 0xc0) {
881 dev_priv->r600_group_size = 512;
882 } else {
883 dev_priv->r600_group_size = 256;
884 }
885 dev_priv->r600_npipes = 1 << ((gb_tiling_config >> 1) & 0x7);
886 if (gb_tiling_config & 0x30) {
887 dev_priv->r600_nbanks = 8;
888 } else {
889 dev_priv->r600_nbanks = 4;
890 }
876 891
877 RADEON_WRITE(R600_CC_RB_BACKEND_DISABLE, cc_rb_backend_disable); 892 RADEON_WRITE(R600_CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
878 RADEON_WRITE(R600_CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config); 893 RADEON_WRITE(R600_CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
879 RADEON_WRITE(R600_GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config); 894 RADEON_WRITE(R600_GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
880 895
881 num_qd_pipes = 896 num_qd_pipes =
882 R6XX_MAX_BACKENDS - r600_count_pipe_bits(cc_gc_shader_pipe_config & R600_INACTIVE_QD_PIPES_MASK); 897 R6XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & R600_INACTIVE_QD_PIPES_MASK) >> 8);
883 RADEON_WRITE(R600_VGT_OUT_DEALLOC_CNTL, (num_qd_pipes * 4) & R600_DEALLOC_DIST_MASK); 898 RADEON_WRITE(R600_VGT_OUT_DEALLOC_CNTL, (num_qd_pipes * 4) & R600_DEALLOC_DIST_MASK);
884 RADEON_WRITE(R600_VGT_VERTEX_REUSE_BLOCK_CNTL, ((num_qd_pipes * 4) - 2) & R600_VTX_REUSE_DEPTH_MASK); 899 RADEON_WRITE(R600_VGT_VERTEX_REUSE_BLOCK_CNTL, ((num_qd_pipes * 4) - 2) & R600_VTX_REUSE_DEPTH_MASK);
885 900
@@ -1151,7 +1166,8 @@ static void r600_gfx_init(struct drm_device *dev,
1151 1166
1152} 1167}
1153 1168
1154static u32 r700_get_tile_pipe_to_backend_map(u32 num_tile_pipes, 1169static u32 r700_get_tile_pipe_to_backend_map(drm_radeon_private_t *dev_priv,
1170 u32 num_tile_pipes,
1155 u32 num_backends, 1171 u32 num_backends,
1156 u32 backend_disable_mask) 1172 u32 backend_disable_mask)
1157{ 1173{
@@ -1162,6 +1178,7 @@ static u32 r700_get_tile_pipe_to_backend_map(u32 num_tile_pipes,
1162 u32 swizzle_pipe[R7XX_MAX_PIPES]; 1178 u32 swizzle_pipe[R7XX_MAX_PIPES];
1163 u32 cur_backend; 1179 u32 cur_backend;
1164 u32 i; 1180 u32 i;
1181 bool force_no_swizzle;
1165 1182
1166 if (num_tile_pipes > R7XX_MAX_PIPES) 1183 if (num_tile_pipes > R7XX_MAX_PIPES)
1167 num_tile_pipes = R7XX_MAX_PIPES; 1184 num_tile_pipes = R7XX_MAX_PIPES;
@@ -1191,6 +1208,18 @@ static u32 r700_get_tile_pipe_to_backend_map(u32 num_tile_pipes,
1191 if (enabled_backends_count != num_backends) 1208 if (enabled_backends_count != num_backends)
1192 num_backends = enabled_backends_count; 1209 num_backends = enabled_backends_count;
1193 1210
1211 switch (dev_priv->flags & RADEON_FAMILY_MASK) {
1212 case CHIP_RV770:
1213 case CHIP_RV730:
1214 force_no_swizzle = false;
1215 break;
1216 case CHIP_RV710:
1217 case CHIP_RV740:
1218 default:
1219 force_no_swizzle = true;
1220 break;
1221 }
1222
1194 memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * R7XX_MAX_PIPES); 1223 memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * R7XX_MAX_PIPES);
1195 switch (num_tile_pipes) { 1224 switch (num_tile_pipes) {
1196 case 1: 1225 case 1:
@@ -1201,49 +1230,100 @@ static u32 r700_get_tile_pipe_to_backend_map(u32 num_tile_pipes,
1201 swizzle_pipe[1] = 1; 1230 swizzle_pipe[1] = 1;
1202 break; 1231 break;
1203 case 3: 1232 case 3:
1204 swizzle_pipe[0] = 0; 1233 if (force_no_swizzle) {
1205 swizzle_pipe[1] = 2; 1234 swizzle_pipe[0] = 0;
1206 swizzle_pipe[2] = 1; 1235 swizzle_pipe[1] = 1;
1236 swizzle_pipe[2] = 2;
1237 } else {
1238 swizzle_pipe[0] = 0;
1239 swizzle_pipe[1] = 2;
1240 swizzle_pipe[2] = 1;
1241 }
1207 break; 1242 break;
1208 case 4: 1243 case 4:
1209 swizzle_pipe[0] = 0; 1244 if (force_no_swizzle) {
1210 swizzle_pipe[1] = 2; 1245 swizzle_pipe[0] = 0;
1211 swizzle_pipe[2] = 3; 1246 swizzle_pipe[1] = 1;
1212 swizzle_pipe[3] = 1; 1247 swizzle_pipe[2] = 2;
1248 swizzle_pipe[3] = 3;
1249 } else {
1250 swizzle_pipe[0] = 0;
1251 swizzle_pipe[1] = 2;
1252 swizzle_pipe[2] = 3;
1253 swizzle_pipe[3] = 1;
1254 }
1213 break; 1255 break;
1214 case 5: 1256 case 5:
1215 swizzle_pipe[0] = 0; 1257 if (force_no_swizzle) {
1216 swizzle_pipe[1] = 2; 1258 swizzle_pipe[0] = 0;
1217 swizzle_pipe[2] = 4; 1259 swizzle_pipe[1] = 1;
1218 swizzle_pipe[3] = 1; 1260 swizzle_pipe[2] = 2;
1219 swizzle_pipe[4] = 3; 1261 swizzle_pipe[3] = 3;
1262 swizzle_pipe[4] = 4;
1263 } else {
1264 swizzle_pipe[0] = 0;
1265 swizzle_pipe[1] = 2;
1266 swizzle_pipe[2] = 4;
1267 swizzle_pipe[3] = 1;
1268 swizzle_pipe[4] = 3;
1269 }
1220 break; 1270 break;
1221 case 6: 1271 case 6:
1222 swizzle_pipe[0] = 0; 1272 if (force_no_swizzle) {
1223 swizzle_pipe[1] = 2; 1273 swizzle_pipe[0] = 0;
1224 swizzle_pipe[2] = 4; 1274 swizzle_pipe[1] = 1;
1225 swizzle_pipe[3] = 5; 1275 swizzle_pipe[2] = 2;
1226 swizzle_pipe[4] = 3; 1276 swizzle_pipe[3] = 3;
1227 swizzle_pipe[5] = 1; 1277 swizzle_pipe[4] = 4;
1278 swizzle_pipe[5] = 5;
1279 } else {
1280 swizzle_pipe[0] = 0;
1281 swizzle_pipe[1] = 2;
1282 swizzle_pipe[2] = 4;
1283 swizzle_pipe[3] = 5;
1284 swizzle_pipe[4] = 3;
1285 swizzle_pipe[5] = 1;
1286 }
1228 break; 1287 break;
1229 case 7: 1288 case 7:
1230 swizzle_pipe[0] = 0; 1289 if (force_no_swizzle) {
1231 swizzle_pipe[1] = 2; 1290 swizzle_pipe[0] = 0;
1232 swizzle_pipe[2] = 4; 1291 swizzle_pipe[1] = 1;
1233 swizzle_pipe[3] = 6; 1292 swizzle_pipe[2] = 2;
1234 swizzle_pipe[4] = 3; 1293 swizzle_pipe[3] = 3;
1235 swizzle_pipe[5] = 1; 1294 swizzle_pipe[4] = 4;
1236 swizzle_pipe[6] = 5; 1295 swizzle_pipe[5] = 5;
1296 swizzle_pipe[6] = 6;
1297 } else {
1298 swizzle_pipe[0] = 0;
1299 swizzle_pipe[1] = 2;
1300 swizzle_pipe[2] = 4;
1301 swizzle_pipe[3] = 6;
1302 swizzle_pipe[4] = 3;
1303 swizzle_pipe[5] = 1;
1304 swizzle_pipe[6] = 5;
1305 }
1237 break; 1306 break;
1238 case 8: 1307 case 8:
1239 swizzle_pipe[0] = 0; 1308 if (force_no_swizzle) {
1240 swizzle_pipe[1] = 2; 1309 swizzle_pipe[0] = 0;
1241 swizzle_pipe[2] = 4; 1310 swizzle_pipe[1] = 1;
1242 swizzle_pipe[3] = 6; 1311 swizzle_pipe[2] = 2;
1243 swizzle_pipe[4] = 3; 1312 swizzle_pipe[3] = 3;
1244 swizzle_pipe[5] = 1; 1313 swizzle_pipe[4] = 4;
1245 swizzle_pipe[6] = 7; 1314 swizzle_pipe[5] = 5;
1246 swizzle_pipe[7] = 5; 1315 swizzle_pipe[6] = 6;
1316 swizzle_pipe[7] = 7;
1317 } else {
1318 swizzle_pipe[0] = 0;
1319 swizzle_pipe[1] = 2;
1320 swizzle_pipe[2] = 4;
1321 swizzle_pipe[3] = 6;
1322 swizzle_pipe[4] = 3;
1323 swizzle_pipe[5] = 1;
1324 swizzle_pipe[6] = 7;
1325 swizzle_pipe[7] = 5;
1326 }
1247 break; 1327 break;
1248 } 1328 }
1249 1329
@@ -1264,8 +1344,10 @@ static void r700_gfx_init(struct drm_device *dev,
1264 drm_radeon_private_t *dev_priv) 1344 drm_radeon_private_t *dev_priv)
1265{ 1345{
1266 int i, j, num_qd_pipes; 1346 int i, j, num_qd_pipes;
1347 u32 ta_aux_cntl;
1267 u32 sx_debug_1; 1348 u32 sx_debug_1;
1268 u32 smx_dc_ctl0; 1349 u32 smx_dc_ctl0;
1350 u32 db_debug3;
1269 u32 num_gs_verts_per_thread; 1351 u32 num_gs_verts_per_thread;
1270 u32 vgt_gs_per_es; 1352 u32 vgt_gs_per_es;
1271 u32 gs_prim_buffer_depth = 0; 1353 u32 gs_prim_buffer_depth = 0;
@@ -1276,8 +1358,8 @@ static void r700_gfx_init(struct drm_device *dev,
1276 u32 sq_dyn_gpr_size_simd_ab_0; 1358 u32 sq_dyn_gpr_size_simd_ab_0;
1277 u32 backend_map; 1359 u32 backend_map;
1278 u32 gb_tiling_config = 0; 1360 u32 gb_tiling_config = 0;
1279 u32 cc_rb_backend_disable = 0; 1361 u32 cc_rb_backend_disable;
1280 u32 cc_gc_shader_pipe_config = 0; 1362 u32 cc_gc_shader_pipe_config;
1281 u32 mc_arb_ramcfg; 1363 u32 mc_arb_ramcfg;
1282 u32 db_debug4; 1364 u32 db_debug4;
1283 1365
@@ -1428,38 +1510,51 @@ static void r700_gfx_init(struct drm_device *dev,
1428 1510
1429 gb_tiling_config |= R600_BANK_SWAPS(1); 1511 gb_tiling_config |= R600_BANK_SWAPS(1);
1430 1512
1431 if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV740) 1513 cc_rb_backend_disable = RADEON_READ(R600_CC_RB_BACKEND_DISABLE) & 0x00ff0000;
1432 backend_map = 0x28; 1514 cc_rb_backend_disable |=
1433 else 1515 R600_BACKEND_DISABLE((R7XX_MAX_BACKENDS_MASK << dev_priv->r600_max_backends) & R7XX_MAX_BACKENDS_MASK);
1434 backend_map = r700_get_tile_pipe_to_backend_map(dev_priv->r600_max_tile_pipes,
1435 dev_priv->r600_max_backends,
1436 (0xff << dev_priv->r600_max_backends) & 0xff);
1437 gb_tiling_config |= R600_BACKEND_MAP(backend_map);
1438 1516
1439 cc_gc_shader_pipe_config = 1517 cc_gc_shader_pipe_config = RADEON_READ(R600_CC_GC_SHADER_PIPE_CONFIG) & 0xffffff00;
1518 cc_gc_shader_pipe_config |=
1440 R600_INACTIVE_QD_PIPES((R7XX_MAX_PIPES_MASK << dev_priv->r600_max_pipes) & R7XX_MAX_PIPES_MASK); 1519 R600_INACTIVE_QD_PIPES((R7XX_MAX_PIPES_MASK << dev_priv->r600_max_pipes) & R7XX_MAX_PIPES_MASK);
1441 cc_gc_shader_pipe_config |= 1520 cc_gc_shader_pipe_config |=
1442 R600_INACTIVE_SIMDS((R7XX_MAX_SIMDS_MASK << dev_priv->r600_max_simds) & R7XX_MAX_SIMDS_MASK); 1521 R600_INACTIVE_SIMDS((R7XX_MAX_SIMDS_MASK << dev_priv->r600_max_simds) & R7XX_MAX_SIMDS_MASK);
1443 1522
1444 cc_rb_backend_disable = 1523 if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV740)
1445 R600_BACKEND_DISABLE((R7XX_MAX_BACKENDS_MASK << dev_priv->r600_max_backends) & R7XX_MAX_BACKENDS_MASK); 1524 backend_map = 0x28;
1525 else
1526 backend_map = r700_get_tile_pipe_to_backend_map(dev_priv,
1527 dev_priv->r600_max_tile_pipes,
1528 (R7XX_MAX_BACKENDS -
1529 r600_count_pipe_bits((cc_rb_backend_disable &
1530 R7XX_MAX_BACKENDS_MASK) >> 16)),
1531 (cc_rb_backend_disable >> 16));
1532 gb_tiling_config |= R600_BACKEND_MAP(backend_map);
1446 1533
1447 RADEON_WRITE(R600_GB_TILING_CONFIG, gb_tiling_config); 1534 RADEON_WRITE(R600_GB_TILING_CONFIG, gb_tiling_config);
1448 RADEON_WRITE(R600_DCP_TILING_CONFIG, (gb_tiling_config & 0xffff)); 1535 RADEON_WRITE(R600_DCP_TILING_CONFIG, (gb_tiling_config & 0xffff));
1449 RADEON_WRITE(R600_HDP_TILING_CONFIG, (gb_tiling_config & 0xffff)); 1536 RADEON_WRITE(R600_HDP_TILING_CONFIG, (gb_tiling_config & 0xffff));
1537 if (gb_tiling_config & 0xc0) {
1538 dev_priv->r600_group_size = 512;
1539 } else {
1540 dev_priv->r600_group_size = 256;
1541 }
1542 dev_priv->r600_npipes = 1 << ((gb_tiling_config >> 1) & 0x7);
1543 if (gb_tiling_config & 0x30) {
1544 dev_priv->r600_nbanks = 8;
1545 } else {
1546 dev_priv->r600_nbanks = 4;
1547 }
1450 1548
1451 RADEON_WRITE(R600_CC_RB_BACKEND_DISABLE, cc_rb_backend_disable); 1549 RADEON_WRITE(R600_CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
1452 RADEON_WRITE(R600_CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config); 1550 RADEON_WRITE(R600_CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
1453 RADEON_WRITE(R600_GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
1454 1551
1455 RADEON_WRITE(R700_CC_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable); 1552 RADEON_WRITE(R700_CC_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable);
1456 RADEON_WRITE(R700_CGTS_SYS_TCC_DISABLE, 0); 1553 RADEON_WRITE(R700_CGTS_SYS_TCC_DISABLE, 0);
1457 RADEON_WRITE(R700_CGTS_TCC_DISABLE, 0); 1554 RADEON_WRITE(R700_CGTS_TCC_DISABLE, 0);
1458 RADEON_WRITE(R700_CGTS_USER_SYS_TCC_DISABLE, 0);
1459 RADEON_WRITE(R700_CGTS_USER_TCC_DISABLE, 0);
1460 1555
1461 num_qd_pipes = 1556 num_qd_pipes =
1462 R7XX_MAX_BACKENDS - r600_count_pipe_bits(cc_gc_shader_pipe_config & R600_INACTIVE_QD_PIPES_MASK); 1557 R7XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & R600_INACTIVE_QD_PIPES_MASK) >> 8);
1463 RADEON_WRITE(R600_VGT_OUT_DEALLOC_CNTL, (num_qd_pipes * 4) & R600_DEALLOC_DIST_MASK); 1558 RADEON_WRITE(R600_VGT_OUT_DEALLOC_CNTL, (num_qd_pipes * 4) & R600_DEALLOC_DIST_MASK);
1464 RADEON_WRITE(R600_VGT_VERTEX_REUSE_BLOCK_CNTL, ((num_qd_pipes * 4) - 2) & R600_VTX_REUSE_DEPTH_MASK); 1559 RADEON_WRITE(R600_VGT_VERTEX_REUSE_BLOCK_CNTL, ((num_qd_pipes * 4) - 2) & R600_VTX_REUSE_DEPTH_MASK);
1465 1560
@@ -1469,10 +1564,8 @@ static void r700_gfx_init(struct drm_device *dev,
1469 1564
1470 RADEON_WRITE(R600_CP_MEQ_THRESHOLDS, R700_STQ_SPLIT(0x30)); 1565 RADEON_WRITE(R600_CP_MEQ_THRESHOLDS, R700_STQ_SPLIT(0x30));
1471 1566
1472 RADEON_WRITE(R600_TA_CNTL_AUX, (R600_DISABLE_CUBE_ANISO | 1567 ta_aux_cntl = RADEON_READ(R600_TA_CNTL_AUX);
1473 R600_SYNC_GRADIENT | 1568 RADEON_WRITE(R600_TA_CNTL_AUX, ta_aux_cntl | R600_DISABLE_CUBE_ANISO);
1474 R600_SYNC_WALKER |
1475 R600_SYNC_ALIGNER));
1476 1569
1477 sx_debug_1 = RADEON_READ(R700_SX_DEBUG_1); 1570 sx_debug_1 = RADEON_READ(R700_SX_DEBUG_1);
1478 sx_debug_1 |= R700_ENABLE_NEW_SMX_ADDRESS; 1571 sx_debug_1 |= R700_ENABLE_NEW_SMX_ADDRESS;
@@ -1483,14 +1576,28 @@ static void r700_gfx_init(struct drm_device *dev,
1483 smx_dc_ctl0 |= R700_CACHE_DEPTH((dev_priv->r700_sx_num_of_sets * 64) - 1); 1576 smx_dc_ctl0 |= R700_CACHE_DEPTH((dev_priv->r700_sx_num_of_sets * 64) - 1);
1484 RADEON_WRITE(R600_SMX_DC_CTL0, smx_dc_ctl0); 1577 RADEON_WRITE(R600_SMX_DC_CTL0, smx_dc_ctl0);
1485 1578
1486 RADEON_WRITE(R700_SMX_EVENT_CTL, (R700_ES_FLUSH_CTL(4) | 1579 if ((dev_priv->flags & RADEON_FAMILY_MASK) != CHIP_RV740)
1487 R700_GS_FLUSH_CTL(4) | 1580 RADEON_WRITE(R700_SMX_EVENT_CTL, (R700_ES_FLUSH_CTL(4) |
1488 R700_ACK_FLUSH_CTL(3) | 1581 R700_GS_FLUSH_CTL(4) |
1489 R700_SYNC_FLUSH_CTL)); 1582 R700_ACK_FLUSH_CTL(3) |
1583 R700_SYNC_FLUSH_CTL));
1490 1584
1491 if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV770) 1585 db_debug3 = RADEON_READ(R700_DB_DEBUG3);
1492 RADEON_WRITE(R700_DB_DEBUG3, R700_DB_CLK_OFF_DELAY(0x1f)); 1586 db_debug3 &= ~R700_DB_CLK_OFF_DELAY(0x1f);
1493 else { 1587 switch (dev_priv->flags & RADEON_FAMILY_MASK) {
1588 case CHIP_RV770:
1589 case CHIP_RV740:
1590 db_debug3 |= R700_DB_CLK_OFF_DELAY(0x1f);
1591 break;
1592 case CHIP_RV710:
1593 case CHIP_RV730:
1594 default:
1595 db_debug3 |= R700_DB_CLK_OFF_DELAY(2);
1596 break;
1597 }
1598 RADEON_WRITE(R700_DB_DEBUG3, db_debug3);
1599
1600 if ((dev_priv->flags & RADEON_FAMILY_MASK) != CHIP_RV770) {
1494 db_debug4 = RADEON_READ(RV700_DB_DEBUG4); 1601 db_debug4 = RADEON_READ(RV700_DB_DEBUG4);
1495 db_debug4 |= RV700_DISABLE_TILE_COVERED_FOR_PS_ITER; 1602 db_debug4 |= RV700_DISABLE_TILE_COVERED_FOR_PS_ITER;
1496 RADEON_WRITE(RV700_DB_DEBUG4, db_debug4); 1603 RADEON_WRITE(RV700_DB_DEBUG4, db_debug4);
@@ -1519,10 +1626,10 @@ static void r700_gfx_init(struct drm_device *dev,
1519 R600_ALU_UPDATE_FIFO_HIWATER(0x8)); 1626 R600_ALU_UPDATE_FIFO_HIWATER(0x8));
1520 switch (dev_priv->flags & RADEON_FAMILY_MASK) { 1627 switch (dev_priv->flags & RADEON_FAMILY_MASK) {
1521 case CHIP_RV770: 1628 case CHIP_RV770:
1522 sq_ms_fifo_sizes |= R600_FETCH_FIFO_HIWATER(0x1);
1523 break;
1524 case CHIP_RV730: 1629 case CHIP_RV730:
1525 case CHIP_RV710: 1630 case CHIP_RV710:
1631 sq_ms_fifo_sizes |= R600_FETCH_FIFO_HIWATER(0x1);
1632 break;
1526 case CHIP_RV740: 1633 case CHIP_RV740:
1527 default: 1634 default:
1528 sq_ms_fifo_sizes |= R600_FETCH_FIFO_HIWATER(0x4); 1635 sq_ms_fifo_sizes |= R600_FETCH_FIFO_HIWATER(0x4);
@@ -2529,3 +2636,12 @@ out:
2529 mutex_unlock(&dev_priv->cs_mutex); 2636 mutex_unlock(&dev_priv->cs_mutex);
2530 return r; 2637 return r;
2531} 2638}
2639
2640void r600_cs_legacy_get_tiling_conf(struct drm_device *dev, u32 *npipes, u32 *nbanks, u32 *group_size)
2641{
2642 struct drm_radeon_private *dev_priv = dev->dev_private;
2643
2644 *npipes = dev_priv->r600_npipes;
2645 *nbanks = dev_priv->r600_nbanks;
2646 *group_size = dev_priv->r600_group_size;
2647}
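The r600_npipes/r600_nbanks/r600_group_size fields cached above are direct decodes of GB_TILING_CONFIG, exported to the CS checker through the new r600_cs_legacy_get_tiling_conf(). The decode in isolation (the sample config value is made up):

#include <stdint.h>
#include <stdio.h>

static void decode_tiling(uint32_t gb_tiling_config,
                          uint32_t *npipes, uint32_t *nbanks, uint32_t *group_size)
{
        *group_size = (gb_tiling_config & 0xc0) ? 512 : 256;    /* group-size field */
        *npipes = 1u << ((gb_tiling_config >> 1) & 0x7);        /* pipe-tiling field */
        *nbanks = (gb_tiling_config & 0x30) ? 8 : 4;            /* bank-tiling field */
}

int main(void)
{
        uint32_t n, b, g;

        decode_tiling(0x12, &n, &b, &g);        /* hypothetical config value */
        printf("%u pipes, %u banks, group size %u\n", n, b, g);
        return 0;
}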
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index e4c45ec16507..cd2c63bce501 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -28,6 +28,7 @@
28#include "drmP.h" 28#include "drmP.h"
29#include "radeon.h" 29#include "radeon.h"
30#include "r600d.h" 30#include "r600d.h"
31#include "r600_reg_safe.h"
31 32
32static int r600_cs_packet_next_reloc_mm(struct radeon_cs_parser *p, 33static int r600_cs_packet_next_reloc_mm(struct radeon_cs_parser *p,
33 struct radeon_cs_reloc **cs_reloc); 34 struct radeon_cs_reloc **cs_reloc);
@@ -35,11 +36,313 @@ static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p,
35 struct radeon_cs_reloc **cs_reloc); 36 struct radeon_cs_reloc **cs_reloc);
36typedef int (*next_reloc_t)(struct radeon_cs_parser*, struct radeon_cs_reloc**); 37typedef int (*next_reloc_t)(struct radeon_cs_parser*, struct radeon_cs_reloc**);
37static next_reloc_t r600_cs_packet_next_reloc = &r600_cs_packet_next_reloc_mm; 38static next_reloc_t r600_cs_packet_next_reloc = &r600_cs_packet_next_reloc_mm;
39extern void r600_cs_legacy_get_tiling_conf(struct drm_device *dev, u32 *npipes, u32 *nbanks, u32 *group_size);
40
38 41
39struct r600_cs_track { 42struct r600_cs_track {
 40 u32 cb_color0_base_last; 43 /* configuration we mirror so that we use the same code between kms/ums */
44 u32 group_size;
45 u32 nbanks;
46 u32 npipes;
47 /* value we track */
48 u32 nsamples;
49 u32 cb_color_base_last[8];
50 struct radeon_bo *cb_color_bo[8];
51 u32 cb_color_bo_offset[8];
52 struct radeon_bo *cb_color_frag_bo[8];
53 struct radeon_bo *cb_color_tile_bo[8];
54 u32 cb_color_info[8];
55 u32 cb_color_size_idx[8];
56 u32 cb_target_mask;
57 u32 cb_shader_mask;
58 u32 cb_color_size[8];
59 u32 vgt_strmout_en;
60 u32 vgt_strmout_buffer_en;
61 u32 db_depth_control;
62 u32 db_depth_info;
63 u32 db_depth_size_idx;
64 u32 db_depth_view;
65 u32 db_depth_size;
66 u32 db_offset;
67 struct radeon_bo *db_bo;
41}; 68};
42 69
70static inline int r600_bpe_from_format(u32 *bpe, u32 format)
71{
72 switch (format) {
73 case V_038004_COLOR_8:
74 case V_038004_COLOR_4_4:
75 case V_038004_COLOR_3_3_2:
76 case V_038004_FMT_1:
77 *bpe = 1;
78 break;
79 case V_038004_COLOR_16:
80 case V_038004_COLOR_16_FLOAT:
81 case V_038004_COLOR_8_8:
82 case V_038004_COLOR_5_6_5:
83 case V_038004_COLOR_6_5_5:
84 case V_038004_COLOR_1_5_5_5:
85 case V_038004_COLOR_4_4_4_4:
86 case V_038004_COLOR_5_5_5_1:
87 *bpe = 2;
88 break;
89 case V_038004_FMT_8_8_8:
90 *bpe = 3;
91 break;
92 case V_038004_COLOR_32:
93 case V_038004_COLOR_32_FLOAT:
94 case V_038004_COLOR_16_16:
95 case V_038004_COLOR_16_16_FLOAT:
96 case V_038004_COLOR_8_24:
97 case V_038004_COLOR_8_24_FLOAT:
98 case V_038004_COLOR_24_8:
99 case V_038004_COLOR_24_8_FLOAT:
100 case V_038004_COLOR_10_11_11:
101 case V_038004_COLOR_10_11_11_FLOAT:
102 case V_038004_COLOR_11_11_10:
103 case V_038004_COLOR_11_11_10_FLOAT:
104 case V_038004_COLOR_2_10_10_10:
105 case V_038004_COLOR_8_8_8_8:
106 case V_038004_COLOR_10_10_10_2:
107 case V_038004_FMT_5_9_9_9_SHAREDEXP:
108 case V_038004_FMT_32_AS_8:
109 case V_038004_FMT_32_AS_8_8:
110 *bpe = 4;
111 break;
112 case V_038004_COLOR_X24_8_32_FLOAT:
113 case V_038004_COLOR_32_32:
114 case V_038004_COLOR_32_32_FLOAT:
115 case V_038004_COLOR_16_16_16_16:
116 case V_038004_COLOR_16_16_16_16_FLOAT:
117 *bpe = 8;
118 break;
119 case V_038004_FMT_16_16_16:
120 case V_038004_FMT_16_16_16_FLOAT:
121 *bpe = 6;
122 break;
123 case V_038004_FMT_32_32_32:
124 case V_038004_FMT_32_32_32_FLOAT:
125 *bpe = 12;
126 break;
127 case V_038004_COLOR_32_32_32_32:
128 case V_038004_COLOR_32_32_32_32_FLOAT:
129 *bpe = 16;
130 break;
131 case V_038004_FMT_GB_GR:
132 case V_038004_FMT_BG_RG:
133 case V_038004_COLOR_INVALID:
134 *bpe = 16;
135 return -EINVAL;
136 }
137 return 0;
138}
139
140static void r600_cs_track_init(struct r600_cs_track *track)
141{
142 int i;
143
144 for (i = 0; i < 8; i++) {
145 track->cb_color_base_last[i] = 0;
146 track->cb_color_size[i] = 0;
147 track->cb_color_size_idx[i] = 0;
148 track->cb_color_info[i] = 0;
149 track->cb_color_bo[i] = NULL;
150 track->cb_color_bo_offset[i] = 0xFFFFFFFF;
151 }
152 track->cb_target_mask = 0xFFFFFFFF;
153 track->cb_shader_mask = 0xFFFFFFFF;
154 track->db_bo = NULL;
155 /* assume the biggest format and that htile is enabled */
156 track->db_depth_info = 7 | (1 << 25);
157 track->db_depth_view = 0xFFFFC000;
158 track->db_depth_size = 0xFFFFFFFF;
159 track->db_depth_size_idx = 0;
160 track->db_depth_control = 0xFFFFFFFF;
161}
162
163static inline int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
164{
165 struct r600_cs_track *track = p->track;
166 u32 bpe = 0, pitch, slice_tile_max, size, tmp, height;
167 volatile u32 *ib = p->ib->ptr;
168
169 if (G_0280A0_TILE_MODE(track->cb_color_info[i])) {
 170 dev_warn(p->dev, "FMASK or CMASK buffers are not supported by this kernel\n");
171 return -EINVAL;
172 }
173 size = radeon_bo_size(track->cb_color_bo[i]);
174 if (r600_bpe_from_format(&bpe, G_0280A0_FORMAT(track->cb_color_info[i]))) {
175 dev_warn(p->dev, "%s:%d cb invalid format %d for %d (0x%08X)\n",
176 __func__, __LINE__, G_0280A0_FORMAT(track->cb_color_info[i]),
177 i, track->cb_color_info[i]);
178 return -EINVAL;
179 }
180 pitch = (G_028060_PITCH_TILE_MAX(track->cb_color_size[i]) + 1) << 3;
181 slice_tile_max = G_028060_SLICE_TILE_MAX(track->cb_color_size[i]) + 1;
182 if (!pitch) {
183 dev_warn(p->dev, "%s:%d cb pitch (%d) for %d invalid (0x%08X)\n",
184 __func__, __LINE__, pitch, i, track->cb_color_size[i]);
185 return -EINVAL;
186 }
187 height = size / (pitch * bpe);
188 if (height > 8192)
189 height = 8192;
190 switch (G_0280A0_ARRAY_MODE(track->cb_color_info[i])) {
191 case V_0280A0_ARRAY_LINEAR_GENERAL:
192 case V_0280A0_ARRAY_LINEAR_ALIGNED:
193 if (pitch & 0x3f) {
194 dev_warn(p->dev, "%s:%d cb pitch (%d x %d = %d) invalid\n",
195 __func__, __LINE__, pitch, bpe, pitch * bpe);
196 return -EINVAL;
197 }
198 if ((pitch * bpe) & (track->group_size - 1)) {
199 dev_warn(p->dev, "%s:%d cb pitch (%d) invalid\n",
200 __func__, __LINE__, pitch);
201 return -EINVAL;
202 }
203 break;
204 case V_0280A0_ARRAY_1D_TILED_THIN1:
205 if ((pitch * 8 * bpe * track->nsamples) & (track->group_size - 1)) {
206 dev_warn(p->dev, "%s:%d cb pitch (%d) invalid\n",
207 __func__, __LINE__, pitch);
208 return -EINVAL;
209 }
210 height &= ~0x7;
211 if (!height)
212 height = 8;
213 break;
214 case V_0280A0_ARRAY_2D_TILED_THIN1:
215 if (pitch & ((8 * track->nbanks) - 1)) {
216 dev_warn(p->dev, "%s:%d cb pitch (%d) invalid\n",
217 __func__, __LINE__, pitch);
218 return -EINVAL;
219 }
220 tmp = pitch * 8 * bpe * track->nsamples;
221 tmp = tmp / track->nbanks;
222 if (tmp & (track->group_size - 1)) {
223 dev_warn(p->dev, "%s:%d cb pitch (%d) invalid\n",
224 __func__, __LINE__, pitch);
225 return -EINVAL;
226 }
227 height &= ~((16 * track->npipes) - 1);
228 if (!height)
229 height = 16 * track->npipes;
230 break;
231 default:
232 dev_warn(p->dev, "%s invalid tiling %d for %d (0x%08X)\n", __func__,
233 G_0280A0_ARRAY_MODE(track->cb_color_info[i]), i,
234 track->cb_color_info[i]);
235 return -EINVAL;
236 }
237 /* check offset */
238 tmp = height * pitch;
239 if ((tmp + track->cb_color_bo_offset[i]) > radeon_bo_size(track->cb_color_bo[i])) {
 240 dev_warn(p->dev, "%s offset[%d] %d too big\n", __func__, i, track->cb_color_bo_offset[i]);
241 return -EINVAL;
242 }
243 /* limit max tile */
244 tmp = (height * pitch) >> 6;
245 if (tmp < slice_tile_max)
246 slice_tile_max = tmp;
247 tmp = S_028060_PITCH_TILE_MAX((pitch >> 3) - 1) |
248 S_028060_SLICE_TILE_MAX(slice_tile_max - 1);
249 ib[track->cb_color_size_idx[i]] = tmp;
250 return 0;
251}
252
253static int r600_cs_track_check(struct radeon_cs_parser *p)
254{
255 struct r600_cs_track *track = p->track;
256 u32 tmp;
257 int r, i;
258 volatile u32 *ib = p->ib->ptr;
259
260 /* on legacy kernel we don't perform advanced check */
261 if (p->rdev == NULL)
262 return 0;
263 /* we don't support out buffer yet */
264 if (track->vgt_strmout_en || track->vgt_strmout_buffer_en) {
265 dev_warn(p->dev, "this kernel doesn't support SMX output buffer\n");
266 return -EINVAL;
267 }
 268 /* check that we have a cb for each enabled target; we don't check
269 * shader_mask because it seems mesa isn't always setting it :(
270 */
271 tmp = track->cb_target_mask;
272 for (i = 0; i < 8; i++) {
273 if ((tmp >> (i * 4)) & 0xF) {
274 /* at least one component is enabled */
275 if (track->cb_color_bo[i] == NULL) {
276 dev_warn(p->dev, "%s:%d mask 0x%08X | 0x%08X no cb for %d\n",
277 __func__, __LINE__, track->cb_target_mask, track->cb_shader_mask, i);
278 return -EINVAL;
279 }
280 /* perform rewrite of CB_COLOR[0-7]_SIZE */
281 r = r600_cs_track_validate_cb(p, i);
282 if (r)
283 return r;
284 }
285 }
286 /* Check depth buffer */
287 if (G_028800_STENCIL_ENABLE(track->db_depth_control) ||
288 G_028800_Z_ENABLE(track->db_depth_control)) {
289 u32 nviews, bpe, ntiles;
290 if (track->db_bo == NULL) {
291 dev_warn(p->dev, "z/stencil with no depth buffer\n");
292 return -EINVAL;
293 }
294 if (G_028010_TILE_SURFACE_ENABLE(track->db_depth_info)) {
295 dev_warn(p->dev, "this kernel doesn't support z/stencil htile\n");
296 return -EINVAL;
297 }
298 switch (G_028010_FORMAT(track->db_depth_info)) {
299 case V_028010_DEPTH_16:
300 bpe = 2;
301 break;
302 case V_028010_DEPTH_X8_24:
303 case V_028010_DEPTH_8_24:
304 case V_028010_DEPTH_X8_24_FLOAT:
305 case V_028010_DEPTH_8_24_FLOAT:
306 case V_028010_DEPTH_32_FLOAT:
307 bpe = 4;
308 break;
309 case V_028010_DEPTH_X24_8_32_FLOAT:
310 bpe = 8;
311 break;
312 default:
313 dev_warn(p->dev, "z/stencil with invalid format %d\n", G_028010_FORMAT(track->db_depth_info));
314 return -EINVAL;
315 }
316 if ((track->db_depth_size & 0xFFFFFC00) == 0xFFFFFC00) {
317 if (!track->db_depth_size_idx) {
318 dev_warn(p->dev, "z/stencil buffer size not set\n");
319 return -EINVAL;
320 }
321 printk_once(KERN_WARNING "You have old & broken userspace please consider updating mesa\n");
322 tmp = radeon_bo_size(track->db_bo) - track->db_offset;
323 tmp = (tmp / bpe) >> 6;
324 if (!tmp) {
325 dev_warn(p->dev, "z/stencil buffer too small (0x%08X %d %d %ld)\n",
326 track->db_depth_size, bpe, track->db_offset,
327 radeon_bo_size(track->db_bo));
328 return -EINVAL;
329 }
330 ib[track->db_depth_size_idx] = S_028000_SLICE_TILE_MAX(tmp - 1) | (track->db_depth_size & 0x3FF);
331 } else {
332 ntiles = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1;
333 nviews = G_028004_SLICE_MAX(track->db_depth_view) + 1;
334 tmp = ntiles * bpe * 64 * nviews;
335 if ((tmp + track->db_offset) > radeon_bo_size(track->db_bo)) {
336 dev_warn(p->dev, "z/stencil buffer too small (0x%08X %d %d %d -> %d have %ld)\n",
337 track->db_depth_size, ntiles, nviews, bpe, tmp + track->db_offset,
338 radeon_bo_size(track->db_bo));
339 return -EINVAL;
340 }
341 }
342 }
343 return 0;
344}
345
43/** 346/**
44 * r600_cs_packet_parse() - parse cp packet and point ib index to next packet 347 * r600_cs_packet_parse() - parse cp packet and point ib index to next packet
45 * @parser: parser structure holding parsing context. 348 * @parser: parser structure holding parsing context.
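r600_cs_track_validate_cb() above recovers the pitch and slice size from CB_COLOR*_SIZE and bounds-checks them against the backing buffer object. A simplified standalone sketch of the linear-case checks (byte-oriented rather than the kernel's tile-oriented rewrite, so treat the details as illustrative):

#include <stdint.h>
#include <stdio.h>

/* pitch is stored as a tile count: (PITCH_TILE_MAX + 1) tiles of 8 pixels */
static int check_linear_cb(uint32_t pitch_tile_max, uint32_t bpe,
                           uint32_t offset, uint64_t bo_size,
                           uint32_t group_size)
{
        uint32_t pitch = (pitch_tile_max + 1) << 3;     /* pixels per row */

        if (pitch & 0x3f)
                return -1;      /* rows must be 64-pixel aligned */
        if (((uint64_t)pitch * bpe) & (group_size - 1))
                return -1;      /* row bytes must be group-size aligned */
        if ((uint64_t)offset + (uint64_t)pitch * bpe > bo_size)
                return -1;      /* at least one full row must fit */
        return 0;
}

int main(void)
{
        /* 1024-pixel rows of 32bpp in a 4 MiB BO, 256-byte groups */
        printf("%d\n", check_linear_cb(127, 4, 0, 4ull << 20, 256));
        return 0;
}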
@@ -359,6 +662,334 @@ static int r600_cs_parse_packet0(struct radeon_cs_parser *p,
359 return 0; 662 return 0;
360} 663}
361 664
665/**
666 * r600_cs_check_reg() - check if register is authorized or not
667 * @parser: parser structure holding parsing context
668 * @reg: register we are testing
669 * @idx: index into the cs buffer
670 *
671 * This function will test against r600_reg_safe_bm and return 0
672 * if register is safe. If register is not flag as safe this function
673 * will test it against a list of register needind special handling.
674 */
675static inline int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
676{
677 struct r600_cs_track *track = (struct r600_cs_track *)p->track;
678 struct radeon_cs_reloc *reloc;
679 u32 last_reg = ARRAY_SIZE(r600_reg_safe_bm);
680 u32 m, i, tmp, *ib;
681 int r;
682
683 i = (reg >> 7);
684 if (i > last_reg) {
685 dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
686 return -EINVAL;
687 }
688 m = 1 << ((reg >> 2) & 31);
689 if (!(r600_reg_safe_bm[i] & m))
690 return 0;
691 ib = p->ib->ptr;
692 switch (reg) {
 693 /* force the following regs to 0 in an attempt to disable the out buffer;
 694 * we would need to understand better how it works before we can perform
 695 * security checks on it (Jerome)
696 */
697 case R_0288A8_SQ_ESGS_RING_ITEMSIZE:
698 case R_008C44_SQ_ESGS_RING_SIZE:
699 case R_0288B0_SQ_ESTMP_RING_ITEMSIZE:
700 case R_008C54_SQ_ESTMP_RING_SIZE:
701 case R_0288C0_SQ_FBUF_RING_ITEMSIZE:
702 case R_008C74_SQ_FBUF_RING_SIZE:
703 case R_0288B4_SQ_GSTMP_RING_ITEMSIZE:
704 case R_008C5C_SQ_GSTMP_RING_SIZE:
705 case R_0288AC_SQ_GSVS_RING_ITEMSIZE:
706 case R_008C4C_SQ_GSVS_RING_SIZE:
707 case R_0288BC_SQ_PSTMP_RING_ITEMSIZE:
708 case R_008C6C_SQ_PSTMP_RING_SIZE:
709 case R_0288C4_SQ_REDUC_RING_ITEMSIZE:
710 case R_008C7C_SQ_REDUC_RING_SIZE:
711 case R_0288B8_SQ_VSTMP_RING_ITEMSIZE:
712 case R_008C64_SQ_VSTMP_RING_SIZE:
713 case R_0288C8_SQ_GS_VERT_ITEMSIZE:
714 /* get value to populate the IB don't remove */
 715 tmp = radeon_get_ib_value(p, idx);
716 ib[idx] = 0;
717 break;
718 case R_028800_DB_DEPTH_CONTROL:
719 track->db_depth_control = radeon_get_ib_value(p, idx);
720 break;
721 case R_028010_DB_DEPTH_INFO:
722 track->db_depth_info = radeon_get_ib_value(p, idx);
723 break;
724 case R_028004_DB_DEPTH_VIEW:
725 track->db_depth_view = radeon_get_ib_value(p, idx);
726 break;
727 case R_028000_DB_DEPTH_SIZE:
728 track->db_depth_size = radeon_get_ib_value(p, idx);
729 track->db_depth_size_idx = idx;
730 break;
731 case R_028AB0_VGT_STRMOUT_EN:
732 track->vgt_strmout_en = radeon_get_ib_value(p, idx);
733 break;
734 case R_028B20_VGT_STRMOUT_BUFFER_EN:
735 track->vgt_strmout_buffer_en = radeon_get_ib_value(p, idx);
736 break;
737 case R_028238_CB_TARGET_MASK:
738 track->cb_target_mask = radeon_get_ib_value(p, idx);
739 break;
740 case R_02823C_CB_SHADER_MASK:
741 track->cb_shader_mask = radeon_get_ib_value(p, idx);
742 break;
743 case R_028C04_PA_SC_AA_CONFIG:
744 tmp = G_028C04_MSAA_NUM_SAMPLES(radeon_get_ib_value(p, idx));
745 track->nsamples = 1 << tmp;
746 break;
747 case R_0280A0_CB_COLOR0_INFO:
748 case R_0280A4_CB_COLOR1_INFO:
749 case R_0280A8_CB_COLOR2_INFO:
750 case R_0280AC_CB_COLOR3_INFO:
751 case R_0280B0_CB_COLOR4_INFO:
752 case R_0280B4_CB_COLOR5_INFO:
753 case R_0280B8_CB_COLOR6_INFO:
754 case R_0280BC_CB_COLOR7_INFO:
755 tmp = (reg - R_0280A0_CB_COLOR0_INFO) / 4;
756 track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
757 break;
758 case R_028060_CB_COLOR0_SIZE:
759 case R_028064_CB_COLOR1_SIZE:
760 case R_028068_CB_COLOR2_SIZE:
761 case R_02806C_CB_COLOR3_SIZE:
762 case R_028070_CB_COLOR4_SIZE:
763 case R_028074_CB_COLOR5_SIZE:
764 case R_028078_CB_COLOR6_SIZE:
765 case R_02807C_CB_COLOR7_SIZE:
766 tmp = (reg - R_028060_CB_COLOR0_SIZE) / 4;
767 track->cb_color_size[tmp] = radeon_get_ib_value(p, idx);
768 track->cb_color_size_idx[tmp] = idx;
769 break;
 770 /* These registers were added late; there is userspace
 771 * which does provide a relocation for them but sets a
 772 * 0 offset. In order to avoid breaking old userspace,
 773 * we detect this and set the address to point to the last
 774 * CB_COLOR0_BASE. Note that if userspace doesn't set
 775 * CB_COLOR0_BASE before this register, we will report an
 776 * error. Old userspace always sets CB_COLOR0_BASE
 777 * before any of this.
778 */
779 case R_0280E0_CB_COLOR0_FRAG:
780 case R_0280E4_CB_COLOR1_FRAG:
781 case R_0280E8_CB_COLOR2_FRAG:
782 case R_0280EC_CB_COLOR3_FRAG:
783 case R_0280F0_CB_COLOR4_FRAG:
784 case R_0280F4_CB_COLOR5_FRAG:
785 case R_0280F8_CB_COLOR6_FRAG:
786 case R_0280FC_CB_COLOR7_FRAG:
787 tmp = (reg - R_0280E0_CB_COLOR0_FRAG) / 4;
788 if (!r600_cs_packet_next_is_pkt3_nop(p)) {
789 if (!track->cb_color_base_last[tmp]) {
 790 dev_err(p->dev, "Broken old userspace? No cb_color0_base supplied before trying to write 0x%08X\n", reg);
791 return -EINVAL;
792 }
793 ib[idx] = track->cb_color_base_last[tmp];
794 printk_once(KERN_WARNING "You have old & broken userspace "
795 "please consider updating mesa & xf86-video-ati\n");
796 track->cb_color_frag_bo[tmp] = track->cb_color_bo[tmp];
797 } else {
798 r = r600_cs_packet_next_reloc(p, &reloc);
799 if (r) {
800 dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
801 return -EINVAL;
802 }
803 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
804 track->cb_color_frag_bo[tmp] = reloc->robj;
805 }
806 break;
807 case R_0280C0_CB_COLOR0_TILE:
808 case R_0280C4_CB_COLOR1_TILE:
809 case R_0280C8_CB_COLOR2_TILE:
810 case R_0280CC_CB_COLOR3_TILE:
811 case R_0280D0_CB_COLOR4_TILE:
812 case R_0280D4_CB_COLOR5_TILE:
813 case R_0280D8_CB_COLOR6_TILE:
814 case R_0280DC_CB_COLOR7_TILE:
815 tmp = (reg - R_0280C0_CB_COLOR0_TILE) / 4;
816 if (!r600_cs_packet_next_is_pkt3_nop(p)) {
817 if (!track->cb_color_base_last[tmp]) {
 818 dev_err(p->dev, "Broken old userspace? No cb_color0_base supplied before trying to write 0x%08X\n", reg);
819 return -EINVAL;
820 }
821 ib[idx] = track->cb_color_base_last[tmp];
822 printk_once(KERN_WARNING "You have old & broken userspace "
823 "please consider updating mesa & xf86-video-ati\n");
824 track->cb_color_tile_bo[tmp] = track->cb_color_bo[tmp];
825 } else {
826 r = r600_cs_packet_next_reloc(p, &reloc);
827 if (r) {
828 dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
829 return -EINVAL;
830 }
831 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
832 track->cb_color_tile_bo[tmp] = reloc->robj;
833 }
834 break;
835 case CB_COLOR0_BASE:
836 case CB_COLOR1_BASE:
837 case CB_COLOR2_BASE:
838 case CB_COLOR3_BASE:
839 case CB_COLOR4_BASE:
840 case CB_COLOR5_BASE:
841 case CB_COLOR6_BASE:
842 case CB_COLOR7_BASE:
843 r = r600_cs_packet_next_reloc(p, &reloc);
844 if (r) {
845 dev_warn(p->dev, "bad SET_CONTEXT_REG "
846 "0x%04X\n", reg);
847 return -EINVAL;
848 }
849 tmp = (reg - CB_COLOR0_BASE) / 4;
850 track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx);
851 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
852 track->cb_color_base_last[tmp] = ib[idx];
853 track->cb_color_bo[tmp] = reloc->robj;
854 break;
855 case DB_DEPTH_BASE:
856 r = r600_cs_packet_next_reloc(p, &reloc);
857 if (r) {
858 dev_warn(p->dev, "bad SET_CONTEXT_REG "
859 "0x%04X\n", reg);
860 return -EINVAL;
861 }
862 track->db_offset = radeon_get_ib_value(p, idx);
863 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
864 track->db_bo = reloc->robj;
865 break;
866 case DB_HTILE_DATA_BASE:
867 case SQ_PGM_START_FS:
868 case SQ_PGM_START_ES:
869 case SQ_PGM_START_VS:
870 case SQ_PGM_START_GS:
871 case SQ_PGM_START_PS:
872 r = r600_cs_packet_next_reloc(p, &reloc);
873 if (r) {
874 dev_warn(p->dev, "bad SET_CONTEXT_REG "
875 "0x%04X\n", reg);
876 return -EINVAL;
877 }
878 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
879 break;
880 default:
881 dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
882 return -EINVAL;
883 }
884 return 0;
885}
886
887static inline unsigned minify(unsigned size, unsigned levels)
888{
889 size = size >> levels;
890 if (size < 1)
891 size = 1;
892 return size;
893}
894
895static void r600_texture_size(unsigned nfaces, unsigned blevel, unsigned nlevels,
896 unsigned w0, unsigned h0, unsigned d0, unsigned bpe,
897 unsigned *l0_size, unsigned *mipmap_size)
898{
899 unsigned offset, i, level, face;
900 unsigned width, height, depth, rowstride, size;
901
902 w0 = minify(w0, 0);
903 h0 = minify(h0, 0);
904 d0 = minify(d0, 0);
905 for(i = 0, offset = 0, level = blevel; i < nlevels; i++, level++) {
906 width = minify(w0, i);
907 height = minify(h0, i);
908 depth = minify(d0, i);
909 for(face = 0; face < nfaces; face++) {
910 rowstride = ((width * bpe) + 255) & ~255;
911 size = height * rowstride * depth;
912 offset += size;
913 offset = (offset + 0x1f) & ~0x1f;
914 }
915 }
916 *l0_size = (((w0 * bpe) + 255) & ~255) * h0 * d0;
917 *mipmap_size = offset;
918 if (!blevel)
919 *mipmap_size -= *l0_size;
920 if (!nlevels)
921 *mipmap_size = *l0_size;
922}
923
924/**
 925 * r600_check_texture_resource() - check that a texture resource is valid
926 * @p: parser structure holding parsing context
927 * @idx: index into the cs buffer
928 * @texture: texture's bo structure
929 * @mipmap: mipmap's bo structure
930 *
 931 * This function will check that the resource has valid fields and that
 932 * the texture and mipmap bo objects are big enough to cover this resource.
933 */
934static inline int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx,
935 struct radeon_bo *texture,
936 struct radeon_bo *mipmap)
937{
938 u32 nfaces, nlevels, blevel, w0, h0, d0, bpe = 0;
939 u32 word0, word1, l0_size, mipmap_size;
940
941 /* on legacy kernel we don't perform advanced check */
942 if (p->rdev == NULL)
943 return 0;
944 word0 = radeon_get_ib_value(p, idx + 0);
945 word1 = radeon_get_ib_value(p, idx + 1);
946 w0 = G_038000_TEX_WIDTH(word0) + 1;
947 h0 = G_038004_TEX_HEIGHT(word1) + 1;
948 d0 = G_038004_TEX_DEPTH(word1);
949 nfaces = 1;
950 switch (G_038000_DIM(word0)) {
951 case V_038000_SQ_TEX_DIM_1D:
952 case V_038000_SQ_TEX_DIM_2D:
953 case V_038000_SQ_TEX_DIM_3D:
954 break;
955 case V_038000_SQ_TEX_DIM_CUBEMAP:
956 nfaces = 6;
957 break;
958 case V_038000_SQ_TEX_DIM_1D_ARRAY:
959 case V_038000_SQ_TEX_DIM_2D_ARRAY:
960 case V_038000_SQ_TEX_DIM_2D_MSAA:
961 case V_038000_SQ_TEX_DIM_2D_ARRAY_MSAA:
962 default:
963 dev_warn(p->dev, "this kernel doesn't support %d texture dim\n", G_038000_DIM(word0));
964 return -EINVAL;
965 }
966 if (r600_bpe_from_format(&bpe, G_038004_DATA_FORMAT(word1))) {
967 dev_warn(p->dev, "%s:%d texture invalid format %d\n",
968 __func__, __LINE__, G_038004_DATA_FORMAT(word1));
969 return -EINVAL;
970 }
971 word0 = radeon_get_ib_value(p, idx + 4);
972 word1 = radeon_get_ib_value(p, idx + 5);
973 blevel = G_038010_BASE_LEVEL(word0);
974 nlevels = G_038014_LAST_LEVEL(word1);
975 r600_texture_size(nfaces, blevel, nlevels, w0, h0, d0, bpe, &l0_size, &mipmap_size);
976 /* using get ib will give us the offset into the texture bo */
977 word0 = radeon_get_ib_value(p, idx + 2);
978 if ((l0_size + word0) > radeon_bo_size(texture)) {
979 dev_warn(p->dev, "texture bo too small (%d %d %d %d -> %d have %ld)\n",
980 w0, h0, bpe, word0, l0_size, radeon_bo_size(texture));
981 return -EINVAL;
982 }
983 /* using get ib will give us the offset into the mipmap bo */
984 word0 = radeon_get_ib_value(p, idx + 3);
985 if ((mipmap_size + word0) > radeon_bo_size(mipmap)) {
986 dev_warn(p->dev, "mipmap bo too small (%d %d %d %d %d %d -> %d have %ld)\n",
987 w0, h0, bpe, blevel, nlevels, word0, mipmap_size, radeon_bo_size(texture));
988 return -EINVAL;
989 }
990 return 0;
991}
992
362static int r600_packet3_check(struct radeon_cs_parser *p, 993static int r600_packet3_check(struct radeon_cs_parser *p,
363 struct radeon_cs_packet *pkt) 994 struct radeon_cs_packet *pkt)
364{ 995{
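r600_cs_check_reg() above keys into r600_reg_safe_bm with reg >> 7 because each u32 of the bitmap covers 32 registers of 4 bytes each, i.e. 128 bytes of register space. The lookup in isolation (this two-entry bitmap is made up for illustration; a set bit means the register needs special handling):

#include <stdint.h>
#include <stdio.h>

/* hypothetical bitmap: a set bit means "needs special handling" */
static const uint32_t reg_safe_bm[2] = { 0x00000000, 0x00010000 };

static int reg_needs_check(uint32_t reg)
{
        uint32_t i = reg >> 7;                  /* which 128-byte block */
        uint32_t m = 1u << ((reg >> 2) & 31);   /* which register in the block */

        if (i >= sizeof(reg_safe_bm) / sizeof(reg_safe_bm[0]))
                return -1;                      /* out of range: forbidden */
        return (reg_safe_bm[i] & m) != 0;
}

int main(void)
{
        printf("%d %d\n", reg_needs_check(0x0000), reg_needs_check(0x00c0));
        return 0;
}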
@@ -408,12 +1039,22 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
408 } 1039 }
409 ib[idx+0] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff); 1040 ib[idx+0] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff);
410 ib[idx+1] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff; 1041 ib[idx+1] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
1042 r = r600_cs_track_check(p);
1043 if (r) {
1044 dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
1045 return r;
1046 }
411 break; 1047 break;
412 case PACKET3_DRAW_INDEX_AUTO: 1048 case PACKET3_DRAW_INDEX_AUTO:
413 if (pkt->count != 1) { 1049 if (pkt->count != 1) {
414 DRM_ERROR("bad DRAW_INDEX_AUTO\n"); 1050 DRM_ERROR("bad DRAW_INDEX_AUTO\n");
415 return -EINVAL; 1051 return -EINVAL;
416 } 1052 }
1053 r = r600_cs_track_check(p);
1054 if (r) {
1055 dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx);
1056 return r;
1057 }
417 break; 1058 break;
418 case PACKET3_DRAW_INDEX_IMMD_BE: 1059 case PACKET3_DRAW_INDEX_IMMD_BE:
419 case PACKET3_DRAW_INDEX_IMMD: 1060 case PACKET3_DRAW_INDEX_IMMD:
@@ -421,6 +1062,11 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
421 DRM_ERROR("bad DRAW_INDEX_IMMD\n"); 1062 DRM_ERROR("bad DRAW_INDEX_IMMD\n");
422 return -EINVAL; 1063 return -EINVAL;
423 } 1064 }
1065 r = r600_cs_track_check(p);
1066 if (r) {
1067 dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
1068 return r;
1069 }
424 break; 1070 break;
425 case PACKET3_WAIT_REG_MEM: 1071 case PACKET3_WAIT_REG_MEM:
426 if (pkt->count != 5) { 1072 if (pkt->count != 5) {
@@ -493,30 +1139,9 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
493 } 1139 }
494 for (i = 0; i < pkt->count; i++) { 1140 for (i = 0; i < pkt->count; i++) {
495 reg = start_reg + (4 * i); 1141 reg = start_reg + (4 * i);
496 switch (reg) { 1142 r = r600_cs_check_reg(p, reg, idx+1+i);
497 case SQ_ESGS_RING_BASE: 1143 if (r)
498 case SQ_GSVS_RING_BASE: 1144 return r;
499 case SQ_ESTMP_RING_BASE:
500 case SQ_GSTMP_RING_BASE:
501 case SQ_VSTMP_RING_BASE:
502 case SQ_PSTMP_RING_BASE:
503 case SQ_FBUF_RING_BASE:
504 case SQ_REDUC_RING_BASE:
505 case SX_MEMORY_EXPORT_BASE:
506 r = r600_cs_packet_next_reloc(p, &reloc);
507 if (r) {
508 DRM_ERROR("bad SET_CONFIG_REG "
509 "0x%04X\n", reg);
510 return -EINVAL;
511 }
512 ib[idx+1+i] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
513 break;
514 case CP_COHER_BASE:
515 /* use PACKET3_SURFACE_SYNC */
516 return -EINVAL;
517 default:
518 break;
519 }
520 } 1145 }
521 break; 1146 break;
522 case PACKET3_SET_CONTEXT_REG: 1147 case PACKET3_SET_CONTEXT_REG:
@@ -530,106 +1155,9 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
530 } 1155 }
531 for (i = 0; i < pkt->count; i++) { 1156 for (i = 0; i < pkt->count; i++) {
532 reg = start_reg + (4 * i); 1157 reg = start_reg + (4 * i);
533 switch (reg) { 1158 r = r600_cs_check_reg(p, reg, idx+1+i);
534 /* This register were added late, there is userspace 1159 if (r)
535 * which does provide relocation for those but set 1160 return r;
536 * 0 offset. In order to avoid breaking old userspace
537 * we detect this and set address to point to last
538 * CB_COLOR0_BASE, note that if userspace doesn't set
539 * CB_COLOR0_BASE before this register we will report
540 * error. Old userspace always set CB_COLOR0_BASE
541 * before any of this.
542 */
543 case R_0280E0_CB_COLOR0_FRAG:
544 case R_0280E4_CB_COLOR1_FRAG:
545 case R_0280E8_CB_COLOR2_FRAG:
546 case R_0280EC_CB_COLOR3_FRAG:
547 case R_0280F0_CB_COLOR4_FRAG:
548 case R_0280F4_CB_COLOR5_FRAG:
549 case R_0280F8_CB_COLOR6_FRAG:
550 case R_0280FC_CB_COLOR7_FRAG:
551 case R_0280C0_CB_COLOR0_TILE:
552 case R_0280C4_CB_COLOR1_TILE:
553 case R_0280C8_CB_COLOR2_TILE:
554 case R_0280CC_CB_COLOR3_TILE:
555 case R_0280D0_CB_COLOR4_TILE:
556 case R_0280D4_CB_COLOR5_TILE:
557 case R_0280D8_CB_COLOR6_TILE:
558 case R_0280DC_CB_COLOR7_TILE:
559 if (!r600_cs_packet_next_is_pkt3_nop(p)) {
560 if (!track->cb_color0_base_last) {
561 dev_err(p->dev, "Broken old userspace ? no cb_color0_base supplied before trying to write 0x%08X\n", reg);
562 return -EINVAL;
563 }
564 ib[idx+1+i] = track->cb_color0_base_last;
565 printk_once(KERN_WARNING "radeon: You have old & broken userspace "
566 "please consider updating mesa & xf86-video-ati\n");
567 } else {
568 r = r600_cs_packet_next_reloc(p, &reloc);
569 if (r) {
570 dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
571 return -EINVAL;
572 }
573 ib[idx+1+i] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
574 }
575 break;
576 case DB_DEPTH_BASE:
577 case DB_HTILE_DATA_BASE:
578 case CB_COLOR0_BASE:
579 r = r600_cs_packet_next_reloc(p, &reloc);
580 if (r) {
581 DRM_ERROR("bad SET_CONTEXT_REG "
582 "0x%04X\n", reg);
583 return -EINVAL;
584 }
585 ib[idx+1+i] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
586 track->cb_color0_base_last = ib[idx+1+i];
587 break;
588 case CB_COLOR1_BASE:
589 case CB_COLOR2_BASE:
590 case CB_COLOR3_BASE:
591 case CB_COLOR4_BASE:
592 case CB_COLOR5_BASE:
593 case CB_COLOR6_BASE:
594 case CB_COLOR7_BASE:
595 case SQ_PGM_START_FS:
596 case SQ_PGM_START_ES:
597 case SQ_PGM_START_VS:
598 case SQ_PGM_START_GS:
599 case SQ_PGM_START_PS:
600 r = r600_cs_packet_next_reloc(p, &reloc);
601 if (r) {
602 DRM_ERROR("bad SET_CONTEXT_REG "
603 "0x%04X\n", reg);
604 return -EINVAL;
605 }
606 ib[idx+1+i] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
607 break;
608 case VGT_DMA_BASE:
609 case VGT_DMA_BASE_HI:
610 /* These should be handled by DRAW_INDEX packet 3 */
611 case VGT_STRMOUT_BASE_OFFSET_0:
612 case VGT_STRMOUT_BASE_OFFSET_1:
613 case VGT_STRMOUT_BASE_OFFSET_2:
614 case VGT_STRMOUT_BASE_OFFSET_3:
615 case VGT_STRMOUT_BASE_OFFSET_HI_0:
616 case VGT_STRMOUT_BASE_OFFSET_HI_1:
617 case VGT_STRMOUT_BASE_OFFSET_HI_2:
618 case VGT_STRMOUT_BASE_OFFSET_HI_3:
619 case VGT_STRMOUT_BUFFER_BASE_0:
620 case VGT_STRMOUT_BUFFER_BASE_1:
621 case VGT_STRMOUT_BUFFER_BASE_2:
622 case VGT_STRMOUT_BUFFER_BASE_3:
623 case VGT_STRMOUT_BUFFER_OFFSET_0:
624 case VGT_STRMOUT_BUFFER_OFFSET_1:
625 case VGT_STRMOUT_BUFFER_OFFSET_2:
626 case VGT_STRMOUT_BUFFER_OFFSET_3:
627 /* These should be handled by STRMOUT_BUFFER packet 3 */
628 DRM_ERROR("bad context reg: 0x%08x\n", reg);
629 return -EINVAL;
630 default:
631 break;
632 }
633 } 1161 }
634 break; 1162 break;
635 case PACKET3_SET_RESOURCE: 1163 case PACKET3_SET_RESOURCE:
@@ -646,6 +1174,9 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
646 return -EINVAL; 1174 return -EINVAL;
647 } 1175 }
648 for (i = 0; i < (pkt->count / 7); i++) { 1176 for (i = 0; i < (pkt->count / 7); i++) {
1177 struct radeon_bo *texture, *mipmap;
1178 u32 size, offset;
1179
649 switch (G__SQ_VTX_CONSTANT_TYPE(radeon_get_ib_value(p, idx+(i*7)+6+1))) { 1180 switch (G__SQ_VTX_CONSTANT_TYPE(radeon_get_ib_value(p, idx+(i*7)+6+1))) {
650 case SQ_TEX_VTX_VALID_TEXTURE: 1181 case SQ_TEX_VTX_VALID_TEXTURE:
651 /* tex base */ 1182 /* tex base */
@@ -655,6 +1186,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
655 return -EINVAL; 1186 return -EINVAL;
656 } 1187 }
657 ib[idx+1+(i*7)+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); 1188 ib[idx+1+(i*7)+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1189 texture = reloc->robj;
658 /* tex mip base */ 1190 /* tex mip base */
659 r = r600_cs_packet_next_reloc(p, &reloc); 1191 r = r600_cs_packet_next_reloc(p, &reloc);
660 if (r) { 1192 if (r) {
@@ -662,6 +1194,11 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
662 return -EINVAL; 1194 return -EINVAL;
663 } 1195 }
664 ib[idx+1+(i*7)+3] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); 1196 ib[idx+1+(i*7)+3] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1197 mipmap = reloc->robj;
1198 r = r600_check_texture_resource(p, idx+(i*7)+1,
1199 texture, mipmap);
1200 if (r)
1201 return r;
665 break; 1202 break;
666 case SQ_TEX_VTX_VALID_BUFFER: 1203 case SQ_TEX_VTX_VALID_BUFFER:
667 /* vtx base */ 1204 /* vtx base */
@@ -670,6 +1207,13 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
670 DRM_ERROR("bad SET_RESOURCE\n"); 1207 DRM_ERROR("bad SET_RESOURCE\n");
671 return -EINVAL; 1208 return -EINVAL;
672 } 1209 }
1210 offset = radeon_get_ib_value(p, idx+1+(i*7)+0);
1211 size = radeon_get_ib_value(p, idx+1+(i*7)+1);
1212 if (p->rdev && (size + offset) > radeon_bo_size(reloc->robj)) {
1213 /* force size to size of the buffer */
1214 dev_warn(p->dev, "vbo resource seems too big for the bo\n");
1215 ib[idx+1+(i*7)+1] = radeon_bo_size(reloc->robj);
1216 }
673 ib[idx+1+(i*7)+0] += (u32)((reloc->lobj.gpu_offset) & 0xffffffff); 1217 ib[idx+1+(i*7)+0] += (u32)((reloc->lobj.gpu_offset) & 0xffffffff);
674 ib[idx+1+(i*7)+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff; 1218 ib[idx+1+(i*7)+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
675 break; 1219 break;
@@ -760,11 +1304,28 @@ int r600_cs_parse(struct radeon_cs_parser *p)
760 struct r600_cs_track *track; 1304 struct r600_cs_track *track;
761 int r; 1305 int r;
762 1306
763 track = kzalloc(sizeof(*track), GFP_KERNEL); 1307 if (p->track == NULL) {
764 p->track = track; 1308 /* initialize tracker; we are in KMS */
1309 track = kzalloc(sizeof(*track), GFP_KERNEL);
1310 if (track == NULL)
1311 return -ENOMEM;
1312 r600_cs_track_init(track);
1313 if (p->rdev->family < CHIP_RV770) {
1314 track->npipes = p->rdev->config.r600.tiling_npipes;
1315 track->nbanks = p->rdev->config.r600.tiling_nbanks;
1316 track->group_size = p->rdev->config.r600.tiling_group_size;
1317 } else if (p->rdev->family <= CHIP_RV740) {
1318 track->npipes = p->rdev->config.rv770.tiling_npipes;
1319 track->nbanks = p->rdev->config.rv770.tiling_nbanks;
1320 track->group_size = p->rdev->config.rv770.tiling_group_size;
1321 }
1322 p->track = track;
1323 }
765 do { 1324 do {
766 r = r600_cs_packet_parse(p, &pkt, p->idx); 1325 r = r600_cs_packet_parse(p, &pkt, p->idx);
767 if (r) { 1326 if (r) {
1327 kfree(p->track);
1328 p->track = NULL;
768 return r; 1329 return r;
769 } 1330 }
770 p->idx += pkt.count + 2; 1331 p->idx += pkt.count + 2;
@@ -779,9 +1340,13 @@ int r600_cs_parse(struct radeon_cs_parser *p)
779 break; 1340 break;
780 default: 1341 default:
781 DRM_ERROR("Unknown packet type %d!\n", pkt.type); 1342
1343 kfree(p->track);
1344 p->track = NULL;
782 return -EINVAL; 1345 return -EINVAL;
783 } 1346 }
784 if (r) { 1347 if (r) {
1348 kfree(p->track);
1349 p->track = NULL;
785 return r; 1350 return r;
786 } 1351 }
787 } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw); 1352 } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
@@ -791,6 +1356,8 @@ int r600_cs_parse(struct radeon_cs_parser *p)
791 mdelay(1); 1356 mdelay(1);
792 } 1357 }
793#endif 1358#endif
1359 kfree(p->track);
1360 p->track = NULL;
794 return 0; 1361 return 0;
795} 1362}
796 1363
@@ -833,9 +1400,16 @@ int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp,
833{ 1400{
834 struct radeon_cs_parser parser; 1401 struct radeon_cs_parser parser;
835 struct radeon_cs_chunk *ib_chunk; 1402 struct radeon_cs_chunk *ib_chunk;
836 struct radeon_ib fake_ib; 1403 struct radeon_ib fake_ib;
1404 struct r600_cs_track *track;
837 int r; 1405 int r;
838 1406
1407 /* initialize tracker */
1408 track = kzalloc(sizeof(*track), GFP_KERNEL);
1409 if (track == NULL)
1410 return -ENOMEM;
1411 r600_cs_track_init(track);
1412 r600_cs_legacy_get_tiling_conf(dev, &track->npipes, &track->nbanks, &track->group_size);
839 /* initialize parser */ 1413 /* initialize parser */
840 memset(&parser, 0, sizeof(struct radeon_cs_parser)); 1414 memset(&parser, 0, sizeof(struct radeon_cs_parser));
841 parser.filp = filp; 1415 parser.filp = filp;
@@ -843,6 +1417,7 @@ int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp,
843 parser.rdev = NULL; 1417 parser.rdev = NULL;
844 parser.family = family; 1418 parser.family = family;
845 parser.ib = &fake_ib; 1419 parser.ib = &fake_ib;
1420 parser.track = track;
846 fake_ib.ptr = ib; 1421 fake_ib.ptr = ib;
847 r = radeon_cs_parser_init(&parser, data); 1422 r = radeon_cs_parser_init(&parser, data);
848 if (r) { 1423 if (r) {
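The relocation patching throughout r600_packet3_check() follows a single idiom: pop the next relocation, then fold the BO's GPU offset into the command-stream dword. CB/DB base registers hold a 256-byte-aligned address (see the *_BASE_256B fields in r600d.h below), hence the >> 8. A minimal sketch of the idiom; the helper name is hypothetical:

static inline void r600_patch_base_dword(u32 *ib_dword, u64 gpu_offset)
{
	/* keep whatever userspace already wrote and add the relocated
	 * base address, expressed in 256-byte units */
	*ib_dword += (u32)((gpu_offset >> 8) & 0xffffffff);
}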
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index 30480881aed1..5b2e4d442823 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -883,6 +883,16 @@
883 883
884#define R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL 0x5480 884#define R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL 0x5480
885 885
886#define R_028C04_PA_SC_AA_CONFIG 0x028C04
887#define S_028C04_MSAA_NUM_SAMPLES(x) (((x) & 0x3) << 0)
888#define G_028C04_MSAA_NUM_SAMPLES(x) (((x) >> 0) & 0x3)
889#define C_028C04_MSAA_NUM_SAMPLES 0xFFFFFFFC
890#define S_028C04_AA_MASK_CENTROID_DTMN(x) (((x) & 0x1) << 4)
891#define G_028C04_AA_MASK_CENTROID_DTMN(x) (((x) >> 4) & 0x1)
892#define C_028C04_AA_MASK_CENTROID_DTMN 0xFFFFFFEF
893#define S_028C04_MAX_SAMPLE_DIST(x) (((x) & 0xF) << 13)
894#define G_028C04_MAX_SAMPLE_DIST(x) (((x) >> 13) & 0xF)
895#define C_028C04_MAX_SAMPLE_DIST 0xFFFE1FFF
886#define R_0280E0_CB_COLOR0_FRAG 0x0280E0 896#define R_0280E0_CB_COLOR0_FRAG 0x0280E0
887#define S_0280E0_BASE_256B(x) (((x) & 0xFFFFFFFF) << 0) 897#define S_0280E0_BASE_256B(x) (((x) & 0xFFFFFFFF) << 0)
888#define G_0280E0_BASE_256B(x) (((x) >> 0) & 0xFFFFFFFF) 898#define G_0280E0_BASE_256B(x) (((x) >> 0) & 0xFFFFFFFF)
@@ -905,6 +915,461 @@
905#define R_0280D4_CB_COLOR5_TILE 0x0280D4 915#define R_0280D4_CB_COLOR5_TILE 0x0280D4
906#define R_0280D8_CB_COLOR6_TILE 0x0280D8 916#define R_0280D8_CB_COLOR6_TILE 0x0280D8
907#define R_0280DC_CB_COLOR7_TILE 0x0280DC 917#define R_0280DC_CB_COLOR7_TILE 0x0280DC
908 918#define R_0280A0_CB_COLOR0_INFO 0x0280A0
919#define S_0280A0_ENDIAN(x) (((x) & 0x3) << 0)
920#define G_0280A0_ENDIAN(x) (((x) >> 0) & 0x3)
921#define C_0280A0_ENDIAN 0xFFFFFFFC
922#define S_0280A0_FORMAT(x) (((x) & 0x3F) << 2)
923#define G_0280A0_FORMAT(x) (((x) >> 2) & 0x3F)
924#define C_0280A0_FORMAT 0xFFFFFF03
925#define V_0280A0_COLOR_INVALID 0x00000000
926#define V_0280A0_COLOR_8 0x00000001
927#define V_0280A0_COLOR_4_4 0x00000002
928#define V_0280A0_COLOR_3_3_2 0x00000003
929#define V_0280A0_COLOR_16 0x00000005
930#define V_0280A0_COLOR_16_FLOAT 0x00000006
931#define V_0280A0_COLOR_8_8 0x00000007
932#define V_0280A0_COLOR_5_6_5 0x00000008
933#define V_0280A0_COLOR_6_5_5 0x00000009
934#define V_0280A0_COLOR_1_5_5_5 0x0000000A
935#define V_0280A0_COLOR_4_4_4_4 0x0000000B
936#define V_0280A0_COLOR_5_5_5_1 0x0000000C
937#define V_0280A0_COLOR_32 0x0000000D
938#define V_0280A0_COLOR_32_FLOAT 0x0000000E
939#define V_0280A0_COLOR_16_16 0x0000000F
940#define V_0280A0_COLOR_16_16_FLOAT 0x00000010
941#define V_0280A0_COLOR_8_24 0x00000011
942#define V_0280A0_COLOR_8_24_FLOAT 0x00000012
943#define V_0280A0_COLOR_24_8 0x00000013
944#define V_0280A0_COLOR_24_8_FLOAT 0x00000014
945#define V_0280A0_COLOR_10_11_11 0x00000015
946#define V_0280A0_COLOR_10_11_11_FLOAT 0x00000016
947#define V_0280A0_COLOR_11_11_10 0x00000017
948#define V_0280A0_COLOR_11_11_10_FLOAT 0x00000018
949#define V_0280A0_COLOR_2_10_10_10 0x00000019
950#define V_0280A0_COLOR_8_8_8_8 0x0000001A
951#define V_0280A0_COLOR_10_10_10_2 0x0000001B
952#define V_0280A0_COLOR_X24_8_32_FLOAT 0x0000001C
953#define V_0280A0_COLOR_32_32 0x0000001D
954#define V_0280A0_COLOR_32_32_FLOAT 0x0000001E
955#define V_0280A0_COLOR_16_16_16_16 0x0000001F
956#define V_0280A0_COLOR_16_16_16_16_FLOAT 0x00000020
957#define V_0280A0_COLOR_32_32_32_32 0x00000022
958#define V_0280A0_COLOR_32_32_32_32_FLOAT 0x00000023
959#define S_0280A0_ARRAY_MODE(x) (((x) & 0xF) << 8)
960#define G_0280A0_ARRAY_MODE(x) (((x) >> 8) & 0xF)
961#define C_0280A0_ARRAY_MODE 0xFFFFF0FF
962#define V_0280A0_ARRAY_LINEAR_GENERAL 0x00000000
963#define V_0280A0_ARRAY_LINEAR_ALIGNED 0x00000001
964#define V_0280A0_ARRAY_1D_TILED_THIN1 0x00000002
965#define V_0280A0_ARRAY_2D_TILED_THIN1 0x00000004
966#define S_0280A0_NUMBER_TYPE(x) (((x) & 0x7) << 12)
967#define G_0280A0_NUMBER_TYPE(x) (((x) >> 12) & 0x7)
968#define C_0280A0_NUMBER_TYPE 0xFFFF8FFF
969#define S_0280A0_READ_SIZE(x) (((x) & 0x1) << 15)
970#define G_0280A0_READ_SIZE(x) (((x) >> 15) & 0x1)
971#define C_0280A0_READ_SIZE 0xFFFF7FFF
972#define S_0280A0_COMP_SWAP(x) (((x) & 0x3) << 16)
973#define G_0280A0_COMP_SWAP(x) (((x) >> 16) & 0x3)
974#define C_0280A0_COMP_SWAP 0xFFFCFFFF
975#define S_0280A0_TILE_MODE(x) (((x) & 0x3) << 18)
976#define G_0280A0_TILE_MODE(x) (((x) >> 18) & 0x3)
977#define C_0280A0_TILE_MODE 0xFFF3FFFF
978#define S_0280A0_BLEND_CLAMP(x) (((x) & 0x1) << 20)
979#define G_0280A0_BLEND_CLAMP(x) (((x) >> 20) & 0x1)
980#define C_0280A0_BLEND_CLAMP 0xFFEFFFFF
981#define S_0280A0_CLEAR_COLOR(x) (((x) & 0x1) << 21)
982#define G_0280A0_CLEAR_COLOR(x) (((x) >> 21) & 0x1)
983#define C_0280A0_CLEAR_COLOR 0xFFDFFFFF
984#define S_0280A0_BLEND_BYPASS(x) (((x) & 0x1) << 22)
985#define G_0280A0_BLEND_BYPASS(x) (((x) >> 22) & 0x1)
986#define C_0280A0_BLEND_BYPASS 0xFFBFFFFF
987#define S_0280A0_BLEND_FLOAT32(x) (((x) & 0x1) << 23)
988#define G_0280A0_BLEND_FLOAT32(x) (((x) >> 23) & 0x1)
989#define C_0280A0_BLEND_FLOAT32 0xFF7FFFFF
990#define S_0280A0_SIMPLE_FLOAT(x) (((x) & 0x1) << 24)
991#define G_0280A0_SIMPLE_FLOAT(x) (((x) >> 24) & 0x1)
992#define C_0280A0_SIMPLE_FLOAT 0xFEFFFFFF
993#define S_0280A0_ROUND_MODE(x) (((x) & 0x1) << 25)
994#define G_0280A0_ROUND_MODE(x) (((x) >> 25) & 0x1)
995#define C_0280A0_ROUND_MODE 0xFDFFFFFF
996#define S_0280A0_TILE_COMPACT(x) (((x) & 0x1) << 26)
997#define G_0280A0_TILE_COMPACT(x) (((x) >> 26) & 0x1)
998#define C_0280A0_TILE_COMPACT 0xFBFFFFFF
999#define S_0280A0_SOURCE_FORMAT(x) (((x) & 0x1) << 27)
1000#define G_0280A0_SOURCE_FORMAT(x) (((x) >> 27) & 0x1)
1001#define C_0280A0_SOURCE_FORMAT 0xF7FFFFFF
1002#define R_0280A4_CB_COLOR1_INFO 0x0280A4
1003#define R_0280A8_CB_COLOR2_INFO 0x0280A8
1004#define R_0280AC_CB_COLOR3_INFO 0x0280AC
1005#define R_0280B0_CB_COLOR4_INFO 0x0280B0
1006#define R_0280B4_CB_COLOR5_INFO 0x0280B4
1007#define R_0280B8_CB_COLOR6_INFO 0x0280B8
1008#define R_0280BC_CB_COLOR7_INFO 0x0280BC
1009#define R_028060_CB_COLOR0_SIZE 0x028060
1010#define S_028060_PITCH_TILE_MAX(x) (((x) & 0x3FF) << 0)
1011#define G_028060_PITCH_TILE_MAX(x) (((x) >> 0) & 0x3FF)
1012#define C_028060_PITCH_TILE_MAX 0xFFFFFC00
1013#define S_028060_SLICE_TILE_MAX(x) (((x) & 0xFFFFF) << 10)
1014#define G_028060_SLICE_TILE_MAX(x) (((x) >> 10) & 0xFFFFF)
1015#define C_028060_SLICE_TILE_MAX 0xC00003FF
1016#define R_028064_CB_COLOR1_SIZE 0x028064
1017#define R_028068_CB_COLOR2_SIZE 0x028068
1018#define R_02806C_CB_COLOR3_SIZE 0x02806C
1019#define R_028070_CB_COLOR4_SIZE 0x028070
1020#define R_028074_CB_COLOR5_SIZE 0x028074
1021#define R_028078_CB_COLOR6_SIZE 0x028078
1022#define R_02807C_CB_COLOR7_SIZE 0x02807C
1023#define R_028238_CB_TARGET_MASK 0x028238
1024#define S_028238_TARGET0_ENABLE(x) (((x) & 0xF) << 0)
1025#define G_028238_TARGET0_ENABLE(x) (((x) >> 0) & 0xF)
1026#define C_028238_TARGET0_ENABLE 0xFFFFFFF0
1027#define S_028238_TARGET1_ENABLE(x) (((x) & 0xF) << 4)
1028#define G_028238_TARGET1_ENABLE(x) (((x) >> 4) & 0xF)
1029#define C_028238_TARGET1_ENABLE 0xFFFFFF0F
1030#define S_028238_TARGET2_ENABLE(x) (((x) & 0xF) << 8)
1031#define G_028238_TARGET2_ENABLE(x) (((x) >> 8) & 0xF)
1032#define C_028238_TARGET2_ENABLE 0xFFFFF0FF
1033#define S_028238_TARGET3_ENABLE(x) (((x) & 0xF) << 12)
1034#define G_028238_TARGET3_ENABLE(x) (((x) >> 12) & 0xF)
1035#define C_028238_TARGET3_ENABLE 0xFFFF0FFF
1036#define S_028238_TARGET4_ENABLE(x) (((x) & 0xF) << 16)
1037#define G_028238_TARGET4_ENABLE(x) (((x) >> 16) & 0xF)
1038#define C_028238_TARGET4_ENABLE 0xFFF0FFFF
1039#define S_028238_TARGET5_ENABLE(x) (((x) & 0xF) << 20)
1040#define G_028238_TARGET5_ENABLE(x) (((x) >> 20) & 0xF)
1041#define C_028238_TARGET5_ENABLE 0xFF0FFFFF
1042#define S_028238_TARGET6_ENABLE(x) (((x) & 0xF) << 24)
1043#define G_028238_TARGET6_ENABLE(x) (((x) >> 24) & 0xF)
1044#define C_028238_TARGET6_ENABLE 0xF0FFFFFF
1045#define S_028238_TARGET7_ENABLE(x) (((x) & 0xF) << 28)
1046#define G_028238_TARGET7_ENABLE(x) (((x) >> 28) & 0xF)
1047#define C_028238_TARGET7_ENABLE 0x0FFFFFFF
1048#define R_02823C_CB_SHADER_MASK 0x02823C
1049#define S_02823C_OUTPUT0_ENABLE(x) (((x) & 0xF) << 0)
1050#define G_02823C_OUTPUT0_ENABLE(x) (((x) >> 0) & 0xF)
1051#define C_02823C_OUTPUT0_ENABLE 0xFFFFFFF0
1052#define S_02823C_OUTPUT1_ENABLE(x) (((x) & 0xF) << 4)
1053#define G_02823C_OUTPUT1_ENABLE(x) (((x) >> 4) & 0xF)
1054#define C_02823C_OUTPUT1_ENABLE 0xFFFFFF0F
1055#define S_02823C_OUTPUT2_ENABLE(x) (((x) & 0xF) << 8)
1056#define G_02823C_OUTPUT2_ENABLE(x) (((x) >> 8) & 0xF)
1057#define C_02823C_OUTPUT2_ENABLE 0xFFFFF0FF
1058#define S_02823C_OUTPUT3_ENABLE(x) (((x) & 0xF) << 12)
1059#define G_02823C_OUTPUT3_ENABLE(x) (((x) >> 12) & 0xF)
1060#define C_02823C_OUTPUT3_ENABLE 0xFFFF0FFF
1061#define S_02823C_OUTPUT4_ENABLE(x) (((x) & 0xF) << 16)
1062#define G_02823C_OUTPUT4_ENABLE(x) (((x) >> 16) & 0xF)
1063#define C_02823C_OUTPUT4_ENABLE 0xFFF0FFFF
1064#define S_02823C_OUTPUT5_ENABLE(x) (((x) & 0xF) << 20)
1065#define G_02823C_OUTPUT5_ENABLE(x) (((x) >> 20) & 0xF)
1066#define C_02823C_OUTPUT5_ENABLE 0xFF0FFFFF
1067#define S_02823C_OUTPUT6_ENABLE(x) (((x) & 0xF) << 24)
1068#define G_02823C_OUTPUT6_ENABLE(x) (((x) >> 24) & 0xF)
1069#define C_02823C_OUTPUT6_ENABLE 0xF0FFFFFF
1070#define S_02823C_OUTPUT7_ENABLE(x) (((x) & 0xF) << 28)
1071#define G_02823C_OUTPUT7_ENABLE(x) (((x) >> 28) & 0xF)
1072#define C_02823C_OUTPUT7_ENABLE 0x0FFFFFFF
1073#define R_028AB0_VGT_STRMOUT_EN 0x028AB0
1074#define S_028AB0_STREAMOUT(x) (((x) & 0x1) << 0)
1075#define G_028AB0_STREAMOUT(x) (((x) >> 0) & 0x1)
1076#define C_028AB0_STREAMOUT 0xFFFFFFFE
1077#define R_028B20_VGT_STRMOUT_BUFFER_EN 0x028B20
1078#define S_028B20_BUFFER_0_EN(x) (((x) & 0x1) << 0)
1079#define G_028B20_BUFFER_0_EN(x) (((x) >> 0) & 0x1)
1080#define C_028B20_BUFFER_0_EN 0xFFFFFFFE
1081#define S_028B20_BUFFER_1_EN(x) (((x) & 0x1) << 1)
1082#define G_028B20_BUFFER_1_EN(x) (((x) >> 1) & 0x1)
1083#define C_028B20_BUFFER_1_EN 0xFFFFFFFD
1084#define S_028B20_BUFFER_2_EN(x) (((x) & 0x1) << 2)
1085#define G_028B20_BUFFER_2_EN(x) (((x) >> 2) & 0x1)
1086#define C_028B20_BUFFER_2_EN 0xFFFFFFFB
1087#define S_028B20_BUFFER_3_EN(x) (((x) & 0x1) << 3)
1088#define G_028B20_BUFFER_3_EN(x) (((x) >> 3) & 0x1)
1089#define C_028B20_BUFFER_3_EN 0xFFFFFFF7
1090#define S_028B20_SIZE(x) (((x) & 0xFFFFFFFF) << 0)
1091#define G_028B20_SIZE(x) (((x) >> 0) & 0xFFFFFFFF)
1092#define C_028B20_SIZE 0x00000000
1093#define R_038000_SQ_TEX_RESOURCE_WORD0_0 0x038000
1094#define S_038000_DIM(x) (((x) & 0x7) << 0)
1095#define G_038000_DIM(x) (((x) >> 0) & 0x7)
1096#define C_038000_DIM 0xFFFFFFF8
1097#define V_038000_SQ_TEX_DIM_1D 0x00000000
1098#define V_038000_SQ_TEX_DIM_2D 0x00000001
1099#define V_038000_SQ_TEX_DIM_3D 0x00000002
1100#define V_038000_SQ_TEX_DIM_CUBEMAP 0x00000003
1101#define V_038000_SQ_TEX_DIM_1D_ARRAY 0x00000004
1102#define V_038000_SQ_TEX_DIM_2D_ARRAY 0x00000005
1103#define V_038000_SQ_TEX_DIM_2D_MSAA 0x00000006
1104#define V_038000_SQ_TEX_DIM_2D_ARRAY_MSAA 0x00000007
1105#define S_038000_TILE_MODE(x) (((x) & 0xF) << 3)
1106#define G_038000_TILE_MODE(x) (((x) >> 3) & 0xF)
1107#define C_038000_TILE_MODE 0xFFFFFF87
1108#define S_038000_TILE_TYPE(x) (((x) & 0x1) << 7)
1109#define G_038000_TILE_TYPE(x) (((x) >> 7) & 0x1)
1110#define C_038000_TILE_TYPE 0xFFFFFF7F
1111#define S_038000_PITCH(x) (((x) & 0x7FF) << 8)
1112#define G_038000_PITCH(x) (((x) >> 8) & 0x7FF)
1113#define C_038000_PITCH 0xFFF800FF
1114#define S_038000_TEX_WIDTH(x) (((x) & 0x1FFF) << 19)
1115#define G_038000_TEX_WIDTH(x) (((x) >> 19) & 0x1FFF)
1116#define C_038000_TEX_WIDTH 0x0007FFFF
1117#define R_038004_SQ_TEX_RESOURCE_WORD1_0 0x038004
1118#define S_038004_TEX_HEIGHT(x) (((x) & 0x1FFF) << 0)
1119#define G_038004_TEX_HEIGHT(x) (((x) >> 0) & 0x1FFF)
1120#define C_038004_TEX_HEIGHT 0xFFFFE000
1121#define S_038004_TEX_DEPTH(x) (((x) & 0x1FFF) << 13)
1122#define G_038004_TEX_DEPTH(x) (((x) >> 13) & 0x1FFF)
1123#define C_038004_TEX_DEPTH 0xFC001FFF
1124#define S_038004_DATA_FORMAT(x) (((x) & 0x3F) << 26)
1125#define G_038004_DATA_FORMAT(x) (((x) >> 26) & 0x3F)
1126#define C_038004_DATA_FORMAT 0x03FFFFFF
1127#define V_038004_COLOR_INVALID 0x00000000
1128#define V_038004_COLOR_8 0x00000001
1129#define V_038004_COLOR_4_4 0x00000002
1130#define V_038004_COLOR_3_3_2 0x00000003
1131#define V_038004_COLOR_16 0x00000005
1132#define V_038004_COLOR_16_FLOAT 0x00000006
1133#define V_038004_COLOR_8_8 0x00000007
1134#define V_038004_COLOR_5_6_5 0x00000008
1135#define V_038004_COLOR_6_5_5 0x00000009
1136#define V_038004_COLOR_1_5_5_5 0x0000000A
1137#define V_038004_COLOR_4_4_4_4 0x0000000B
1138#define V_038004_COLOR_5_5_5_1 0x0000000C
1139#define V_038004_COLOR_32 0x0000000D
1140#define V_038004_COLOR_32_FLOAT 0x0000000E
1141#define V_038004_COLOR_16_16 0x0000000F
1142#define V_038004_COLOR_16_16_FLOAT 0x00000010
1143#define V_038004_COLOR_8_24 0x00000011
1144#define V_038004_COLOR_8_24_FLOAT 0x00000012
1145#define V_038004_COLOR_24_8 0x00000013
1146#define V_038004_COLOR_24_8_FLOAT 0x00000014
1147#define V_038004_COLOR_10_11_11 0x00000015
1148#define V_038004_COLOR_10_11_11_FLOAT 0x00000016
1149#define V_038004_COLOR_11_11_10 0x00000017
1150#define V_038004_COLOR_11_11_10_FLOAT 0x00000018
1151#define V_038004_COLOR_2_10_10_10 0x00000019
1152#define V_038004_COLOR_8_8_8_8 0x0000001A
1153#define V_038004_COLOR_10_10_10_2 0x0000001B
1154#define V_038004_COLOR_X24_8_32_FLOAT 0x0000001C
1155#define V_038004_COLOR_32_32 0x0000001D
1156#define V_038004_COLOR_32_32_FLOAT 0x0000001E
1157#define V_038004_COLOR_16_16_16_16 0x0000001F
1158#define V_038004_COLOR_16_16_16_16_FLOAT 0x00000020
1159#define V_038004_COLOR_32_32_32_32 0x00000022
1160#define V_038004_COLOR_32_32_32_32_FLOAT 0x00000023
1161#define V_038004_FMT_1 0x00000025
1162#define V_038004_FMT_GB_GR 0x00000027
1163#define V_038004_FMT_BG_RG 0x00000028
1164#define V_038004_FMT_32_AS_8 0x00000029
1165#define V_038004_FMT_32_AS_8_8 0x0000002A
1166#define V_038004_FMT_5_9_9_9_SHAREDEXP 0x0000002B
1167#define V_038004_FMT_8_8_8 0x0000002C
1168#define V_038004_FMT_16_16_16 0x0000002D
1169#define V_038004_FMT_16_16_16_FLOAT 0x0000002E
1170#define V_038004_FMT_32_32_32 0x0000002F
1171#define V_038004_FMT_32_32_32_FLOAT 0x00000030
1172#define R_038010_SQ_TEX_RESOURCE_WORD4_0 0x038010
1173#define S_038010_FORMAT_COMP_X(x) (((x) & 0x3) << 0)
1174#define G_038010_FORMAT_COMP_X(x) (((x) >> 0) & 0x3)
1175#define C_038010_FORMAT_COMP_X 0xFFFFFFFC
1176#define S_038010_FORMAT_COMP_Y(x) (((x) & 0x3) << 2)
1177#define G_038010_FORMAT_COMP_Y(x) (((x) >> 2) & 0x3)
1178#define C_038010_FORMAT_COMP_Y 0xFFFFFFF3
1179#define S_038010_FORMAT_COMP_Z(x) (((x) & 0x3) << 4)
1180#define G_038010_FORMAT_COMP_Z(x) (((x) >> 4) & 0x3)
1181#define C_038010_FORMAT_COMP_Z 0xFFFFFFCF
1182#define S_038010_FORMAT_COMP_W(x) (((x) & 0x3) << 6)
1183#define G_038010_FORMAT_COMP_W(x) (((x) >> 6) & 0x3)
1184#define C_038010_FORMAT_COMP_W 0xFFFFFF3F
1185#define S_038010_NUM_FORMAT_ALL(x) (((x) & 0x3) << 8)
1186#define G_038010_NUM_FORMAT_ALL(x) (((x) >> 8) & 0x3)
1187#define C_038010_NUM_FORMAT_ALL 0xFFFFFCFF
1188#define S_038010_SRF_MODE_ALL(x) (((x) & 0x1) << 10)
1189#define G_038010_SRF_MODE_ALL(x) (((x) >> 10) & 0x1)
1190#define C_038010_SRF_MODE_ALL 0xFFFFFBFF
1191#define S_038010_FORCE_DEGAMMA(x) (((x) & 0x1) << 11)
1192#define G_038010_FORCE_DEGAMMA(x) (((x) >> 11) & 0x1)
1193#define C_038010_FORCE_DEGAMMA 0xFFFFF7FF
1194#define S_038010_ENDIAN_SWAP(x) (((x) & 0x3) << 12)
1195#define G_038010_ENDIAN_SWAP(x) (((x) >> 12) & 0x3)
1196#define C_038010_ENDIAN_SWAP 0xFFFFCFFF
1197#define S_038010_REQUEST_SIZE(x) (((x) & 0x3) << 14)
1198#define G_038010_REQUEST_SIZE(x) (((x) >> 14) & 0x3)
1199#define C_038010_REQUEST_SIZE 0xFFFF3FFF
1200#define S_038010_DST_SEL_X(x) (((x) & 0x7) << 16)
1201#define G_038010_DST_SEL_X(x) (((x) >> 16) & 0x7)
1202#define C_038010_DST_SEL_X 0xFFF8FFFF
1203#define S_038010_DST_SEL_Y(x) (((x) & 0x7) << 19)
1204#define G_038010_DST_SEL_Y(x) (((x) >> 19) & 0x7)
1205#define C_038010_DST_SEL_Y 0xFFC7FFFF
1206#define S_038010_DST_SEL_Z(x) (((x) & 0x7) << 22)
1207#define G_038010_DST_SEL_Z(x) (((x) >> 22) & 0x7)
1208#define C_038010_DST_SEL_Z 0xFE3FFFFF
1209#define S_038010_DST_SEL_W(x) (((x) & 0x7) << 25)
1210#define G_038010_DST_SEL_W(x) (((x) >> 25) & 0x7)
1211#define C_038010_DST_SEL_W 0xF1FFFFFF
1212#define S_038010_BASE_LEVEL(x) (((x) & 0xF) << 28)
1213#define G_038010_BASE_LEVEL(x) (((x) >> 28) & 0xF)
1214#define C_038010_BASE_LEVEL 0x0FFFFFFF
1215#define R_038014_SQ_TEX_RESOURCE_WORD5_0 0x038014
1216#define S_038014_LAST_LEVEL(x) (((x) & 0xF) << 0)
1217#define G_038014_LAST_LEVEL(x) (((x) >> 0) & 0xF)
1218#define C_038014_LAST_LEVEL 0xFFFFFFF0
1219#define S_038014_BASE_ARRAY(x) (((x) & 0x1FFF) << 4)
1220#define G_038014_BASE_ARRAY(x) (((x) >> 4) & 0x1FFF)
1221#define C_038014_BASE_ARRAY 0xFFFE000F
1222#define S_038014_LAST_ARRAY(x) (((x) & 0x1FFF) << 17)
1223#define G_038014_LAST_ARRAY(x) (((x) >> 17) & 0x1FFF)
1224#define C_038014_LAST_ARRAY 0xC001FFFF
1225#define R_0288A8_SQ_ESGS_RING_ITEMSIZE 0x0288A8
1226#define S_0288A8_ITEMSIZE(x) (((x) & 0x7FFF) << 0)
1227#define G_0288A8_ITEMSIZE(x) (((x) >> 0) & 0x7FFF)
1228#define C_0288A8_ITEMSIZE 0xFFFF8000
1229#define R_008C44_SQ_ESGS_RING_SIZE 0x008C44
1230#define S_008C44_MEM_SIZE(x) (((x) & 0xFFFFFFFF) << 0)
1231#define G_008C44_MEM_SIZE(x) (((x) >> 0) & 0xFFFFFFFF)
1232#define C_008C44_MEM_SIZE 0x00000000
1233#define R_0288B0_SQ_ESTMP_RING_ITEMSIZE 0x0288B0
1234#define S_0288B0_ITEMSIZE(x) (((x) & 0x7FFF) << 0)
1235#define G_0288B0_ITEMSIZE(x) (((x) >> 0) & 0x7FFF)
1236#define C_0288B0_ITEMSIZE 0xFFFF8000
1237#define R_008C54_SQ_ESTMP_RING_SIZE 0x008C54
1238#define S_008C54_MEM_SIZE(x) (((x) & 0xFFFFFFFF) << 0)
1239#define G_008C54_MEM_SIZE(x) (((x) >> 0) & 0xFFFFFFFF)
1240#define C_008C54_MEM_SIZE 0x00000000
1241#define R_0288C0_SQ_FBUF_RING_ITEMSIZE 0x0288C0
1242#define S_0288C0_ITEMSIZE(x) (((x) & 0x7FFF) << 0)
1243#define G_0288C0_ITEMSIZE(x) (((x) >> 0) & 0x7FFF)
1244#define C_0288C0_ITEMSIZE 0xFFFF8000
1245#define R_008C74_SQ_FBUF_RING_SIZE 0x008C74
1246#define S_008C74_MEM_SIZE(x) (((x) & 0xFFFFFFFF) << 0)
1247#define G_008C74_MEM_SIZE(x) (((x) >> 0) & 0xFFFFFFFF)
1248#define C_008C74_MEM_SIZE 0x00000000
1249#define R_0288B4_SQ_GSTMP_RING_ITEMSIZE 0x0288B4
1250#define S_0288B4_ITEMSIZE(x) (((x) & 0x7FFF) << 0)
1251#define G_0288B4_ITEMSIZE(x) (((x) >> 0) & 0x7FFF)
1252#define C_0288B4_ITEMSIZE 0xFFFF8000
1253#define R_008C5C_SQ_GSTMP_RING_SIZE 0x008C5C
1254#define S_008C5C_MEM_SIZE(x) (((x) & 0xFFFFFFFF) << 0)
1255#define G_008C5C_MEM_SIZE(x) (((x) >> 0) & 0xFFFFFFFF)
1256#define C_008C5C_MEM_SIZE 0x00000000
1257#define R_0288AC_SQ_GSVS_RING_ITEMSIZE 0x0288AC
1258#define S_0288AC_ITEMSIZE(x) (((x) & 0x7FFF) << 0)
1259#define G_0288AC_ITEMSIZE(x) (((x) >> 0) & 0x7FFF)
1260#define C_0288AC_ITEMSIZE 0xFFFF8000
1261#define R_008C4C_SQ_GSVS_RING_SIZE 0x008C4C
1262#define S_008C4C_MEM_SIZE(x) (((x) & 0xFFFFFFFF) << 0)
1263#define G_008C4C_MEM_SIZE(x) (((x) >> 0) & 0xFFFFFFFF)
1264#define C_008C4C_MEM_SIZE 0x00000000
1265#define R_0288BC_SQ_PSTMP_RING_ITEMSIZE 0x0288BC
1266#define S_0288BC_ITEMSIZE(x) (((x) & 0x7FFF) << 0)
1267#define G_0288BC_ITEMSIZE(x) (((x) >> 0) & 0x7FFF)
1268#define C_0288BC_ITEMSIZE 0xFFFF8000
1269#define R_008C6C_SQ_PSTMP_RING_SIZE 0x008C6C
1270#define S_008C6C_MEM_SIZE(x) (((x) & 0xFFFFFFFF) << 0)
1271#define G_008C6C_MEM_SIZE(x) (((x) >> 0) & 0xFFFFFFFF)
1272#define C_008C6C_MEM_SIZE 0x00000000
1273#define R_0288C4_SQ_REDUC_RING_ITEMSIZE 0x0288C4
1274#define S_0288C4_ITEMSIZE(x) (((x) & 0x7FFF) << 0)
1275#define G_0288C4_ITEMSIZE(x) (((x) >> 0) & 0x7FFF)
1276#define C_0288C4_ITEMSIZE 0xFFFF8000
1277#define R_008C7C_SQ_REDUC_RING_SIZE 0x008C7C
1278#define S_008C7C_MEM_SIZE(x) (((x) & 0xFFFFFFFF) << 0)
1279#define G_008C7C_MEM_SIZE(x) (((x) >> 0) & 0xFFFFFFFF)
1280#define C_008C7C_MEM_SIZE 0x00000000
1281#define R_0288B8_SQ_VSTMP_RING_ITEMSIZE 0x0288B8
1282#define S_0288B8_ITEMSIZE(x) (((x) & 0x7FFF) << 0)
1283#define G_0288B8_ITEMSIZE(x) (((x) >> 0) & 0x7FFF)
1284#define C_0288B8_ITEMSIZE 0xFFFF8000
1285#define R_008C64_SQ_VSTMP_RING_SIZE 0x008C64
1286#define S_008C64_MEM_SIZE(x) (((x) & 0xFFFFFFFF) << 0)
1287#define G_008C64_MEM_SIZE(x) (((x) >> 0) & 0xFFFFFFFF)
1288#define C_008C64_MEM_SIZE 0x00000000
1289#define R_0288C8_SQ_GS_VERT_ITEMSIZE 0x0288C8
1290#define S_0288C8_ITEMSIZE(x) (((x) & 0x7FFF) << 0)
1291#define G_0288C8_ITEMSIZE(x) (((x) >> 0) & 0x7FFF)
1292#define C_0288C8_ITEMSIZE 0xFFFF8000
1293#define R_028010_DB_DEPTH_INFO 0x028010
1294#define S_028010_FORMAT(x) (((x) & 0x7) << 0)
1295#define G_028010_FORMAT(x) (((x) >> 0) & 0x7)
1296#define C_028010_FORMAT 0xFFFFFFF8
1297#define V_028010_DEPTH_INVALID 0x00000000
1298#define V_028010_DEPTH_16 0x00000001
1299#define V_028010_DEPTH_X8_24 0x00000002
1300#define V_028010_DEPTH_8_24 0x00000003
1301#define V_028010_DEPTH_X8_24_FLOAT 0x00000004
1302#define V_028010_DEPTH_8_24_FLOAT 0x00000005
1303#define V_028010_DEPTH_32_FLOAT 0x00000006
1304#define V_028010_DEPTH_X24_8_32_FLOAT 0x00000007
1305#define S_028010_READ_SIZE(x) (((x) & 0x1) << 3)
1306#define G_028010_READ_SIZE(x) (((x) >> 3) & 0x1)
1307#define C_028010_READ_SIZE 0xFFFFFFF7
1308#define S_028010_ARRAY_MODE(x) (((x) & 0xF) << 15)
1309#define G_028010_ARRAY_MODE(x) (((x) >> 15) & 0xF)
1310#define C_028010_ARRAY_MODE 0xFFF87FFF
1311#define S_028010_TILE_SURFACE_ENABLE(x) (((x) & 0x1) << 25)
1312#define G_028010_TILE_SURFACE_ENABLE(x) (((x) >> 25) & 0x1)
1313#define C_028010_TILE_SURFACE_ENABLE 0xFDFFFFFF
1314#define S_028010_TILE_COMPACT(x) (((x) & 0x1) << 26)
1315#define G_028010_TILE_COMPACT(x) (((x) >> 26) & 0x1)
1316#define C_028010_TILE_COMPACT 0xFBFFFFFF
1317#define S_028010_ZRANGE_PRECISION(x) (((x) & 0x1) << 31)
1318#define G_028010_ZRANGE_PRECISION(x) (((x) >> 31) & 0x1)
1319#define C_028010_ZRANGE_PRECISION 0x7FFFFFFF
1320#define R_028000_DB_DEPTH_SIZE 0x028000
1321#define S_028000_PITCH_TILE_MAX(x) (((x) & 0x3FF) << 0)
1322#define G_028000_PITCH_TILE_MAX(x) (((x) >> 0) & 0x3FF)
1323#define C_028000_PITCH_TILE_MAX 0xFFFFFC00
1324#define S_028000_SLICE_TILE_MAX(x) (((x) & 0xFFFFF) << 10)
1325#define G_028000_SLICE_TILE_MAX(x) (((x) >> 10) & 0xFFFFF)
1326#define C_028000_SLICE_TILE_MAX 0xC00003FF
1327#define R_028004_DB_DEPTH_VIEW 0x028004
1328#define S_028004_SLICE_START(x) (((x) & 0x7FF) << 0)
1329#define G_028004_SLICE_START(x) (((x) >> 0) & 0x7FF)
1330#define C_028004_SLICE_START 0xFFFFF800
1331#define S_028004_SLICE_MAX(x) (((x) & 0x7FF) << 13)
1332#define G_028004_SLICE_MAX(x) (((x) >> 13) & 0x7FF)
1333#define C_028004_SLICE_MAX 0xFF001FFF
1334#define R_028800_DB_DEPTH_CONTROL 0x028800
1335#define S_028800_STENCIL_ENABLE(x) (((x) & 0x1) << 0)
1336#define G_028800_STENCIL_ENABLE(x) (((x) >> 0) & 0x1)
1337#define C_028800_STENCIL_ENABLE 0xFFFFFFFE
1338#define S_028800_Z_ENABLE(x) (((x) & 0x1) << 1)
1339#define G_028800_Z_ENABLE(x) (((x) >> 1) & 0x1)
1340#define C_028800_Z_ENABLE 0xFFFFFFFD
1341#define S_028800_Z_WRITE_ENABLE(x) (((x) & 0x1) << 2)
1342#define G_028800_Z_WRITE_ENABLE(x) (((x) >> 2) & 0x1)
1343#define C_028800_Z_WRITE_ENABLE 0xFFFFFFFB
1344#define S_028800_ZFUNC(x) (((x) & 0x7) << 4)
1345#define G_028800_ZFUNC(x) (((x) >> 4) & 0x7)
1346#define C_028800_ZFUNC 0xFFFFFF8F
1347#define S_028800_BACKFACE_ENABLE(x) (((x) & 0x1) << 7)
1348#define G_028800_BACKFACE_ENABLE(x) (((x) >> 7) & 0x1)
1349#define C_028800_BACKFACE_ENABLE 0xFFFFFF7F
1350#define S_028800_STENCILFUNC(x) (((x) & 0x7) << 8)
1351#define G_028800_STENCILFUNC(x) (((x) >> 8) & 0x7)
1352#define C_028800_STENCILFUNC 0xFFFFF8FF
1353#define S_028800_STENCILFAIL(x) (((x) & 0x7) << 11)
1354#define G_028800_STENCILFAIL(x) (((x) >> 11) & 0x7)
1355#define C_028800_STENCILFAIL 0xFFFFC7FF
1356#define S_028800_STENCILZPASS(x) (((x) & 0x7) << 14)
1357#define G_028800_STENCILZPASS(x) (((x) >> 14) & 0x7)
1358#define C_028800_STENCILZPASS 0xFFFE3FFF
1359#define S_028800_STENCILZFAIL(x) (((x) & 0x7) << 17)
1360#define G_028800_STENCILZFAIL(x) (((x) >> 17) & 0x7)
1361#define C_028800_STENCILZFAIL 0xFFF1FFFF
1362#define S_028800_STENCILFUNC_BF(x) (((x) & 0x7) << 20)
1363#define G_028800_STENCILFUNC_BF(x) (((x) >> 20) & 0x7)
1364#define C_028800_STENCILFUNC_BF 0xFF8FFFFF
1365#define S_028800_STENCILFAIL_BF(x) (((x) & 0x7) << 23)
1366#define G_028800_STENCILFAIL_BF(x) (((x) >> 23) & 0x7)
1367#define C_028800_STENCILFAIL_BF 0xFC7FFFFF
1368#define S_028800_STENCILZPASS_BF(x) (((x) & 0x7) << 26)
1369#define G_028800_STENCILZPASS_BF(x) (((x) >> 26) & 0x7)
1370#define C_028800_STENCILZPASS_BF 0xE3FFFFFF
1371#define S_028800_STENCILZFAIL_BF(x) (((x) & 0x7) << 29)
1372#define G_028800_STENCILZFAIL_BF(x) (((x) >> 29) & 0x7)
1373#define C_028800_STENCILZFAIL_BF 0x1FFFFFFF
909 1374
910#endif 1375#endif
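All register definitions above follow one convention: for a field FIELD of register R_<addr>_<NAME>, S_<addr>_FIELD(x) shifts a value into place, G_<addr>_FIELD(x) extracts it, and C_<addr>_FIELD is the mask with the field cleared. A sketch of the resulting read-modify-write pattern, assuming 7 encodes ALWAYS in the usual compare-func enumeration and using a hypothetical function name:

static void r600_force_zfunc_always(struct radeon_device *rdev)
{
	u32 tmp = RREG32(R_028800_DB_DEPTH_CONTROL);

	tmp &= C_028800_ZFUNC;		/* clear the 3-bit ZFUNC field */
	tmp |= S_028800_ZFUNC(7);	/* write the new compare function */
	WREG32(R_028800_DB_DEPTH_CONTROL, tmp);
}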
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index c0356bb193e5..829e26e8a4bb 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -89,6 +89,7 @@ extern int radeon_testing;
89extern int radeon_connector_table; 89extern int radeon_connector_table;
90extern int radeon_tv; 90extern int radeon_tv;
91extern int radeon_new_pll; 91extern int radeon_new_pll;
92extern int radeon_dynpm;
92extern int radeon_audio; 93extern int radeon_audio;
93 94
94/* 95/*
@@ -118,6 +119,21 @@ struct radeon_device;
118/* 119/*
119 * BIOS. 120 * BIOS.
120 */ 121 */
122#define ATRM_BIOS_PAGE 4096
123
124#if defined(CONFIG_VGA_SWITCHEROO)
125bool radeon_atrm_supported(struct pci_dev *pdev);
126int radeon_atrm_get_bios_chunk(uint8_t *bios, int offset, int len);
127#else
128static inline bool radeon_atrm_supported(struct pci_dev *pdev)
129{
130 return false;
131}
132
133static inline int radeon_atrm_get_bios_chunk(uint8_t *bios, int offset, int len){
134 return -EINVAL;
135}
136#endif
121bool radeon_get_bios(struct radeon_device *rdev); 137bool radeon_get_bios(struct radeon_device *rdev);
122 138
123 139
@@ -138,17 +154,23 @@ void radeon_dummy_page_fini(struct radeon_device *rdev);
138struct radeon_clock { 154struct radeon_clock {
139 struct radeon_pll p1pll; 155 struct radeon_pll p1pll;
140 struct radeon_pll p2pll; 156 struct radeon_pll p2pll;
157 struct radeon_pll dcpll;
141 struct radeon_pll spll; 158 struct radeon_pll spll;
142 struct radeon_pll mpll; 159 struct radeon_pll mpll;
143 /* 10 kHz units */ 160 /* 10 kHz units */
144 uint32_t default_mclk; 161 uint32_t default_mclk;
145 uint32_t default_sclk; 162 uint32_t default_sclk;
163 uint32_t default_dispclk;
164 uint32_t dp_extclk;
146}; 165};
147 166
148/* 167/*
149 * Power management 168 * Power management
150 */ 169 */
151int radeon_pm_init(struct radeon_device *rdev); 170int radeon_pm_init(struct radeon_device *rdev);
171void radeon_pm_compute_clocks(struct radeon_device *rdev);
172void radeon_combios_get_power_modes(struct radeon_device *rdev);
173void radeon_atombios_get_power_modes(struct radeon_device *rdev);
152 174
153/* 175/*
154 * Fences. 176 * Fences.
@@ -275,6 +297,7 @@ union radeon_gart_table {
275}; 297};
276 298
277#define RADEON_GPU_PAGE_SIZE 4096 299#define RADEON_GPU_PAGE_SIZE 4096
300#define RADEON_GPU_PAGE_MASK (RADEON_GPU_PAGE_SIZE - 1)
278 301
279struct radeon_gart { 302struct radeon_gart {
280 dma_addr_t table_addr; 303 dma_addr_t table_addr;
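The new RADEON_GPU_PAGE_MASK complements RADEON_GPU_PAGE_SIZE for address arithmetic on GPU pages; an illustrative helper (hypothetical name):

static inline u64 radeon_gpu_page_base(u64 addr)
{
	/* round a GPU address down to the start of its 4 KiB page */
	return addr & ~(u64)RADEON_GPU_PAGE_MASK;
}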
@@ -309,21 +332,19 @@ struct radeon_mc {
309 /* for some chips with <= 32MB we need to lie 332 /* for some chips with <= 32MB we need to lie
310 * about vram size near mc fb location */ 333 * about vram size near mc fb location */
311 u64 mc_vram_size; 334 u64 mc_vram_size;
312 u64 gtt_location; 335 u64 visible_vram_size;
313 u64 gtt_size; 336 u64 gtt_size;
314 u64 gtt_start; 337 u64 gtt_start;
315 u64 gtt_end; 338 u64 gtt_end;
316 u64 vram_location;
317 u64 vram_start; 339 u64 vram_start;
318 u64 vram_end; 340 u64 vram_end;
319 unsigned vram_width; 341 unsigned vram_width;
320 u64 real_vram_size; 342 u64 real_vram_size;
321 int vram_mtrr; 343 int vram_mtrr;
322 bool vram_is_ddr; 344 bool vram_is_ddr;
323 bool igp_sideport_enabled; 345 bool igp_sideport_enabled;
324}; 346};
325 347
326int radeon_mc_setup(struct radeon_device *rdev);
327bool radeon_combios_sideport_present(struct radeon_device *rdev); 348bool radeon_combios_sideport_present(struct radeon_device *rdev);
328bool radeon_atombios_sideport_present(struct radeon_device *rdev); 349bool radeon_atombios_sideport_present(struct radeon_device *rdev);
329 350
@@ -348,6 +369,7 @@ struct radeon_irq {
348 bool sw_int; 369 bool sw_int;
349 /* FIXME: use a define max crtc rather than hardcode it */ 370 /* FIXME: use a define max crtc rather than hardcode it */
350 bool crtc_vblank_int[2]; 371 bool crtc_vblank_int[2];
372 wait_queue_head_t vblank_queue;
351 /* FIXME: use defines for max hpd/dacs */ 373 /* FIXME: use defines for max hpd/dacs */
352 bool hpd[6]; 374 bool hpd[6];
353 spinlock_t sw_lock; 375 spinlock_t sw_lock;
@@ -379,6 +401,7 @@ struct radeon_ib {
379struct radeon_ib_pool { 401struct radeon_ib_pool {
380 struct mutex mutex; 402 struct mutex mutex;
381 struct radeon_bo *robj; 403 struct radeon_bo *robj;
404 struct list_head bogus_ib;
382 struct radeon_ib ibs[RADEON_IB_POOL_SIZE]; 405 struct radeon_ib ibs[RADEON_IB_POOL_SIZE];
383 bool ready; 406 bool ready;
384 unsigned head_id; 407 unsigned head_id;
@@ -433,6 +456,7 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib);
433int radeon_ib_pool_init(struct radeon_device *rdev); 456int radeon_ib_pool_init(struct radeon_device *rdev);
434void radeon_ib_pool_fini(struct radeon_device *rdev); 457void radeon_ib_pool_fini(struct radeon_device *rdev);
435int radeon_ib_test(struct radeon_device *rdev); 458int radeon_ib_test(struct radeon_device *rdev);
459extern void radeon_ib_bogus_add(struct radeon_device *rdev, struct radeon_ib *ib);
436/* Ring access between begin & end cannot sleep */ 460/* Ring access between begin & end cannot sleep */
437void radeon_ring_free_size(struct radeon_device *rdev); 461void radeon_ring_free_size(struct radeon_device *rdev);
438int radeon_ring_lock(struct radeon_device *rdev, unsigned ndw); 462int radeon_ring_lock(struct radeon_device *rdev, unsigned ndw);
@@ -570,7 +594,99 @@ struct radeon_wb {
570 * Equation between gpu/memory clock and available bandwidth is hw dependent 594 * Equation between gpu/memory clock and available bandwidth is hw dependent
571 * (type of memory, bus size, efficiency, ...) 595 * (type of memory, bus size, efficiency, ...)
572 */ 596 */
597enum radeon_pm_state {
598 PM_STATE_DISABLED,
599 PM_STATE_MINIMUM,
600 PM_STATE_PAUSED,
601 PM_STATE_ACTIVE
602};
603enum radeon_pm_action {
604 PM_ACTION_NONE,
605 PM_ACTION_MINIMUM,
606 PM_ACTION_DOWNCLOCK,
607 PM_ACTION_UPCLOCK
608};
609
610enum radeon_voltage_type {
611 VOLTAGE_NONE = 0,
612 VOLTAGE_GPIO,
613 VOLTAGE_VDDC,
614 VOLTAGE_SW
615};
616
617enum radeon_pm_state_type {
618 POWER_STATE_TYPE_DEFAULT,
619 POWER_STATE_TYPE_POWERSAVE,
620 POWER_STATE_TYPE_BATTERY,
621 POWER_STATE_TYPE_BALANCED,
622 POWER_STATE_TYPE_PERFORMANCE,
623};
624
625enum radeon_pm_clock_mode_type {
626 POWER_MODE_TYPE_DEFAULT,
627 POWER_MODE_TYPE_LOW,
628 POWER_MODE_TYPE_MID,
629 POWER_MODE_TYPE_HIGH,
630};
631
632struct radeon_voltage {
633 enum radeon_voltage_type type;
634 /* gpio voltage */
635 struct radeon_gpio_rec gpio;
636 u32 delay; /* delay in usec from voltage drop to sclk change */
637 bool active_high; /* voltage drop is active when bit is high */
638 /* VDDC voltage */
639 u8 vddc_id; /* index into vddc voltage table */
640 u8 vddci_id; /* index into vddci voltage table */
641 bool vddci_enabled;
642 /* r6xx+ sw */
643 u32 voltage;
644};
645
646struct radeon_pm_non_clock_info {
647 /* pcie lanes */
648 int pcie_lanes;
649 /* standardized non-clock flags */
650 u32 flags;
651};
652
653struct radeon_pm_clock_info {
654 /* memory clock */
655 u32 mclk;
656 /* engine clock */
657 u32 sclk;
658 /* voltage info */
659 struct radeon_voltage voltage;
660 /* standardized clock flags - not sure we'll need these */
661 u32 flags;
662};
663
664struct radeon_power_state {
665 enum radeon_pm_state_type type;
666 /* XXX: use a define for num clock modes */
667 struct radeon_pm_clock_info clock_info[8];
668 /* number of valid clock modes in this power state */
669 int num_clock_modes;
670 struct radeon_pm_clock_info *default_clock_mode;
671 /* non clock info about this state */
672 struct radeon_pm_non_clock_info non_clock_info;
673 bool voltage_drop_active;
674};
675
676/*
677 * Some modes are overclocked by a very small amount; accept them
678 */
679#define RADEON_MODE_OVERCLOCK_MARGIN 500 /* 5 MHz */
680
573struct radeon_pm { 681struct radeon_pm {
682 struct mutex mutex;
683 struct delayed_work idle_work;
684 enum radeon_pm_state state;
685 enum radeon_pm_action planned_action;
686 unsigned long action_timeout;
687 bool downclocked;
688 int active_crtcs;
689 int req_vblank;
574 fixed20_12 max_bandwidth; 690 fixed20_12 max_bandwidth;
575 fixed20_12 igp_sideport_mclk; 691 fixed20_12 igp_sideport_mclk;
576 fixed20_12 igp_system_mclk; 692 fixed20_12 igp_system_mclk;
@@ -582,6 +698,15 @@ struct radeon_pm {
582 fixed20_12 core_bandwidth; 698 fixed20_12 core_bandwidth;
583 fixed20_12 sclk; 699 fixed20_12 sclk;
584 fixed20_12 needed_bandwidth; 700 fixed20_12 needed_bandwidth;
701 /* XXX: use a define for num power modes */
702 struct radeon_power_state power_state[8];
703 /* number of valid power states */
704 int num_power_states;
705 struct radeon_power_state *current_power_state;
706 struct radeon_pm_clock_info *current_clock_mode;
707 struct radeon_power_state *requested_power_state;
708 struct radeon_pm_clock_info *requested_clock_mode;
709 struct radeon_power_state *default_power_state;
585}; 710};
586 711
587 712
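Taken together, the new radeon_pm members keep a current, a requested and a default power state, each carrying an array of clock modes. A speculative sketch of how a reclocking decision would select into these arrays; the helper name and the choice of index 0 as the lowest state are assumptions, not taken from the patch:

static void radeon_pm_request_lowest(struct radeon_device *rdev)
{
	mutex_lock(&rdev->pm.mutex);
	rdev->pm.requested_power_state = &rdev->pm.power_state[0];
	rdev->pm.requested_clock_mode =
		rdev->pm.requested_power_state->default_clock_mode;
	mutex_unlock(&rdev->pm.mutex);
}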
@@ -651,6 +776,7 @@ struct radeon_asic {
651 void (*set_engine_clock)(struct radeon_device *rdev, uint32_t eng_clock); 776 void (*set_engine_clock)(struct radeon_device *rdev, uint32_t eng_clock);
652 uint32_t (*get_memory_clock)(struct radeon_device *rdev); 777 uint32_t (*get_memory_clock)(struct radeon_device *rdev);
653 void (*set_memory_clock)(struct radeon_device *rdev, uint32_t mem_clock); 778 void (*set_memory_clock)(struct radeon_device *rdev, uint32_t mem_clock);
779 int (*get_pcie_lanes)(struct radeon_device *rdev);
654 void (*set_pcie_lanes)(struct radeon_device *rdev, int lanes); 780 void (*set_pcie_lanes)(struct radeon_device *rdev, int lanes);
655 void (*set_clock_gating)(struct radeon_device *rdev, int enable); 781 void (*set_clock_gating)(struct radeon_device *rdev, int enable);
656 int (*set_surface_reg)(struct radeon_device *rdev, int reg, 782 int (*set_surface_reg)(struct radeon_device *rdev, int reg,
@@ -701,6 +827,9 @@ struct r600_asic {
701 unsigned sx_max_export_pos_size; 827 unsigned sx_max_export_pos_size;
702 unsigned sx_max_export_smx_size; 828 unsigned sx_max_export_smx_size;
703 unsigned sq_num_cf_insts; 829 unsigned sq_num_cf_insts;
830 unsigned tiling_nbanks;
831 unsigned tiling_npipes;
832 unsigned tiling_group_size;
704}; 833};
705 834
706struct rv770_asic { 835struct rv770_asic {
@@ -721,6 +850,9 @@ struct rv770_asic {
721 unsigned sc_prim_fifo_size; 850 unsigned sc_prim_fifo_size;
722 unsigned sc_hiz_tile_fifo_size; 851 unsigned sc_hiz_tile_fifo_size;
723 unsigned sc_earlyz_tile_fifo_fize; 852 unsigned sc_earlyz_tile_fifo_fize;
853 unsigned tiling_nbanks;
854 unsigned tiling_npipes;
855 unsigned tiling_group_size;
724}; 856};
725 857
726union radeon_asic_config { 858union radeon_asic_config {
@@ -830,6 +962,8 @@ struct radeon_device {
830 struct r600_ih ih; /* r6/700 interrupt ring */ 962 struct r600_ih ih; /* r6/700 interrupt ring */
831 struct workqueue_struct *wq; 963 struct workqueue_struct *wq;
832 struct work_struct hotplug_work; 964 struct work_struct hotplug_work;
965 int num_crtc; /* number of crtcs */
966 struct mutex dc_hw_i2c_mutex; /* display controller hw i2c mutex */
833 967
834 /* audio stuff */ 968 /* audio stuff */
835 struct timer_list audio_timer; 969 struct timer_list audio_timer;
@@ -838,6 +972,8 @@ struct radeon_device {
838 int audio_bits_per_sample; 972 int audio_bits_per_sample;
839 uint8_t audio_status_bits; 973 uint8_t audio_status_bits;
840 uint8_t audio_category_code; 974 uint8_t audio_category_code;
975
976 bool powered_down;
841}; 977};
842 978
843int radeon_device_init(struct radeon_device *rdev, 979int radeon_device_init(struct radeon_device *rdev,
@@ -895,6 +1031,8 @@ static inline void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32
895#define WREG32_MC(reg, v) rdev->mc_wreg(rdev, (reg), (v)) 1031#define WREG32_MC(reg, v) rdev->mc_wreg(rdev, (reg), (v))
896#define RREG32_PCIE(reg) rv370_pcie_rreg(rdev, (reg)) 1032#define RREG32_PCIE(reg) rv370_pcie_rreg(rdev, (reg))
897#define WREG32_PCIE(reg, v) rv370_pcie_wreg(rdev, (reg), (v)) 1033#define WREG32_PCIE(reg, v) rv370_pcie_wreg(rdev, (reg), (v))
1034#define RREG32_PCIE_P(reg) rdev->pciep_rreg(rdev, (reg))
1035#define WREG32_PCIE_P(reg, v) rdev->pciep_wreg(rdev, (reg), (v))
898#define WREG32_P(reg, val, mask) \ 1036#define WREG32_P(reg, val, mask) \
899 do { \ 1037 do { \
900 uint32_t tmp_ = RREG32(reg); \ 1038 uint32_t tmp_ = RREG32(reg); \
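WREG32_P pairs naturally with the C_* clear masks: its mask argument names the bits to preserve, so passing a field's C_ mask rewrites just that field. An illustrative, non-patch use, assuming the usual definition of the macro (its body is truncated in this hunk):

static void r600_enable_z(struct radeon_device *rdev)
{
	/* preserve every bit except Z_ENABLE, then set it */
	WREG32_P(R_028800_DB_DEPTH_CONTROL, S_028800_Z_ENABLE(1),
		 C_028800_Z_ENABLE);
}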
@@ -956,7 +1094,7 @@ void r100_pll_errata_after_index(struct radeon_device *rdev);
956#define ASIC_IS_AVIVO(rdev) ((rdev->family >= CHIP_RS600)) 1094#define ASIC_IS_AVIVO(rdev) ((rdev->family >= CHIP_RS600))
957#define ASIC_IS_DCE3(rdev) ((rdev->family >= CHIP_RV620)) 1095#define ASIC_IS_DCE3(rdev) ((rdev->family >= CHIP_RV620))
958#define ASIC_IS_DCE32(rdev) ((rdev->family >= CHIP_RV730)) 1096#define ASIC_IS_DCE32(rdev) ((rdev->family >= CHIP_RV730))
959 1097#define ASIC_IS_DCE4(rdev) ((rdev->family >= CHIP_CEDAR))
960 1098
961/* 1099/*
962 * BIOS helpers. 1100 * BIOS helpers.
@@ -1015,6 +1153,7 @@ static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v)
1015#define radeon_set_engine_clock(rdev, e) (rdev)->asic->set_engine_clock((rdev), (e)) 1153#define radeon_set_engine_clock(rdev, e) (rdev)->asic->set_engine_clock((rdev), (e))
1016#define radeon_get_memory_clock(rdev) (rdev)->asic->get_memory_clock((rdev)) 1154#define radeon_get_memory_clock(rdev) (rdev)->asic->get_memory_clock((rdev))
1017#define radeon_set_memory_clock(rdev, e) (rdev)->asic->set_memory_clock((rdev), (e)) 1155#define radeon_set_memory_clock(rdev, e) (rdev)->asic->set_memory_clock((rdev), (e))
1156#define radeon_get_pcie_lanes(rdev) (rdev)->asic->get_pcie_lanes((rdev))
1018#define radeon_set_pcie_lanes(rdev, l) (rdev)->asic->set_pcie_lanes((rdev), (l)) 1157#define radeon_set_pcie_lanes(rdev, l) (rdev)->asic->set_pcie_lanes((rdev), (l))
1019#define radeon_set_clock_gating(rdev, e) (rdev)->asic->set_clock_gating((rdev), (e)) 1158#define radeon_set_clock_gating(rdev, e) (rdev)->asic->set_clock_gating((rdev), (e))
1020#define radeon_set_surface_reg(rdev, r, f, p, o, s) ((rdev)->asic->set_surface_reg((rdev), (r), (f), (p), (o), (s))) 1159#define radeon_set_surface_reg(rdev, r, f, p, o, s) ((rdev)->asic->set_surface_reg((rdev), (r), (f), (p), (o), (s)))
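The new radeon_get_pcie_lanes() wrapper dispatches through the per-ASIC function table like its neighbours. Several ASIC tables below leave the hook NULL, so a caller would guard it; an illustrative, non-patch use:

	int lanes = 0;

	if (rdev->asic->get_pcie_lanes)
		lanes = radeon_get_pcie_lanes(rdev);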
@@ -1029,6 +1168,7 @@ static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v)
1029/* AGP */ 1168/* AGP */
1030extern void radeon_agp_disable(struct radeon_device *rdev); 1169extern void radeon_agp_disable(struct radeon_device *rdev);
1031extern int radeon_gart_table_vram_pin(struct radeon_device *rdev); 1170extern int radeon_gart_table_vram_pin(struct radeon_device *rdev);
1171extern void radeon_gart_restore(struct radeon_device *rdev);
1032extern int radeon_modeset_init(struct radeon_device *rdev); 1172extern int radeon_modeset_init(struct radeon_device *rdev);
1033extern void radeon_modeset_fini(struct radeon_device *rdev); 1173extern void radeon_modeset_fini(struct radeon_device *rdev);
1034extern bool radeon_card_posted(struct radeon_device *rdev); 1174extern bool radeon_card_posted(struct radeon_device *rdev);
@@ -1042,6 +1182,10 @@ extern void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enabl
1042extern void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable); 1182extern void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable);
1043extern void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain); 1183extern void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain);
1044extern bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo); 1184extern bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo);
1185extern void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base);
1186extern void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
1187extern int radeon_resume_kms(struct drm_device *dev);
1188extern int radeon_suspend_kms(struct drm_device *dev, pm_message_t state);
1045 1189
1046/* r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 */ 1190/* r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 */
1047struct r100_mc_save { 1191struct r100_mc_save {
@@ -1096,7 +1240,7 @@ extern void r200_set_safe_registers(struct radeon_device *rdev);
1096/* r300,r350,rv350,rv370,rv380 */ 1240/* r300,r350,rv350,rv370,rv380 */
1097extern void r300_set_reg_safe(struct radeon_device *rdev); 1241extern void r300_set_reg_safe(struct radeon_device *rdev);
1098extern void r300_mc_program(struct radeon_device *rdev); 1242extern void r300_mc_program(struct radeon_device *rdev);
1099extern void r300_vram_info(struct radeon_device *rdev); 1243extern void r300_mc_init(struct radeon_device *rdev);
1100extern void r300_clock_startup(struct radeon_device *rdev); 1244extern void r300_clock_startup(struct radeon_device *rdev);
1101extern int r300_mc_wait_for_idle(struct radeon_device *rdev); 1245extern int r300_mc_wait_for_idle(struct radeon_device *rdev);
1102extern int rv370_pcie_gart_init(struct radeon_device *rdev); 1246extern int rv370_pcie_gart_init(struct radeon_device *rdev);
@@ -1105,7 +1249,6 @@ extern int rv370_pcie_gart_enable(struct radeon_device *rdev);
1105extern void rv370_pcie_gart_disable(struct radeon_device *rdev); 1249extern void rv370_pcie_gart_disable(struct radeon_device *rdev);
1106 1250
1107/* r420,r423,rv410 */ 1251/* r420,r423,rv410 */
1108extern int r420_mc_init(struct radeon_device *rdev);
1109extern u32 r420_mc_rreg(struct radeon_device *rdev, u32 reg); 1252extern u32 r420_mc_rreg(struct radeon_device *rdev, u32 reg);
1110extern void r420_mc_wreg(struct radeon_device *rdev, u32 reg, u32 v); 1253extern void r420_mc_wreg(struct radeon_device *rdev, u32 reg, u32 v);
1111extern int r420_debugfs_pipes_info_init(struct radeon_device *rdev); 1254extern int r420_debugfs_pipes_info_init(struct radeon_device *rdev);
@@ -1147,13 +1290,13 @@ extern void rs690_line_buffer_adjust(struct radeon_device *rdev,
1147 struct drm_display_mode *mode2); 1290 struct drm_display_mode *mode2);
1148 1291
1149/* r600, rv610, rv630, rv620, rv635, rv670, rs780, rs880 */ 1292/* r600, rv610, rv630, rv620, rv635, rv670, rs780, rs880 */
1293extern void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
1150extern bool r600_card_posted(struct radeon_device *rdev); 1294extern bool r600_card_posted(struct radeon_device *rdev);
1151extern void r600_cp_stop(struct radeon_device *rdev); 1295extern void r600_cp_stop(struct radeon_device *rdev);
1152extern void r600_ring_init(struct radeon_device *rdev, unsigned ring_size); 1296extern void r600_ring_init(struct radeon_device *rdev, unsigned ring_size);
1153extern int r600_cp_resume(struct radeon_device *rdev); 1297extern int r600_cp_resume(struct radeon_device *rdev);
1154extern void r600_cp_fini(struct radeon_device *rdev); 1298extern void r600_cp_fini(struct radeon_device *rdev);
1155extern int r600_count_pipe_bits(uint32_t val); 1299extern int r600_count_pipe_bits(uint32_t val);
1156extern int r600_gart_clear_page(struct radeon_device *rdev, int i);
1157extern int r600_mc_wait_for_idle(struct radeon_device *rdev); 1300extern int r600_mc_wait_for_idle(struct radeon_device *rdev);
1158extern int r600_pcie_gart_init(struct radeon_device *rdev); 1301extern int r600_pcie_gart_init(struct radeon_device *rdev);
1159extern void r600_pcie_gart_tlb_flush(struct radeon_device *rdev); 1302extern void r600_pcie_gart_tlb_flush(struct radeon_device *rdev);
@@ -1189,6 +1332,14 @@ extern void r600_hdmi_update_audio_settings(struct drm_encoder *encoder,
1189 uint8_t status_bits, 1332 uint8_t status_bits,
1190 uint8_t category_code); 1333 uint8_t category_code);
1191 1334
1335/* evergreen */
1336struct evergreen_mc_save {
1337 u32 vga_control[6];
1338 u32 vga_render_control;
1339 u32 vga_hdp_control;
1340 u32 crtc_control[6];
1341};
1342
1192#include "radeon_object.h" 1343#include "radeon_object.h"
1193 1344
1194#endif 1345#endif
diff --git a/drivers/gpu/drm/radeon/radeon_agp.c b/drivers/gpu/drm/radeon/radeon_agp.c
index c0681a5556dc..c4457791dff1 100644
--- a/drivers/gpu/drm/radeon/radeon_agp.c
+++ b/drivers/gpu/drm/radeon/radeon_agp.c
@@ -237,6 +237,10 @@ int radeon_agp_init(struct radeon_device *rdev)
237 237
238 rdev->mc.agp_base = rdev->ddev->agp->agp_info.aper_base; 238 rdev->mc.agp_base = rdev->ddev->agp->agp_info.aper_base;
239 rdev->mc.gtt_size = rdev->ddev->agp->agp_info.aper_size << 20; 239 rdev->mc.gtt_size = rdev->ddev->agp->agp_info.aper_size << 20;
240 rdev->mc.gtt_start = rdev->mc.agp_base;
241 rdev->mc.gtt_end = rdev->mc.gtt_start + rdev->mc.gtt_size - 1;
242 dev_info(rdev->dev, "GTT: %lluM 0x%08llX - 0x%08llX\n",
243 rdev->mc.gtt_size >> 20, rdev->mc.gtt_start, rdev->mc.gtt_end);
240 244
241 /* workaround some hw issues */ 245 /* workaround some hw issues */
242 if (rdev->family < CHIP_R200) { 246 if (rdev->family < CHIP_R200) {
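Note that gtt_end computed above is inclusive (gtt_start + gtt_size - 1), so range checks against it use <=. A minimal sketch with a hypothetical helper name:

static inline bool radeon_addr_in_gtt(const struct radeon_mc *mc, u64 addr)
{
	return addr >= mc->gtt_start && addr <= mc->gtt_end;
}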
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index 05ee1aeac3fd..d3a157b2bcb7 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -43,7 +43,7 @@ void radeon_atom_set_memory_clock(struct radeon_device *rdev, uint32_t mem_clock
43void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable); 43void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable);
44 44
45/* 45/*
46 * r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 46 * r100,rv100,rs100,rv200,rs200
47 */ 47 */
48extern int r100_init(struct radeon_device *rdev); 48extern int r100_init(struct radeon_device *rdev);
49extern void r100_fini(struct radeon_device *rdev); 49extern void r100_fini(struct radeon_device *rdev);
@@ -108,6 +108,52 @@ static struct radeon_asic r100_asic = {
108 .set_engine_clock = &radeon_legacy_set_engine_clock, 108 .set_engine_clock = &radeon_legacy_set_engine_clock,
109 .get_memory_clock = &radeon_legacy_get_memory_clock, 109 .get_memory_clock = &radeon_legacy_get_memory_clock,
110 .set_memory_clock = NULL, 110 .set_memory_clock = NULL,
111 .get_pcie_lanes = NULL,
112 .set_pcie_lanes = NULL,
113 .set_clock_gating = &radeon_legacy_set_clock_gating,
114 .set_surface_reg = r100_set_surface_reg,
115 .clear_surface_reg = r100_clear_surface_reg,
116 .bandwidth_update = &r100_bandwidth_update,
117 .hpd_init = &r100_hpd_init,
118 .hpd_fini = &r100_hpd_fini,
119 .hpd_sense = &r100_hpd_sense,
120 .hpd_set_polarity = &r100_hpd_set_polarity,
121 .ioctl_wait_idle = NULL,
122};
123
124/*
125 * r200,rv250,rs300,rv280
126 */
127extern int r200_copy_dma(struct radeon_device *rdev,
128 uint64_t src_offset,
129 uint64_t dst_offset,
130 unsigned num_pages,
131 struct radeon_fence *fence);
132static struct radeon_asic r200_asic = {
133 .init = &r100_init,
134 .fini = &r100_fini,
135 .suspend = &r100_suspend,
136 .resume = &r100_resume,
137 .vga_set_state = &r100_vga_set_state,
138 .gpu_reset = &r100_gpu_reset,
139 .gart_tlb_flush = &r100_pci_gart_tlb_flush,
140 .gart_set_page = &r100_pci_gart_set_page,
141 .cp_commit = &r100_cp_commit,
142 .ring_start = &r100_ring_start,
143 .ring_test = &r100_ring_test,
144 .ring_ib_execute = &r100_ring_ib_execute,
145 .irq_set = &r100_irq_set,
146 .irq_process = &r100_irq_process,
147 .get_vblank_counter = &r100_get_vblank_counter,
148 .fence_ring_emit = &r100_fence_ring_emit,
149 .cs_parse = &r100_cs_parse,
150 .copy_blit = &r100_copy_blit,
151 .copy_dma = &r200_copy_dma,
152 .copy = &r100_copy_blit,
153 .get_engine_clock = &radeon_legacy_get_engine_clock,
154 .set_engine_clock = &radeon_legacy_set_engine_clock,
155 .get_memory_clock = &radeon_legacy_get_memory_clock,
156 .set_memory_clock = NULL,
111 .set_pcie_lanes = NULL, 157 .set_pcie_lanes = NULL,
112 .set_clock_gating = &radeon_legacy_set_clock_gating, 158 .set_clock_gating = &radeon_legacy_set_clock_gating,
113 .set_surface_reg = r100_set_surface_reg, 159 .set_surface_reg = r100_set_surface_reg,
@@ -138,11 +184,8 @@ extern int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t
138extern uint32_t rv370_pcie_rreg(struct radeon_device *rdev, uint32_t reg); 184extern uint32_t rv370_pcie_rreg(struct radeon_device *rdev, uint32_t reg);
139extern void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); 185extern void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
140extern void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes); 186extern void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes);
141extern int r300_copy_dma(struct radeon_device *rdev, 187extern int rv370_get_pcie_lanes(struct radeon_device *rdev);
142 uint64_t src_offset, 188
143 uint64_t dst_offset,
144 unsigned num_pages,
145 struct radeon_fence *fence);
146static struct radeon_asic r300_asic = { 189static struct radeon_asic r300_asic = {
147 .init = &r300_init, 190 .init = &r300_init,
148 .fini = &r300_fini, 191 .fini = &r300_fini,
@@ -162,7 +205,46 @@ static struct radeon_asic r300_asic = {
162 .fence_ring_emit = &r300_fence_ring_emit, 205 .fence_ring_emit = &r300_fence_ring_emit,
163 .cs_parse = &r300_cs_parse, 206 .cs_parse = &r300_cs_parse,
164 .copy_blit = &r100_copy_blit, 207 .copy_blit = &r100_copy_blit,
165 .copy_dma = &r300_copy_dma, 208 .copy_dma = &r200_copy_dma,
209 .copy = &r100_copy_blit,
210 .get_engine_clock = &radeon_legacy_get_engine_clock,
211 .set_engine_clock = &radeon_legacy_set_engine_clock,
212 .get_memory_clock = &radeon_legacy_get_memory_clock,
213 .set_memory_clock = NULL,
214 .get_pcie_lanes = &rv370_get_pcie_lanes,
215 .set_pcie_lanes = &rv370_set_pcie_lanes,
216 .set_clock_gating = &radeon_legacy_set_clock_gating,
217 .set_surface_reg = r100_set_surface_reg,
218 .clear_surface_reg = r100_clear_surface_reg,
219 .bandwidth_update = &r100_bandwidth_update,
220 .hpd_init = &r100_hpd_init,
221 .hpd_fini = &r100_hpd_fini,
222 .hpd_sense = &r100_hpd_sense,
223 .hpd_set_polarity = &r100_hpd_set_polarity,
224 .ioctl_wait_idle = NULL,
225};
226
227
228static struct radeon_asic r300_asic_pcie = {
229 .init = &r300_init,
230 .fini = &r300_fini,
231 .suspend = &r300_suspend,
232 .resume = &r300_resume,
233 .vga_set_state = &r100_vga_set_state,
234 .gpu_reset = &r300_gpu_reset,
235 .gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
236 .gart_set_page = &rv370_pcie_gart_set_page,
237 .cp_commit = &r100_cp_commit,
238 .ring_start = &r300_ring_start,
239 .ring_test = &r100_ring_test,
240 .ring_ib_execute = &r100_ring_ib_execute,
241 .irq_set = &r100_irq_set,
242 .irq_process = &r100_irq_process,
243 .get_vblank_counter = &r100_get_vblank_counter,
244 .fence_ring_emit = &r300_fence_ring_emit,
245 .cs_parse = &r300_cs_parse,
246 .copy_blit = &r100_copy_blit,
247 .copy_dma = &r200_copy_dma,
166 .copy = &r100_copy_blit, 248 .copy = &r100_copy_blit,
167 .get_engine_clock = &radeon_legacy_get_engine_clock, 249 .get_engine_clock = &radeon_legacy_get_engine_clock,
168 .set_engine_clock = &radeon_legacy_set_engine_clock, 250 .set_engine_clock = &radeon_legacy_set_engine_clock,
@@ -206,12 +288,13 @@ static struct radeon_asic r420_asic = {
206 .fence_ring_emit = &r300_fence_ring_emit, 288 .fence_ring_emit = &r300_fence_ring_emit,
207 .cs_parse = &r300_cs_parse, 289 .cs_parse = &r300_cs_parse,
208 .copy_blit = &r100_copy_blit, 290 .copy_blit = &r100_copy_blit,
209 .copy_dma = &r300_copy_dma, 291 .copy_dma = &r200_copy_dma,
210 .copy = &r100_copy_blit, 292 .copy = &r100_copy_blit,
211 .get_engine_clock = &radeon_atom_get_engine_clock, 293 .get_engine_clock = &radeon_atom_get_engine_clock,
212 .set_engine_clock = &radeon_atom_set_engine_clock, 294 .set_engine_clock = &radeon_atom_set_engine_clock,
213 .get_memory_clock = &radeon_atom_get_memory_clock, 295 .get_memory_clock = &radeon_atom_get_memory_clock,
214 .set_memory_clock = &radeon_atom_set_memory_clock, 296 .set_memory_clock = &radeon_atom_set_memory_clock,
297 .get_pcie_lanes = &rv370_get_pcie_lanes,
215 .set_pcie_lanes = &rv370_set_pcie_lanes, 298 .set_pcie_lanes = &rv370_set_pcie_lanes,
216 .set_clock_gating = &radeon_atom_set_clock_gating, 299 .set_clock_gating = &radeon_atom_set_clock_gating,
217 .set_surface_reg = r100_set_surface_reg, 300 .set_surface_reg = r100_set_surface_reg,
@@ -255,12 +338,13 @@ static struct radeon_asic rs400_asic = {
255 .fence_ring_emit = &r300_fence_ring_emit, 338 .fence_ring_emit = &r300_fence_ring_emit,
256 .cs_parse = &r300_cs_parse, 339 .cs_parse = &r300_cs_parse,
257 .copy_blit = &r100_copy_blit, 340 .copy_blit = &r100_copy_blit,
258 .copy_dma = &r300_copy_dma, 341 .copy_dma = &r200_copy_dma,
259 .copy = &r100_copy_blit, 342 .copy = &r100_copy_blit,
260 .get_engine_clock = &radeon_legacy_get_engine_clock, 343 .get_engine_clock = &radeon_legacy_get_engine_clock,
261 .set_engine_clock = &radeon_legacy_set_engine_clock, 344 .set_engine_clock = &radeon_legacy_set_engine_clock,
262 .get_memory_clock = &radeon_legacy_get_memory_clock, 345 .get_memory_clock = &radeon_legacy_get_memory_clock,
263 .set_memory_clock = NULL, 346 .set_memory_clock = NULL,
347 .get_pcie_lanes = NULL,
264 .set_pcie_lanes = NULL, 348 .set_pcie_lanes = NULL,
265 .set_clock_gating = &radeon_legacy_set_clock_gating, 349 .set_clock_gating = &radeon_legacy_set_clock_gating,
266 .set_surface_reg = r100_set_surface_reg, 350 .set_surface_reg = r100_set_surface_reg,
@@ -314,14 +398,17 @@ static struct radeon_asic rs600_asic = {
314 .fence_ring_emit = &r300_fence_ring_emit, 398 .fence_ring_emit = &r300_fence_ring_emit,
315 .cs_parse = &r300_cs_parse, 399 .cs_parse = &r300_cs_parse,
316 .copy_blit = &r100_copy_blit, 400 .copy_blit = &r100_copy_blit,
317 .copy_dma = &r300_copy_dma, 401 .copy_dma = &r200_copy_dma,
318 .copy = &r100_copy_blit, 402 .copy = &r100_copy_blit,
319 .get_engine_clock = &radeon_atom_get_engine_clock, 403 .get_engine_clock = &radeon_atom_get_engine_clock,
320 .set_engine_clock = &radeon_atom_set_engine_clock, 404 .set_engine_clock = &radeon_atom_set_engine_clock,
321 .get_memory_clock = &radeon_atom_get_memory_clock, 405 .get_memory_clock = &radeon_atom_get_memory_clock,
322 .set_memory_clock = &radeon_atom_set_memory_clock, 406 .set_memory_clock = &radeon_atom_set_memory_clock,
407 .get_pcie_lanes = NULL,
323 .set_pcie_lanes = NULL, 408 .set_pcie_lanes = NULL,
324 .set_clock_gating = &radeon_atom_set_clock_gating, 409 .set_clock_gating = &radeon_atom_set_clock_gating,
410 .set_surface_reg = r100_set_surface_reg,
411 .clear_surface_reg = r100_clear_surface_reg,
325 .bandwidth_update = &rs600_bandwidth_update, 412 .bandwidth_update = &rs600_bandwidth_update,
326 .hpd_init = &rs600_hpd_init, 413 .hpd_init = &rs600_hpd_init,
327 .hpd_fini = &rs600_hpd_fini, 414 .hpd_fini = &rs600_hpd_fini,
@@ -360,12 +447,13 @@ static struct radeon_asic rs690_asic = {
360 .fence_ring_emit = &r300_fence_ring_emit, 447 .fence_ring_emit = &r300_fence_ring_emit,
361 .cs_parse = &r300_cs_parse, 448 .cs_parse = &r300_cs_parse,
362 .copy_blit = &r100_copy_blit, 449 .copy_blit = &r100_copy_blit,
363 .copy_dma = &r300_copy_dma, 450 .copy_dma = &r200_copy_dma,
364 .copy = &r300_copy_dma, 451 .copy = &r200_copy_dma,
365 .get_engine_clock = &radeon_atom_get_engine_clock, 452 .get_engine_clock = &radeon_atom_get_engine_clock,
366 .set_engine_clock = &radeon_atom_set_engine_clock, 453 .set_engine_clock = &radeon_atom_set_engine_clock,
367 .get_memory_clock = &radeon_atom_get_memory_clock, 454 .get_memory_clock = &radeon_atom_get_memory_clock,
368 .set_memory_clock = &radeon_atom_set_memory_clock, 455 .set_memory_clock = &radeon_atom_set_memory_clock,
456 .get_pcie_lanes = NULL,
369 .set_pcie_lanes = NULL, 457 .set_pcie_lanes = NULL,
370 .set_clock_gating = &radeon_atom_set_clock_gating, 458 .set_clock_gating = &radeon_atom_set_clock_gating,
371 .set_surface_reg = r100_set_surface_reg, 459 .set_surface_reg = r100_set_surface_reg,
@@ -412,12 +500,13 @@ static struct radeon_asic rv515_asic = {
412 .fence_ring_emit = &r300_fence_ring_emit, 500 .fence_ring_emit = &r300_fence_ring_emit,
413 .cs_parse = &r300_cs_parse, 501 .cs_parse = &r300_cs_parse,
414 .copy_blit = &r100_copy_blit, 502 .copy_blit = &r100_copy_blit,
415 .copy_dma = &r300_copy_dma, 503 .copy_dma = &r200_copy_dma,
416 .copy = &r100_copy_blit, 504 .copy = &r100_copy_blit,
417 .get_engine_clock = &radeon_atom_get_engine_clock, 505 .get_engine_clock = &radeon_atom_get_engine_clock,
418 .set_engine_clock = &radeon_atom_set_engine_clock, 506 .set_engine_clock = &radeon_atom_set_engine_clock,
419 .get_memory_clock = &radeon_atom_get_memory_clock, 507 .get_memory_clock = &radeon_atom_get_memory_clock,
420 .set_memory_clock = &radeon_atom_set_memory_clock, 508 .set_memory_clock = &radeon_atom_set_memory_clock,
509 .get_pcie_lanes = &rv370_get_pcie_lanes,
421 .set_pcie_lanes = &rv370_set_pcie_lanes, 510 .set_pcie_lanes = &rv370_set_pcie_lanes,
422 .set_clock_gating = &radeon_atom_set_clock_gating, 511 .set_clock_gating = &radeon_atom_set_clock_gating,
423 .set_surface_reg = r100_set_surface_reg, 512 .set_surface_reg = r100_set_surface_reg,
@@ -455,12 +544,13 @@ static struct radeon_asic r520_asic = {
455 .fence_ring_emit = &r300_fence_ring_emit, 544 .fence_ring_emit = &r300_fence_ring_emit,
456 .cs_parse = &r300_cs_parse, 545 .cs_parse = &r300_cs_parse,
457 .copy_blit = &r100_copy_blit, 546 .copy_blit = &r100_copy_blit,
458 .copy_dma = &r300_copy_dma, 547 .copy_dma = &r200_copy_dma,
459 .copy = &r100_copy_blit, 548 .copy = &r100_copy_blit,
460 .get_engine_clock = &radeon_atom_get_engine_clock, 549 .get_engine_clock = &radeon_atom_get_engine_clock,
461 .set_engine_clock = &radeon_atom_set_engine_clock, 550 .set_engine_clock = &radeon_atom_set_engine_clock,
462 .get_memory_clock = &radeon_atom_get_memory_clock, 551 .get_memory_clock = &radeon_atom_get_memory_clock,
463 .set_memory_clock = &radeon_atom_set_memory_clock, 552 .set_memory_clock = &radeon_atom_set_memory_clock,
553 .get_pcie_lanes = &rv370_get_pcie_lanes,
464 .set_pcie_lanes = &rv370_set_pcie_lanes, 554 .set_pcie_lanes = &rv370_set_pcie_lanes,
465 .set_clock_gating = &radeon_atom_set_clock_gating, 555 .set_clock_gating = &radeon_atom_set_clock_gating,
466 .set_surface_reg = r100_set_surface_reg, 556 .set_surface_reg = r100_set_surface_reg,
@@ -538,8 +628,9 @@ static struct radeon_asic r600_asic = {
538 .set_engine_clock = &radeon_atom_set_engine_clock, 628 .set_engine_clock = &radeon_atom_set_engine_clock,
539 .get_memory_clock = &radeon_atom_get_memory_clock, 629 .get_memory_clock = &radeon_atom_get_memory_clock,
540 .set_memory_clock = &radeon_atom_set_memory_clock, 630 .set_memory_clock = &radeon_atom_set_memory_clock,
631 .get_pcie_lanes = &rv370_get_pcie_lanes,
541 .set_pcie_lanes = NULL, 632 .set_pcie_lanes = NULL,
542 .set_clock_gating = &radeon_atom_set_clock_gating, 633 .set_clock_gating = NULL,
543 .set_surface_reg = r600_set_surface_reg, 634 .set_surface_reg = r600_set_surface_reg,
544 .clear_surface_reg = r600_clear_surface_reg, 635 .clear_surface_reg = r600_clear_surface_reg,
545 .bandwidth_update = &rv515_bandwidth_update, 636 .bandwidth_update = &rv515_bandwidth_update,
@@ -583,6 +674,7 @@ static struct radeon_asic rv770_asic = {
583 .set_engine_clock = &radeon_atom_set_engine_clock, 674 .set_engine_clock = &radeon_atom_set_engine_clock,
584 .get_memory_clock = &radeon_atom_get_memory_clock, 675 .get_memory_clock = &radeon_atom_get_memory_clock,
585 .set_memory_clock = &radeon_atom_set_memory_clock, 676 .set_memory_clock = &radeon_atom_set_memory_clock,
677 .get_pcie_lanes = &rv370_get_pcie_lanes,
586 .set_pcie_lanes = NULL, 678 .set_pcie_lanes = NULL,
587 .set_clock_gating = &radeon_atom_set_clock_gating, 679 .set_clock_gating = &radeon_atom_set_clock_gating,
588 .set_surface_reg = r600_set_surface_reg, 680 .set_surface_reg = r600_set_surface_reg,
@@ -595,4 +687,54 @@ static struct radeon_asic rv770_asic = {
595 .ioctl_wait_idle = r600_ioctl_wait_idle, 687 .ioctl_wait_idle = r600_ioctl_wait_idle,
596}; 688};
597 689
690/*
691 * evergreen
692 */
693int evergreen_init(struct radeon_device *rdev);
694void evergreen_fini(struct radeon_device *rdev);
695int evergreen_suspend(struct radeon_device *rdev);
696int evergreen_resume(struct radeon_device *rdev);
697int evergreen_gpu_reset(struct radeon_device *rdev);
698void evergreen_bandwidth_update(struct radeon_device *rdev);
699void evergreen_hpd_init(struct radeon_device *rdev);
700void evergreen_hpd_fini(struct radeon_device *rdev);
701bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
702void evergreen_hpd_set_polarity(struct radeon_device *rdev,
703 enum radeon_hpd_id hpd);
704
705static struct radeon_asic evergreen_asic = {
706 .init = &evergreen_init,
707 .fini = &evergreen_fini,
708 .suspend = &evergreen_suspend,
709 .resume = &evergreen_resume,
710 .cp_commit = NULL,
711 .gpu_reset = &evergreen_gpu_reset,
712 .vga_set_state = &r600_vga_set_state,
713 .gart_tlb_flush = &r600_pcie_gart_tlb_flush,
714 .gart_set_page = &rs600_gart_set_page,
715 .ring_test = NULL,
716 .ring_ib_execute = NULL,
717 .irq_set = NULL,
718 .irq_process = NULL,
719 .get_vblank_counter = NULL,
720 .fence_ring_emit = NULL,
721 .cs_parse = NULL,
722 .copy_blit = NULL,
723 .copy_dma = NULL,
724 .copy = NULL,
725 .get_engine_clock = &radeon_atom_get_engine_clock,
726 .set_engine_clock = &radeon_atom_set_engine_clock,
727 .get_memory_clock = &radeon_atom_get_memory_clock,
728 .set_memory_clock = &radeon_atom_set_memory_clock,
729 .set_pcie_lanes = NULL,
730 .set_clock_gating = NULL,
731 .set_surface_reg = r600_set_surface_reg,
732 .clear_surface_reg = r600_clear_surface_reg,
733 .bandwidth_update = &evergreen_bandwidth_update,
734 .hpd_init = &evergreen_hpd_init,
735 .hpd_fini = &evergreen_hpd_fini,
736 .hpd_sense = &evergreen_hpd_sense,
737 .hpd_set_polarity = &evergreen_hpd_set_polarity,
738};
739
598#endif 740#endif
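
The asic structs above are plain ops tables: each chip family fills in per-chip function pointers, and a NULL slot means that hook is unsupported on the part (as in the evergreen table, where most ring and copy hooks are still NULL). Below is a minimal standalone sketch of that dispatch pattern; every demo_* name is a hypothetical stand-in, not a driver symbol.

/* Minimal userspace sketch of the ops-table dispatch pattern used by
 * struct radeon_asic above. Only the technique (per-chip function
 * pointers, NULL meaning "unsupported") mirrors the driver. */
#include <stdio.h>
#include <stddef.h>

struct demo_device;

struct demo_asic {
	int (*copy)(struct demo_device *dev, int n);	/* NULL if unsupported */
	int (*get_pcie_lanes)(struct demo_device *dev);
};

struct demo_device {
	const struct demo_asic *asic;
};

static int demo_copy_blit(struct demo_device *dev, int n)
{
	(void)dev;
	printf("blitting %d pages\n", n);
	return 0;
}

static const struct demo_asic demo_r100_asic = {
	.copy = demo_copy_blit,
	.get_pcie_lanes = NULL,	/* this chip cannot report a lane count */
};

/* Callers test the pointer before dispatching, as the power-mode code
 * further below does with rdev->asic->get_pcie_lanes. */
static int demo_get_pcie_lanes(struct demo_device *dev)
{
	if (dev->asic->get_pcie_lanes)
		return dev->asic->get_pcie_lanes(dev);
	return 16;	/* safe default */
}

int main(void)
{
	struct demo_device dev = { .asic = &demo_r100_asic };

	dev.asic->copy(&dev, 4);
	printf("lanes: %d\n", demo_get_pcie_lanes(&dev));
	return 0;
}

Keeping NULL as "unsupported" lets generic code probe for a capability with a single pointer test, which is exactly how the power-mode code later in this patch falls back to 16 lanes when get_pcie_lanes is absent.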
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 4d8831548a5f..93783b15c81d 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -159,8 +159,15 @@ static struct radeon_hpd radeon_atom_get_hpd_info_from_gpio(struct radeon_device
159 struct radeon_gpio_rec *gpio) 159 struct radeon_gpio_rec *gpio)
160{ 160{
161 struct radeon_hpd hpd; 161 struct radeon_hpd hpd;
162 u32 reg;
163
164 if (ASIC_IS_DCE4(rdev))
165 reg = EVERGREEN_DC_GPIO_HPD_A;
166 else
167 reg = AVIVO_DC_GPIO_HPD_A;
168
162 hpd.gpio = *gpio; 169 hpd.gpio = *gpio;
163 if (gpio->reg == AVIVO_DC_GPIO_HPD_A) { 170 if (gpio->reg == reg) {
164 switch(gpio->mask) { 171 switch(gpio->mask) {
165 case (1 << 0): 172 case (1 << 0):
166 hpd.hpd = RADEON_HPD_1; 173 hpd.hpd = RADEON_HPD_1;
@@ -574,6 +581,9 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
574 ddc_bus.valid = false; 581 ddc_bus.valid = false;
575 } 582 }
576 583
584 /* needed for aux chan transactions */
585 ddc_bus.hpd_id = hpd.hpd ? (hpd.hpd - 1) : 0;
586
577 conn_id = le16_to_cpu(path->usConnObjectId); 587 conn_id = le16_to_cpu(path->usConnObjectId);
578 588
579 if (!radeon_atom_apply_quirks 589 if (!radeon_atom_apply_quirks
@@ -838,6 +848,7 @@ union firmware_info {
838 ATOM_FIRMWARE_INFO_V1_2 info_12; 848 ATOM_FIRMWARE_INFO_V1_2 info_12;
839 ATOM_FIRMWARE_INFO_V1_3 info_13; 849 ATOM_FIRMWARE_INFO_V1_3 info_13;
840 ATOM_FIRMWARE_INFO_V1_4 info_14; 850 ATOM_FIRMWARE_INFO_V1_4 info_14;
851 ATOM_FIRMWARE_INFO_V2_1 info_21;
841}; 852};
842 853
843bool radeon_atom_get_clock_info(struct drm_device *dev) 854bool radeon_atom_get_clock_info(struct drm_device *dev)
@@ -849,6 +860,7 @@ bool radeon_atom_get_clock_info(struct drm_device *dev)
849 uint8_t frev, crev; 860 uint8_t frev, crev;
850 struct radeon_pll *p1pll = &rdev->clock.p1pll; 861 struct radeon_pll *p1pll = &rdev->clock.p1pll;
851 struct radeon_pll *p2pll = &rdev->clock.p2pll; 862 struct radeon_pll *p2pll = &rdev->clock.p2pll;
863 struct radeon_pll *dcpll = &rdev->clock.dcpll;
852 struct radeon_pll *spll = &rdev->clock.spll; 864 struct radeon_pll *spll = &rdev->clock.spll;
853 struct radeon_pll *mpll = &rdev->clock.mpll; 865 struct radeon_pll *mpll = &rdev->clock.mpll;
854 uint16_t data_offset; 866 uint16_t data_offset;
@@ -951,8 +963,19 @@ bool radeon_atom_get_clock_info(struct drm_device *dev)
951 rdev->clock.default_mclk = 963 rdev->clock.default_mclk =
952 le32_to_cpu(firmware_info->info.ulDefaultMemoryClock); 964 le32_to_cpu(firmware_info->info.ulDefaultMemoryClock);
953 965
966 if (ASIC_IS_DCE4(rdev)) {
967 rdev->clock.default_dispclk =
968 le32_to_cpu(firmware_info->info_21.ulDefaultDispEngineClkFreq);
969 if (rdev->clock.default_dispclk == 0)
970					rdev->clock.default_dispclk = 60000; /* 600 MHz */
971 rdev->clock.dp_extclk =
972 le16_to_cpu(firmware_info->info_21.usUniphyDPModeExtClkFreq);
973 }
974 *dcpll = *p1pll;
975
954 return true; 976 return true;
955 } 977 }
978
956 return false; 979 return false;
957} 980}
958 981
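
Given the fallback above (60000 paired with a "600 MHz" comment), the display clock is evidently stored in units of 10 kHz. A tiny sketch of that unit conversion, under that assumption; the helper name is hypothetical:

/* Sketch only: per the "60000 == 600 MHz" fallback above, display-clock
 * values appear to be kept in 10 kHz units. */
#include <stdio.h>
#include <stdint.h>

static uint32_t dispclk_10khz_to_mhz(uint32_t dispclk)
{
	return dispclk / 100;	/* 10 kHz units -> MHz */
}

int main(void)
{
	printf("%u MHz\n", dispclk_10khz_to_mhz(60000));	/* prints 600 */
	return 0;
}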
@@ -1091,6 +1114,30 @@ static struct radeon_atom_ss *radeon_atombios_get_ss_info(struct
1091 return ss; 1114 return ss;
1092} 1115}
1093 1116
1117static void radeon_atom_apply_lvds_quirks(struct drm_device *dev,
1118 struct radeon_encoder_atom_dig *lvds)
1119{
1120
1121 /* Toshiba A300-1BU laptop panel doesn't like new pll divider algo */
1122 if ((dev->pdev->device == 0x95c4) &&
1123 (dev->pdev->subsystem_vendor == 0x1179) &&
1124 (dev->pdev->subsystem_device == 0xff50)) {
1125 if ((lvds->native_mode.hdisplay == 1280) &&
1126 (lvds->native_mode.vdisplay == 800))
1127 lvds->pll_algo = PLL_ALGO_LEGACY;
1128 }
1129
1130 /* Dell Studio 15 laptop panel doesn't like new pll divider algo */
1131 if ((dev->pdev->device == 0x95c4) &&
1132 (dev->pdev->subsystem_vendor == 0x1028) &&
1133 (dev->pdev->subsystem_device == 0x029f)) {
1134 if ((lvds->native_mode.hdisplay == 1280) &&
1135 (lvds->native_mode.vdisplay == 800))
1136 lvds->pll_algo = PLL_ALGO_LEGACY;
1137 }
1138
1139}
1140
1094union lvds_info { 1141union lvds_info {
1095 struct _ATOM_LVDS_INFO info; 1142 struct _ATOM_LVDS_INFO info;
1096 struct _ATOM_LVDS_INFO_V12 info_12; 1143 struct _ATOM_LVDS_INFO_V12 info_12;
@@ -1161,6 +1208,21 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct
1161 1208
1162 lvds->ss = radeon_atombios_get_ss_info(encoder, lvds_info->info.ucSS_Id); 1209 lvds->ss = radeon_atombios_get_ss_info(encoder, lvds_info->info.ucSS_Id);
1163 1210
1211 if (ASIC_IS_AVIVO(rdev)) {
1212 if (radeon_new_pll == 0)
1213 lvds->pll_algo = PLL_ALGO_LEGACY;
1214 else
1215 lvds->pll_algo = PLL_ALGO_NEW;
1216 } else {
1217 if (radeon_new_pll == 1)
1218 lvds->pll_algo = PLL_ALGO_NEW;
1219 else
1220 lvds->pll_algo = PLL_ALGO_LEGACY;
1221 }
1222
1223 /* LVDS quirks */
1224 radeon_atom_apply_lvds_quirks(dev, lvds);
1225
1164 encoder->native_mode = lvds->native_mode; 1226 encoder->native_mode = lvds->native_mode;
1165 } 1227 }
1166 return lvds; 1228 return lvds;
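
The branch above reduces to a simple rule: AVIVO parts default to the new PLL divider algorithm and older parts to the legacy one, with the radeon_new_pll module parameter overriding either default (0 forces legacy, 1 forces new). A compact standalone restatement, with stand-in enum values rather than the driver's definitions:

/* Restatement of the pll_algo selection above; PLL_ALGO_* here are
 * stand-ins. radeon_new_pll acts as a module-parameter override. */
#include <stdbool.h>
#include <stdio.h>

enum pll_algo { PLL_ALGO_LEGACY, PLL_ALGO_NEW };

static enum pll_algo pick_pll_algo(bool is_avivo, int radeon_new_pll)
{
	if (is_avivo)
		return (radeon_new_pll == 0) ? PLL_ALGO_LEGACY : PLL_ALGO_NEW;
	return (radeon_new_pll == 1) ? PLL_ALGO_NEW : PLL_ALGO_LEGACY;
}

int main(void)
{
	/* with the parameter left at a non-forcing value, AVIVO gets NEW */
	printf("%d\n", pick_pll_algo(true, -1));	/* prints 1 (PLL_ALGO_NEW) */
	return 0;
}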
@@ -1385,20 +1447,375 @@ radeon_atombios_get_tv_dac_info(struct radeon_encoder *encoder)
1385 return tv_dac; 1447 return tv_dac;
1386} 1448}
1387 1449
1388void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable) 1450union power_info {
1451 struct _ATOM_POWERPLAY_INFO info;
1452 struct _ATOM_POWERPLAY_INFO_V2 info_2;
1453 struct _ATOM_POWERPLAY_INFO_V3 info_3;
1454 struct _ATOM_PPLIB_POWERPLAYTABLE info_4;
1455};
1456
1457void radeon_atombios_get_power_modes(struct radeon_device *rdev)
1389{ 1458{
1390 DYNAMIC_CLOCK_GATING_PS_ALLOCATION args; 1459 struct radeon_mode_info *mode_info = &rdev->mode_info;
1391 int index = GetIndexIntoMasterTable(COMMAND, DynamicClockGating); 1460 int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
1461 u16 data_offset;
1462 u8 frev, crev;
1463 u32 misc, misc2 = 0, sclk, mclk;
1464 union power_info *power_info;
1465 struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
1466 struct _ATOM_PPLIB_STATE *power_state;
1467 int num_modes = 0, i, j;
1468 int state_index = 0, mode_index = 0;
1392 1469
1393 args.ucEnable = enable; 1470 atom_parse_data_header(mode_info->atom_context, index, NULL, &frev, &crev, &data_offset);
1394 1471
1395 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); 1472 power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
1473
1474 rdev->pm.default_power_state = NULL;
1475
1476 if (power_info) {
1477 if (frev < 4) {
1478 num_modes = power_info->info.ucNumOfPowerModeEntries;
1479 if (num_modes > ATOM_MAX_NUMBEROF_POWER_BLOCK)
1480 num_modes = ATOM_MAX_NUMBEROF_POWER_BLOCK;
1481 for (i = 0; i < num_modes; i++) {
1482 rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
1483 switch (frev) {
1484 case 1:
1485 rdev->pm.power_state[state_index].num_clock_modes = 1;
1486 rdev->pm.power_state[state_index].clock_info[0].mclk =
1487 le16_to_cpu(power_info->info.asPowerPlayInfo[i].usMemoryClock);
1488 rdev->pm.power_state[state_index].clock_info[0].sclk =
1489 le16_to_cpu(power_info->info.asPowerPlayInfo[i].usEngineClock);
1490 /* skip invalid modes */
1491 if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) ||
1492 (rdev->pm.power_state[state_index].clock_info[0].sclk == 0))
1493 continue;
1494 /* skip overclock modes for now */
1495 if ((rdev->pm.power_state[state_index].clock_info[0].mclk >
1496 rdev->clock.default_mclk + RADEON_MODE_OVERCLOCK_MARGIN) ||
1497 (rdev->pm.power_state[state_index].clock_info[0].sclk >
1498 rdev->clock.default_sclk + RADEON_MODE_OVERCLOCK_MARGIN))
1499 continue;
1500 rdev->pm.power_state[state_index].non_clock_info.pcie_lanes =
1501 power_info->info.asPowerPlayInfo[i].ucNumPciELanes;
1502 misc = le32_to_cpu(power_info->info.asPowerPlayInfo[i].ulMiscInfo);
1503 if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) {
1504 rdev->pm.power_state[state_index].clock_info[0].voltage.type =
1505 VOLTAGE_GPIO;
1506 rdev->pm.power_state[state_index].clock_info[0].voltage.gpio =
1507 radeon_lookup_gpio(rdev,
1508 power_info->info.asPowerPlayInfo[i].ucVoltageDropIndex);
1509 if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)
1510 rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
1511 true;
1512 else
1513 rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
1514 false;
1515 } else if (misc & ATOM_PM_MISCINFO_PROGRAM_VOLTAGE) {
1516 rdev->pm.power_state[state_index].clock_info[0].voltage.type =
1517 VOLTAGE_VDDC;
1518 rdev->pm.power_state[state_index].clock_info[0].voltage.vddc_id =
1519 power_info->info.asPowerPlayInfo[i].ucVoltageDropIndex;
1520 }
1521 /* order matters! */
1522 if (misc & ATOM_PM_MISCINFO_POWER_SAVING_MODE)
1523 rdev->pm.power_state[state_index].type =
1524 POWER_STATE_TYPE_POWERSAVE;
1525 if (misc & ATOM_PM_MISCINFO_DEFAULT_DC_STATE_ENTRY_TRUE)
1526 rdev->pm.power_state[state_index].type =
1527 POWER_STATE_TYPE_BATTERY;
1528 if (misc & ATOM_PM_MISCINFO_DEFAULT_LOW_DC_STATE_ENTRY_TRUE)
1529 rdev->pm.power_state[state_index].type =
1530 POWER_STATE_TYPE_BATTERY;
1531 if (misc & ATOM_PM_MISCINFO_LOAD_BALANCE_EN)
1532 rdev->pm.power_state[state_index].type =
1533 POWER_STATE_TYPE_BALANCED;
1534 if (misc & ATOM_PM_MISCINFO_3D_ACCELERATION_EN)
1535 rdev->pm.power_state[state_index].type =
1536 POWER_STATE_TYPE_PERFORMANCE;
1537 if (misc & ATOM_PM_MISCINFO_DRIVER_DEFAULT_MODE) {
1538 rdev->pm.power_state[state_index].type =
1539 POWER_STATE_TYPE_DEFAULT;
1540 rdev->pm.default_power_state = &rdev->pm.power_state[state_index];
1541 rdev->pm.power_state[state_index].default_clock_mode =
1542 &rdev->pm.power_state[state_index].clock_info[0];
1543 }
1544 state_index++;
1545 break;
1546 case 2:
1547 rdev->pm.power_state[state_index].num_clock_modes = 1;
1548 rdev->pm.power_state[state_index].clock_info[0].mclk =
1549 le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMemoryClock);
1550 rdev->pm.power_state[state_index].clock_info[0].sclk =
1551 le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulEngineClock);
1552 /* skip invalid modes */
1553 if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) ||
1554 (rdev->pm.power_state[state_index].clock_info[0].sclk == 0))
1555 continue;
1556 /* skip overclock modes for now */
1557 if ((rdev->pm.power_state[state_index].clock_info[0].mclk >
1558 rdev->clock.default_mclk + RADEON_MODE_OVERCLOCK_MARGIN) ||
1559 (rdev->pm.power_state[state_index].clock_info[0].sclk >
1560 rdev->clock.default_sclk + RADEON_MODE_OVERCLOCK_MARGIN))
1561 continue;
1562 rdev->pm.power_state[state_index].non_clock_info.pcie_lanes =
1563 power_info->info_2.asPowerPlayInfo[i].ucNumPciELanes;
1564 misc = le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMiscInfo);
1565 misc2 = le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMiscInfo2);
1566 if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) {
1567 rdev->pm.power_state[state_index].clock_info[0].voltage.type =
1568 VOLTAGE_GPIO;
1569 rdev->pm.power_state[state_index].clock_info[0].voltage.gpio =
1570 radeon_lookup_gpio(rdev,
1571 power_info->info_2.asPowerPlayInfo[i].ucVoltageDropIndex);
1572 if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)
1573 rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
1574 true;
1575 else
1576 rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
1577 false;
1578 } else if (misc & ATOM_PM_MISCINFO_PROGRAM_VOLTAGE) {
1579 rdev->pm.power_state[state_index].clock_info[0].voltage.type =
1580 VOLTAGE_VDDC;
1581 rdev->pm.power_state[state_index].clock_info[0].voltage.vddc_id =
1582 power_info->info_2.asPowerPlayInfo[i].ucVoltageDropIndex;
1583 }
1584 /* order matters! */
1585 if (misc & ATOM_PM_MISCINFO_POWER_SAVING_MODE)
1586 rdev->pm.power_state[state_index].type =
1587 POWER_STATE_TYPE_POWERSAVE;
1588 if (misc & ATOM_PM_MISCINFO_DEFAULT_DC_STATE_ENTRY_TRUE)
1589 rdev->pm.power_state[state_index].type =
1590 POWER_STATE_TYPE_BATTERY;
1591 if (misc & ATOM_PM_MISCINFO_DEFAULT_LOW_DC_STATE_ENTRY_TRUE)
1592 rdev->pm.power_state[state_index].type =
1593 POWER_STATE_TYPE_BATTERY;
1594 if (misc & ATOM_PM_MISCINFO_LOAD_BALANCE_EN)
1595 rdev->pm.power_state[state_index].type =
1596 POWER_STATE_TYPE_BALANCED;
1597 if (misc & ATOM_PM_MISCINFO_3D_ACCELERATION_EN)
1598 rdev->pm.power_state[state_index].type =
1599 POWER_STATE_TYPE_PERFORMANCE;
1600 if (misc2 & ATOM_PM_MISCINFO2_SYSTEM_AC_LITE_MODE)
1601 rdev->pm.power_state[state_index].type =
1602 POWER_STATE_TYPE_BALANCED;
1603 if (misc & ATOM_PM_MISCINFO_DRIVER_DEFAULT_MODE) {
1604 rdev->pm.power_state[state_index].type =
1605 POWER_STATE_TYPE_DEFAULT;
1606 rdev->pm.default_power_state = &rdev->pm.power_state[state_index];
1607 rdev->pm.power_state[state_index].default_clock_mode =
1608 &rdev->pm.power_state[state_index].clock_info[0];
1609 }
1610 state_index++;
1611 break;
1612 case 3:
1613 rdev->pm.power_state[state_index].num_clock_modes = 1;
1614 rdev->pm.power_state[state_index].clock_info[0].mclk =
1615 le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMemoryClock);
1616 rdev->pm.power_state[state_index].clock_info[0].sclk =
1617 le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulEngineClock);
1618 /* skip invalid modes */
1619 if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) ||
1620 (rdev->pm.power_state[state_index].clock_info[0].sclk == 0))
1621 continue;
1622 /* skip overclock modes for now */
1623 if ((rdev->pm.power_state[state_index].clock_info[0].mclk >
1624 rdev->clock.default_mclk + RADEON_MODE_OVERCLOCK_MARGIN) ||
1625 (rdev->pm.power_state[state_index].clock_info[0].sclk >
1626 rdev->clock.default_sclk + RADEON_MODE_OVERCLOCK_MARGIN))
1627 continue;
1628 rdev->pm.power_state[state_index].non_clock_info.pcie_lanes =
1629 power_info->info_3.asPowerPlayInfo[i].ucNumPciELanes;
1630 misc = le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMiscInfo);
1631 misc2 = le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMiscInfo2);
1632 if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) {
1633 rdev->pm.power_state[state_index].clock_info[0].voltage.type =
1634 VOLTAGE_GPIO;
1635 rdev->pm.power_state[state_index].clock_info[0].voltage.gpio =
1636 radeon_lookup_gpio(rdev,
1637 power_info->info_3.asPowerPlayInfo[i].ucVoltageDropIndex);
1638 if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)
1639 rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
1640 true;
1641 else
1642 rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
1643 false;
1644 } else if (misc & ATOM_PM_MISCINFO_PROGRAM_VOLTAGE) {
1645 rdev->pm.power_state[state_index].clock_info[0].voltage.type =
1646 VOLTAGE_VDDC;
1647 rdev->pm.power_state[state_index].clock_info[0].voltage.vddc_id =
1648 power_info->info_3.asPowerPlayInfo[i].ucVoltageDropIndex;
1649 if (misc2 & ATOM_PM_MISCINFO2_VDDCI_DYNAMIC_VOLTAGE_EN) {
1650 rdev->pm.power_state[state_index].clock_info[0].voltage.vddci_enabled =
1651 true;
1652 rdev->pm.power_state[state_index].clock_info[0].voltage.vddci_id =
1653 power_info->info_3.asPowerPlayInfo[i].ucVDDCI_VoltageDropIndex;
1654 }
1655 }
1656 /* order matters! */
1657 if (misc & ATOM_PM_MISCINFO_POWER_SAVING_MODE)
1658 rdev->pm.power_state[state_index].type =
1659 POWER_STATE_TYPE_POWERSAVE;
1660 if (misc & ATOM_PM_MISCINFO_DEFAULT_DC_STATE_ENTRY_TRUE)
1661 rdev->pm.power_state[state_index].type =
1662 POWER_STATE_TYPE_BATTERY;
1663 if (misc & ATOM_PM_MISCINFO_DEFAULT_LOW_DC_STATE_ENTRY_TRUE)
1664 rdev->pm.power_state[state_index].type =
1665 POWER_STATE_TYPE_BATTERY;
1666 if (misc & ATOM_PM_MISCINFO_LOAD_BALANCE_EN)
1667 rdev->pm.power_state[state_index].type =
1668 POWER_STATE_TYPE_BALANCED;
1669 if (misc & ATOM_PM_MISCINFO_3D_ACCELERATION_EN)
1670 rdev->pm.power_state[state_index].type =
1671 POWER_STATE_TYPE_PERFORMANCE;
1672 if (misc2 & ATOM_PM_MISCINFO2_SYSTEM_AC_LITE_MODE)
1673 rdev->pm.power_state[state_index].type =
1674 POWER_STATE_TYPE_BALANCED;
1675 if (misc & ATOM_PM_MISCINFO_DRIVER_DEFAULT_MODE) {
1676 rdev->pm.power_state[state_index].type =
1677 POWER_STATE_TYPE_DEFAULT;
1678 rdev->pm.default_power_state = &rdev->pm.power_state[state_index];
1679 rdev->pm.power_state[state_index].default_clock_mode =
1680 &rdev->pm.power_state[state_index].clock_info[0];
1681 }
1682 state_index++;
1683 break;
1684 }
1685 }
1686 } else if (frev == 4) {
1687 for (i = 0; i < power_info->info_4.ucNumStates; i++) {
1688 mode_index = 0;
1689 power_state = (struct _ATOM_PPLIB_STATE *)
1690 (mode_info->atom_context->bios +
1691 data_offset +
1692 le16_to_cpu(power_info->info_4.usStateArrayOffset) +
1693 i * power_info->info_4.ucStateEntrySize);
1694 non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
1695 (mode_info->atom_context->bios +
1696 data_offset +
1697 le16_to_cpu(power_info->info_4.usNonClockInfoArrayOffset) +
1698 (power_state->ucNonClockStateIndex *
1699 power_info->info_4.ucNonClockSize));
1700 for (j = 0; j < (power_info->info_4.ucStateEntrySize - 1); j++) {
1701 if (rdev->flags & RADEON_IS_IGP) {
1702 struct _ATOM_PPLIB_RS780_CLOCK_INFO *clock_info =
1703 (struct _ATOM_PPLIB_RS780_CLOCK_INFO *)
1704 (mode_info->atom_context->bios +
1705 data_offset +
1706 le16_to_cpu(power_info->info_4.usClockInfoArrayOffset) +
1707 (power_state->ucClockStateIndices[j] *
1708 power_info->info_4.ucClockInfoSize));
1709 sclk = le16_to_cpu(clock_info->usLowEngineClockLow);
1710 sclk |= clock_info->ucLowEngineClockHigh << 16;
1711 rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk;
1712 /* skip invalid modes */
1713 if (rdev->pm.power_state[state_index].clock_info[mode_index].sclk == 0)
1714 continue;
1715 /* skip overclock modes for now */
1716 if (rdev->pm.power_state[state_index].clock_info[mode_index].sclk >
1717 rdev->clock.default_sclk + RADEON_MODE_OVERCLOCK_MARGIN)
1718 continue;
1719 rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type =
1720 VOLTAGE_SW;
1721 rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage =
1722 clock_info->usVDDC;
1723 mode_index++;
1724 } else {
1725 struct _ATOM_PPLIB_R600_CLOCK_INFO *clock_info =
1726 (struct _ATOM_PPLIB_R600_CLOCK_INFO *)
1727 (mode_info->atom_context->bios +
1728 data_offset +
1729 le16_to_cpu(power_info->info_4.usClockInfoArrayOffset) +
1730 (power_state->ucClockStateIndices[j] *
1731 power_info->info_4.ucClockInfoSize));
1732 sclk = le16_to_cpu(clock_info->usEngineClockLow);
1733 sclk |= clock_info->ucEngineClockHigh << 16;
1734 mclk = le16_to_cpu(clock_info->usMemoryClockLow);
1735 mclk |= clock_info->ucMemoryClockHigh << 16;
1736 rdev->pm.power_state[state_index].clock_info[mode_index].mclk = mclk;
1737 rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk;
1738 /* skip invalid modes */
1739 if ((rdev->pm.power_state[state_index].clock_info[mode_index].mclk == 0) ||
1740 (rdev->pm.power_state[state_index].clock_info[mode_index].sclk == 0))
1741 continue;
1742 /* skip overclock modes for now */
1743 if ((rdev->pm.power_state[state_index].clock_info[mode_index].mclk >
1744 rdev->clock.default_mclk + RADEON_MODE_OVERCLOCK_MARGIN) ||
1745 (rdev->pm.power_state[state_index].clock_info[mode_index].sclk >
1746 rdev->clock.default_sclk + RADEON_MODE_OVERCLOCK_MARGIN))
1747 continue;
1748 rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type =
1749 VOLTAGE_SW;
1750 rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage =
1751 clock_info->usVDDC;
1752 mode_index++;
1753 }
1754 }
1755 rdev->pm.power_state[state_index].num_clock_modes = mode_index;
1756 if (mode_index) {
1757 misc = le32_to_cpu(non_clock_info->ulCapsAndSettings);
1758 misc2 = le16_to_cpu(non_clock_info->usClassification);
1759 rdev->pm.power_state[state_index].non_clock_info.pcie_lanes =
1760 ((misc & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >>
1761 ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT) + 1;
1762 switch (misc2 & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
1763 case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
1764 rdev->pm.power_state[state_index].type =
1765 POWER_STATE_TYPE_BATTERY;
1766 break;
1767 case ATOM_PPLIB_CLASSIFICATION_UI_BALANCED:
1768 rdev->pm.power_state[state_index].type =
1769 POWER_STATE_TYPE_BALANCED;
1770 break;
1771 case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
1772 rdev->pm.power_state[state_index].type =
1773 POWER_STATE_TYPE_PERFORMANCE;
1774 break;
1775 }
1776 if (misc2 & ATOM_PPLIB_CLASSIFICATION_BOOT) {
1777 rdev->pm.power_state[state_index].type =
1778 POWER_STATE_TYPE_DEFAULT;
1779 rdev->pm.default_power_state = &rdev->pm.power_state[state_index];
1780 rdev->pm.power_state[state_index].default_clock_mode =
1781 &rdev->pm.power_state[state_index].clock_info[mode_index - 1];
1782 }
1783 state_index++;
1784 }
1785 }
1786 }
1787 } else {
1788 /* XXX figure out some good default low power mode for cards w/out power tables */
1789 }
1790
1791 if (rdev->pm.default_power_state == NULL) {
1792 /* add the default mode */
1793 rdev->pm.power_state[state_index].type =
1794 POWER_STATE_TYPE_DEFAULT;
1795 rdev->pm.power_state[state_index].num_clock_modes = 1;
1796 rdev->pm.power_state[state_index].clock_info[0].mclk = rdev->clock.default_mclk;
1797 rdev->pm.power_state[state_index].clock_info[0].sclk = rdev->clock.default_sclk;
1798 rdev->pm.power_state[state_index].default_clock_mode =
1799 &rdev->pm.power_state[state_index].clock_info[0];
1800 rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
1801 if (rdev->asic->get_pcie_lanes)
1802 rdev->pm.power_state[state_index].non_clock_info.pcie_lanes = radeon_get_pcie_lanes(rdev);
1803 else
1804 rdev->pm.power_state[state_index].non_clock_info.pcie_lanes = 16;
1805 rdev->pm.default_power_state = &rdev->pm.power_state[state_index];
1806 state_index++;
1807 }
1808 rdev->pm.num_power_states = state_index;
1809
1810 rdev->pm.current_power_state = rdev->pm.default_power_state;
1811 rdev->pm.current_clock_mode =
1812 rdev->pm.default_power_state->default_clock_mode;
1396} 1813}
1397 1814
1398void radeon_atom_static_pwrmgt_setup(struct radeon_device *rdev, int enable) 1815void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable)
1399{ 1816{
1400 ENABLE_ASIC_STATIC_PWR_MGT_PS_ALLOCATION args; 1817 DYNAMIC_CLOCK_GATING_PS_ALLOCATION args;
1401 int index = GetIndexIntoMasterTable(COMMAND, EnableASIC_StaticPwrMgt); 1818 int index = GetIndexIntoMasterTable(COMMAND, DynamicClockGating);
1402 1819
1403 args.ucEnable = enable; 1820 args.ucEnable = enable;
1404 1821
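
The PPLIB branch of radeon_atombios_get_power_modes above repeatedly rebuilds 24-bit clock values from a little-endian 16-bit low word plus an 8-bit high byte. A self-contained sketch of that decode; the struct layout is illustrative, not the AtomBIOS one:

/* Sketch of the low/high clock-field decode used in the PPLIB parser:
 * a 16-bit low word plus an 8-bit high byte give a 24-bit clock. */
#include <stdio.h>
#include <stdint.h>

struct demo_clock_info {
	uint16_t engine_clock_low;	/* little-endian in the BIOS image */
	uint8_t engine_clock_high;
};

static uint32_t demo_decode_sclk(const struct demo_clock_info *ci)
{
	uint32_t sclk = ci->engine_clock_low;	/* assume already cpu-order here */

	sclk |= (uint32_t)ci->engine_clock_high << 16;
	return sclk;
}

int main(void)
{
	struct demo_clock_info ci = { .engine_clock_low = 0x1a80,
				      .engine_clock_high = 0x01 };

	printf("sclk = 0x%06x\n", demo_decode_sclk(&ci));	/* 0x011a80 */
	return 0;
}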
diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
new file mode 100644
index 000000000000..3f557c4151e0
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
@@ -0,0 +1,257 @@
1/*
2 * Copyright (c) 2010 Red Hat Inc.
3 * Author : Dave Airlie <airlied@redhat.com>
4 *
5 * Licensed under GPLv2
6 *
  7 * ATPX support for both Intel and ATI GPUs
8 */
9#include <linux/vga_switcheroo.h>
10#include <acpi/acpi.h>
11#include <acpi/acpi_bus.h>
12#include <linux/pci.h>
13
14#define ATPX_VERSION 0
15#define ATPX_GPU_PWR 2
16#define ATPX_MUX_SELECT 3
17
18#define ATPX_INTEGRATED 0
19#define ATPX_DISCRETE 1
20
21#define ATPX_MUX_IGD 0
22#define ATPX_MUX_DISCRETE 1
23
24static struct radeon_atpx_priv {
25 bool atpx_detected;
26 /* handle for device - and atpx */
27 acpi_handle dhandle;
28 acpi_handle atpx_handle;
29 acpi_handle atrm_handle;
30} radeon_atpx_priv;
31
32/* retrieve the ROM in 4k blocks */
33static int radeon_atrm_call(acpi_handle atrm_handle, uint8_t *bios,
34 int offset, int len)
35{
36 acpi_status status;
37 union acpi_object atrm_arg_elements[2], *obj;
38 struct acpi_object_list atrm_arg;
39 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL};
40
41 atrm_arg.count = 2;
42 atrm_arg.pointer = &atrm_arg_elements[0];
43
44 atrm_arg_elements[0].type = ACPI_TYPE_INTEGER;
45 atrm_arg_elements[0].integer.value = offset;
46
47 atrm_arg_elements[1].type = ACPI_TYPE_INTEGER;
48 atrm_arg_elements[1].integer.value = len;
49
50 status = acpi_evaluate_object(atrm_handle, NULL, &atrm_arg, &buffer);
51 if (ACPI_FAILURE(status)) {
 52		printk("failed to evaluate ATRM, got %s\n", acpi_format_exception(status));
53 return -ENODEV;
54 }
55
56 obj = (union acpi_object *)buffer.pointer;
57 memcpy(bios+offset, obj->buffer.pointer, len);
58 kfree(buffer.pointer);
59 return len;
60}
61
62bool radeon_atrm_supported(struct pci_dev *pdev)
63{
64 /* get the discrete ROM only via ATRM */
65 if (!radeon_atpx_priv.atpx_detected)
66 return false;
67
68 if (radeon_atpx_priv.dhandle == DEVICE_ACPI_HANDLE(&pdev->dev))
69 return false;
70 return true;
71}
72
73
74int radeon_atrm_get_bios_chunk(uint8_t *bios, int offset, int len)
75{
76 return radeon_atrm_call(radeon_atpx_priv.atrm_handle, bios, offset, len);
77}
78
79static int radeon_atpx_get_version(acpi_handle handle)
80{
81 acpi_status status;
82 union acpi_object atpx_arg_elements[2], *obj;
83 struct acpi_object_list atpx_arg;
84 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
85
86 atpx_arg.count = 2;
87 atpx_arg.pointer = &atpx_arg_elements[0];
88
89 atpx_arg_elements[0].type = ACPI_TYPE_INTEGER;
90 atpx_arg_elements[0].integer.value = ATPX_VERSION;
91
92 atpx_arg_elements[1].type = ACPI_TYPE_INTEGER;
93 atpx_arg_elements[1].integer.value = ATPX_VERSION;
94
95 status = acpi_evaluate_object(handle, NULL, &atpx_arg, &buffer);
96 if (ACPI_FAILURE(status)) {
97 printk("%s: failed to call ATPX: %s\n", __func__, acpi_format_exception(status));
98 return -ENOSYS;
99 }
100 obj = (union acpi_object *)buffer.pointer;
101 if (obj && (obj->type == ACPI_TYPE_BUFFER))
102 printk(KERN_INFO "radeon atpx: version is %d\n", *((u8 *)(obj->buffer.pointer) + 2));
103 kfree(buffer.pointer);
104 return 0;
105}
106
107static int radeon_atpx_execute(acpi_handle handle, int cmd_id, u16 value)
108{
109 acpi_status status;
110 union acpi_object atpx_arg_elements[2];
111 struct acpi_object_list atpx_arg;
112 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
113 uint8_t buf[4] = {0};
114
115 if (!handle)
116 return -EINVAL;
117
118 atpx_arg.count = 2;
119 atpx_arg.pointer = &atpx_arg_elements[0];
120
121 atpx_arg_elements[0].type = ACPI_TYPE_INTEGER;
122 atpx_arg_elements[0].integer.value = cmd_id;
123
124 buf[2] = value & 0xff;
125 buf[3] = (value >> 8) & 0xff;
126
127 atpx_arg_elements[1].type = ACPI_TYPE_BUFFER;
128 atpx_arg_elements[1].buffer.length = 4;
129 atpx_arg_elements[1].buffer.pointer = buf;
130
131 status = acpi_evaluate_object(handle, NULL, &atpx_arg, &buffer);
132 if (ACPI_FAILURE(status)) {
133 printk("%s: failed to call ATPX: %s\n", __func__, acpi_format_exception(status));
134 return -ENOSYS;
135 }
136 kfree(buffer.pointer);
137
138 return 0;
139}
140
141static int radeon_atpx_set_discrete_state(acpi_handle handle, int state)
142{
143 return radeon_atpx_execute(handle, ATPX_GPU_PWR, state);
144}
145
146static int radeon_atpx_switch_mux(acpi_handle handle, int mux_id)
147{
148 return radeon_atpx_execute(handle, ATPX_MUX_SELECT, mux_id);
149}
150
151
152static int radeon_atpx_switchto(enum vga_switcheroo_client_id id)
153{
154 if (id == VGA_SWITCHEROO_IGD)
155 radeon_atpx_switch_mux(radeon_atpx_priv.atpx_handle, 0);
156 else
157 radeon_atpx_switch_mux(radeon_atpx_priv.atpx_handle, 1);
158 return 0;
159}
160
161static int radeon_atpx_power_state(enum vga_switcheroo_client_id id,
162 enum vga_switcheroo_state state)
163{
164 /* on w500 ACPI can't change intel gpu state */
165 if (id == VGA_SWITCHEROO_IGD)
166 return 0;
167
168 radeon_atpx_set_discrete_state(radeon_atpx_priv.atpx_handle, state);
169 return 0;
170}
171
172static bool radeon_atpx_pci_probe_handle(struct pci_dev *pdev)
173{
174 acpi_handle dhandle, atpx_handle, atrm_handle;
175 acpi_status status;
176
177 dhandle = DEVICE_ACPI_HANDLE(&pdev->dev);
178 if (!dhandle)
179 return false;
180
181 status = acpi_get_handle(dhandle, "ATPX", &atpx_handle);
182 if (ACPI_FAILURE(status))
183 return false;
184
185 status = acpi_get_handle(dhandle, "ATRM", &atrm_handle);
186 if (ACPI_FAILURE(status))
187 return false;
188
189 radeon_atpx_priv.dhandle = dhandle;
190 radeon_atpx_priv.atpx_handle = atpx_handle;
191 radeon_atpx_priv.atrm_handle = atrm_handle;
192 return true;
193}
194
195static int radeon_atpx_init(void)
196{
197 /* set up the ATPX handle */
198
199 radeon_atpx_get_version(radeon_atpx_priv.atpx_handle);
200 return 0;
201}
202
203static int radeon_atpx_get_client_id(struct pci_dev *pdev)
204{
205 if (radeon_atpx_priv.dhandle == DEVICE_ACPI_HANDLE(&pdev->dev))
206 return VGA_SWITCHEROO_IGD;
207 else
208 return VGA_SWITCHEROO_DIS;
209}
210
211static struct vga_switcheroo_handler radeon_atpx_handler = {
212 .switchto = radeon_atpx_switchto,
213 .power_state = radeon_atpx_power_state,
214 .init = radeon_atpx_init,
215 .get_client_id = radeon_atpx_get_client_id,
216};
217
218static bool radeon_atpx_detect(void)
219{
220 char acpi_method_name[255] = { 0 };
221 struct acpi_buffer buffer = {sizeof(acpi_method_name), acpi_method_name};
222 struct pci_dev *pdev = NULL;
223 bool has_atpx = false;
224 int vga_count = 0;
225
226 while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
227 vga_count++;
228
229 has_atpx |= (radeon_atpx_pci_probe_handle(pdev) == true);
230 }
231
232 if (has_atpx && vga_count == 2) {
233 acpi_get_name(radeon_atpx_priv.atpx_handle, ACPI_FULL_PATHNAME, &buffer);
234 printk(KERN_INFO "VGA switcheroo: detected switching method %s handle\n",
235 acpi_method_name);
236 radeon_atpx_priv.atpx_detected = true;
237 return true;
238 }
239 return false;
240}
241
242void radeon_register_atpx_handler(void)
243{
244 bool r;
245
246 /* detect if we have any ATPX + 2 VGA in the system */
247 r = radeon_atpx_detect();
248 if (!r)
249 return;
250
251 vga_switcheroo_register_handler(&radeon_atpx_handler);
252}
253
254void radeon_unregister_atpx_handler(void)
255{
256 vga_switcheroo_unregister_handler();
257}
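
radeon_atpx_execute above passes its 16-bit argument to the firmware in bytes 2 and 3 of a 4-byte, little-endian buffer, leaving bytes 0 and 1 zero. That packing in isolation, as a sketch:

/* Sketch of the 4-byte ATPX argument packing: the u16 value lands in
 * bytes 2 and 3, little-endian. */
#include <stdio.h>
#include <stdint.h>

static void atpx_pack_arg(uint8_t buf[4], uint16_t value)
{
	buf[0] = 0;
	buf[1] = 0;
	buf[2] = value & 0xff;		/* low byte */
	buf[3] = (value >> 8) & 0xff;	/* high byte */
}

int main(void)
{
	uint8_t buf[4];

	atpx_pack_arg(buf, 0x0102);
	printf("%02x %02x %02x %02x\n", buf[0], buf[1], buf[2], buf[3]);
	return 0;
}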
diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c
index 906921740c60..557240460526 100644
--- a/drivers/gpu/drm/radeon/radeon_bios.c
+++ b/drivers/gpu/drm/radeon/radeon_bios.c
@@ -30,6 +30,7 @@
30#include "radeon.h" 30#include "radeon.h"
31#include "atom.h" 31#include "atom.h"
32 32
33#include <linux/vga_switcheroo.h>
33/* 34/*
34 * BIOS. 35 * BIOS.
35 */ 36 */
@@ -62,7 +63,7 @@ static bool igp_read_bios_from_vram(struct radeon_device *rdev)
62 iounmap(bios); 63 iounmap(bios);
63 return false; 64 return false;
64 } 65 }
65 memcpy(rdev->bios, bios, size); 66 memcpy_fromio(rdev->bios, bios, size);
66 iounmap(bios); 67 iounmap(bios);
67 return true; 68 return true;
68} 69}
@@ -93,6 +94,38 @@ static bool radeon_read_bios(struct radeon_device *rdev)
93 return true; 94 return true;
94} 95}
95 96
97/* ATRM is used to get the BIOS on the discrete cards in
98 * dual-gpu systems.
99 */
100static bool radeon_atrm_get_bios(struct radeon_device *rdev)
101{
102 int ret;
103 int size = 64 * 1024;
104 int i;
105
106 if (!radeon_atrm_supported(rdev->pdev))
107 return false;
108
109 rdev->bios = kmalloc(size, GFP_KERNEL);
110 if (!rdev->bios) {
111 DRM_ERROR("Unable to allocate bios\n");
112 return false;
113 }
114
115 for (i = 0; i < size / ATRM_BIOS_PAGE; i++) {
116 ret = radeon_atrm_get_bios_chunk(rdev->bios,
117 (i * ATRM_BIOS_PAGE),
118 ATRM_BIOS_PAGE);
119 if (ret <= 0)
120 break;
121 }
122
123 if (i == 0 || rdev->bios[0] != 0x55 || rdev->bios[1] != 0xaa) {
124 kfree(rdev->bios);
125 return false;
126 }
127 return true;
128}
96static bool r700_read_disabled_bios(struct radeon_device *rdev) 129static bool r700_read_disabled_bios(struct radeon_device *rdev)
97{ 130{
98 uint32_t viph_control; 131 uint32_t viph_control;
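
radeon_atrm_get_bios above pulls the ROM one ATRM_BIOS_PAGE at a time until the firmware call returns no more data, then validates the PCI option-ROM signature (0x55 0xaa). A userspace sketch of the same loop, with fetch_chunk() standing in for the ACPI ATRM call and DEMO_PAGE for the driver's page size:

/* Userspace sketch of the chunked ROM fetch: read fixed-size pages
 * until the backend returns <= 0, then check the ROM signature. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>

#define DEMO_PAGE 4096

static int fetch_chunk(uint8_t *bios, int offset, int len)
{
	/* stand-in: pretend the firmware returned exactly one page */
	if (offset >= DEMO_PAGE)
		return 0;
	memset(bios + offset, 0, len);
	bios[0] = 0x55;
	bios[1] = 0xaa;
	return len;
}

int main(void)
{
	int size = 64 * 1024, i;
	uint8_t *bios = malloc(size);

	if (!bios)
		return 1;
	for (i = 0; i < size / DEMO_PAGE; i++)
		if (fetch_chunk(bios, i * DEMO_PAGE, DEMO_PAGE) <= 0)
			break;
	if (i == 0 || bios[0] != 0x55 || bios[1] != 0xaa)
		printf("no valid ROM\n");
	else
		printf("ROM signature ok after %d page(s)\n", i);
	free(bios);
	return 0;
}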
@@ -388,16 +421,16 @@ static bool radeon_read_disabled_bios(struct radeon_device *rdev)
388 return legacy_read_disabled_bios(rdev); 421 return legacy_read_disabled_bios(rdev);
389} 422}
390 423
424
391bool radeon_get_bios(struct radeon_device *rdev) 425bool radeon_get_bios(struct radeon_device *rdev)
392{ 426{
393 bool r; 427 bool r;
394 uint16_t tmp; 428 uint16_t tmp;
395 429
396 if (rdev->flags & RADEON_IS_IGP) { 430 r = radeon_atrm_get_bios(rdev);
431 if (r == false)
397 r = igp_read_bios_from_vram(rdev); 432 r = igp_read_bios_from_vram(rdev);
398 if (r == false) 433 if (r == false)
399 r = radeon_read_bios(rdev);
400 } else
401 r = radeon_read_bios(rdev); 434 r = radeon_read_bios(rdev);
402 if (r == false) { 435 if (r == false) {
403 r = radeon_read_disabled_bios(rdev); 436 r = radeon_read_disabled_bios(rdev);
@@ -408,6 +441,13 @@ bool radeon_get_bios(struct radeon_device *rdev)
408 return false; 441 return false;
409 } 442 }
410 if (rdev->bios[0] != 0x55 || rdev->bios[1] != 0xaa) { 443 if (rdev->bios[0] != 0x55 || rdev->bios[1] != 0xaa) {
444 printk("BIOS signature incorrect %x %x\n", rdev->bios[0], rdev->bios[1]);
445 goto free_bios;
446 }
447
448 tmp = RBIOS16(0x18);
449 if (RBIOS8(tmp + 0x14) != 0x0) {
450 DRM_INFO("Not an x86 BIOS ROM, not using.\n");
411 goto free_bios; 451 goto free_bios;
412 } 452 }
413 453
diff --git a/drivers/gpu/drm/radeon/radeon_clocks.c b/drivers/gpu/drm/radeon/radeon_clocks.c
index 73c4405bf42f..f64936cc4dd9 100644
--- a/drivers/gpu/drm/radeon/radeon_clocks.c
+++ b/drivers/gpu/drm/radeon/radeon_clocks.c
@@ -96,6 +96,7 @@ void radeon_get_clock_info(struct drm_device *dev)
96 struct radeon_device *rdev = dev->dev_private; 96 struct radeon_device *rdev = dev->dev_private;
97 struct radeon_pll *p1pll = &rdev->clock.p1pll; 97 struct radeon_pll *p1pll = &rdev->clock.p1pll;
98 struct radeon_pll *p2pll = &rdev->clock.p2pll; 98 struct radeon_pll *p2pll = &rdev->clock.p2pll;
99 struct radeon_pll *dcpll = &rdev->clock.dcpll;
99 struct radeon_pll *spll = &rdev->clock.spll; 100 struct radeon_pll *spll = &rdev->clock.spll;
100 struct radeon_pll *mpll = &rdev->clock.mpll; 101 struct radeon_pll *mpll = &rdev->clock.mpll;
101 int ret; 102 int ret;
@@ -204,6 +205,17 @@ void radeon_get_clock_info(struct drm_device *dev)
204 p2pll->max_frac_feedback_div = 0; 205 p2pll->max_frac_feedback_div = 0;
205 } 206 }
206 207
208 /* dcpll is DCE4 only */
209 dcpll->min_post_div = 2;
210 dcpll->max_post_div = 0x7f;
211 dcpll->min_frac_feedback_div = 0;
212 dcpll->max_frac_feedback_div = 9;
213 dcpll->min_ref_div = 2;
214 dcpll->max_ref_div = 0x3ff;
215 dcpll->min_feedback_div = 4;
216 dcpll->max_feedback_div = 0xfff;
217 dcpll->best_vco = 0;
218
207 p1pll->min_ref_div = 2; 219 p1pll->min_ref_div = 2;
208 p1pll->max_ref_div = 0x3ff; 220 p1pll->max_ref_div = 0x3ff;
209 p1pll->min_feedback_div = 4; 221 p1pll->min_feedback_div = 4;
@@ -846,8 +858,10 @@ int radeon_static_clocks_init(struct drm_device *dev)
846 /* XXX make sure engine is idle */ 858 /* XXX make sure engine is idle */
847 859
848 if (radeon_dynclks != -1) { 860 if (radeon_dynclks != -1) {
849 if (radeon_dynclks) 861 if (radeon_dynclks) {
850 radeon_set_clock_gating(rdev, 1); 862 if (rdev->asic->set_clock_gating)
863 radeon_set_clock_gating(rdev, 1);
864 }
851 } 865 }
852 radeon_apply_clock_quirks(rdev); 866 radeon_apply_clock_quirks(rdev);
853 return 0; 867 return 0;
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 22d476160d52..e9ea38ece375 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -150,6 +150,9 @@ static uint16_t combios_get_table_offset(struct drm_device *dev,
150 int rev; 150 int rev;
151 uint16_t offset = 0, check_offset; 151 uint16_t offset = 0, check_offset;
152 152
153 if (!rdev->bios)
154 return 0;
155
153 switch (table) { 156 switch (table) {
154 /* absolute offset tables */ 157 /* absolute offset tables */
155 case COMBIOS_ASIC_INIT_1_TABLE: 158 case COMBIOS_ASIC_INIT_1_TABLE:
@@ -443,6 +446,39 @@ static uint16_t combios_get_table_offset(struct drm_device *dev,
443 446
444} 447}
445 448
449bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev)
450{
451 int edid_info;
452 struct edid *edid;
453 edid_info = combios_get_table_offset(rdev->ddev, COMBIOS_HARDCODED_EDID_TABLE);
454 if (!edid_info)
455 return false;
456
457 edid = kmalloc(EDID_LENGTH * (DRM_MAX_EDID_EXT_NUM + 1),
458 GFP_KERNEL);
459 if (edid == NULL)
460 return false;
461
462 memcpy((unsigned char *)edid,
463 (unsigned char *)(rdev->bios + edid_info), EDID_LENGTH);
464
465 if (!drm_edid_is_valid(edid)) {
466 kfree(edid);
467 return false;
468 }
469
470 rdev->mode_info.bios_hardcoded_edid = edid;
471 return true;
472}
473
474struct edid *
475radeon_combios_get_hardcoded_edid(struct radeon_device *rdev)
476{
477 if (rdev->mode_info.bios_hardcoded_edid)
478 return rdev->mode_info.bios_hardcoded_edid;
479 return NULL;
480}
481
446static struct radeon_i2c_bus_rec combios_setup_i2c_bus(struct radeon_device *rdev, 482static struct radeon_i2c_bus_rec combios_setup_i2c_bus(struct radeon_device *rdev,
447 int ddc_line) 483 int ddc_line)
448{ 484{
@@ -486,9 +522,65 @@ static struct radeon_i2c_bus_rec combios_setup_i2c_bus(struct radeon_device *rde
486 i2c.y_data_reg = ddc_line; 522 i2c.y_data_reg = ddc_line;
487 } 523 }
488 524
489 if (rdev->family < CHIP_R200) 525 switch (rdev->family) {
490 i2c.hw_capable = false; 526 case CHIP_R100:
491 else { 527 case CHIP_RV100:
528 case CHIP_RS100:
529 case CHIP_RV200:
530 case CHIP_RS200:
531 case CHIP_RS300:
532 switch (ddc_line) {
533 case RADEON_GPIO_DVI_DDC:
534 /* in theory this should be hw capable,
535 * but it doesn't seem to work
536 */
537 i2c.hw_capable = false;
538 break;
539 default:
540 i2c.hw_capable = false;
541 break;
542 }
543 break;
544 case CHIP_R200:
545 switch (ddc_line) {
546 case RADEON_GPIO_DVI_DDC:
547 case RADEON_GPIO_MONID:
548 i2c.hw_capable = true;
549 break;
550 default:
551 i2c.hw_capable = false;
552 break;
553 }
554 break;
555 case CHIP_RV250:
556 case CHIP_RV280:
557 switch (ddc_line) {
558 case RADEON_GPIO_VGA_DDC:
559 case RADEON_GPIO_DVI_DDC:
560 case RADEON_GPIO_CRT2_DDC:
561 i2c.hw_capable = true;
562 break;
563 default:
564 i2c.hw_capable = false;
565 break;
566 }
567 break;
568 case CHIP_R300:
569 case CHIP_R350:
570 switch (ddc_line) {
571 case RADEON_GPIO_VGA_DDC:
572 case RADEON_GPIO_DVI_DDC:
573 i2c.hw_capable = true;
574 break;
575 default:
576 i2c.hw_capable = false;
577 break;
578 }
579 break;
580 case CHIP_RV350:
581 case CHIP_RV380:
582 case CHIP_RS400:
583 case CHIP_RS480:
492 switch (ddc_line) { 584 switch (ddc_line) {
493 case RADEON_GPIO_VGA_DDC: 585 case RADEON_GPIO_VGA_DDC:
494 case RADEON_GPIO_DVI_DDC: 586 case RADEON_GPIO_DVI_DDC:
@@ -504,9 +596,14 @@ static struct radeon_i2c_bus_rec combios_setup_i2c_bus(struct radeon_device *rde
504 i2c.hw_capable = false; 596 i2c.hw_capable = false;
505 break; 597 break;
506 } 598 }
599 break;
600 default:
601 i2c.hw_capable = false;
602 break;
507 } 603 }
508 i2c.mm_i2c = false; 604 i2c.mm_i2c = false;
509 i2c.i2c_id = 0; 605 i2c.i2c_id = 0;
606 i2c.hpd_id = 0;
510 607
511 if (ddc_line) 608 if (ddc_line)
512 i2c.valid = true; 609 i2c.valid = true;
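
The nested switch above is effectively a lookup from (chip family, DDC line) to a hardware-i2c capability flag. The same mapping expressed as a small table, sketched with stand-in constants; the table form trades the switch's explicitness for an easier audit of which pairs are hw-capable:

/* Sketch: (family, ddc line) pairs that are hw-capable; everything
 * else is not. Enum values are illustrative stand-ins. */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

enum demo_family { F_R200, F_RV250, F_R300 };
enum demo_ddc { DDC_VGA, DDC_DVI, DDC_MONID, DDC_CRT2 };

struct hw_i2c_rule {
	enum demo_family family;
	enum demo_ddc line;
};

static const struct hw_i2c_rule hw_capable_rules[] = {
	{ F_R200,  DDC_DVI },
	{ F_R200,  DDC_MONID },
	{ F_RV250, DDC_VGA },
	{ F_RV250, DDC_DVI },
	{ F_RV250, DDC_CRT2 },
	{ F_R300,  DDC_VGA },
	{ F_R300,  DDC_DVI },
};

static bool demo_hw_capable(enum demo_family f, enum demo_ddc l)
{
	size_t i;

	for (i = 0; i < sizeof(hw_capable_rules) / sizeof(hw_capable_rules[0]); i++)
		if (hw_capable_rules[i].family == f && hw_capable_rules[i].line == l)
			return true;
	return false;
}

int main(void)
{
	printf("%d\n", demo_hw_capable(F_R200, DDC_DVI));	/* prints 1 */
	return 0;
}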
@@ -527,9 +624,6 @@ bool radeon_combios_get_clock_info(struct drm_device *dev)
527 int8_t rev; 624 int8_t rev;
528 uint16_t sclk, mclk; 625 uint16_t sclk, mclk;
529 626
530 if (rdev->bios == NULL)
531 return false;
532
533 pll_info = combios_get_table_offset(dev, COMBIOS_PLL_INFO_TABLE); 627 pll_info = combios_get_table_offset(dev, COMBIOS_PLL_INFO_TABLE);
534 if (pll_info) { 628 if (pll_info) {
535 rev = RBIOS8(pll_info); 629 rev = RBIOS8(pll_info);
@@ -654,9 +748,6 @@ struct radeon_encoder_primary_dac *radeon_combios_get_primary_dac_info(struct
654 if (!p_dac) 748 if (!p_dac)
655 return NULL; 749 return NULL;
656 750
657 if (rdev->bios == NULL)
658 goto out;
659
660 /* check CRT table */ 751 /* check CRT table */
661 dac_info = combios_get_table_offset(dev, COMBIOS_CRT_INFO_TABLE); 752 dac_info = combios_get_table_offset(dev, COMBIOS_CRT_INFO_TABLE);
662 if (dac_info) { 753 if (dac_info) {
@@ -673,7 +764,6 @@ struct radeon_encoder_primary_dac *radeon_combios_get_primary_dac_info(struct
673 found = 1; 764 found = 1;
674 } 765 }
675 766
676out:
677 if (!found) /* fallback to defaults */ 767 if (!found) /* fallback to defaults */
678 radeon_legacy_get_primary_dac_info_from_table(rdev, p_dac); 768 radeon_legacy_get_primary_dac_info_from_table(rdev, p_dac);
679 769
@@ -687,9 +777,6 @@ radeon_combios_get_tv_info(struct radeon_device *rdev)
687 uint16_t tv_info; 777 uint16_t tv_info;
688 enum radeon_tv_std tv_std = TV_STD_NTSC; 778 enum radeon_tv_std tv_std = TV_STD_NTSC;
689 779
690 if (rdev->bios == NULL)
691 return tv_std;
692
693 tv_info = combios_get_table_offset(dev, COMBIOS_TV_INFO_TABLE); 780 tv_info = combios_get_table_offset(dev, COMBIOS_TV_INFO_TABLE);
694 if (tv_info) { 781 if (tv_info) {
695 if (RBIOS8(tv_info + 6) == 'T') { 782 if (RBIOS8(tv_info + 6) == 'T') {
@@ -793,9 +880,6 @@ struct radeon_encoder_tv_dac *radeon_combios_get_tv_dac_info(struct
793 if (!tv_dac) 880 if (!tv_dac)
794 return NULL; 881 return NULL;
795 882
796 if (rdev->bios == NULL)
797 goto out;
798
799 /* first check TV table */ 883 /* first check TV table */
800 dac_info = combios_get_table_offset(dev, COMBIOS_TV_INFO_TABLE); 884 dac_info = combios_get_table_offset(dev, COMBIOS_TV_INFO_TABLE);
801 if (dac_info) { 885 if (dac_info) {
@@ -857,7 +941,6 @@ struct radeon_encoder_tv_dac *radeon_combios_get_tv_dac_info(struct
857 } 941 }
858 } 942 }
859 943
860out:
861 if (!found) /* fallback to defaults */ 944 if (!found) /* fallback to defaults */
862 radeon_legacy_get_tv_dac_info_from_table(rdev, tv_dac); 945 radeon_legacy_get_tv_dac_info_from_table(rdev, tv_dac);
863 946
@@ -945,11 +1028,6 @@ struct radeon_encoder_lvds *radeon_combios_get_lvds_info(struct radeon_encoder
945 int tmp, i; 1028 int tmp, i;
946 struct radeon_encoder_lvds *lvds = NULL; 1029 struct radeon_encoder_lvds *lvds = NULL;
947 1030
948 if (rdev->bios == NULL) {
949 lvds = radeon_legacy_get_lvds_info_from_regs(rdev);
950 goto out;
951 }
952
953 lcd_info = combios_get_table_offset(dev, COMBIOS_LCD_INFO_TABLE); 1031 lcd_info = combios_get_table_offset(dev, COMBIOS_LCD_INFO_TABLE);
954 1032
955 if (lcd_info) { 1033 if (lcd_info) {
@@ -1050,7 +1128,7 @@ struct radeon_encoder_lvds *radeon_combios_get_lvds_info(struct radeon_encoder
1050 DRM_INFO("No panel info found in BIOS\n"); 1128 DRM_INFO("No panel info found in BIOS\n");
1051 lvds = radeon_legacy_get_lvds_info_from_regs(rdev); 1129 lvds = radeon_legacy_get_lvds_info_from_regs(rdev);
1052 } 1130 }
1053out: 1131
1054 if (lvds) 1132 if (lvds)
1055 encoder->native_mode = lvds->native_mode; 1133 encoder->native_mode = lvds->native_mode;
1056 return lvds; 1134 return lvds;
@@ -1102,9 +1180,6 @@ bool radeon_legacy_get_tmds_info_from_combios(struct radeon_encoder *encoder,
1102 int i, n; 1180 int i, n;
1103 uint8_t ver; 1181 uint8_t ver;
1104 1182
1105 if (rdev->bios == NULL)
1106 return false;
1107
1108 tmds_info = combios_get_table_offset(dev, COMBIOS_DFP_INFO_TABLE); 1183 tmds_info = combios_get_table_offset(dev, COMBIOS_DFP_INFO_TABLE);
1109 1184
1110 if (tmds_info) { 1185 if (tmds_info) {
@@ -1184,9 +1259,6 @@ bool radeon_legacy_get_ext_tmds_info_from_combios(struct radeon_encoder *encoder
1184 enum radeon_combios_ddc gpio; 1259 enum radeon_combios_ddc gpio;
1185 struct radeon_i2c_bus_rec i2c_bus; 1260 struct radeon_i2c_bus_rec i2c_bus;
1186 1261
1187 if (rdev->bios == NULL)
1188 return false;
1189
1190 tmds->i2c_bus = NULL; 1262 tmds->i2c_bus = NULL;
1191 if (rdev->flags & RADEON_IS_IGP) { 1263 if (rdev->flags & RADEON_IS_IGP) {
1192 offset = combios_get_table_offset(dev, COMBIOS_I2C_INFO_TABLE); 1264 offset = combios_get_table_offset(dev, COMBIOS_I2C_INFO_TABLE);
@@ -1253,7 +1325,10 @@ bool radeon_legacy_get_ext_tmds_info_from_combios(struct radeon_encoder *encoder
1253 tmds->i2c_bus = radeon_i2c_create(dev, &i2c_bus, "DVO"); 1325 tmds->i2c_bus = radeon_i2c_create(dev, &i2c_bus, "DVO");
1254 break; 1326 break;
1255 case DDC_LCD: /* MM i2c */ 1327 case DDC_LCD: /* MM i2c */
1256 DRM_ERROR("MM i2c requires hw i2c engine\n"); 1328 i2c_bus.valid = true;
1329 i2c_bus.hw_capable = true;
1330 i2c_bus.mm_i2c = true;
1331 tmds->i2c_bus = radeon_i2c_create(dev, &i2c_bus, "DVO");
1257 break; 1332 break;
1258 default: 1333 default:
1259 DRM_ERROR("Unsupported gpio %d\n", gpio); 1334 DRM_ERROR("Unsupported gpio %d\n", gpio);
@@ -1909,9 +1984,6 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
1909 struct radeon_i2c_bus_rec ddc_i2c; 1984 struct radeon_i2c_bus_rec ddc_i2c;
1910 struct radeon_hpd hpd; 1985 struct radeon_hpd hpd;
1911 1986
1912 if (rdev->bios == NULL)
1913 return false;
1914
1915 conn_info = combios_get_table_offset(dev, COMBIOS_CONNECTOR_INFO_TABLE); 1987 conn_info = combios_get_table_offset(dev, COMBIOS_CONNECTOR_INFO_TABLE);
1916 if (conn_info) { 1988 if (conn_info) {
1917 for (i = 0; i < 4; i++) { 1989 for (i = 0; i < 4; i++) {
@@ -2278,6 +2350,115 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
2278 return true; 2350 return true;
2279} 2351}
2280 2352
2353void radeon_combios_get_power_modes(struct radeon_device *rdev)
2354{
2355 struct drm_device *dev = rdev->ddev;
2356 u16 offset, misc, misc2 = 0;
2357 u8 rev, blocks, tmp;
2358 int state_index = 0;
2359
2360 rdev->pm.default_power_state = NULL;
2361
2362 if (rdev->flags & RADEON_IS_MOBILITY) {
2363 offset = combios_get_table_offset(dev, COMBIOS_POWERPLAY_INFO_TABLE);
2364 if (offset) {
2365 rev = RBIOS8(offset);
2366 blocks = RBIOS8(offset + 0x2);
2367 /* power mode 0 tends to be the only valid one */
2368 rdev->pm.power_state[state_index].num_clock_modes = 1;
2369 rdev->pm.power_state[state_index].clock_info[0].mclk = RBIOS32(offset + 0x5 + 0x2);
2370 rdev->pm.power_state[state_index].clock_info[0].sclk = RBIOS32(offset + 0x5 + 0x6);
2371 if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) ||
2372 (rdev->pm.power_state[state_index].clock_info[0].sclk == 0))
2373 goto default_mode;
2374 /* skip overclock modes for now */
2375 if ((rdev->pm.power_state[state_index].clock_info[0].mclk >
2376 rdev->clock.default_mclk + RADEON_MODE_OVERCLOCK_MARGIN) ||
2377 (rdev->pm.power_state[state_index].clock_info[0].sclk >
2378 rdev->clock.default_sclk + RADEON_MODE_OVERCLOCK_MARGIN))
2379 goto default_mode;
2380 rdev->pm.power_state[state_index].type =
2381 POWER_STATE_TYPE_BATTERY;
2382 misc = RBIOS16(offset + 0x5 + 0x0);
2383 if (rev > 4)
2384 misc2 = RBIOS16(offset + 0x5 + 0xe);
2385 if (misc & 0x4) {
2386 rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_GPIO;
2387 if (misc & 0x8)
2388 rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
2389 true;
2390 else
2391 rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
2392 false;
2393 rdev->pm.power_state[state_index].clock_info[0].voltage.gpio.valid = true;
2394 if (rev < 6) {
2395 rdev->pm.power_state[state_index].clock_info[0].voltage.gpio.reg =
2396 RBIOS16(offset + 0x5 + 0xb) * 4;
2397 tmp = RBIOS8(offset + 0x5 + 0xd);
2398 rdev->pm.power_state[state_index].clock_info[0].voltage.gpio.mask = (1 << tmp);
2399 } else {
2400 u8 entries = RBIOS8(offset + 0x5 + 0xb);
2401 u16 voltage_table_offset = RBIOS16(offset + 0x5 + 0xc);
2402 if (entries && voltage_table_offset) {
2403 rdev->pm.power_state[state_index].clock_info[0].voltage.gpio.reg =
2404 RBIOS16(voltage_table_offset) * 4;
2405 tmp = RBIOS8(voltage_table_offset + 0x2);
2406 rdev->pm.power_state[state_index].clock_info[0].voltage.gpio.mask = (1 << tmp);
2407 } else
2408 rdev->pm.power_state[state_index].clock_info[0].voltage.gpio.valid = false;
2409 }
2410 switch ((misc2 & 0x700) >> 8) {
2411 case 0:
2412 default:
2413 rdev->pm.power_state[state_index].clock_info[0].voltage.delay = 0;
2414 break;
2415 case 1:
2416 rdev->pm.power_state[state_index].clock_info[0].voltage.delay = 33;
2417 break;
2418 case 2:
2419 rdev->pm.power_state[state_index].clock_info[0].voltage.delay = 66;
2420 break;
2421 case 3:
2422 rdev->pm.power_state[state_index].clock_info[0].voltage.delay = 99;
2423 break;
2424 case 4:
2425 rdev->pm.power_state[state_index].clock_info[0].voltage.delay = 132;
2426 break;
2427 }
2428 } else
2429 rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
2430 if (rev > 6)
2431 rdev->pm.power_state[state_index].non_clock_info.pcie_lanes =
2432 RBIOS8(offset + 0x5 + 0x10);
2433 state_index++;
2434 } else {
2435 /* XXX figure out some good default low power mode for mobility cards w/out power tables */
2436 }
2437 } else {
2438 /* XXX figure out some good default low power mode for desktop cards */
2439 }
2440
2441default_mode:
2442 /* add the default mode */
2443 rdev->pm.power_state[state_index].type =
2444 POWER_STATE_TYPE_DEFAULT;
2445 rdev->pm.power_state[state_index].num_clock_modes = 1;
2446 rdev->pm.power_state[state_index].clock_info[0].mclk = rdev->clock.default_mclk;
2447 rdev->pm.power_state[state_index].clock_info[0].sclk = rdev->clock.default_sclk;
2448 rdev->pm.power_state[state_index].default_clock_mode = &rdev->pm.power_state[state_index].clock_info[0];
2449 rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
2450 if (rdev->asic->get_pcie_lanes)
2451 rdev->pm.power_state[state_index].non_clock_info.pcie_lanes = radeon_get_pcie_lanes(rdev);
2452 else
2453 rdev->pm.power_state[state_index].non_clock_info.pcie_lanes = 16;
2454 rdev->pm.default_power_state = &rdev->pm.power_state[state_index];
2455 rdev->pm.num_power_states = state_index + 1;
2456
2457 rdev->pm.current_power_state = rdev->pm.default_power_state;
2458 rdev->pm.current_clock_mode =
2459 rdev->pm.default_power_state->default_clock_mode;
2460}
2461
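The new radeon_combios_get_power_modes() above reads the POWERPLAY table by fixed byte offsets: revision at +0, block count at +0x2, then per-mode fields at +0x5 (misc at +0x0, mclk at +0x2, sclk at +0x6, misc2 at +0xe for rev > 4). A standalone little-endian sketch of those RBIOS8/16/32 accesses follows; the table bytes are invented for illustration, not taken from a real BIOS.

/* Standalone sketch of the COMBIOS POWERPLAY offsets used above.
 * The table bytes here are invented for illustration only. */
#include <stdint.h>
#include <stdio.h>

static uint8_t rbios8(const uint8_t *b, unsigned off) { return b[off]; }
static uint16_t rbios16(const uint8_t *b, unsigned off)
{
        return (uint16_t)(b[off] | (b[off + 1] << 8));  /* little-endian */
}
static uint32_t rbios32(const uint8_t *b, unsigned off)
{
        return rbios16(b, off) | ((uint32_t)rbios16(b, off + 2) << 16);
}

int main(void)
{
        /* rev 6, 1 block, misc 0x0004 (GPIO voltage), clocks in 10 kHz */
        uint8_t tbl[32] = { 0x06, 0x00, 0x01, 0x00, 0x00,
                            0x04, 0x00,                 /* +0x5+0x0: misc */
                            0x10, 0x27, 0x00, 0x00,     /* +0x5+0x2: mclk */
                            0x20, 0x4e, 0x00, 0x00 };   /* +0x5+0x6: sclk */

        printf("rev %u misc 0x%04x mclk %u sclk %u\n",
               rbios8(tbl, 0), rbios16(tbl, 0x5 + 0x0),
               rbios32(tbl, 0x5 + 0x2), rbios32(tbl, 0x5 + 0x6));
        return 0;
}
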
2281void radeon_external_tmds_setup(struct drm_encoder *encoder) 2462void radeon_external_tmds_setup(struct drm_encoder *encoder)
2282{ 2463{
2283 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 2464 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
@@ -2289,23 +2470,21 @@ void radeon_external_tmds_setup(struct drm_encoder *encoder)
2289 switch (tmds->dvo_chip) { 2470 switch (tmds->dvo_chip) {
2290 case DVO_SIL164: 2471 case DVO_SIL164:
2291 /* sil 164 */ 2472 /* sil 164 */
2292 radeon_i2c_do_lock(tmds->i2c_bus, 1); 2473 radeon_i2c_put_byte(tmds->i2c_bus,
2293 radeon_i2c_sw_put_byte(tmds->i2c_bus, 2474 tmds->slave_addr,
2294 tmds->slave_addr, 2475 0x08, 0x30);
2295 0x08, 0x30); 2476 radeon_i2c_put_byte(tmds->i2c_bus,
2296 radeon_i2c_sw_put_byte(tmds->i2c_bus,
2297 tmds->slave_addr, 2477 tmds->slave_addr,
2298 0x09, 0x00); 2478 0x09, 0x00);
2299 radeon_i2c_sw_put_byte(tmds->i2c_bus, 2479 radeon_i2c_put_byte(tmds->i2c_bus,
2300 tmds->slave_addr, 2480 tmds->slave_addr,
2301 0x0a, 0x90); 2481 0x0a, 0x90);
2302 radeon_i2c_sw_put_byte(tmds->i2c_bus, 2482 radeon_i2c_put_byte(tmds->i2c_bus,
2303 tmds->slave_addr, 2483 tmds->slave_addr,
2304 0x0c, 0x89); 2484 0x0c, 0x89);
2305 radeon_i2c_sw_put_byte(tmds->i2c_bus, 2485 radeon_i2c_put_byte(tmds->i2c_bus,
2306 tmds->slave_addr, 2486 tmds->slave_addr,
2307 0x08, 0x3b); 2487 0x08, 0x3b);
2308 radeon_i2c_do_lock(tmds->i2c_bus, 0);
2309 break; 2488 break;
2310 case DVO_SIL1178: 2489 case DVO_SIL1178:
2311 /* sil 1178 - untested */ 2490 /* sil 1178 - untested */
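This hunk replaces the sw-only radeon_i2c_sw_put_byte() calls bracketed by radeon_i2c_do_lock() pairs with radeon_i2c_put_byte(), which is assumed here to handle bus locking itself. A standalone sketch of the same five-write SIL164 init sequence, table-driven; the slave address and the printf stub are invented, while the register/value pairs are the ones from the hunk.

/* Standalone sketch: the SIL164 init sequence above, table-driven.
 * The real radeon_i2c_put_byte() is assumed to lock the bus internally
 * (hence the dropped radeon_i2c_do_lock() pairs); stubbed with printf. */
#include <stdint.h>
#include <stdio.h>

static void put_byte(uint8_t slave, uint8_t reg, uint8_t val)
{
        printf("i2c write: slave 0x%02x reg 0x%02x val 0x%02x\n",
               slave, reg, val);
}

int main(void)
{
        static const struct { uint8_t reg, val; } sil164_init[] = {
                { 0x08, 0x30 }, { 0x09, 0x00 }, { 0x0a, 0x90 },
                { 0x0c, 0x89 }, { 0x08, 0x3b },
        };
        uint8_t slave_addr = 0x70;      /* hypothetical DVO slave address */
        unsigned i;

        for (i = 0; i < sizeof(sil164_init) / sizeof(sil164_init[0]); i++)
                put_byte(slave_addr, sil164_init[i].reg, sil164_init[i].val);
        return 0;
}
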
@@ -2338,9 +2517,6 @@ bool radeon_combios_external_tmds_setup(struct drm_encoder *encoder)
2338 uint32_t reg, val, and_mask, or_mask; 2517 uint32_t reg, val, and_mask, or_mask;
2339 struct radeon_encoder_ext_tmds *tmds = radeon_encoder->enc_priv; 2518 struct radeon_encoder_ext_tmds *tmds = radeon_encoder->enc_priv;
2340 2519
2341 if (rdev->bios == NULL)
2342 return false;
2343
2344 if (!tmds) 2520 if (!tmds)
2345 return false; 2521 return false;
2346 2522
@@ -2390,11 +2566,9 @@ bool radeon_combios_external_tmds_setup(struct drm_encoder *encoder)
2390 index++; 2566 index++;
2391 val = RBIOS8(index); 2567 val = RBIOS8(index);
2392 index++; 2568 index++;
2393 radeon_i2c_do_lock(tmds->i2c_bus, 1); 2569 radeon_i2c_put_byte(tmds->i2c_bus,
2394 radeon_i2c_sw_put_byte(tmds->i2c_bus, 2570 slave_addr,
2395 slave_addr, 2571 reg, val);
2396 reg, val);
2397 radeon_i2c_do_lock(tmds->i2c_bus, 0);
2398 break; 2572 break;
2399 default: 2573 default:
2400 DRM_ERROR("Unknown id %d\n", id >> 13); 2574 DRM_ERROR("Unknown id %d\n", id >> 13);
@@ -2447,11 +2621,9 @@ bool radeon_combios_external_tmds_setup(struct drm_encoder *encoder)
2447 reg = id & 0x1fff; 2621 reg = id & 0x1fff;
2448 val = RBIOS8(index); 2622 val = RBIOS8(index);
2449 index += 1; 2623 index += 1;
2450 radeon_i2c_do_lock(tmds->i2c_bus, 1); 2624 radeon_i2c_put_byte(tmds->i2c_bus,
2451 radeon_i2c_sw_put_byte(tmds->i2c_bus, 2625 tmds->slave_addr,
2452 tmds->slave_addr, 2626 reg, val);
2453 reg, val);
2454 radeon_i2c_do_lock(tmds->i2c_bus, 0);
2455 break; 2627 break;
2456 default: 2628 default:
2457 DRM_ERROR("Unknown id %d\n", id >> 13); 2629 DRM_ERROR("Unknown id %d\n", id >> 13);
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 65f81942f399..ee0083f982d8 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -479,10 +479,8 @@ static enum drm_connector_status radeon_lvds_detect(struct drm_connector *connec
479 ret = connector_status_connected; 479 ret = connector_status_connected;
480 else { 480 else {
481 if (radeon_connector->ddc_bus) { 481 if (radeon_connector->ddc_bus) {
482 radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
483 radeon_connector->edid = drm_get_edid(&radeon_connector->base, 482 radeon_connector->edid = drm_get_edid(&radeon_connector->base,
484 &radeon_connector->ddc_bus->adapter); 483 &radeon_connector->ddc_bus->adapter);
485 radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
486 if (radeon_connector->edid) 484 if (radeon_connector->edid)
487 ret = connector_status_connected; 485 ret = connector_status_connected;
488 } 486 }
@@ -587,19 +585,14 @@ static enum drm_connector_status radeon_vga_detect(struct drm_connector *connect
587 if (!encoder) 585 if (!encoder)
588 ret = connector_status_disconnected; 586 ret = connector_status_disconnected;
589 587
590 if (radeon_connector->ddc_bus) { 588 if (radeon_connector->ddc_bus)
591 radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
592 dret = radeon_ddc_probe(radeon_connector); 589 dret = radeon_ddc_probe(radeon_connector);
593 radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
594 }
595 if (dret) { 590 if (dret) {
596 if (radeon_connector->edid) { 591 if (radeon_connector->edid) {
597 kfree(radeon_connector->edid); 592 kfree(radeon_connector->edid);
598 radeon_connector->edid = NULL; 593 radeon_connector->edid = NULL;
599 } 594 }
600 radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
601 radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter); 595 radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter);
602 radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
603 596
604 if (!radeon_connector->edid) { 597 if (!radeon_connector->edid) {
605 DRM_ERROR("%s: probed a monitor but no|invalid EDID\n", 598 DRM_ERROR("%s: probed a monitor but no|invalid EDID\n",
@@ -744,19 +737,14 @@ static enum drm_connector_status radeon_dvi_detect(struct drm_connector *connect
744 enum drm_connector_status ret = connector_status_disconnected; 737 enum drm_connector_status ret = connector_status_disconnected;
745 bool dret = false; 738 bool dret = false;
746 739
747 if (radeon_connector->ddc_bus) { 740 if (radeon_connector->ddc_bus)
748 radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
749 dret = radeon_ddc_probe(radeon_connector); 741 dret = radeon_ddc_probe(radeon_connector);
750 radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
751 }
752 if (dret) { 742 if (dret) {
753 if (radeon_connector->edid) { 743 if (radeon_connector->edid) {
754 kfree(radeon_connector->edid); 744 kfree(radeon_connector->edid);
755 radeon_connector->edid = NULL; 745 radeon_connector->edid = NULL;
756 } 746 }
757 radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
758 radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter); 747 radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter);
759 radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
760 748
761 if (!radeon_connector->edid) { 749 if (!radeon_connector->edid) {
762 DRM_ERROR("%s: probed a monitor but no|invalid EDID\n", 750 DRM_ERROR("%s: probed a monitor but no|invalid EDID\n",
@@ -952,7 +940,7 @@ static void radeon_dp_connector_destroy(struct drm_connector *connector)
952 if (radeon_connector->edid) 940 if (radeon_connector->edid)
953 kfree(radeon_connector->edid); 941 kfree(radeon_connector->edid);
954 if (radeon_dig_connector->dp_i2c_bus) 942 if (radeon_dig_connector->dp_i2c_bus)
955 radeon_i2c_destroy(radeon_dig_connector->dp_i2c_bus); 943 radeon_i2c_destroy_dp(radeon_dig_connector->dp_i2c_bus);
956 kfree(radeon_connector->con_priv); 944 kfree(radeon_connector->con_priv);
957 drm_sysfs_connector_remove(connector); 945 drm_sysfs_connector_remove(connector);
958 drm_connector_cleanup(connector); 946 drm_connector_cleanup(connector);
@@ -988,12 +976,10 @@ static enum drm_connector_status radeon_dp_detect(struct drm_connector *connecto
988 ret = connector_status_connected; 976 ret = connector_status_connected;
989 } 977 }
990 } else { 978 } else {
991 radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
992 if (radeon_ddc_probe(radeon_connector)) { 979 if (radeon_ddc_probe(radeon_connector)) {
993 radeon_dig_connector->dp_sink_type = sink_type; 980 radeon_dig_connector->dp_sink_type = sink_type;
994 ret = connector_status_connected; 981 ret = connector_status_connected;
995 } 982 }
996 radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
997 } 983 }
998 984
999 return ret; 985 return ret;
diff --git a/drivers/gpu/drm/radeon/radeon_cp.c b/drivers/gpu/drm/radeon/radeon_cp.c
index 06123ba31d31..dc6eba6b96dd 100644
--- a/drivers/gpu/drm/radeon/radeon_cp.c
+++ b/drivers/gpu/drm/radeon/radeon_cp.c
@@ -1644,6 +1644,7 @@ static int radeon_do_resume_cp(struct drm_device *dev, struct drm_file *file_pri
1644 radeon_cp_load_microcode(dev_priv); 1644 radeon_cp_load_microcode(dev_priv);
1645 radeon_cp_init_ring_buffer(dev, dev_priv, file_priv); 1645 radeon_cp_init_ring_buffer(dev, dev_priv, file_priv);
1646 1646
1647 dev_priv->have_z_offset = 0;
1647 radeon_do_engine_reset(dev); 1648 radeon_do_engine_reset(dev);
1648 radeon_irq_set_state(dev, RADEON_SW_INT_ENABLE, 1); 1649 radeon_irq_set_state(dev, RADEON_SW_INT_ENABLE, 1);
1649 1650
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index e9d085021c1f..70ba02ed7723 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -194,11 +194,8 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error)
194 } 194 }
195 radeon_bo_list_unreserve(&parser->validated); 195 radeon_bo_list_unreserve(&parser->validated);
196 for (i = 0; i < parser->nrelocs; i++) { 196 for (i = 0; i < parser->nrelocs; i++) {
197 if (parser->relocs[i].gobj) { 197 if (parser->relocs[i].gobj)
198 mutex_lock(&parser->rdev->ddev->struct_mutex); 198 drm_gem_object_unreference_unlocked(parser->relocs[i].gobj);
199 drm_gem_object_unreference(parser->relocs[i].gobj);
200 mutex_unlock(&parser->rdev->ddev->struct_mutex);
201 }
202 } 199 }
203 kfree(parser->track); 200 kfree(parser->track);
204 kfree(parser->relocs); 201 kfree(parser->relocs);
diff --git a/drivers/gpu/drm/radeon/radeon_cursor.c b/drivers/gpu/drm/radeon/radeon_cursor.c
index 28772a37009c..b7023fff89eb 100644
--- a/drivers/gpu/drm/radeon/radeon_cursor.c
+++ b/drivers/gpu/drm/radeon/radeon_cursor.c
@@ -36,7 +36,14 @@ static void radeon_lock_cursor(struct drm_crtc *crtc, bool lock)
36 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); 36 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
37 uint32_t cur_lock; 37 uint32_t cur_lock;
38 38
39 if (ASIC_IS_AVIVO(rdev)) { 39 if (ASIC_IS_DCE4(rdev)) {
40 cur_lock = RREG32(EVERGREEN_CUR_UPDATE + radeon_crtc->crtc_offset);
41 if (lock)
42 cur_lock |= EVERGREEN_CURSOR_UPDATE_LOCK;
43 else
44 cur_lock &= ~EVERGREEN_CURSOR_UPDATE_LOCK;
45 WREG32(EVERGREEN_CUR_UPDATE + radeon_crtc->crtc_offset, cur_lock);
46 } else if (ASIC_IS_AVIVO(rdev)) {
40 cur_lock = RREG32(AVIVO_D1CUR_UPDATE + radeon_crtc->crtc_offset); 47 cur_lock = RREG32(AVIVO_D1CUR_UPDATE + radeon_crtc->crtc_offset);
41 if (lock) 48 if (lock)
42 cur_lock |= AVIVO_D1CURSOR_UPDATE_LOCK; 49 cur_lock |= AVIVO_D1CURSOR_UPDATE_LOCK;
@@ -58,7 +65,10 @@ static void radeon_hide_cursor(struct drm_crtc *crtc)
58 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); 65 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
59 struct radeon_device *rdev = crtc->dev->dev_private; 66 struct radeon_device *rdev = crtc->dev->dev_private;
60 67
61 if (ASIC_IS_AVIVO(rdev)) { 68 if (ASIC_IS_DCE4(rdev)) {
69 WREG32(RADEON_MM_INDEX, EVERGREEN_CUR_CONTROL + radeon_crtc->crtc_offset);
70 WREG32(RADEON_MM_DATA, EVERGREEN_CURSOR_MODE(EVERGREEN_CURSOR_24_8_PRE_MULT));
71 } else if (ASIC_IS_AVIVO(rdev)) {
62 WREG32(RADEON_MM_INDEX, AVIVO_D1CUR_CONTROL + radeon_crtc->crtc_offset); 72 WREG32(RADEON_MM_INDEX, AVIVO_D1CUR_CONTROL + radeon_crtc->crtc_offset);
63 WREG32(RADEON_MM_DATA, (AVIVO_D1CURSOR_MODE_24BPP << AVIVO_D1CURSOR_MODE_SHIFT)); 73 WREG32(RADEON_MM_DATA, (AVIVO_D1CURSOR_MODE_24BPP << AVIVO_D1CURSOR_MODE_SHIFT));
64 } else { 74 } else {
@@ -81,10 +91,14 @@ static void radeon_show_cursor(struct drm_crtc *crtc)
81 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); 91 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
82 struct radeon_device *rdev = crtc->dev->dev_private; 92 struct radeon_device *rdev = crtc->dev->dev_private;
83 93
84 if (ASIC_IS_AVIVO(rdev)) { 94 if (ASIC_IS_DCE4(rdev)) {
95 WREG32(RADEON_MM_INDEX, EVERGREEN_CUR_CONTROL + radeon_crtc->crtc_offset);
96 WREG32(RADEON_MM_DATA, EVERGREEN_CURSOR_EN |
97 EVERGREEN_CURSOR_MODE(EVERGREEN_CURSOR_24_8_PRE_MULT));
98 } else if (ASIC_IS_AVIVO(rdev)) {
85 WREG32(RADEON_MM_INDEX, AVIVO_D1CUR_CONTROL + radeon_crtc->crtc_offset); 99 WREG32(RADEON_MM_INDEX, AVIVO_D1CUR_CONTROL + radeon_crtc->crtc_offset);
86 WREG32(RADEON_MM_DATA, AVIVO_D1CURSOR_EN | 100 WREG32(RADEON_MM_DATA, AVIVO_D1CURSOR_EN |
87 (AVIVO_D1CURSOR_MODE_24BPP << AVIVO_D1CURSOR_MODE_SHIFT)); 101 (AVIVO_D1CURSOR_MODE_24BPP << AVIVO_D1CURSOR_MODE_SHIFT));
88 } else { 102 } else {
89 switch (radeon_crtc->crtc_id) { 103 switch (radeon_crtc->crtc_id) {
90 case 0: 104 case 0:
@@ -109,7 +123,10 @@ static void radeon_set_cursor(struct drm_crtc *crtc, struct drm_gem_object *obj,
109 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); 123 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
110 struct radeon_device *rdev = crtc->dev->dev_private; 124 struct radeon_device *rdev = crtc->dev->dev_private;
111 125
112 if (ASIC_IS_AVIVO(rdev)) { 126 if (ASIC_IS_DCE4(rdev)) {
127 WREG32(EVERGREEN_CUR_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset, 0);
128 WREG32(EVERGREEN_CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset, gpu_addr);
129 } else if (ASIC_IS_AVIVO(rdev)) {
113 if (rdev->family >= CHIP_RV770) { 130 if (rdev->family >= CHIP_RV770) {
114 if (radeon_crtc->crtc_id) 131 if (radeon_crtc->crtc_id)
115 WREG32(R700_D2CUR_SURFACE_ADDRESS_HIGH, 0); 132 WREG32(R700_D2CUR_SURFACE_ADDRESS_HIGH, 0);
@@ -169,17 +186,13 @@ int radeon_crtc_cursor_set(struct drm_crtc *crtc,
169unpin: 186unpin:
170 if (radeon_crtc->cursor_bo) { 187 if (radeon_crtc->cursor_bo) {
171 radeon_gem_object_unpin(radeon_crtc->cursor_bo); 188 radeon_gem_object_unpin(radeon_crtc->cursor_bo);
172 mutex_lock(&crtc->dev->struct_mutex); 189 drm_gem_object_unreference_unlocked(radeon_crtc->cursor_bo);
173 drm_gem_object_unreference(radeon_crtc->cursor_bo);
174 mutex_unlock(&crtc->dev->struct_mutex);
175 } 190 }
176 191
177 radeon_crtc->cursor_bo = obj; 192 radeon_crtc->cursor_bo = obj;
178 return 0; 193 return 0;
179fail: 194fail:
180 mutex_lock(&crtc->dev->struct_mutex); 195 drm_gem_object_unreference_unlocked(obj);
181 drm_gem_object_unreference(obj);
182 mutex_unlock(&crtc->dev->struct_mutex);
183 196
184 return 0; 197 return 0;
185} 198}
@@ -201,7 +214,20 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc,
201 yorigin = CURSOR_HEIGHT - 1; 214 yorigin = CURSOR_HEIGHT - 1;
202 215
203 radeon_lock_cursor(crtc, true); 216 radeon_lock_cursor(crtc, true);
204 if (ASIC_IS_AVIVO(rdev)) { 217 if (ASIC_IS_DCE4(rdev)) {
218 /* cursors are offset into the total surface */
219 x += crtc->x;
220 y += crtc->y;
221 DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y);
222
223 /* XXX: check if evergreen has the same issues as avivo chips */
224 WREG32(EVERGREEN_CUR_POSITION + radeon_crtc->crtc_offset,
225 ((xorigin ? 0 : x) << 16) |
226 (yorigin ? 0 : y));
227 WREG32(EVERGREEN_CUR_HOT_SPOT + radeon_crtc->crtc_offset, (xorigin << 16) | yorigin);
228 WREG32(EVERGREEN_CUR_SIZE + radeon_crtc->crtc_offset,
229 ((radeon_crtc->cursor_width - 1) << 16) | (radeon_crtc->cursor_height - 1));
230 } else if (ASIC_IS_AVIVO(rdev)) {
205 int w = radeon_crtc->cursor_width; 231 int w = radeon_crtc->cursor_width;
206 int i = 0; 232 int i = 0;
207 struct drm_crtc *crtc_p; 233 struct drm_crtc *crtc_p;
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 768b1509fa03..e28e4ed5f720 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -30,6 +30,7 @@
30#include <drm/drm_crtc_helper.h> 30#include <drm/drm_crtc_helper.h>
31#include <drm/radeon_drm.h> 31#include <drm/radeon_drm.h>
32#include <linux/vgaarb.h> 32#include <linux/vgaarb.h>
33#include <linux/vga_switcheroo.h>
33#include "radeon_reg.h" 34#include "radeon_reg.h"
34#include "radeon.h" 35#include "radeon.h"
35#include "radeon_asic.h" 36#include "radeon_asic.h"
@@ -100,80 +101,103 @@ void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg)
100 } 101 }
101} 102}
102 103
103/* 104/**
104 * MC common functions 105 * radeon_vram_location - try to find VRAM location
106 * @rdev: radeon device structure holding all necessary information
107 * @mc: memory controller structure holding memory information
108 * @base: base address at which to put VRAM
109 *
110 * Function will try to place VRAM at the base address provided
111 * as a parameter (which is so far either the PCI aperture address
112 * or, for IGP, the TOM base address).
113 *
114 * If there is not enough space to fit the invisible VRAM in the 32-bit
115 * address space, then we limit the VRAM size to the aperture.
116 *
117 * If we are using AGP and the AGP aperture doesn't allow us to have
118 * room for all the VRAM, then we restrict the VRAM to the PCI aperture
119 * size and print a warning.
120 *
121 * This function never fails; the worst case is limiting VRAM.
122 *
123 * Note: GTT start, end, and size should be initialized before calling this
124 * function on AGP platforms.
125 *
126 * Note: We don't explicitly enforce VRAM start to be aligned on VRAM size;
127 * this shouldn't be a problem as we are using the PCI aperture as a reference.
128 * Otherwise this would be needed for rv280, all r3xx, and all r4xx, but
129 * not IGP.
130 *
131 * Note: we use mc_vram_size because on some boards we need to program the MC
132 * to cover the whole aperture even if the VRAM size is smaller than the
133 * aperture size (Novell bug 204882, along with lots of Ubuntu ones).
134 *
135 * Note: when limiting VRAM it's safe to overwrite real_vram_size because
136 * we are not in the case where real_vram_size is smaller than mc_vram_size
137 * (i.e. not affected by the bogus hardware of Novell bug 204882, along
138 * with lots of Ubuntu ones).
139 *
140 * Note: the IGP TOM address should be the same as the aperture address; we
141 * don't explicitly check for that, though.
142 *
143 * FIXME: when reducing VRAM size align new size on power of 2.
144 */
145void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base)
146{
147 mc->vram_start = base;
148 if (mc->mc_vram_size > (0xFFFFFFFF - base + 1)) {
149 dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
150 mc->real_vram_size = mc->aper_size;
151 mc->mc_vram_size = mc->aper_size;
152 }
153 mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
154 if (rdev->flags & RADEON_IS_AGP && mc->vram_end > mc->gtt_start && mc->vram_end <= mc->gtt_end) {
155 dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
156 mc->real_vram_size = mc->aper_size;
157 mc->mc_vram_size = mc->aper_size;
158 }
159 mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
160 dev_info(rdev->dev, "VRAM: %lluM 0x%08llX - 0x%08llX (%lluM used)\n",
161 mc->mc_vram_size >> 20, mc->vram_start,
162 mc->vram_end, mc->real_vram_size >> 20);
163}
164
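As a rough illustration of the 32-bit clamp above: with a hypothetical 1 GB of VRAM at base 0xE0000000, the end address would cross the 4 GB boundary, so mc_vram_size is limited to the (assumed 256 MB) aperture. A standalone sketch with invented numbers:

/* Standalone sketch of the 32-bit clamp in radeon_vram_location();
 * the base and sizes below are invented for illustration. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t base = 0xE0000000ULL;          /* hypothetical aperture base */
        uint64_t mc_vram_size = 1024ULL << 20;  /* 1 GB of VRAM */
        uint64_t aper_size = 256ULL << 20;      /* 256 MB PCI aperture */

        if (mc_vram_size > 0xFFFFFFFFULL - base + 1)
                mc_vram_size = aper_size;       /* same clamp as above */

        printf("VRAM: %lluM 0x%08llX - 0x%08llX\n",
               (unsigned long long)(mc_vram_size >> 20),
               (unsigned long long)base,
               (unsigned long long)(base + mc_vram_size - 1));
        return 0;
}
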
165/**
166 * radeon_gtt_location - try to find GTT location
167 * @rdev: radeon device structure holding all necessary information
168 * @mc: memory controller structure holding memory information
169 *
170 * Function will try to place GTT before or after VRAM.
171 *
172 * If the GTT size is bigger than the space left, then we adjust the GTT size.
173 * Thus this function never fails.
174 *
175 * FIXME: when reducing GTT size align new size on power of 2.
105 */ 176 */
106int radeon_mc_setup(struct radeon_device *rdev) 177void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
107{ 178{
108 uint32_t tmp; 179 u64 size_af, size_bf;
109 180
110 /* Some chips have an "issue" with the memory controller, the 181 size_af = 0xFFFFFFFF - mc->vram_end;
111 * location must be aligned to the size. We just align it down, 182 size_bf = mc->vram_start;
112 * too bad if we walk over the top of system memory, we don't 183 if (size_bf > size_af) {
113 * use DMA without a remapped anyway. 184 if (mc->gtt_size > size_bf) {
114 * Affected chips are rv280, all r3xx, and all r4xx, but not IGP 185 dev_warn(rdev->dev, "limiting GTT\n");
115 */ 186 mc->gtt_size = size_bf;
116 /* FGLRX seems to setup like this, VRAM a 0, then GART.
117 */
118 /*
119 * Note: from R6xx the address space is 40bits but here we only
120 * use 32bits (still have to see a card which would exhaust 4G
121 * address space).
122 */
123 if (rdev->mc.vram_location != 0xFFFFFFFFUL) {
124 /* vram location was already setup try to put gtt after
125 * if it fits */
126 tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size;
127 tmp = (tmp + rdev->mc.gtt_size - 1) & ~(rdev->mc.gtt_size - 1);
128 if ((0xFFFFFFFFUL - tmp) >= rdev->mc.gtt_size) {
129 rdev->mc.gtt_location = tmp;
130 } else {
131 if (rdev->mc.gtt_size >= rdev->mc.vram_location) {
132 printk(KERN_ERR "[drm] GTT too big to fit "
133 "before or after vram location.\n");
134 return -EINVAL;
135 }
136 rdev->mc.gtt_location = 0;
137 }
138 } else if (rdev->mc.gtt_location != 0xFFFFFFFFUL) {
139 /* gtt location was already setup try to put vram before
140 * if it fits */
141 if (rdev->mc.mc_vram_size < rdev->mc.gtt_location) {
142 rdev->mc.vram_location = 0;
143 } else {
144 tmp = rdev->mc.gtt_location + rdev->mc.gtt_size;
145 tmp += (rdev->mc.mc_vram_size - 1);
146 tmp &= ~(rdev->mc.mc_vram_size - 1);
147 if ((0xFFFFFFFFUL - tmp) >= rdev->mc.mc_vram_size) {
148 rdev->mc.vram_location = tmp;
149 } else {
150 printk(KERN_ERR "[drm] vram too big to fit "
151 "before or after GTT location.\n");
152 return -EINVAL;
153 }
154 } 187 }
188 mc->gtt_start = mc->vram_start - mc->gtt_size;
155 } else { 189 } else {
156 rdev->mc.vram_location = 0; 190 if (mc->gtt_size > size_af) {
157 tmp = rdev->mc.mc_vram_size; 191 dev_warn(rdev->dev, "limiting GTT\n");
158 tmp = (tmp + rdev->mc.gtt_size - 1) & ~(rdev->mc.gtt_size - 1); 192 mc->gtt_size = size_af;
159 rdev->mc.gtt_location = tmp; 193 }
160 } 194 mc->gtt_start = mc->vram_end + 1;
161 rdev->mc.vram_start = rdev->mc.vram_location; 195 }
162 rdev->mc.vram_end = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1; 196 mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
163 rdev->mc.gtt_start = rdev->mc.gtt_location; 197 dev_info(rdev->dev, "GTT: %lluM 0x%08llX - 0x%08llX\n",
164 rdev->mc.gtt_end = rdev->mc.gtt_location + rdev->mc.gtt_size - 1; 198 mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end);
165 DRM_INFO("radeon: VRAM %uM\n", (unsigned)(rdev->mc.mc_vram_size >> 20));
166 DRM_INFO("radeon: VRAM from 0x%08X to 0x%08X\n",
167 (unsigned)rdev->mc.vram_location,
168 (unsigned)(rdev->mc.vram_location + rdev->mc.mc_vram_size - 1));
169 DRM_INFO("radeon: GTT %uM\n", (unsigned)(rdev->mc.gtt_size >> 20));
170 DRM_INFO("radeon: GTT from 0x%08X to 0x%08X\n",
171 (unsigned)rdev->mc.gtt_location,
172 (unsigned)(rdev->mc.gtt_location + rdev->mc.gtt_size - 1));
173 return 0;
174} 199}
175 200
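The placement rule above in miniature: the GTT goes into whichever hole around VRAM is larger (before it or after it), shrinking if it still doesn't fit. The VRAM range and GTT size below are invented examples:

/* Standalone sketch of the GTT before/after placement above;
 * the VRAM range and GTT size are invented for illustration. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t vram_start = 0, vram_end = (256ULL << 20) - 1; /* 256M at 0 */
        uint64_t size_af = 0xFFFFFFFFULL - vram_end;    /* hole after VRAM */
        uint64_t size_bf = vram_start;                  /* hole before VRAM */
        uint64_t gtt_size = 512ULL << 20, gtt_start;

        if (size_bf > size_af) {
                if (gtt_size > size_bf)
                        gtt_size = size_bf;     /* limit GTT */
                gtt_start = vram_start - gtt_size;
        } else {
                if (gtt_size > size_af)
                        gtt_size = size_af;     /* limit GTT */
                gtt_start = vram_end + 1;
        }
        printf("GTT: %lluM 0x%08llX - 0x%08llX\n",
               (unsigned long long)(gtt_size >> 20),
               (unsigned long long)gtt_start,
               (unsigned long long)(gtt_start + gtt_size - 1));
        return 0;
}
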
176
177/* 201/*
178 * GPU helpers function. 202 * GPU helpers function.
179 */ 203 */
@@ -182,7 +206,16 @@ bool radeon_card_posted(struct radeon_device *rdev)
182 uint32_t reg; 206 uint32_t reg;
183 207
184 /* first check CRTCs */ 208 /* first check CRTCs */
185 if (ASIC_IS_AVIVO(rdev)) { 209 if (ASIC_IS_DCE4(rdev)) {
210 reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
211 RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET) |
212 RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) |
213 RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET) |
214 RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) |
215 RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
216 if (reg & EVERGREEN_CRTC_MASTER_EN)
217 return true;
218 } else if (ASIC_IS_AVIVO(rdev)) {
186 reg = RREG32(AVIVO_D1CRTC_CONTROL) | 219 reg = RREG32(AVIVO_D1CRTC_CONTROL) |
187 RREG32(AVIVO_D2CRTC_CONTROL); 220 RREG32(AVIVO_D2CRTC_CONTROL);
188 if (reg & AVIVO_CRTC_EN) { 221 if (reg & AVIVO_CRTC_EN) {
@@ -229,6 +262,8 @@ bool radeon_boot_test_post_card(struct radeon_device *rdev)
229 262
230int radeon_dummy_page_init(struct radeon_device *rdev) 263int radeon_dummy_page_init(struct radeon_device *rdev)
231{ 264{
265 if (rdev->dummy_page.page)
266 return 0;
232 rdev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO); 267 rdev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
233 if (rdev->dummy_page.page == NULL) 268 if (rdev->dummy_page.page == NULL)
234 return -ENOMEM; 269 return -ENOMEM;
@@ -310,7 +345,7 @@ void radeon_register_accessor_init(struct radeon_device *rdev)
310 rdev->mc_rreg = &rs600_mc_rreg; 345 rdev->mc_rreg = &rs600_mc_rreg;
311 rdev->mc_wreg = &rs600_mc_wreg; 346 rdev->mc_wreg = &rs600_mc_wreg;
312 } 347 }
313 if (rdev->family >= CHIP_R600) { 348 if ((rdev->family >= CHIP_R600) && (rdev->family <= CHIP_RV740)) {
314 rdev->pciep_rreg = &r600_pciep_rreg; 349 rdev->pciep_rreg = &r600_pciep_rreg;
315 rdev->pciep_wreg = &r600_pciep_wreg; 350 rdev->pciep_wreg = &r600_pciep_wreg;
316 } 351 }
@@ -329,21 +364,22 @@ int radeon_asic_init(struct radeon_device *rdev)
329 case CHIP_RS100: 364 case CHIP_RS100:
330 case CHIP_RV200: 365 case CHIP_RV200:
331 case CHIP_RS200: 366 case CHIP_RS200:
367 rdev->asic = &r100_asic;
368 break;
332 case CHIP_R200: 369 case CHIP_R200:
333 case CHIP_RV250: 370 case CHIP_RV250:
334 case CHIP_RS300: 371 case CHIP_RS300:
335 case CHIP_RV280: 372 case CHIP_RV280:
336 rdev->asic = &r100_asic; 373 rdev->asic = &r200_asic;
337 break; 374 break;
338 case CHIP_R300: 375 case CHIP_R300:
339 case CHIP_R350: 376 case CHIP_R350:
340 case CHIP_RV350: 377 case CHIP_RV350:
341 case CHIP_RV380: 378 case CHIP_RV380:
342 rdev->asic = &r300_asic; 379 if (rdev->flags & RADEON_IS_PCIE)
343 if (rdev->flags & RADEON_IS_PCIE) { 380 rdev->asic = &r300_asic_pcie;
344 rdev->asic->gart_tlb_flush = &rv370_pcie_gart_tlb_flush; 381 else
345 rdev->asic->gart_set_page = &rv370_pcie_gart_set_page; 382 rdev->asic = &r300_asic;
346 }
347 break; 383 break;
348 case CHIP_R420: 384 case CHIP_R420:
349 case CHIP_R423: 385 case CHIP_R423:
@@ -387,6 +423,13 @@ int radeon_asic_init(struct radeon_device *rdev)
387 case CHIP_RV740: 423 case CHIP_RV740:
388 rdev->asic = &rv770_asic; 424 rdev->asic = &rv770_asic;
389 break; 425 break;
426 case CHIP_CEDAR:
427 case CHIP_REDWOOD:
428 case CHIP_JUNIPER:
429 case CHIP_CYPRESS:
430 case CHIP_HEMLOCK:
431 rdev->asic = &evergreen_asic;
432 break;
390 default: 433 default:
391 /* FIXME: not supported yet */ 434 /* FIXME: not supported yet */
392 return -EINVAL; 435 return -EINVAL;
@@ -613,6 +656,36 @@ void radeon_check_arguments(struct radeon_device *rdev)
613 } 656 }
614} 657}
615 658
659static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
660{
661 struct drm_device *dev = pci_get_drvdata(pdev);
662 struct radeon_device *rdev = dev->dev_private;
663 pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
664 if (state == VGA_SWITCHEROO_ON) {
665 printk(KERN_INFO "radeon: switched on\n");
666 /* don't suspend or resume card normally */
667 rdev->powered_down = false;
668 radeon_resume_kms(dev);
669 } else {
670 printk(KERN_INFO "radeon: switched off\n");
671 radeon_suspend_kms(dev, pmm);
672 /* don't suspend or resume card normally */
673 rdev->powered_down = true;
674 }
675}
676
677static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
678{
679 struct drm_device *dev = pci_get_drvdata(pdev);
680 bool can_switch;
681
682 spin_lock(&dev->count_lock);
683 can_switch = (dev->open_count == 0);
684 spin_unlock(&dev->count_lock);
685 return can_switch;
686}
687
688
616int radeon_device_init(struct radeon_device *rdev, 689int radeon_device_init(struct radeon_device *rdev,
617 struct drm_device *ddev, 690 struct drm_device *ddev,
618 struct pci_dev *pdev, 691 struct pci_dev *pdev,
@@ -638,11 +711,14 @@ int radeon_device_init(struct radeon_device *rdev,
638 mutex_init(&rdev->cs_mutex); 711 mutex_init(&rdev->cs_mutex);
639 mutex_init(&rdev->ib_pool.mutex); 712 mutex_init(&rdev->ib_pool.mutex);
640 mutex_init(&rdev->cp.mutex); 713 mutex_init(&rdev->cp.mutex);
714 mutex_init(&rdev->dc_hw_i2c_mutex);
641 if (rdev->family >= CHIP_R600) 715 if (rdev->family >= CHIP_R600)
642 spin_lock_init(&rdev->ih.lock); 716 spin_lock_init(&rdev->ih.lock);
643 mutex_init(&rdev->gem.mutex); 717 mutex_init(&rdev->gem.mutex);
718 mutex_init(&rdev->pm.mutex);
644 rwlock_init(&rdev->fence_drv.lock); 719 rwlock_init(&rdev->fence_drv.lock);
645 INIT_LIST_HEAD(&rdev->gem.objects); 720 INIT_LIST_HEAD(&rdev->gem.objects);
721 init_waitqueue_head(&rdev->irq.vblank_queue);
646 722
647 /* setup workqueue */ 723 /* setup workqueue */
648 rdev->wq = create_workqueue("radeon"); 724 rdev->wq = create_workqueue("radeon");
@@ -692,6 +768,9 @@ int radeon_device_init(struct radeon_device *rdev,
692 /* this will fail for cards that aren't VGA class devices, just 768 /* this will fail for cards that aren't VGA class devices, just
693 * ignore it */ 769 * ignore it */
694 vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode); 770 vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode);
771 vga_switcheroo_register_client(rdev->pdev,
772 radeon_switcheroo_set_state,
773 radeon_switcheroo_can_switch);
695 774
696 r = radeon_init(rdev); 775 r = radeon_init(rdev);
697 if (r) 776 if (r)
@@ -723,6 +802,7 @@ void radeon_device_fini(struct radeon_device *rdev)
723 rdev->shutdown = true; 802 rdev->shutdown = true;
724 radeon_fini(rdev); 803 radeon_fini(rdev);
725 destroy_workqueue(rdev->wq); 804 destroy_workqueue(rdev->wq);
805 vga_switcheroo_unregister_client(rdev->pdev);
726 vga_client_register(rdev->pdev, NULL, NULL, NULL); 806 vga_client_register(rdev->pdev, NULL, NULL, NULL);
727 iounmap(rdev->rmmio); 807 iounmap(rdev->rmmio);
728 rdev->rmmio = NULL; 808 rdev->rmmio = NULL;
@@ -746,6 +826,8 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
746 } 826 }
747 rdev = dev->dev_private; 827 rdev = dev->dev_private;
748 828
829 if (rdev->powered_down)
830 return 0;
749 /* unpin the front buffers */ 831 /* unpin the front buffers */
750 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 832 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
751 struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->fb); 833 struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->fb);
@@ -791,6 +873,9 @@ int radeon_resume_kms(struct drm_device *dev)
791{ 873{
792 struct radeon_device *rdev = dev->dev_private; 874 struct radeon_device *rdev = dev->dev_private;
793 875
876 if (rdev->powered_down)
877 return 0;
878
794 acquire_console_sem(); 879 acquire_console_sem();
795 pci_set_power_state(dev->pdev, PCI_D0); 880 pci_set_power_state(dev->pdev, PCI_D0);
796 pci_restore_state(dev->pdev); 881 pci_restore_state(dev->pdev);
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 7e17a362b54b..ba8d806dcf39 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -68,6 +68,36 @@ static void avivo_crtc_load_lut(struct drm_crtc *crtc)
68 WREG32(AVIVO_D1GRPH_LUT_SEL + radeon_crtc->crtc_offset, radeon_crtc->crtc_id); 68 WREG32(AVIVO_D1GRPH_LUT_SEL + radeon_crtc->crtc_offset, radeon_crtc->crtc_id);
69} 69}
70 70
71static void evergreen_crtc_load_lut(struct drm_crtc *crtc)
72{
73 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
74 struct drm_device *dev = crtc->dev;
75 struct radeon_device *rdev = dev->dev_private;
76 int i;
77
78 DRM_DEBUG("%d\n", radeon_crtc->crtc_id);
79 WREG32(EVERGREEN_DC_LUT_CONTROL + radeon_crtc->crtc_offset, 0);
80
81 WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_BLUE + radeon_crtc->crtc_offset, 0);
82 WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_GREEN + radeon_crtc->crtc_offset, 0);
83 WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_RED + radeon_crtc->crtc_offset, 0);
84
85 WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_BLUE + radeon_crtc->crtc_offset, 0xffff);
86 WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_GREEN + radeon_crtc->crtc_offset, 0xffff);
87 WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_RED + radeon_crtc->crtc_offset, 0xffff);
88
89 WREG32(EVERGREEN_DC_LUT_RW_MODE, radeon_crtc->crtc_id);
90 WREG32(EVERGREEN_DC_LUT_WRITE_EN_MASK, 0x00000007);
91
92 WREG32(EVERGREEN_DC_LUT_RW_INDEX, 0);
93 for (i = 0; i < 256; i++) {
94 WREG32(EVERGREEN_DC_LUT_30_COLOR,
95 (radeon_crtc->lut_r[i] << 20) |
96 (radeon_crtc->lut_g[i] << 10) |
97 (radeon_crtc->lut_b[i] << 0));
98 }
99}
100
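Each EVERGREEN_DC_LUT_30_COLOR write above packs three 10-bit channels into one 30-bit word (red in bits 29:20, green in 19:10, blue in 9:0). A standalone sketch of the packing:

/* Sketch: one 30-bit LUT entry as written to EVERGREEN_DC_LUT_30_COLOR;
 * channel values are 10-bit (0-1023). */
#include <stdint.h>
#include <stdio.h>

static uint32_t lut_30_color(uint16_t r, uint16_t g, uint16_t b)
{
        return ((uint32_t)(r & 0x3ff) << 20) |
               ((uint32_t)(g & 0x3ff) << 10) |
               (uint32_t)(b & 0x3ff);
}

int main(void)
{
        /* full-scale white: all three channels at maximum */
        printf("0x%08x\n", lut_30_color(1023, 1023, 1023)); /* 0x3fffffff */
        return 0;
}
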
71static void legacy_crtc_load_lut(struct drm_crtc *crtc) 101static void legacy_crtc_load_lut(struct drm_crtc *crtc)
72{ 102{
73 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); 103 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
@@ -100,7 +130,9 @@ void radeon_crtc_load_lut(struct drm_crtc *crtc)
100 if (!crtc->enabled) 130 if (!crtc->enabled)
101 return; 131 return;
102 132
103 if (ASIC_IS_AVIVO(rdev)) 133 if (ASIC_IS_DCE4(rdev))
134 evergreen_crtc_load_lut(crtc);
135 else if (ASIC_IS_AVIVO(rdev))
104 avivo_crtc_load_lut(crtc); 136 avivo_crtc_load_lut(crtc);
105 else 137 else
106 legacy_crtc_load_lut(crtc); 138 legacy_crtc_load_lut(crtc);
@@ -361,6 +393,8 @@ static bool radeon_setup_enc_conn(struct drm_device *dev)
361 393
362int radeon_ddc_get_modes(struct radeon_connector *radeon_connector) 394int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
363{ 395{
396 struct drm_device *dev = radeon_connector->base.dev;
397 struct radeon_device *rdev = dev->dev_private;
364 int ret = 0; 398 int ret = 0;
365 399
366 if ((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) || 400 if ((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
@@ -373,11 +407,11 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
373 if (!radeon_connector->ddc_bus) 407 if (!radeon_connector->ddc_bus)
374 return -1; 408 return -1;
375 if (!radeon_connector->edid) { 409 if (!radeon_connector->edid) {
376 radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
377 radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter); 410 radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter);
378 radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
379 } 411 }
380 412 /* some servers provide a hardcoded EDID in ROM for KVMs */
413 if (!radeon_connector->edid)
414 radeon_connector->edid = radeon_combios_get_hardcoded_edid(rdev);
381 if (radeon_connector->edid) { 415 if (radeon_connector->edid) {
382 drm_mode_connector_update_edid_property(&radeon_connector->base, radeon_connector->edid); 416 drm_mode_connector_update_edid_property(&radeon_connector->base, radeon_connector->edid);
383 ret = drm_add_edid_modes(&radeon_connector->base, radeon_connector->edid); 417 ret = drm_add_edid_modes(&radeon_connector->base, radeon_connector->edid);
@@ -395,9 +429,7 @@ static int radeon_ddc_dump(struct drm_connector *connector)
395 429
396 if (!radeon_connector->ddc_bus) 430 if (!radeon_connector->ddc_bus)
397 return -1; 431 return -1;
398 radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
399 edid = drm_get_edid(connector, &radeon_connector->ddc_bus->adapter); 432 edid = drm_get_edid(connector, &radeon_connector->ddc_bus->adapter);
400 radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
401 if (edid) { 433 if (edid) {
402 kfree(edid); 434 kfree(edid);
403 } 435 }
@@ -414,13 +446,13 @@ static inline uint32_t radeon_div(uint64_t n, uint32_t d)
414 return n; 446 return n;
415} 447}
416 448
417void radeon_compute_pll(struct radeon_pll *pll, 449static void radeon_compute_pll_legacy(struct radeon_pll *pll,
418 uint64_t freq, 450 uint64_t freq,
419 uint32_t *dot_clock_p, 451 uint32_t *dot_clock_p,
420 uint32_t *fb_div_p, 452 uint32_t *fb_div_p,
421 uint32_t *frac_fb_div_p, 453 uint32_t *frac_fb_div_p,
422 uint32_t *ref_div_p, 454 uint32_t *ref_div_p,
423 uint32_t *post_div_p) 455 uint32_t *post_div_p)
424{ 456{
425 uint32_t min_ref_div = pll->min_ref_div; 457 uint32_t min_ref_div = pll->min_ref_div;
426 uint32_t max_ref_div = pll->max_ref_div; 458 uint32_t max_ref_div = pll->max_ref_div;
@@ -580,95 +612,194 @@ void radeon_compute_pll(struct radeon_pll *pll,
580 *post_div_p = best_post_div; 612 *post_div_p = best_post_div;
581} 613}
582 614
583void radeon_compute_pll_avivo(struct radeon_pll *pll, 615static bool
584 uint64_t freq, 616calc_fb_div(struct radeon_pll *pll,
585 uint32_t *dot_clock_p, 617 uint32_t freq,
586 uint32_t *fb_div_p, 618 uint32_t post_div,
587 uint32_t *frac_fb_div_p, 619 uint32_t ref_div,
588 uint32_t *ref_div_p, 620 uint32_t *fb_div,
589 uint32_t *post_div_p) 621 uint32_t *fb_div_frac)
590{ 622{
591 fixed20_12 m, n, frac_n, p, f_vco, f_pclk, best_freq; 623 fixed20_12 feedback_divider, a, b;
592 fixed20_12 pll_out_max, pll_out_min; 624 u32 vco_freq;
593 fixed20_12 pll_in_max, pll_in_min; 625
594 fixed20_12 reference_freq; 626 vco_freq = freq * post_div;
595 fixed20_12 error, ffreq, a, b; 627 /* feedback_divider = vco_freq * ref_div / pll->reference_freq; */
596 628 a.full = rfixed_const(pll->reference_freq);
597 pll_out_max.full = rfixed_const(pll->pll_out_max); 629 feedback_divider.full = rfixed_const(vco_freq);
598 pll_out_min.full = rfixed_const(pll->pll_out_min); 630 feedback_divider.full = rfixed_div(feedback_divider, a);
599 pll_in_max.full = rfixed_const(pll->pll_in_max); 631 a.full = rfixed_const(ref_div);
600 pll_in_min.full = rfixed_const(pll->pll_in_min); 632 feedback_divider.full = rfixed_mul(feedback_divider, a);
601 reference_freq.full = rfixed_const(pll->reference_freq); 633
602 do_div(freq, 10); 634 if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV) {
635 /* feedback_divider = floor((feedback_divider * 10.0) + 0.5) * 0.1; */
636 a.full = rfixed_const(10);
637 feedback_divider.full = rfixed_mul(feedback_divider, a);
638 feedback_divider.full += rfixed_const_half(0);
639 feedback_divider.full = rfixed_floor(feedback_divider);
640 feedback_divider.full = rfixed_div(feedback_divider, a);
641
642 /* *fb_div = floor(feedback_divider); */
643 a.full = rfixed_floor(feedback_divider);
644 *fb_div = rfixed_trunc(a);
645 /* *fb_div_frac = fmod(feedback_divider, 1.0) * 10.0; */
646 a.full = rfixed_const(10);
647 b.full = rfixed_mul(feedback_divider, a);
648
649 feedback_divider.full = rfixed_floor(feedback_divider);
650 feedback_divider.full = rfixed_mul(feedback_divider, a);
651 feedback_divider.full = b.full - feedback_divider.full;
652 *fb_div_frac = rfixed_trunc(feedback_divider);
653 } else {
654 /* *fb_div = floor(feedback_divider + 0.5); */
655 feedback_divider.full += rfixed_const_half(0);
656 feedback_divider.full = rfixed_floor(feedback_divider);
657
658 *fb_div = rfixed_trunc(feedback_divider);
659 *fb_div_frac = 0;
660 }
661
662 if (((*fb_div) < pll->min_feedback_div) || ((*fb_div) > pll->max_feedback_div))
663 return false;
664 else
665 return true;
666}
667
668static bool
669calc_fb_ref_div(struct radeon_pll *pll,
670 uint32_t freq,
671 uint32_t post_div,
672 uint32_t *fb_div,
673 uint32_t *fb_div_frac,
674 uint32_t *ref_div)
675{
676 fixed20_12 ffreq, max_error, error, pll_out, a;
677 u32 vco;
678
603 ffreq.full = rfixed_const(freq); 679 ffreq.full = rfixed_const(freq);
604 error.full = rfixed_const(100 * 100); 680 /* max_error = ffreq * 0.0025; */
681 a.full = rfixed_const(400);
682 max_error.full = rfixed_div(ffreq, a);
605 683
606 /* max p */ 684 for ((*ref_div) = pll->min_ref_div; (*ref_div) < pll->max_ref_div; ++(*ref_div)) {
607 p.full = rfixed_div(pll_out_max, ffreq); 685 if (calc_fb_div(pll, freq, post_div, (*ref_div), fb_div, fb_div_frac)) {
608 p.full = rfixed_floor(p); 686 vco = pll->reference_freq * (((*fb_div) * 10) + (*fb_div_frac));
687 vco = vco / ((*ref_div) * 10);
609 688
610 /* min m */ 689 if ((vco < pll->pll_out_min) || (vco > pll->pll_out_max))
611 m.full = rfixed_div(reference_freq, pll_in_max); 690 continue;
612 m.full = rfixed_ceil(m);
613 691
614 while (1) { 692 /* pll_out = vco / post_div; */
615 n.full = rfixed_div(ffreq, reference_freq); 693 a.full = rfixed_const(post_div);
616 n.full = rfixed_mul(n, m); 694 pll_out.full = rfixed_const(vco);
617 n.full = rfixed_mul(n, p); 695 pll_out.full = rfixed_div(pll_out, a);
618 696
619 f_vco.full = rfixed_div(n, m); 697 if (pll_out.full >= ffreq.full) {
620 f_vco.full = rfixed_mul(f_vco, reference_freq); 698 error.full = pll_out.full - ffreq.full;
699 if (error.full <= max_error.full)
700 return true;
701 }
702 }
703 }
704 return false;
705}
621 706
622 f_pclk.full = rfixed_div(f_vco, p); 707static void radeon_compute_pll_new(struct radeon_pll *pll,
708 uint64_t freq,
709 uint32_t *dot_clock_p,
710 uint32_t *fb_div_p,
711 uint32_t *frac_fb_div_p,
712 uint32_t *ref_div_p,
713 uint32_t *post_div_p)
714{
715 u32 fb_div = 0, fb_div_frac = 0, post_div = 0, ref_div = 0;
716 u32 best_freq = 0, vco_frequency;
623 717
624 if (f_pclk.full > ffreq.full) 718 /* freq = freq / 10; */
625 error.full = f_pclk.full - ffreq.full; 719 do_div(freq, 10);
626 else
627 error.full = ffreq.full - f_pclk.full;
628 error.full = rfixed_div(error, f_pclk);
629 a.full = rfixed_const(100 * 100);
630 error.full = rfixed_mul(error, a);
631
632 a.full = rfixed_mul(m, p);
633 a.full = rfixed_div(n, a);
634 best_freq.full = rfixed_mul(reference_freq, a);
635
636 if (rfixed_trunc(error) < 25)
637 break;
638
639 a.full = rfixed_const(1);
640 m.full = m.full + a.full;
641 a.full = rfixed_div(reference_freq, m);
642 if (a.full >= pll_in_min.full)
643 continue;
644 720
645 m.full = rfixed_div(reference_freq, pll_in_max); 721 if (pll->flags & RADEON_PLL_USE_POST_DIV) {
646 m.full = rfixed_ceil(m); 722 post_div = pll->post_div;
647 a.full= rfixed_const(1); 723 if ((post_div < pll->min_post_div) || (post_div > pll->max_post_div))
648 p.full = p.full - a.full; 724 goto done;
649 a.full = rfixed_mul(p, ffreq); 725
650 if (a.full >= pll_out_min.full) 726 vco_frequency = freq * post_div;
651 continue; 727 if ((vco_frequency < pll->pll_out_min) || (vco_frequency > pll->pll_out_max))
652 else { 728 goto done;
653 DRM_ERROR("Unable to find pll dividers\n"); 729
654 break; 730 if (pll->flags & RADEON_PLL_USE_REF_DIV) {
731 ref_div = pll->reference_div;
732 if ((ref_div < pll->min_ref_div) || (ref_div > pll->max_ref_div))
733 goto done;
734 if (!calc_fb_div(pll, freq, post_div, ref_div, &fb_div, &fb_div_frac))
735 goto done;
736 }
737 } else {
738 for (post_div = pll->max_post_div; post_div >= pll->min_post_div; --post_div) {
739 if (pll->flags & RADEON_PLL_LEGACY) {
740 if ((post_div == 5) ||
741 (post_div == 7) ||
742 (post_div == 9) ||
743 (post_div == 10) ||
744 (post_div == 11))
745 continue;
746 }
747
748 if ((pll->flags & RADEON_PLL_NO_ODD_POST_DIV) && (post_div & 1))
749 continue;
750
751 vco_frequency = freq * post_div;
752 if ((vco_frequency < pll->pll_out_min) || (vco_frequency > pll->pll_out_max))
753 continue;
754 if (pll->flags & RADEON_PLL_USE_REF_DIV) {
755 ref_div = pll->reference_div;
756 if ((ref_div < pll->min_ref_div) || (ref_div > pll->max_ref_div))
757 goto done;
758 if (calc_fb_div(pll, freq, post_div, ref_div, &fb_div, &fb_div_frac))
759 break;
760 } else {
761 if (calc_fb_ref_div(pll, freq, post_div, &fb_div, &fb_div_frac, &ref_div))
762 break;
763 }
655 } 764 }
656 } 765 }
657 766
658 a.full = rfixed_const(10); 767 best_freq = pll->reference_freq * 10 * fb_div;
659 b.full = rfixed_mul(n, a); 768 best_freq += pll->reference_freq * fb_div_frac;
769 best_freq = best_freq / (ref_div * post_div);
660 770
661 frac_n.full = rfixed_floor(n); 771done:
662 frac_n.full = rfixed_mul(frac_n, a); 772 if (best_freq == 0)
663 frac_n.full = b.full - frac_n.full; 773 DRM_ERROR("Couldn't find valid PLL dividers\n");
664 774
665 *dot_clock_p = rfixed_trunc(best_freq); 775 *dot_clock_p = best_freq / 10;
666 *fb_div_p = rfixed_trunc(n); 776 *fb_div_p = fb_div;
667 *frac_fb_div_p = rfixed_trunc(frac_n); 777 *frac_fb_div_p = fb_div_frac;
668 *ref_div_p = rfixed_trunc(m); 778 *ref_div_p = ref_div;
669 *post_div_p = rfixed_trunc(p); 779 *post_div_p = post_div;
670 780
671 DRM_DEBUG("%u %d.%d, %d, %d\n", *dot_clock_p * 10, *fb_div_p, *frac_fb_div_p, *ref_div_p, *post_div_p); 781 DRM_DEBUG("%u %d.%d, %d, %d\n", *dot_clock_p, *fb_div_p, *frac_fb_div_p, *ref_div_p, *post_div_p);
782}
783
784void radeon_compute_pll(struct radeon_pll *pll,
785 uint64_t freq,
786 uint32_t *dot_clock_p,
787 uint32_t *fb_div_p,
788 uint32_t *frac_fb_div_p,
789 uint32_t *ref_div_p,
790 uint32_t *post_div_p)
791{
792 switch (pll->algo) {
793 case PLL_ALGO_NEW:
794 radeon_compute_pll_new(pll, freq, dot_clock_p, fb_div_p,
795 frac_fb_div_p, ref_div_p, post_div_p);
796 break;
797 case PLL_ALGO_LEGACY:
798 default:
799 radeon_compute_pll_legacy(pll, freq, dot_clock_p, fb_div_p,
800 frac_fb_div_p, ref_div_p, post_div_p);
801 break;
802 }
672} 803}
673 804
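For a feel of the divider math in calc_fb_div() and calc_fb_ref_div() above: vco_freq = freq * post_div, the feedback divider is vco_freq * ref_div / reference_freq (kept to tenths in the fractional path), and the VCO is reconstructed as reference_freq * (fb_div * 10 + fb_div_frac) / (ref_div * 10). A standalone integer-only sketch follows (the kernel uses fixed20_12 fixed point instead); the reference frequency and divider values are arbitrary examples, not real PLL limits.

/* Standalone, integer-only sketch of the feedback-divider math in
 * calc_fb_div(); the kernel uses fixed20_12 fixed point instead.
 * All frequencies are in 10 kHz units, divider values are made up. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t reference_freq = 2700;         /* 27 MHz reference */
        uint32_t ref_div = 6, post_div = 4;     /* example dividers */
        uint32_t freq = 10000;                  /* 100 MHz target, post do_div */

        uint32_t vco_freq = freq * post_div;
        /* feedback divider in tenths, rounded as in the FRAC_FB_DIV path */
        uint32_t fb_tenths = (vco_freq * ref_div * 10 + reference_freq / 2)
                             / reference_freq;
        uint32_t fb_div = fb_tenths / 10, fb_div_frac = fb_tenths % 10;
        /* same check as calc_fb_ref_div(): reconstruct the VCO */
        uint32_t vco = reference_freq * (fb_div * 10 + fb_div_frac)
                       / (ref_div * 10);

        printf("fb_div %u.%u vco %u dot_clock %u\n",
               fb_div, fb_div_frac, vco, vco / post_div);
        return 0;
}
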
674static void radeon_user_framebuffer_destroy(struct drm_framebuffer *fb) 805static void radeon_user_framebuffer_destroy(struct drm_framebuffer *fb)
@@ -679,11 +810,8 @@ static void radeon_user_framebuffer_destroy(struct drm_framebuffer *fb)
679 if (fb->fbdev) 810 if (fb->fbdev)
680 radeonfb_remove(dev, fb); 811 radeonfb_remove(dev, fb);
681 812
682 if (radeon_fb->obj) { 813 if (radeon_fb->obj)
683 mutex_lock(&dev->struct_mutex); 814 drm_gem_object_unreference_unlocked(radeon_fb->obj);
684 drm_gem_object_unreference(radeon_fb->obj);
685 mutex_unlock(&dev->struct_mutex);
686 }
687 drm_framebuffer_cleanup(fb); 815 drm_framebuffer_cleanup(fb);
688 kfree(radeon_fb); 816 kfree(radeon_fb);
689} 817}
@@ -819,7 +947,7 @@ static int radeon_modeset_create_props(struct radeon_device *rdev)
819 947
820int radeon_modeset_init(struct radeon_device *rdev) 948int radeon_modeset_init(struct radeon_device *rdev)
821{ 949{
822 int num_crtc = 2, i; 950 int i;
823 int ret; 951 int ret;
824 952
825 drm_mode_config_init(rdev->ddev); 953 drm_mode_config_init(rdev->ddev);
@@ -842,11 +970,23 @@ int radeon_modeset_init(struct radeon_device *rdev)
842 return ret; 970 return ret;
843 } 971 }
844 972
973 /* check combios for a valid hardcoded EDID - Sun servers */
974 if (!rdev->is_atom_bios) {
975 /* check for hardcoded EDID in BIOS */
976 radeon_combios_check_hardcoded_edid(rdev);
977 }
978
845 if (rdev->flags & RADEON_SINGLE_CRTC) 979 if (rdev->flags & RADEON_SINGLE_CRTC)
846 num_crtc = 1; 980 rdev->num_crtc = 1;
981 else {
982 if (ASIC_IS_DCE4(rdev))
983 rdev->num_crtc = 6;
984 else
985 rdev->num_crtc = 2;
986 }
847 987
848 /* allocate crtcs */ 988 /* allocate crtcs */
849 for (i = 0; i < num_crtc; i++) { 989 for (i = 0; i < rdev->num_crtc; i++) {
850 radeon_crtc_init(rdev->ddev, i); 990 radeon_crtc_init(rdev->ddev, i);
851 } 991 }
852 992
@@ -863,6 +1003,8 @@ int radeon_modeset_init(struct radeon_device *rdev)
863 1003
864void radeon_modeset_fini(struct radeon_device *rdev) 1004void radeon_modeset_fini(struct radeon_device *rdev)
865{ 1005{
1006 kfree(rdev->mode_info.bios_hardcoded_edid);
1007
866 if (rdev->mode_info.mode_config_initialized) { 1008 if (rdev->mode_info.mode_config_initialized) {
867 radeon_hpd_fini(rdev); 1009 radeon_hpd_fini(rdev);
868 drm_mode_config_cleanup(rdev->ddev); 1010 drm_mode_config_cleanup(rdev->ddev);
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 8ba3de7994d4..6eec0ece6a6c 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -40,9 +40,11 @@
40 40
41/* 41/*
42 * KMS wrapper. 42 * KMS wrapper.
43 * - 2.0.0 - initial interface
44 * - 2.1.0 - add square tiling interface
43 */ 45 */
44#define KMS_DRIVER_MAJOR 2 46#define KMS_DRIVER_MAJOR 2
45#define KMS_DRIVER_MINOR 0 47#define KMS_DRIVER_MINOR 1
46#define KMS_DRIVER_PATCHLEVEL 0 48#define KMS_DRIVER_PATCHLEVEL 0
47int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags); 49int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
48int radeon_driver_unload_kms(struct drm_device *dev); 50int radeon_driver_unload_kms(struct drm_device *dev);
@@ -86,7 +88,8 @@ int radeon_benchmarking = 0;
86int radeon_testing = 0; 88int radeon_testing = 0;
87int radeon_connector_table = 0; 89int radeon_connector_table = 0;
88int radeon_tv = 1; 90int radeon_tv = 1;
89int radeon_new_pll = 1; 91int radeon_new_pll = -1;
92int radeon_dynpm = -1;
90int radeon_audio = 1; 93int radeon_audio = 1;
91 94
92MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers"); 95MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers");
@@ -122,9 +125,12 @@ module_param_named(connector_table, radeon_connector_table, int, 0444);
122MODULE_PARM_DESC(tv, "TV enable (0 = disable)"); 125MODULE_PARM_DESC(tv, "TV enable (0 = disable)");
123module_param_named(tv, radeon_tv, int, 0444); 126module_param_named(tv, radeon_tv, int, 0444);
124 127
125MODULE_PARM_DESC(new_pll, "Select new PLL code for AVIVO chips"); 128MODULE_PARM_DESC(new_pll, "Select new PLL code");
126module_param_named(new_pll, radeon_new_pll, int, 0444); 129module_param_named(new_pll, radeon_new_pll, int, 0444);
127 130
131MODULE_PARM_DESC(dynpm, "Disable/Enable dynamic power management (1 = enable)");
132module_param_named(dynpm, radeon_dynpm, int, 0444);
133
128MODULE_PARM_DESC(audio, "Audio enable (0 = disable)"); 134MODULE_PARM_DESC(audio, "Audio enable (0 = disable)");
129module_param_named(audio, radeon_audio, int, 0444); 135module_param_named(audio, radeon_audio, int, 0444);
130 136
@@ -339,6 +345,7 @@ static int __init radeon_init(void)
339 driver = &kms_driver; 345 driver = &kms_driver;
340 driver->driver_features |= DRIVER_MODESET; 346 driver->driver_features |= DRIVER_MODESET;
341 driver->num_ioctls = radeon_max_kms_ioctl; 347 driver->num_ioctls = radeon_max_kms_ioctl;
348 radeon_register_atpx_handler();
342 } 349 }
343 /* if the vga console setting is enabled still 350 /* if the vga console setting is enabled still
344 * let modprobe override it */ 351 * let modprobe override it */
@@ -348,6 +355,7 @@ static int __init radeon_init(void)
348static void __exit radeon_exit(void) 355static void __exit radeon_exit(void)
349{ 356{
350 drm_exit(driver); 357 drm_exit(driver);
358 radeon_unregister_atpx_handler();
351} 359}
352 360
353module_init(radeon_init); 361module_init(radeon_init);
diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
index c57ad606504d..ec55f2b23c22 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.h
+++ b/drivers/gpu/drm/radeon/radeon_drv.h
@@ -268,6 +268,8 @@ typedef struct drm_radeon_private {
268 268
269 u32 scratch_ages[5]; 269 u32 scratch_ages[5];
270 270
271 int have_z_offset;
272
271 /* starting from here on, data is preserved across an open */ 273 /* starting from here on, data is preserved across an open */
272 uint32_t flags; /* see radeon_chip_flags */ 274 uint32_t flags; /* see radeon_chip_flags */
273 resource_size_t fb_aper_offset; 275 resource_size_t fb_aper_offset;
@@ -295,6 +297,9 @@ typedef struct drm_radeon_private {
295 int r700_sc_prim_fifo_size; 297 int r700_sc_prim_fifo_size;
296 int r700_sc_hiz_tile_fifo_size; 298 int r700_sc_hiz_tile_fifo_size;
297 int r700_sc_earlyz_tile_fifo_fize; 299 int r700_sc_earlyz_tile_fifo_fize;
300 int r600_group_size;
301 int r600_npipes;
302 int r600_nbanks;
298 303
299 struct mutex cs_mutex; 304 struct mutex cs_mutex;
300 u32 cs_id_scnt; 305 u32 cs_id_scnt;
@@ -310,9 +315,11 @@ typedef struct drm_radeon_buf_priv {
310 u32 age; 315 u32 age;
311} drm_radeon_buf_priv_t; 316} drm_radeon_buf_priv_t;
312 317
318struct drm_buffer;
319
313typedef struct drm_radeon_kcmd_buffer { 320typedef struct drm_radeon_kcmd_buffer {
314 int bufsz; 321 int bufsz;
315 char *buf; 322 struct drm_buffer *buffer;
316 int nbox; 323 int nbox;
317 struct drm_clip_rect __user *boxes; 324 struct drm_clip_rect __user *boxes;
318} drm_radeon_kcmd_buffer_t; 325} drm_radeon_kcmd_buffer_t;
@@ -455,6 +462,15 @@ extern void r600_blit_swap(struct drm_device *dev,
455 int sx, int sy, int dx, int dy, 462 int sx, int sy, int dx, int dy,
456 int w, int h, int src_pitch, int dst_pitch, int cpp); 463 int w, int h, int src_pitch, int dst_pitch, int cpp);
457 464
465/* atpx handler */
466#if defined(CONFIG_VGA_SWITCHEROO)
467void radeon_register_atpx_handler(void);
468void radeon_unregister_atpx_handler(void);
469#else
470static inline void radeon_register_atpx_handler(void) {}
471static inline void radeon_unregister_atpx_handler(void) {}
472#endif
473
458/* Flags for stats.boxes 474/* Flags for stats.boxes
459 */ 475 */
460#define RADEON_BOX_DMA_IDLE 0x1 476#define RADEON_BOX_DMA_IDLE 0x1
@@ -2122,4 +2138,32 @@ extern void radeon_commit_ring(drm_radeon_private_t *dev_priv);
2122 write &= mask; \ 2138 write &= mask; \
2123} while (0) 2139} while (0)
2124 2140
2141/**
2142 * Copy the given number of dwords from the drm buffer to the ring buffer.
2143 */
2144#define OUT_RING_DRM_BUFFER(buf, sz) do { \
2145 int _size = (sz) * 4; \
2146 struct drm_buffer *_buf = (buf); \
2147 int _part_size; \
2148 while (_size > 0) { \
2149 _part_size = _size; \
2150 \
2151 if (write + _part_size/4 > mask) \
2152 _part_size = ((mask + 1) - write)*4; \
2153 \
2154 if (drm_buffer_index(_buf) + _part_size > PAGE_SIZE) \
2155 _part_size = PAGE_SIZE - drm_buffer_index(_buf);\
2156 \
2157 \
2158 \
2159 memcpy(ring + write, &_buf->data[drm_buffer_page(_buf)] \
2160 [drm_buffer_index(_buf)], _part_size); \
2161 \
2162 _size -= _part_size; \
2163 write = (write + _part_size/4) & mask; \
2164 drm_buffer_advance(_buf, _part_size); \
2165 } \
2166} while (0)
2167
2168
2125#endif /* __RADEON_DRV_H__ */ 2169#endif /* __RADEON_DRV_H__ */
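
For illustration, a minimal standalone sketch of the split-copy logic the new OUT_RING_DRM_BUFFER macro implements: each memcpy() is sized so it never crosses the ring wrap point or a source page boundary. All names below are illustrative, not the kernel's.

/* toy model of the macro above; assumes a power-of-two ring size */
#include <string.h>

#define RING_ENTRIES 1024
#define PAGE_BYTES   4096

static unsigned int ring[RING_ENTRIES];
static unsigned int write_idx;		/* write position, in dwords */

static void ring_copy(const unsigned char *src, unsigned int src_off,
		      unsigned int bytes)
{
	const unsigned int mask = RING_ENTRIES - 1;

	while (bytes > 0) {
		unsigned int chunk = bytes;

		/* clamp at the end of the ring, like the write > mask test */
		if (write_idx + chunk / 4 > mask)
			chunk = (RING_ENTRIES - write_idx) * 4;
		/* clamp at a source page boundary, like the PAGE_SIZE test */
		if (src_off % PAGE_BYTES + chunk > PAGE_BYTES)
			chunk = PAGE_BYTES - src_off % PAGE_BYTES;

		memcpy(&ring[write_idx], src + src_off, chunk);
		bytes -= chunk;
		src_off += chunk;
		write_idx = (write_idx + chunk / 4) & mask;
	}
}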
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index 3c91724457ca..bc926ea0a530 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -53,7 +53,7 @@ static uint32_t radeon_encoder_clones(struct drm_encoder *encoder)
53 /* DVO requires 2x ppll clocks depending on tmds chip */ 53 /* DVO requires 2x ppll clocks depending on tmds chip */
54 if (radeon_encoder->devices & ATOM_DEVICE_DFP2_SUPPORT) 54 if (radeon_encoder->devices & ATOM_DEVICE_DFP2_SUPPORT)
55 return index_mask; 55 return index_mask;
56 56
57 count = -1; 57 count = -1;
58 list_for_each_entry(clone_encoder, &dev->mode_config.encoder_list, head) { 58 list_for_each_entry(clone_encoder, &dev->mode_config.encoder_list, head) {
59 struct radeon_encoder *radeon_clone = to_radeon_encoder(clone_encoder); 59 struct radeon_encoder *radeon_clone = to_radeon_encoder(clone_encoder);
@@ -228,6 +228,32 @@ radeon_get_connector_for_encoder(struct drm_encoder *encoder)
228 return NULL; 228 return NULL;
229} 229}
230 230
231static struct radeon_connector_atom_dig *
232radeon_get_atom_connector_priv_from_encoder(struct drm_encoder *encoder)
233{
234 struct drm_device *dev = encoder->dev;
235 struct radeon_device *rdev = dev->dev_private;
236 struct drm_connector *connector;
237 struct radeon_connector *radeon_connector;
238 struct radeon_connector_atom_dig *dig_connector;
239
240 if (!rdev->is_atom_bios)
241 return NULL;
242
243 connector = radeon_get_connector_for_encoder(encoder);
244 if (!connector)
245 return NULL;
246
247 radeon_connector = to_radeon_connector(connector);
248
249 if (!radeon_connector->con_priv)
250 return NULL;
251
252 dig_connector = radeon_connector->con_priv;
253
254 return dig_connector;
255}
256
231static bool radeon_atom_mode_fixup(struct drm_encoder *encoder, 257static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
232 struct drm_display_mode *mode, 258 struct drm_display_mode *mode,
233 struct drm_display_mode *adjusted_mode) 259 struct drm_display_mode *adjusted_mode)
@@ -236,6 +262,9 @@ static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
236 struct drm_device *dev = encoder->dev; 262 struct drm_device *dev = encoder->dev;
237 struct radeon_device *rdev = dev->dev_private; 263 struct radeon_device *rdev = dev->dev_private;
238 264
265 /* adjust pm to upcoming mode change */
266 radeon_pm_compute_clocks(rdev);
267
239 /* set the active encoder to connector routing */ 268 /* set the active encoder to connector routing */
240 radeon_encoder_set_active_device(encoder); 269 radeon_encoder_set_active_device(encoder);
241 drm_mode_set_crtcinfo(adjusted_mode, 0); 270 drm_mode_set_crtcinfo(adjusted_mode, 0);
@@ -458,34 +487,20 @@ atombios_digital_setup(struct drm_encoder *encoder, int action)
458 struct drm_device *dev = encoder->dev; 487 struct drm_device *dev = encoder->dev;
459 struct radeon_device *rdev = dev->dev_private; 488 struct radeon_device *rdev = dev->dev_private;
460 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 489 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
490 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
491 struct radeon_connector_atom_dig *dig_connector =
492 radeon_get_atom_connector_priv_from_encoder(encoder);
461 union lvds_encoder_control args; 493 union lvds_encoder_control args;
462 int index = 0; 494 int index = 0;
463 int hdmi_detected = 0; 495 int hdmi_detected = 0;
464 uint8_t frev, crev; 496 uint8_t frev, crev;
465 struct radeon_encoder_atom_dig *dig;
466 struct drm_connector *connector;
467 struct radeon_connector *radeon_connector;
468 struct radeon_connector_atom_dig *dig_connector;
469 497
470 connector = radeon_get_connector_for_encoder(encoder); 498 if (!dig || !dig_connector)
471 if (!connector)
472 return; 499 return;
473 500
474 radeon_connector = to_radeon_connector(connector); 501 if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI)
475
476 if (!radeon_encoder->enc_priv)
477 return;
478
479 dig = radeon_encoder->enc_priv;
480
481 if (!radeon_connector->con_priv)
482 return;
483
484 if (drm_detect_hdmi_monitor(radeon_connector->edid))
485 hdmi_detected = 1; 502 hdmi_detected = 1;
486 503
487 dig_connector = radeon_connector->con_priv;
488
489 memset(&args, 0, sizeof(args)); 504 memset(&args, 0, sizeof(args));
490 505
491 switch (radeon_encoder->encoder_id) { 506 switch (radeon_encoder->encoder_id) {
@@ -586,7 +601,7 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
586{ 601{
587 struct drm_connector *connector; 602 struct drm_connector *connector;
588 struct radeon_connector *radeon_connector; 603 struct radeon_connector *radeon_connector;
589 struct radeon_connector_atom_dig *radeon_dig_connector; 604 struct radeon_connector_atom_dig *dig_connector;
590 605
591 connector = radeon_get_connector_for_encoder(encoder); 606 connector = radeon_get_connector_for_encoder(encoder);
592 if (!connector) 607 if (!connector)
@@ -617,9 +632,9 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
617 break; 632 break;
618 case DRM_MODE_CONNECTOR_DisplayPort: 633 case DRM_MODE_CONNECTOR_DisplayPort:
619 case DRM_MODE_CONNECTOR_eDP: 634 case DRM_MODE_CONNECTOR_eDP:
620 radeon_dig_connector = radeon_connector->con_priv; 635 dig_connector = radeon_connector->con_priv;
621 if ((radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) || 636 if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
622 (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) 637 (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP))
623 return ATOM_ENCODER_MODE_DP; 638 return ATOM_ENCODER_MODE_DP;
624 else if (drm_detect_hdmi_monitor(radeon_connector->edid)) 639 else if (drm_detect_hdmi_monitor(radeon_connector->edid))
625 return ATOM_ENCODER_MODE_HDMI; 640 return ATOM_ENCODER_MODE_HDMI;
@@ -656,6 +671,18 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
656 * - 2 DIG encoder blocks. 671 * - 2 DIG encoder blocks.
657 * DIG1/2 can drive UNIPHY0/1/2 link A or link B 672 * DIG1/2 can drive UNIPHY0/1/2 link A or link B
658 * 673 *
674 * DCE 4.0
675 * - 3 DIG transmitter blocks UNIPHY0/1/2 (links A and B).
676 * Supports up to 6 digital outputs
677 * - 6 DIG encoder blocks.
678 * - DIG to PHY mapping is hardcoded
679 * DIG1 drives UNIPHY0 link A, A+B
680 * DIG2 drives UNIPHY0 link B
681 * DIG3 drives UNIPHY1 link A, A+B
682 * DIG4 drives UNIPHY1 link B
683 * DIG5 drives UNIPHY2 link A, A+B
684 * DIG6 drives UNIPHY2 link B
685 *
659 * Routing 686 * Routing
660 * crtc -> dig encoder -> UNIPHY/LVTMA (1 or 2 links) 687 * crtc -> dig encoder -> UNIPHY/LVTMA (1 or 2 links)
661 * Examples: 688 * Examples:
@@ -664,88 +691,78 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
664 * crtc0 -> dig1 -> UNIPHY2 link A -> LVDS 691 * crtc0 -> dig1 -> UNIPHY2 link A -> LVDS
665 * crtc1 -> dig2 -> UNIPHY1 link B+A -> TMDS/HDMI 692 * crtc1 -> dig2 -> UNIPHY1 link B+A -> TMDS/HDMI
666 */ 693 */
667static void 694
695union dig_encoder_control {
696 DIG_ENCODER_CONTROL_PS_ALLOCATION v1;
697 DIG_ENCODER_CONTROL_PARAMETERS_V2 v2;
698 DIG_ENCODER_CONTROL_PARAMETERS_V3 v3;
699};
700
701void
668atombios_dig_encoder_setup(struct drm_encoder *encoder, int action) 702atombios_dig_encoder_setup(struct drm_encoder *encoder, int action)
669{ 703{
670 struct drm_device *dev = encoder->dev; 704 struct drm_device *dev = encoder->dev;
671 struct radeon_device *rdev = dev->dev_private; 705 struct radeon_device *rdev = dev->dev_private;
672 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 706 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
673 DIG_ENCODER_CONTROL_PS_ALLOCATION args; 707 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
708 struct radeon_connector_atom_dig *dig_connector =
709 radeon_get_atom_connector_priv_from_encoder(encoder);
710 union dig_encoder_control args;
674 int index = 0, num = 0; 711 int index = 0, num = 0;
675 uint8_t frev, crev; 712 uint8_t frev, crev;
676 struct radeon_encoder_atom_dig *dig;
677 struct drm_connector *connector;
678 struct radeon_connector *radeon_connector;
679 struct radeon_connector_atom_dig *dig_connector;
680 713
681 connector = radeon_get_connector_for_encoder(encoder); 714 if (!dig || !dig_connector)
682 if (!connector)
683 return; 715 return;
684 716
685 radeon_connector = to_radeon_connector(connector);
686
687 if (!radeon_connector->con_priv)
688 return;
689
690 dig_connector = radeon_connector->con_priv;
691
692 if (!radeon_encoder->enc_priv)
693 return;
694
695 dig = radeon_encoder->enc_priv;
696
697 memset(&args, 0, sizeof(args)); 717 memset(&args, 0, sizeof(args));
698 718
699 if (dig->dig_encoder) 719 if (ASIC_IS_DCE4(rdev))
700 index = GetIndexIntoMasterTable(COMMAND, DIG2EncoderControl); 720 index = GetIndexIntoMasterTable(COMMAND, DIGxEncoderControl);
701 else 721 else {
702 index = GetIndexIntoMasterTable(COMMAND, DIG1EncoderControl); 722 if (dig->dig_encoder)
723 index = GetIndexIntoMasterTable(COMMAND, DIG2EncoderControl);
724 else
725 index = GetIndexIntoMasterTable(COMMAND, DIG1EncoderControl);
726 }
703 num = dig->dig_encoder + 1; 727 num = dig->dig_encoder + 1;
704 728
705 atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev); 729 atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev);
706 730
707 args.ucAction = action; 731 args.v1.ucAction = action;
708 args.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); 732 args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
733 args.v1.ucEncoderMode = atombios_get_encoder_mode(encoder);
709 734
710 if (ASIC_IS_DCE32(rdev)) { 735 if (args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP) {
736 if (dig_connector->dp_clock == 270000)
737 args.v1.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ;
738 args.v1.ucLaneNum = dig_connector->dp_lane_count;
739 } else if (radeon_encoder->pixel_clock > 165000)
740 args.v1.ucLaneNum = 8;
741 else
742 args.v1.ucLaneNum = 4;
743
744 if (ASIC_IS_DCE4(rdev)) {
745 args.v3.acConfig.ucDigSel = dig->dig_encoder;
746 args.v3.ucBitPerColor = PANEL_8BIT_PER_COLOR;
747 } else {
711 switch (radeon_encoder->encoder_id) { 748 switch (radeon_encoder->encoder_id) {
712 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: 749 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
713 args.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER1; 750 args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER1;
714 break; 751 break;
715 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: 752 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
716 args.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER2; 753 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
754 args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER2;
717 break; 755 break;
718 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: 756 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
719 args.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER3; 757 args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER3;
720 break;
721 }
722 } else {
723 switch (radeon_encoder->encoder_id) {
724 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
725 args.ucConfig = ATOM_ENCODER_CONFIG_TRANSMITTER1;
726 break;
727 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
728 args.ucConfig = ATOM_ENCODER_CONFIG_TRANSMITTER2;
729 break; 758 break;
730 } 759 }
760 if (dig_connector->linkb)
761 args.v1.ucConfig |= ATOM_ENCODER_CONFIG_LINKB;
762 else
763 args.v1.ucConfig |= ATOM_ENCODER_CONFIG_LINKA;
731 } 764 }
732 765
733 args.ucEncoderMode = atombios_get_encoder_mode(encoder);
734
735 if (args.ucEncoderMode == ATOM_ENCODER_MODE_DP) {
736 if (dig_connector->dp_clock == 270000)
737 args.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ;
738 args.ucLaneNum = dig_connector->dp_lane_count;
739 } else if (radeon_encoder->pixel_clock > 165000)
740 args.ucLaneNum = 8;
741 else
742 args.ucLaneNum = 4;
743
744 if (dig_connector->linkb)
745 args.ucConfig |= ATOM_ENCODER_CONFIG_LINKB;
746 else
747 args.ucConfig |= ATOM_ENCODER_CONFIG_LINKA;
748
749 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); 766 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
750 767
751} 768}
@@ -753,6 +770,7 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action)
753union dig_transmitter_control { 770union dig_transmitter_control {
754 DIG_TRANSMITTER_CONTROL_PS_ALLOCATION v1; 771 DIG_TRANSMITTER_CONTROL_PS_ALLOCATION v1;
755 DIG_TRANSMITTER_CONTROL_PARAMETERS_V2 v2; 772 DIG_TRANSMITTER_CONTROL_PARAMETERS_V2 v2;
773 DIG_TRANSMITTER_CONTROL_PARAMETERS_V3 v3;
756}; 774};
757 775
758void 776void
@@ -761,37 +779,29 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
761 struct drm_device *dev = encoder->dev; 779 struct drm_device *dev = encoder->dev;
762 struct radeon_device *rdev = dev->dev_private; 780 struct radeon_device *rdev = dev->dev_private;
763 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 781 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
782 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
783 struct radeon_connector_atom_dig *dig_connector =
784 radeon_get_atom_connector_priv_from_encoder(encoder);
785 struct drm_connector *connector;
786 struct radeon_connector *radeon_connector;
764 union dig_transmitter_control args; 787 union dig_transmitter_control args;
765 int index = 0, num = 0; 788 int index = 0, num = 0;
766 uint8_t frev, crev; 789 uint8_t frev, crev;
767 struct radeon_encoder_atom_dig *dig;
768 struct drm_connector *connector;
769 struct radeon_connector *radeon_connector;
770 struct radeon_connector_atom_dig *dig_connector;
771 bool is_dp = false; 790 bool is_dp = false;
791 int pll_id = 0;
772 792
773 connector = radeon_get_connector_for_encoder(encoder); 793 if (!dig || !dig_connector)
774 if (!connector)
775 return; 794 return;
776 795
796 connector = radeon_get_connector_for_encoder(encoder);
777 radeon_connector = to_radeon_connector(connector); 797 radeon_connector = to_radeon_connector(connector);
778 798
779 if (!radeon_encoder->enc_priv)
780 return;
781
782 dig = radeon_encoder->enc_priv;
783
784 if (!radeon_connector->con_priv)
785 return;
786
787 dig_connector = radeon_connector->con_priv;
788
789 if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP) 799 if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP)
790 is_dp = true; 800 is_dp = true;
791 801
792 memset(&args, 0, sizeof(args)); 802 memset(&args, 0, sizeof(args));
793 803
794 if (ASIC_IS_DCE32(rdev)) 804 if (ASIC_IS_DCE32(rdev) || ASIC_IS_DCE4(rdev))
795 index = GetIndexIntoMasterTable(COMMAND, UNIPHYTransmitterControl); 805 index = GetIndexIntoMasterTable(COMMAND, UNIPHYTransmitterControl);
796 else { 806 else {
797 switch (radeon_encoder->encoder_id) { 807 switch (radeon_encoder->encoder_id) {
@@ -821,7 +831,54 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
821 else 831 else
822 args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); 832 args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
823 } 833 }
824 if (ASIC_IS_DCE32(rdev)) { 834 if (ASIC_IS_DCE4(rdev)) {
835 if (is_dp)
836 args.v3.ucLaneNum = dig_connector->dp_lane_count;
837 else if (radeon_encoder->pixel_clock > 165000)
838 args.v3.ucLaneNum = 8;
839 else
840 args.v3.ucLaneNum = 4;
841
842 if (dig_connector->linkb) {
843 args.v3.acConfig.ucLinkSel = 1;
844 args.v3.acConfig.ucEncoderSel = 1;
845 }
846
847 /* Select the PLL for the PHY
848 * DP PHY should be clocked from external src if there is
849 * one.
850 */
851 if (encoder->crtc) {
852 struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
853 pll_id = radeon_crtc->pll_id;
854 }
855 if (is_dp && rdev->clock.dp_extclk)
856 args.v3.acConfig.ucRefClkSource = 2; /* external src */
857 else
858 args.v3.acConfig.ucRefClkSource = pll_id;
859
860 switch (radeon_encoder->encoder_id) {
861 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
862 args.v3.acConfig.ucTransmitterSel = 0;
863 num = 0;
864 break;
865 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
866 args.v3.acConfig.ucTransmitterSel = 1;
867 num = 1;
868 break;
869 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
870 args.v3.acConfig.ucTransmitterSel = 2;
871 num = 2;
872 break;
873 }
874
875 if (is_dp)
876 args.v3.acConfig.fCoherentMode = 1; /* DP requires coherent */
877 else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
878 if (dig->coherent_mode)
879 args.v3.acConfig.fCoherentMode = 1;
880 }
881 } else if (ASIC_IS_DCE32(rdev)) {
825 if (dig->dig_encoder == 1) 882 if (dig->dig_encoder == 1)
826 args.v2.acConfig.ucEncoderSel = 1; 883 args.v2.acConfig.ucEncoderSel = 1;
827 if (dig_connector->linkb) 884 if (dig_connector->linkb)
@@ -849,7 +906,6 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
849 args.v2.acConfig.fCoherentMode = 1; 906 args.v2.acConfig.fCoherentMode = 1;
850 } 907 }
851 } else { 908 } else {
852
853 args.v1.ucConfig = ATOM_TRANSMITTER_CONFIG_CLKSRC_PPLL; 909 args.v1.ucConfig = ATOM_TRANSMITTER_CONFIG_CLKSRC_PPLL;
854 910
855 if (dig->dig_encoder) 911 if (dig->dig_encoder)
@@ -1024,9 +1080,12 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
1024 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); 1080 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
1025 } 1081 }
1026 radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false); 1082 radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
1083
1084 /* adjust pm to dpms change */
1085 radeon_pm_compute_clocks(rdev);
1027} 1086}
1028 1087
1029union crtc_sourc_param { 1088union crtc_source_param {
1030 SELECT_CRTC_SOURCE_PS_ALLOCATION v1; 1089 SELECT_CRTC_SOURCE_PS_ALLOCATION v1;
1031 SELECT_CRTC_SOURCE_PARAMETERS_V2 v2; 1090 SELECT_CRTC_SOURCE_PARAMETERS_V2 v2;
1032}; 1091};
@@ -1038,7 +1097,7 @@ atombios_set_encoder_crtc_source(struct drm_encoder *encoder)
1038 struct radeon_device *rdev = dev->dev_private; 1097 struct radeon_device *rdev = dev->dev_private;
1039 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 1098 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
1040 struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc); 1099 struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
1041 union crtc_sourc_param args; 1100 union crtc_source_param args;
1042 int index = GetIndexIntoMasterTable(COMMAND, SelectCRTC_Source); 1101 int index = GetIndexIntoMasterTable(COMMAND, SelectCRTC_Source);
1043 uint8_t frev, crev; 1102 uint8_t frev, crev;
1044 struct radeon_encoder_atom_dig *dig; 1103 struct radeon_encoder_atom_dig *dig;
@@ -1107,10 +1166,26 @@ atombios_set_encoder_crtc_source(struct drm_encoder *encoder)
1107 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: 1166 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
1108 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: 1167 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
1109 dig = radeon_encoder->enc_priv; 1168 dig = radeon_encoder->enc_priv;
1110 if (dig->dig_encoder) 1169 switch (dig->dig_encoder) {
1111 args.v2.ucEncoderID = ASIC_INT_DIG2_ENCODER_ID; 1170 case 0:
1112 else
1113 args.v2.ucEncoderID = ASIC_INT_DIG1_ENCODER_ID; 1171 args.v2.ucEncoderID = ASIC_INT_DIG1_ENCODER_ID;
1172 break;
1173 case 1:
1174 args.v2.ucEncoderID = ASIC_INT_DIG2_ENCODER_ID;
1175 break;
1176 case 2:
1177 args.v2.ucEncoderID = ASIC_INT_DIG3_ENCODER_ID;
1178 break;
1179 case 3:
1180 args.v2.ucEncoderID = ASIC_INT_DIG4_ENCODER_ID;
1181 break;
1182 case 4:
1183 args.v2.ucEncoderID = ASIC_INT_DIG5_ENCODER_ID;
1184 break;
1185 case 5:
1186 args.v2.ucEncoderID = ASIC_INT_DIG6_ENCODER_ID;
1187 break;
1188 }
1114 break; 1189 break;
1115 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: 1190 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
1116 args.v2.ucEncoderID = ASIC_INT_DVO_ENCODER_ID; 1191 args.v2.ucEncoderID = ASIC_INT_DVO_ENCODER_ID;
@@ -1167,6 +1242,7 @@ atombios_apply_encoder_quirks(struct drm_encoder *encoder,
1167 } 1242 }
1168 1243
1169 /* set scaler clears this on some chips */ 1244 /* set scaler clears this on some chips */
1245 /* XXX check DCE4 */
1170 if (!(radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))) { 1246 if (!(radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))) {
1171 if (ASIC_IS_AVIVO(rdev) && (mode->flags & DRM_MODE_FLAG_INTERLACE)) 1247 if (ASIC_IS_AVIVO(rdev) && (mode->flags & DRM_MODE_FLAG_INTERLACE))
1172 WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset, 1248 WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset,
@@ -1183,6 +1259,33 @@ static int radeon_atom_pick_dig_encoder(struct drm_encoder *encoder)
1183 struct drm_encoder *test_encoder; 1259 struct drm_encoder *test_encoder;
1184 struct radeon_encoder_atom_dig *dig; 1260 struct radeon_encoder_atom_dig *dig;
1185 uint32_t dig_enc_in_use = 0; 1261 uint32_t dig_enc_in_use = 0;
1262
1263 if (ASIC_IS_DCE4(rdev)) {
1264 struct radeon_connector_atom_dig *dig_connector =
1265 radeon_get_atom_connector_priv_from_encoder(encoder);
1266
1267 switch (radeon_encoder->encoder_id) {
1268 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
1269 if (dig_connector->linkb)
1270 return 1;
1271 else
1272 return 0;
1273 break;
1274 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
1275 if (dig_connector->linkb)
1276 return 3;
1277 else
1278 return 2;
1279 break;
1280 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
1281 if (dig_connector->linkb)
1282 return 5;
1283 else
1284 return 4;
1285 break;
1286 }
1287 }
1288
1186 /* on DCE32 an encoder can drive any block so just use the crtc id */ 1289
1187 if (ASIC_IS_DCE32(rdev)) { 1290 if (ASIC_IS_DCE32(rdev)) {
1188 return radeon_crtc->crtc_id; 1291 return radeon_crtc->crtc_id;
@@ -1254,15 +1357,26 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
1254 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: 1357 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
1255 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: 1358 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
1256 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: 1359 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
1257 /* disable the encoder and transmitter */ 1360 if (ASIC_IS_DCE4(rdev)) {
1258 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0); 1361 /* disable the transmitter */
1259 atombios_dig_encoder_setup(encoder, ATOM_DISABLE); 1362 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
1260 1363 /* setup and enable the encoder */
1261 /* setup and enable the encoder and transmitter */ 1364 atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_SETUP);
1262 atombios_dig_encoder_setup(encoder, ATOM_ENABLE); 1365
1263 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_INIT, 0, 0); 1366 /* init and enable the transmitter */
1264 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP, 0, 0); 1367 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_INIT, 0, 0);
1265 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0); 1368 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
1369 } else {
1370 /* disable the encoder and transmitter */
1371 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
1372 atombios_dig_encoder_setup(encoder, ATOM_DISABLE);
1373
1374 /* setup and enable the encoder and transmitter */
1375 atombios_dig_encoder_setup(encoder, ATOM_ENABLE);
1376 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_INIT, 0, 0);
1377 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP, 0, 0);
1378 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
1379 }
1266 break; 1380 break;
1267 case ENCODER_OBJECT_ID_INTERNAL_DDI: 1381 case ENCODER_OBJECT_ID_INTERNAL_DDI:
1268 atombios_ddia_setup(encoder, ATOM_ENABLE); 1382 atombios_ddia_setup(encoder, ATOM_ENABLE);
@@ -1282,7 +1396,9 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
1282 } 1396 }
1283 atombios_apply_encoder_quirks(encoder, adjusted_mode); 1397 atombios_apply_encoder_quirks(encoder, adjusted_mode);
1284 1398
1285 r600_hdmi_setmode(encoder, adjusted_mode); 1399 /* XXX */
1400 if (!ASIC_IS_DCE4(rdev))
1401 r600_hdmi_setmode(encoder, adjusted_mode);
1286} 1402}
1287 1403
1288static bool 1404static bool
@@ -1480,10 +1596,18 @@ radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t su
1480 return; 1596 return;
1481 1597
1482 encoder = &radeon_encoder->base; 1598 encoder = &radeon_encoder->base;
1483 if (rdev->flags & RADEON_SINGLE_CRTC) 1599 switch (rdev->num_crtc) {
1600 case 1:
1484 encoder->possible_crtcs = 0x1; 1601 encoder->possible_crtcs = 0x1;
1485 else 1602 break;
1603 case 2:
1604 default:
1486 encoder->possible_crtcs = 0x3; 1605 encoder->possible_crtcs = 0x3;
1606 break;
1607 case 6:
1608 encoder->possible_crtcs = 0x3f;
1609 break;
1610 }
1487 1611
1488 radeon_encoder->enc_priv = NULL; 1612 radeon_encoder->enc_priv = NULL;
1489 1613
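
The hardcoded DCE 4.0 routing that radeon_atom_pick_dig_encoder() spells out above collapses to a one-line formula; a hedged sketch with hypothetical names:

/* each UNIPHY block owns two consecutive DIG encoders: link A first,
 * then link B, so DIG1..DIG6 map to indices 0..5
 */
static int dce4_dig_index(int uniphy, int linkb)	/* uniphy: 0..2 */
{
	return uniphy * 2 + (linkb ? 1 : 0);
}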
diff --git a/drivers/gpu/drm/radeon/radeon_family.h b/drivers/gpu/drm/radeon/radeon_family.h
index 797972e344a6..93c7d5d41914 100644
--- a/drivers/gpu/drm/radeon/radeon_family.h
+++ b/drivers/gpu/drm/radeon/radeon_family.h
@@ -75,6 +75,11 @@ enum radeon_family {
75 CHIP_RV730, 75 CHIP_RV730,
76 CHIP_RV710, 76 CHIP_RV710,
77 CHIP_RV740, 77 CHIP_RV740,
78 CHIP_CEDAR,
79 CHIP_REDWOOD,
80 CHIP_JUNIPER,
81 CHIP_CYPRESS,
82 CHIP_HEMLOCK,
78 CHIP_LAST, 83 CHIP_LAST,
79}; 84};
80 85
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index d71e346e9ab5..8fccbf29235e 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -39,6 +39,8 @@
39 39
40#include "drm_fb_helper.h" 40#include "drm_fb_helper.h"
41 41
42#include <linux/vga_switcheroo.h>
43
42struct radeon_fb_device { 44struct radeon_fb_device {
43 struct drm_fb_helper helper; 45 struct drm_fb_helper helper;
44 struct radeon_framebuffer *rfb; 46 struct radeon_framebuffer *rfb;
@@ -148,7 +150,6 @@ int radeonfb_create(struct drm_device *dev,
148 unsigned long tmp; 150 unsigned long tmp;
149 bool fb_tiled = false; /* useful for testing */ 151 bool fb_tiled = false; /* useful for testing */
150 u32 tiling_flags = 0; 152 u32 tiling_flags = 0;
151 int crtc_count;
152 153
153 mode_cmd.width = surface_width; 154 mode_cmd.width = surface_width;
154 mode_cmd.height = surface_height; 155 mode_cmd.height = surface_height;
@@ -239,11 +240,7 @@ int radeonfb_create(struct drm_device *dev,
239 rfbdev = info->par; 240 rfbdev = info->par;
240 rfbdev->helper.funcs = &radeon_fb_helper_funcs; 241 rfbdev->helper.funcs = &radeon_fb_helper_funcs;
241 rfbdev->helper.dev = dev; 242 rfbdev->helper.dev = dev;
242 if (rdev->flags & RADEON_SINGLE_CRTC) 243 ret = drm_fb_helper_init_crtc_count(&rfbdev->helper, rdev->num_crtc,
243 crtc_count = 1;
244 else
245 crtc_count = 2;
246 ret = drm_fb_helper_init_crtc_count(&rfbdev->helper, crtc_count,
247 RADEONFB_CONN_LIMIT); 244 RADEONFB_CONN_LIMIT);
248 if (ret) 245 if (ret)
249 goto out_unref; 246 goto out_unref;
@@ -257,7 +254,7 @@ int radeonfb_create(struct drm_device *dev,
257 info->flags = FBINFO_DEFAULT; 254 info->flags = FBINFO_DEFAULT;
258 info->fbops = &radeonfb_ops; 255 info->fbops = &radeonfb_ops;
259 256
260 tmp = fb_gpuaddr - rdev->mc.vram_location; 257 tmp = fb_gpuaddr - rdev->mc.vram_start;
261 info->fix.smem_start = rdev->mc.aper_base + tmp; 258 info->fix.smem_start = rdev->mc.aper_base + tmp;
262 info->fix.smem_len = size; 259 info->fix.smem_len = size;
263 info->screen_base = fbptr; 260 info->screen_base = fbptr;
@@ -291,6 +288,7 @@ int radeonfb_create(struct drm_device *dev,
291 rfbdev->rdev = rdev; 288 rfbdev->rdev = rdev;
292 289
293 mutex_unlock(&rdev->ddev->struct_mutex); 290 mutex_unlock(&rdev->ddev->struct_mutex);
291 vga_switcheroo_client_fb_set(rdev->ddev->pdev, info);
294 return 0; 292 return 0;
295 293
296out_unref: 294out_unref:
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index e73d56e83fa6..1770d3c07fd0 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -139,6 +139,7 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
139 unsigned t; 139 unsigned t;
140 unsigned p; 140 unsigned p;
141 int i, j; 141 int i, j;
142 u64 page_base;
142 143
143 if (!rdev->gart.ready) { 144 if (!rdev->gart.ready) {
144 WARN(1, "trying to unbind memory from uninitialized GART !\n"); 145
@@ -151,9 +152,11 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
151 pci_unmap_page(rdev->pdev, rdev->gart.pages_addr[p], 152 pci_unmap_page(rdev->pdev, rdev->gart.pages_addr[p],
152 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); 153 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
153 rdev->gart.pages[p] = NULL; 154 rdev->gart.pages[p] = NULL;
154 rdev->gart.pages_addr[p] = 0; 155 rdev->gart.pages_addr[p] = rdev->dummy_page.addr;
156 page_base = rdev->gart.pages_addr[p];
155 for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) { 157 for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
156 radeon_gart_set_page(rdev, t, 0); 158 radeon_gart_set_page(rdev, t, page_base);
159 page_base += RADEON_GPU_PAGE_SIZE;
157 } 160 }
158 } 161 }
159 } 162 }
@@ -199,8 +202,26 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
199 return 0; 202 return 0;
200} 203}
201 204
205void radeon_gart_restore(struct radeon_device *rdev)
206{
207 int i, j, t;
208 u64 page_base;
209
210 for (i = 0, t = 0; i < rdev->gart.num_cpu_pages; i++) {
211 page_base = rdev->gart.pages_addr[i];
212 for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
213 radeon_gart_set_page(rdev, t, page_base);
214 page_base += RADEON_GPU_PAGE_SIZE;
215 }
216 }
217 mb();
218 radeon_gart_tlb_flush(rdev);
219}
220
202int radeon_gart_init(struct radeon_device *rdev) 221int radeon_gart_init(struct radeon_device *rdev)
203{ 222{
223 int r, i;
224
204 if (rdev->gart.pages) { 225 if (rdev->gart.pages) {
205 return 0; 226 return 0;
206 } 227 }
@@ -209,6 +230,9 @@ int radeon_gart_init(struct radeon_device *rdev)
209 DRM_ERROR("Page size is smaller than GPU page size!\n"); 230 DRM_ERROR("Page size is smaller than GPU page size!\n");
210 return -EINVAL; 231 return -EINVAL;
211 } 232 }
233 r = radeon_dummy_page_init(rdev);
234 if (r)
235 return r;
212 /* Compute table size */ 236 /* Compute table size */
213 rdev->gart.num_cpu_pages = rdev->mc.gtt_size / PAGE_SIZE; 237 rdev->gart.num_cpu_pages = rdev->mc.gtt_size / PAGE_SIZE;
214 rdev->gart.num_gpu_pages = rdev->mc.gtt_size / RADEON_GPU_PAGE_SIZE; 238 rdev->gart.num_gpu_pages = rdev->mc.gtt_size / RADEON_GPU_PAGE_SIZE;
@@ -227,6 +251,10 @@ int radeon_gart_init(struct radeon_device *rdev)
227 radeon_gart_fini(rdev); 251 radeon_gart_fini(rdev);
228 return -ENOMEM; 252 return -ENOMEM;
229 } 253 }
254 /* set GART entry to point to the dummy page by default */
255 for (i = 0; i < rdev->gart.num_cpu_pages; i++) {
256 rdev->gart.pages_addr[i] = rdev->dummy_page.addr;
257 }
230 return 0; 258 return 0;
231} 259}
232 260
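
The radeon_gart.c changes switch the driver from zeroing unbound GART entries to pointing every unbound entry at a single driver-owned dummy page, so a stray GPU access lands on a harmless scratch page instead of a stale or invalid address. A toy model of the idea (illustrative names, TLB flush elided):

#include <stdint.h>

struct toy_gart {
	uint64_t *entries;		/* one entry per GPU page */
	unsigned int num_entries;
	uint64_t dummy_addr;		/* bus address of the scratch page */
};

static void toy_gart_init(struct toy_gart *g)
{
	unsigned int i;

	/* default every entry to the dummy page, as radeon_gart_init()
	 * now does for pages_addr[] */
	for (i = 0; i < g->num_entries; i++)
		g->entries[i] = g->dummy_addr;
}

static void toy_gart_unbind(struct toy_gart *g, unsigned int first,
			    unsigned int count)
{
	unsigned int i;

	/* re-point at the dummy page rather than writing 0 */
	for (i = 0; i < count; i++)
		g->entries[first + i] = g->dummy_addr;
	/* the real code flushes the GPU TLB after updates like this */
}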
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index db8e9a355a01..ef92d147d8f0 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -69,9 +69,7 @@ int radeon_gem_object_create(struct radeon_device *rdev, int size,
69 if (r != -ERESTARTSYS) 69 if (r != -ERESTARTSYS)
70 DRM_ERROR("Failed to allocate GEM object (%d, %d, %u, %d)\n", 70 DRM_ERROR("Failed to allocate GEM object (%d, %d, %u, %d)\n",
71 size, initial_domain, alignment, r); 71 size, initial_domain, alignment, r);
72 mutex_lock(&rdev->ddev->struct_mutex); 72 drm_gem_object_unreference_unlocked(gobj);
73 drm_gem_object_unreference(gobj);
74 mutex_unlock(&rdev->ddev->struct_mutex);
75 return r; 73 return r;
76 } 74 }
77 gobj->driver_private = robj; 75 gobj->driver_private = robj;
@@ -202,14 +200,10 @@ int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
202 } 200 }
203 r = drm_gem_handle_create(filp, gobj, &handle); 201 r = drm_gem_handle_create(filp, gobj, &handle);
204 if (r) { 202 if (r) {
205 mutex_lock(&dev->struct_mutex); 203 drm_gem_object_unreference_unlocked(gobj);
206 drm_gem_object_unreference(gobj);
207 mutex_unlock(&dev->struct_mutex);
208 return r; 204 return r;
209 } 205 }
210 mutex_lock(&dev->struct_mutex); 206 drm_gem_object_handle_unreference_unlocked(gobj);
211 drm_gem_object_handle_unreference(gobj);
212 mutex_unlock(&dev->struct_mutex);
213 args->handle = handle; 207 args->handle = handle;
214 return 0; 208 return 0;
215} 209}
@@ -236,9 +230,7 @@ int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
236 230
237 r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain); 231 r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);
238 232
239 mutex_lock(&dev->struct_mutex); 233 drm_gem_object_unreference_unlocked(gobj);
240 drm_gem_object_unreference(gobj);
241 mutex_unlock(&dev->struct_mutex);
242 return r; 234 return r;
243} 235}
244 236
@@ -255,9 +247,7 @@ int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
255 } 247 }
256 robj = gobj->driver_private; 248 robj = gobj->driver_private;
257 args->addr_ptr = radeon_bo_mmap_offset(robj); 249 args->addr_ptr = radeon_bo_mmap_offset(robj);
258 mutex_lock(&dev->struct_mutex); 250 drm_gem_object_unreference_unlocked(gobj);
259 drm_gem_object_unreference(gobj);
260 mutex_unlock(&dev->struct_mutex);
261 return 0; 251 return 0;
262} 252}
263 253
@@ -288,9 +278,7 @@ int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
288 default: 278 default:
289 break; 279 break;
290 } 280 }
291 mutex_lock(&dev->struct_mutex); 281 drm_gem_object_unreference_unlocked(gobj);
292 drm_gem_object_unreference(gobj);
293 mutex_unlock(&dev->struct_mutex);
294 return r; 282 return r;
295} 283}
296 284
@@ -311,9 +299,7 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
311 /* callback hw specific functions if any */ 299 /* callback hw specific functions if any */
312 if (robj->rdev->asic->ioctl_wait_idle) 300 if (robj->rdev->asic->ioctl_wait_idle)
313 robj->rdev->asic->ioctl_wait_idle(robj->rdev, robj); 301 robj->rdev->asic->ioctl_wait_idle(robj->rdev, robj);
314 mutex_lock(&dev->struct_mutex); 302 drm_gem_object_unreference_unlocked(gobj);
315 drm_gem_object_unreference(gobj);
316 mutex_unlock(&dev->struct_mutex);
317 return r; 303 return r;
318} 304}
319 305
@@ -331,9 +317,7 @@ int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
331 return -EINVAL; 317 return -EINVAL;
332 robj = gobj->driver_private; 318 robj = gobj->driver_private;
333 r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch); 319 r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch);
334 mutex_lock(&dev->struct_mutex); 320 drm_gem_object_unreference_unlocked(gobj);
335 drm_gem_object_unreference(gobj);
336 mutex_unlock(&dev->struct_mutex);
337 return r; 321 return r;
338} 322}
339 323
@@ -356,8 +340,6 @@ int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
356 radeon_bo_get_tiling_flags(rbo, &args->tiling_flags, &args->pitch); 340 radeon_bo_get_tiling_flags(rbo, &args->tiling_flags, &args->pitch);
357 radeon_bo_unreserve(rbo); 341 radeon_bo_unreserve(rbo);
358out: 342out:
359 mutex_lock(&dev->struct_mutex); 343 drm_gem_object_unreference_unlocked(gobj);
360 drm_gem_object_unreference(gobj);
361 mutex_unlock(&dev->struct_mutex);
362 return r; 344 return r;
363} 345}
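
Every hunk in radeon_gem.c replaces the same three-line pattern with one call. Conceptually the helper folds the locking in, roughly as sketched below; the real drm_gem_object_unreference_unlocked() lives in the DRM core, and this is only a model of its call-site semantics:

/* sketch: what each call site used to spell out by hand */
static void unreference_unlocked_model(struct drm_device *dev,
				       struct drm_gem_object *obj)
{
	mutex_lock(&dev->struct_mutex);
	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
}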
diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c
index da3da1e89d00..4ae50c19589f 100644
--- a/drivers/gpu/drm/radeon/radeon_i2c.c
+++ b/drivers/gpu/drm/radeon/radeon_i2c.c
@@ -26,6 +26,7 @@
26#include "drmP.h" 26#include "drmP.h"
27#include "radeon_drm.h" 27#include "radeon_drm.h"
28#include "radeon.h" 28#include "radeon.h"
29#include "atom.h"
29 30
30/** 31/**
31 * radeon_ddc_probe 32 * radeon_ddc_probe
@@ -59,7 +60,7 @@ bool radeon_ddc_probe(struct radeon_connector *radeon_connector)
59} 60}
60 61
61 62
62void radeon_i2c_do_lock(struct radeon_i2c_chan *i2c, int lock_state) 63static void radeon_i2c_do_lock(struct radeon_i2c_chan *i2c, int lock_state)
63{ 64{
64 struct radeon_device *rdev = i2c->dev->dev_private; 65 struct radeon_device *rdev = i2c->dev->dev_private;
65 struct radeon_i2c_bus_rec *rec = &i2c->rec; 66 struct radeon_i2c_bus_rec *rec = &i2c->rec;
@@ -71,13 +72,25 @@ void radeon_i2c_do_lock(struct radeon_i2c_chan *i2c, int lock_state)
71 */ 72 */
72 if (rec->hw_capable) { 73 if (rec->hw_capable) {
73 if ((rdev->family >= CHIP_R200) && !ASIC_IS_AVIVO(rdev)) { 74 if ((rdev->family >= CHIP_R200) && !ASIC_IS_AVIVO(rdev)) {
74 if (rec->a_clk_reg == RADEON_GPIO_MONID) { 75 u32 reg;
76
77 if (rdev->family >= CHIP_RV350)
78 reg = RADEON_GPIO_MONID;
79 else if ((rdev->family == CHIP_R300) ||
80 (rdev->family == CHIP_R350))
81 reg = RADEON_GPIO_DVI_DDC;
82 else
83 reg = RADEON_GPIO_CRT2_DDC;
84
85 mutex_lock(&rdev->dc_hw_i2c_mutex);
86 if (rec->a_clk_reg == reg) {
75 WREG32(RADEON_DVI_I2C_CNTL_0, (RADEON_I2C_SOFT_RST | 87 WREG32(RADEON_DVI_I2C_CNTL_0, (RADEON_I2C_SOFT_RST |
76 R200_DVI_I2C_PIN_SEL(R200_SEL_DDC1))); 88 R200_DVI_I2C_PIN_SEL(R200_SEL_DDC1)));
77 } else { 89 } else {
78 WREG32(RADEON_DVI_I2C_CNTL_0, (RADEON_I2C_SOFT_RST | 90 WREG32(RADEON_DVI_I2C_CNTL_0, (RADEON_I2C_SOFT_RST |
79 R200_DVI_I2C_PIN_SEL(R200_SEL_DDC3))); 91 R200_DVI_I2C_PIN_SEL(R200_SEL_DDC3)));
80 } 92 }
93 mutex_unlock(&rdev->dc_hw_i2c_mutex);
81 } 94 }
82 } 95 }
83 96
@@ -168,6 +181,692 @@ static void set_data(void *i2c_priv, int data)
168 WREG32(rec->en_data_reg, val); 181 WREG32(rec->en_data_reg, val);
169} 182}
170 183
184static u32 radeon_get_i2c_prescale(struct radeon_device *rdev)
185{
186 struct radeon_pll *spll = &rdev->clock.spll;
187 u32 sclk = radeon_get_engine_clock(rdev);
188 u32 prescale = 0;
189 u32 n, m;
190 u8 loop;
191 int i2c_clock;
192
193 switch (rdev->family) {
194 case CHIP_R100:
195 case CHIP_RV100:
196 case CHIP_RS100:
197 case CHIP_RV200:
198 case CHIP_RS200:
199 case CHIP_R200:
200 case CHIP_RV250:
201 case CHIP_RS300:
202 case CHIP_RV280:
203 case CHIP_R300:
204 case CHIP_R350:
205 case CHIP_RV350:
206 n = (spll->reference_freq) / (4 * 6);
207 for (loop = 1; loop < 255; loop++) {
208 if ((loop * (loop - 1)) > n)
209 break;
210 }
211 m = loop - 1;
212 prescale = m | (loop << 8);
213 break;
214 case CHIP_RV380:
215 case CHIP_RS400:
216 case CHIP_RS480:
217 case CHIP_R420:
218 case CHIP_R423:
219 case CHIP_RV410:
220 sclk = radeon_get_engine_clock(rdev);
221 prescale = (((sclk * 10)/(4 * 128 * 100) + 1) << 8) + 128;
222 break;
223 case CHIP_RS600:
224 case CHIP_RS690:
225 case CHIP_RS740:
226 /* todo */
227 break;
228 case CHIP_RV515:
229 case CHIP_R520:
230 case CHIP_RV530:
231 case CHIP_RV560:
232 case CHIP_RV570:
233 case CHIP_R580:
234 i2c_clock = 50;
235 sclk = radeon_get_engine_clock(rdev);
236 if (rdev->family == CHIP_R520)
237 prescale = (127 << 8) + ((sclk * 10) / (4 * 127 * i2c_clock));
238 else
239 prescale = (((sclk * 10)/(4 * 128 * 100) + 1) << 8) + 128;
240 break;
241 case CHIP_R600:
242 case CHIP_RV610:
243 case CHIP_RV630:
244 case CHIP_RV670:
245 /* todo */
246 break;
247 case CHIP_RV620:
248 case CHIP_RV635:
249 case CHIP_RS780:
250 case CHIP_RS880:
251 case CHIP_RV770:
252 case CHIP_RV730:
253 case CHIP_RV710:
254 case CHIP_RV740:
255 /* todo */
256 break;
257 case CHIP_CEDAR:
258 case CHIP_REDWOOD:
259 case CHIP_JUNIPER:
260 case CHIP_CYPRESS:
261 case CHIP_HEMLOCK:
262 /* todo */
263 break;
264 default:
265 DRM_ERROR("i2c: unhandled radeon chip\n");
266 break;
267 }
268 return prescale;
269}
270
271
272/* hw i2c engine for r1xx-4xx hardware
273 * hw can buffer up to 15 bytes
274 */
275static int r100_hw_i2c_xfer(struct i2c_adapter *i2c_adap,
276 struct i2c_msg *msgs, int num)
277{
278 struct radeon_i2c_chan *i2c = i2c_get_adapdata(i2c_adap);
279 struct radeon_device *rdev = i2c->dev->dev_private;
280 struct radeon_i2c_bus_rec *rec = &i2c->rec;
281 struct i2c_msg *p;
282 int i, j, k, ret = num;
283 u32 prescale;
284 u32 i2c_cntl_0, i2c_cntl_1, i2c_data;
285 u32 tmp, reg;
286
287 mutex_lock(&rdev->dc_hw_i2c_mutex);
288 /* take the pm lock since we need a constant sclk */
289 mutex_lock(&rdev->pm.mutex);
290
291 prescale = radeon_get_i2c_prescale(rdev);
292
293 reg = ((prescale << RADEON_I2C_PRESCALE_SHIFT) |
294 RADEON_I2C_START |
295 RADEON_I2C_STOP |
296 RADEON_I2C_GO);
297
298 if (rdev->is_atom_bios) {
299 tmp = RREG32(RADEON_BIOS_6_SCRATCH);
300 WREG32(RADEON_BIOS_6_SCRATCH, tmp | ATOM_S6_HW_I2C_BUSY_STATE);
301 }
302
303 if (rec->mm_i2c) {
304 i2c_cntl_0 = RADEON_I2C_CNTL_0;
305 i2c_cntl_1 = RADEON_I2C_CNTL_1;
306 i2c_data = RADEON_I2C_DATA;
307 } else {
308 i2c_cntl_0 = RADEON_DVI_I2C_CNTL_0;
309 i2c_cntl_1 = RADEON_DVI_I2C_CNTL_1;
310 i2c_data = RADEON_DVI_I2C_DATA;
311
312 switch (rdev->family) {
313 case CHIP_R100:
314 case CHIP_RV100:
315 case CHIP_RS100:
316 case CHIP_RV200:
317 case CHIP_RS200:
318 case CHIP_RS300:
319 switch (rec->mask_clk_reg) {
320 case RADEON_GPIO_DVI_DDC:
321 /* no gpio select bit */
322 break;
323 default:
324 DRM_ERROR("gpio not supported with hw i2c\n");
325 ret = -EINVAL;
326 goto done;
327 }
328 break;
329 case CHIP_R200:
330 /* only bit 4 on r200 */
331 switch (rec->mask_clk_reg) {
332 case RADEON_GPIO_DVI_DDC:
333 reg |= R200_DVI_I2C_PIN_SEL(R200_SEL_DDC1);
334 break;
335 case RADEON_GPIO_MONID:
336 reg |= R200_DVI_I2C_PIN_SEL(R200_SEL_DDC3);
337 break;
338 default:
339 DRM_ERROR("gpio not supported with hw i2c\n");
340 ret = -EINVAL;
341 goto done;
342 }
343 break;
344 case CHIP_RV250:
345 case CHIP_RV280:
346 /* bits 3 and 4 */
347 switch (rec->mask_clk_reg) {
348 case RADEON_GPIO_DVI_DDC:
349 reg |= R200_DVI_I2C_PIN_SEL(R200_SEL_DDC1);
350 break;
351 case RADEON_GPIO_VGA_DDC:
352 reg |= R200_DVI_I2C_PIN_SEL(R200_SEL_DDC2);
353 break;
354 case RADEON_GPIO_CRT2_DDC:
355 reg |= R200_DVI_I2C_PIN_SEL(R200_SEL_DDC3);
356 break;
357 default:
358 DRM_ERROR("gpio not supported with hw i2c\n");
359 ret = -EINVAL;
360 goto done;
361 }
362 break;
363 case CHIP_R300:
364 case CHIP_R350:
365 /* only bit 4 on r300/r350 */
366 switch (rec->mask_clk_reg) {
367 case RADEON_GPIO_VGA_DDC:
368 reg |= R200_DVI_I2C_PIN_SEL(R200_SEL_DDC1);
369 break;
370 case RADEON_GPIO_DVI_DDC:
371 reg |= R200_DVI_I2C_PIN_SEL(R200_SEL_DDC3);
372 break;
373 default:
374 DRM_ERROR("gpio not supported with hw i2c\n");
375 ret = -EINVAL;
376 goto done;
377 }
378 break;
379 case CHIP_RV350:
380 case CHIP_RV380:
381 case CHIP_R420:
382 case CHIP_R423:
383 case CHIP_RV410:
384 case CHIP_RS400:
385 case CHIP_RS480:
386 /* bits 3 and 4 */
387 switch (rec->mask_clk_reg) {
388 case RADEON_GPIO_VGA_DDC:
389 reg |= R200_DVI_I2C_PIN_SEL(R200_SEL_DDC1);
390 break;
391 case RADEON_GPIO_DVI_DDC:
392 reg |= R200_DVI_I2C_PIN_SEL(R200_SEL_DDC2);
393 break;
394 case RADEON_GPIO_MONID:
395 reg |= R200_DVI_I2C_PIN_SEL(R200_SEL_DDC3);
396 break;
397 default:
398 DRM_ERROR("gpio not supported with hw i2c\n");
399 ret = -EINVAL;
400 goto done;
401 }
402 break;
403 default:
404 DRM_ERROR("unsupported asic\n");
405 ret = -EINVAL;
406 goto done;
407 break;
408 }
409 }
410
411 /* check for bus probe */
412 p = &msgs[0];
413 if ((num == 1) && (p->len == 0)) {
414 WREG32(i2c_cntl_0, (RADEON_I2C_DONE |
415 RADEON_I2C_NACK |
416 RADEON_I2C_HALT |
417 RADEON_I2C_SOFT_RST));
418 WREG32(i2c_data, (p->addr << 1) & 0xff);
419 WREG32(i2c_data, 0);
420 WREG32(i2c_cntl_1, ((1 << RADEON_I2C_DATA_COUNT_SHIFT) |
421 (1 << RADEON_I2C_ADDR_COUNT_SHIFT) |
422 RADEON_I2C_EN |
423 (48 << RADEON_I2C_TIME_LIMIT_SHIFT)));
424 WREG32(i2c_cntl_0, reg);
425 for (k = 0; k < 32; k++) {
426 udelay(10);
427 tmp = RREG32(i2c_cntl_0);
428 if (tmp & RADEON_I2C_GO)
429 continue;
430 tmp = RREG32(i2c_cntl_0);
431 if (tmp & RADEON_I2C_DONE)
432 break;
433 else {
434 DRM_DEBUG("i2c write error 0x%08x\n", tmp);
435 WREG32(i2c_cntl_0, tmp | RADEON_I2C_ABORT);
436 ret = -EIO;
437 goto done;
438 }
439 }
440 goto done;
441 }
442
443 for (i = 0; i < num; i++) {
444 p = &msgs[i];
445 for (j = 0; j < p->len; j++) {
446 if (p->flags & I2C_M_RD) {
447 WREG32(i2c_cntl_0, (RADEON_I2C_DONE |
448 RADEON_I2C_NACK |
449 RADEON_I2C_HALT |
450 RADEON_I2C_SOFT_RST));
451 WREG32(i2c_data, ((p->addr << 1) & 0xff) | 0x1);
452 WREG32(i2c_cntl_1, ((1 << RADEON_I2C_DATA_COUNT_SHIFT) |
453 (1 << RADEON_I2C_ADDR_COUNT_SHIFT) |
454 RADEON_I2C_EN |
455 (48 << RADEON_I2C_TIME_LIMIT_SHIFT)));
456 WREG32(i2c_cntl_0, reg | RADEON_I2C_RECEIVE);
457 for (k = 0; k < 32; k++) {
458 udelay(10);
459 tmp = RREG32(i2c_cntl_0);
460 if (tmp & RADEON_I2C_GO)
461 continue;
462 tmp = RREG32(i2c_cntl_0);
463 if (tmp & RADEON_I2C_DONE)
464 break;
465 else {
466 DRM_DEBUG("i2c read error 0x%08x\n", tmp);
467 WREG32(i2c_cntl_0, tmp | RADEON_I2C_ABORT);
468 ret = -EIO;
469 goto done;
470 }
471 }
472 p->buf[j] = RREG32(i2c_data) & 0xff;
473 } else {
474 WREG32(i2c_cntl_0, (RADEON_I2C_DONE |
475 RADEON_I2C_NACK |
476 RADEON_I2C_HALT |
477 RADEON_I2C_SOFT_RST));
478 WREG32(i2c_data, (p->addr << 1) & 0xff);
479 WREG32(i2c_data, p->buf[j]);
480 WREG32(i2c_cntl_1, ((1 << RADEON_I2C_DATA_COUNT_SHIFT) |
481 (1 << RADEON_I2C_ADDR_COUNT_SHIFT) |
482 RADEON_I2C_EN |
483 (48 << RADEON_I2C_TIME_LIMIT_SHIFT)));
484 WREG32(i2c_cntl_0, reg);
485 for (k = 0; k < 32; k++) {
486 udelay(10);
487 tmp = RREG32(i2c_cntl_0);
488 if (tmp & RADEON_I2C_GO)
489 continue;
490 tmp = RREG32(i2c_cntl_0);
491 if (tmp & RADEON_I2C_DONE)
492 break;
493 else {
494 DRM_DEBUG("i2c write error 0x%08x\n", tmp);
495 WREG32(i2c_cntl_0, tmp | RADEON_I2C_ABORT);
496 ret = -EIO;
497 goto done;
498 }
499 }
500 }
501 }
502 }
503
504done:
505 WREG32(i2c_cntl_0, 0);
506 WREG32(i2c_cntl_1, 0);
507 WREG32(i2c_cntl_0, (RADEON_I2C_DONE |
508 RADEON_I2C_NACK |
509 RADEON_I2C_HALT |
510 RADEON_I2C_SOFT_RST));
511
512 if (rdev->is_atom_bios) {
513 tmp = RREG32(RADEON_BIOS_6_SCRATCH);
514 tmp &= ~ATOM_S6_HW_I2C_BUSY_STATE;
515 WREG32(RADEON_BIOS_6_SCRATCH, tmp);
516 }
517
518 mutex_unlock(&rdev->pm.mutex);
519 mutex_unlock(&rdev->dc_hw_i2c_mutex);
520
521 return ret;
522}
523
524/* hw i2c engine for r5xx hardware
525 * hw can buffer up to 15 bytes
526 */
527static int r500_hw_i2c_xfer(struct i2c_adapter *i2c_adap,
528 struct i2c_msg *msgs, int num)
529{
530 struct radeon_i2c_chan *i2c = i2c_get_adapdata(i2c_adap);
531 struct radeon_device *rdev = i2c->dev->dev_private;
532 struct radeon_i2c_bus_rec *rec = &i2c->rec;
533 struct i2c_msg *p;
534 int i, j, remaining, current_count, buffer_offset, ret = num;
535 u32 prescale;
536 u32 tmp, reg;
537 u32 saved1, saved2;
538
539 mutex_lock(&rdev->dc_hw_i2c_mutex);
540 /* take the pm lock since we need a constant sclk */
541 mutex_lock(&rdev->pm.mutex);
542
543 prescale = radeon_get_i2c_prescale(rdev);
544
545 /* clear gpio mask bits */
546 tmp = RREG32(rec->mask_clk_reg);
547 tmp &= ~rec->mask_clk_mask;
548 WREG32(rec->mask_clk_reg, tmp);
549 tmp = RREG32(rec->mask_clk_reg);
550
551 tmp = RREG32(rec->mask_data_reg);
552 tmp &= ~rec->mask_data_mask;
553 WREG32(rec->mask_data_reg, tmp);
554 tmp = RREG32(rec->mask_data_reg);
555
556 /* clear pin values */
557 tmp = RREG32(rec->a_clk_reg);
558 tmp &= ~rec->a_clk_mask;
559 WREG32(rec->a_clk_reg, tmp);
560 tmp = RREG32(rec->a_clk_reg);
561
562 tmp = RREG32(rec->a_data_reg);
563 tmp &= ~rec->a_data_mask;
564 WREG32(rec->a_data_reg, tmp);
565 tmp = RREG32(rec->a_data_reg);
566
567 /* set the pins to input */
568 tmp = RREG32(rec->en_clk_reg);
569 tmp &= ~rec->en_clk_mask;
570 WREG32(rec->en_clk_reg, tmp);
571 tmp = RREG32(rec->en_clk_reg);
572
573 tmp = RREG32(rec->en_data_reg);
574 tmp &= ~rec->en_data_mask;
575 WREG32(rec->en_data_reg, tmp);
576 tmp = RREG32(rec->en_data_reg);
577
578 /* */
579 tmp = RREG32(RADEON_BIOS_6_SCRATCH);
580 WREG32(RADEON_BIOS_6_SCRATCH, tmp | ATOM_S6_HW_I2C_BUSY_STATE);
581 saved1 = RREG32(AVIVO_DC_I2C_CONTROL1);
582 saved2 = RREG32(0x494);
583 WREG32(0x494, saved2 | 0x1);
584
585 WREG32(AVIVO_DC_I2C_ARBITRATION, AVIVO_DC_I2C_SW_WANTS_TO_USE_I2C);
586 for (i = 0; i < 50; i++) {
587 udelay(1);
588 if (RREG32(AVIVO_DC_I2C_ARBITRATION) & AVIVO_DC_I2C_SW_CAN_USE_I2C)
589 break;
590 }
591 if (i == 50) {
592 DRM_ERROR("failed to get i2c bus\n");
593 ret = -EBUSY;
594 goto done;
595 }
596
597 reg = AVIVO_DC_I2C_START | AVIVO_DC_I2C_STOP | AVIVO_DC_I2C_EN;
598 switch (rec->mask_clk_reg) {
599 case AVIVO_DC_GPIO_DDC1_MASK:
600 reg |= AVIVO_DC_I2C_PIN_SELECT(AVIVO_SEL_DDC1);
601 break;
602 case AVIVO_DC_GPIO_DDC2_MASK:
603 reg |= AVIVO_DC_I2C_PIN_SELECT(AVIVO_SEL_DDC2);
604 break;
605 case AVIVO_DC_GPIO_DDC3_MASK:
606 reg |= AVIVO_DC_I2C_PIN_SELECT(AVIVO_SEL_DDC3);
607 break;
608 default:
609 DRM_ERROR("gpio not supported with hw i2c\n");
610 ret = -EINVAL;
611 goto done;
612 }
613
614 /* check for bus probe */
615 p = &msgs[0];
616 if ((num == 1) && (p->len == 0)) {
617 WREG32(AVIVO_DC_I2C_STATUS1, (AVIVO_DC_I2C_DONE |
618 AVIVO_DC_I2C_NACK |
619 AVIVO_DC_I2C_HALT));
620 WREG32(AVIVO_DC_I2C_RESET, AVIVO_DC_I2C_SOFT_RESET);
621 udelay(1);
622 WREG32(AVIVO_DC_I2C_RESET, 0);
623
624 WREG32(AVIVO_DC_I2C_DATA, (p->addr << 1) & 0xff);
625 WREG32(AVIVO_DC_I2C_DATA, 0);
626
627 WREG32(AVIVO_DC_I2C_CONTROL3, AVIVO_DC_I2C_TIME_LIMIT(48));
628 WREG32(AVIVO_DC_I2C_CONTROL2, (AVIVO_DC_I2C_ADDR_COUNT(1) |
629 AVIVO_DC_I2C_DATA_COUNT(1) |
630 (prescale << 16)));
631 WREG32(AVIVO_DC_I2C_CONTROL1, reg);
632 WREG32(AVIVO_DC_I2C_STATUS1, AVIVO_DC_I2C_GO);
633 for (j = 0; j < 200; j++) {
634 udelay(50);
635 tmp = RREG32(AVIVO_DC_I2C_STATUS1);
636 if (tmp & AVIVO_DC_I2C_GO)
637 continue;
638 tmp = RREG32(AVIVO_DC_I2C_STATUS1);
639 if (tmp & AVIVO_DC_I2C_DONE)
640 break;
641 else {
642 DRM_DEBUG("i2c write error 0x%08x\n", tmp);
643 WREG32(AVIVO_DC_I2C_RESET, AVIVO_DC_I2C_ABORT);
644 ret = -EIO;
645 goto done;
646 }
647 }
648 goto done;
649 }
650
651 for (i = 0; i < num; i++) {
652 p = &msgs[i];
653 remaining = p->len;
654 buffer_offset = 0;
655 if (p->flags & I2C_M_RD) {
656 while (remaining) {
657 if (remaining > 15)
658 current_count = 15;
659 else
660 current_count = remaining;
661 WREG32(AVIVO_DC_I2C_STATUS1, (AVIVO_DC_I2C_DONE |
662 AVIVO_DC_I2C_NACK |
663 AVIVO_DC_I2C_HALT));
664 WREG32(AVIVO_DC_I2C_RESET, AVIVO_DC_I2C_SOFT_RESET);
665 udelay(1);
666 WREG32(AVIVO_DC_I2C_RESET, 0);
667
668 WREG32(AVIVO_DC_I2C_DATA, ((p->addr << 1) & 0xff) | 0x1);
669 WREG32(AVIVO_DC_I2C_CONTROL3, AVIVO_DC_I2C_TIME_LIMIT(48));
670 WREG32(AVIVO_DC_I2C_CONTROL2, (AVIVO_DC_I2C_ADDR_COUNT(1) |
671 AVIVO_DC_I2C_DATA_COUNT(current_count) |
672 (prescale << 16)));
673 WREG32(AVIVO_DC_I2C_CONTROL1, reg | AVIVO_DC_I2C_RECEIVE);
674 WREG32(AVIVO_DC_I2C_STATUS1, AVIVO_DC_I2C_GO);
675 for (j = 0; j < 200; j++) {
676 udelay(50);
677 tmp = RREG32(AVIVO_DC_I2C_STATUS1);
678 if (tmp & AVIVO_DC_I2C_GO)
679 continue;
680 tmp = RREG32(AVIVO_DC_I2C_STATUS1);
681 if (tmp & AVIVO_DC_I2C_DONE)
682 break;
683 else {
684 DRM_DEBUG("i2c read error 0x%08x\n", tmp);
685 WREG32(AVIVO_DC_I2C_RESET, AVIVO_DC_I2C_ABORT);
686 ret = -EIO;
687 goto done;
688 }
689 }
690 for (j = 0; j < current_count; j++)
691 p->buf[buffer_offset + j] = RREG32(AVIVO_DC_I2C_DATA) & 0xff;
692 remaining -= current_count;
693 buffer_offset += current_count;
694 }
695 } else {
696 while (remaining) {
697 if (remaining > 15)
698 current_count = 15;
699 else
700 current_count = remaining;
701 WREG32(AVIVO_DC_I2C_STATUS1, (AVIVO_DC_I2C_DONE |
702 AVIVO_DC_I2C_NACK |
703 AVIVO_DC_I2C_HALT));
704 WREG32(AVIVO_DC_I2C_RESET, AVIVO_DC_I2C_SOFT_RESET);
705 udelay(1);
706 WREG32(AVIVO_DC_I2C_RESET, 0);
707
708 WREG32(AVIVO_DC_I2C_DATA, (p->addr << 1) & 0xff);
709 for (j = 0; j < current_count; j++)
710 WREG32(AVIVO_DC_I2C_DATA, p->buf[buffer_offset + j]);
711
712 WREG32(AVIVO_DC_I2C_CONTROL3, AVIVO_DC_I2C_TIME_LIMIT(48));
713 WREG32(AVIVO_DC_I2C_CONTROL2, (AVIVO_DC_I2C_ADDR_COUNT(1) |
714 AVIVO_DC_I2C_DATA_COUNT(current_count) |
715 (prescale << 16)));
716 WREG32(AVIVO_DC_I2C_CONTROL1, reg);
717 WREG32(AVIVO_DC_I2C_STATUS1, AVIVO_DC_I2C_GO);
718 for (j = 0; j < 200; j++) {
719 udelay(50);
720 tmp = RREG32(AVIVO_DC_I2C_STATUS1);
721 if (tmp & AVIVO_DC_I2C_GO)
722 continue;
723 tmp = RREG32(AVIVO_DC_I2C_STATUS1);
724 if (tmp & AVIVO_DC_I2C_DONE)
725 break;
726 else {
727 DRM_DEBUG("i2c write error 0x%08x\n", tmp);
728 WREG32(AVIVO_DC_I2C_RESET, AVIVO_DC_I2C_ABORT);
729 ret = -EIO;
730 goto done;
731 }
732 }
733 remaining -= current_count;
734 buffer_offset += current_count;
735 }
736 }
737 }
738
739done:
740 WREG32(AVIVO_DC_I2C_STATUS1, (AVIVO_DC_I2C_DONE |
741 AVIVO_DC_I2C_NACK |
742 AVIVO_DC_I2C_HALT));
743 WREG32(AVIVO_DC_I2C_RESET, AVIVO_DC_I2C_SOFT_RESET);
744 udelay(1);
745 WREG32(AVIVO_DC_I2C_RESET, 0);
746
747 WREG32(AVIVO_DC_I2C_ARBITRATION, AVIVO_DC_I2C_SW_DONE_USING_I2C);
748 WREG32(AVIVO_DC_I2C_CONTROL1, saved1);
749 WREG32(0x494, saved2);
750 tmp = RREG32(RADEON_BIOS_6_SCRATCH);
751 tmp &= ~ATOM_S6_HW_I2C_BUSY_STATE;
752 WREG32(RADEON_BIOS_6_SCRATCH, tmp);
753
754 mutex_unlock(&rdev->pm.mutex);
755 mutex_unlock(&rdev->dc_hw_i2c_mutex);
756
757 return ret;
758}
759
760static int radeon_sw_i2c_xfer(struct i2c_adapter *i2c_adap,
761 struct i2c_msg *msgs, int num)
762{
763 struct radeon_i2c_chan *i2c = i2c_get_adapdata(i2c_adap);
764 int ret;
765
766 radeon_i2c_do_lock(i2c, 1);
767 ret = i2c_transfer(&i2c->algo.radeon.bit_adapter, msgs, num);
768 radeon_i2c_do_lock(i2c, 0);
769
770 return ret;
771}
772
773static int radeon_i2c_xfer(struct i2c_adapter *i2c_adap,
774 struct i2c_msg *msgs, int num)
775{
776 struct radeon_i2c_chan *i2c = i2c_get_adapdata(i2c_adap);
777 struct radeon_device *rdev = i2c->dev->dev_private;
778 struct radeon_i2c_bus_rec *rec = &i2c->rec;
779 int ret;
780
781 switch (rdev->family) {
782 case CHIP_R100:
783 case CHIP_RV100:
784 case CHIP_RS100:
785 case CHIP_RV200:
786 case CHIP_RS200:
787 case CHIP_R200:
788 case CHIP_RV250:
789 case CHIP_RS300:
790 case CHIP_RV280:
791 case CHIP_R300:
792 case CHIP_R350:
793 case CHIP_RV350:
794 case CHIP_RV380:
795 case CHIP_R420:
796 case CHIP_R423:
797 case CHIP_RV410:
798 case CHIP_RS400:
799 case CHIP_RS480:
800 if (rec->hw_capable)
801 ret = r100_hw_i2c_xfer(i2c_adap, msgs, num);
802 else
803 ret = radeon_sw_i2c_xfer(i2c_adap, msgs, num);
804 break;
805 case CHIP_RS600:
806 case CHIP_RS690:
807 case CHIP_RS740:
808 /* XXX fill in hw i2c implementation */
809 ret = radeon_sw_i2c_xfer(i2c_adap, msgs, num);
810 break;
811 case CHIP_RV515:
812 case CHIP_R520:
813 case CHIP_RV530:
814 case CHIP_RV560:
815 case CHIP_RV570:
816 case CHIP_R580:
817 if (rec->hw_capable) {
818 if (rec->mm_i2c)
819 ret = r100_hw_i2c_xfer(i2c_adap, msgs, num);
820 else
821 ret = r500_hw_i2c_xfer(i2c_adap, msgs, num);
822 } else
823 ret = radeon_sw_i2c_xfer(i2c_adap, msgs, num);
824 break;
825 case CHIP_R600:
826 case CHIP_RV610:
827 case CHIP_RV630:
828 case CHIP_RV670:
829 /* XXX fill in hw i2c implementation */
830 ret = radeon_sw_i2c_xfer(i2c_adap, msgs, num);
831 break;
832 case CHIP_RV620:
833 case CHIP_RV635:
834 case CHIP_RS780:
835 case CHIP_RS880:
836 case CHIP_RV770:
837 case CHIP_RV730:
838 case CHIP_RV710:
839 case CHIP_RV740:
840 /* XXX fill in hw i2c implementation */
841 ret = radeon_sw_i2c_xfer(i2c_adap, msgs, num);
842 break;
843 case CHIP_CEDAR:
844 case CHIP_REDWOOD:
845 case CHIP_JUNIPER:
846 case CHIP_CYPRESS:
847 case CHIP_HEMLOCK:
848 /* XXX fill in hw i2c implementation */
849 ret = radeon_sw_i2c_xfer(i2c_adap, msgs, num);
850 break;
851 default:
852 DRM_ERROR("i2c: unhandled radeon chip\n");
853 ret = -EIO;
854 break;
855 }
856
857 return ret;
858}
859
860static u32 radeon_i2c_func(struct i2c_adapter *adap)
861{
862 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
863}
864
865static const struct i2c_algorithm radeon_i2c_algo = {
866 .master_xfer = radeon_i2c_xfer,
867 .functionality = radeon_i2c_func,
868};
869
171struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev, 870struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev,
172 struct radeon_i2c_bus_rec *rec, 871 struct radeon_i2c_bus_rec *rec,
173 const char *name) 872 const char *name)
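
The GO/DONE poll appears several times in r100_hw_i2c_xfer() and r500_hw_i2c_xfer() above; the patch does not factor it out, but its shape could be captured in one helper along these lines (a sketch, not kernel code; it also reports a timeout where the original loop silently falls through):

static int i2c_engine_wait_model(struct radeon_device *rdev, u32 cntl_reg)
{
	u32 tmp;
	int k;

	for (k = 0; k < 32; k++) {
		udelay(10);
		tmp = RREG32(cntl_reg);
		if (tmp & RADEON_I2C_GO)
			continue;	/* engine still running */
		if (tmp & RADEON_I2C_DONE)
			return 0;	/* transfer finished cleanly */
		WREG32(cntl_reg, tmp | RADEON_I2C_ABORT);
		return -EIO;		/* halted or NACKed */
	}
	return -ETIMEDOUT;		/* GO never cleared */
}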
@@ -179,23 +878,36 @@ struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev,
179 if (i2c == NULL) 878 if (i2c == NULL)
180 return NULL; 879 return NULL;
181 880
182 i2c->adapter.owner = THIS_MODULE; 881 /* set the internal bit adapter */
183 i2c->dev = dev; 882 i2c->algo.radeon.bit_adapter.owner = THIS_MODULE;
184 i2c_set_adapdata(&i2c->adapter, i2c); 883 i2c_set_adapdata(&i2c->algo.radeon.bit_adapter, i2c);
185 i2c->adapter.algo_data = &i2c->algo.bit; 884 sprintf(i2c->algo.radeon.bit_adapter.name, "Radeon internal i2c bit bus %s", name);
186 i2c->algo.bit.setsda = set_data; 885 i2c->algo.radeon.bit_adapter.algo_data = &i2c->algo.radeon.bit_data;
187 i2c->algo.bit.setscl = set_clock; 886 i2c->algo.radeon.bit_data.setsda = set_data;
188 i2c->algo.bit.getsda = get_data; 887 i2c->algo.radeon.bit_data.setscl = set_clock;
189 i2c->algo.bit.getscl = get_clock; 888 i2c->algo.radeon.bit_data.getsda = get_data;
190 i2c->algo.bit.udelay = 20; 889 i2c->algo.radeon.bit_data.getscl = get_clock;
890 i2c->algo.radeon.bit_data.udelay = 20;
191 /* vesa says 2.2 ms is enough, 1 jiffy doesn't seem to always 891 /* vesa says 2.2 ms is enough, 1 jiffy doesn't seem to always
192 * make this, 2 jiffies is a lot more reliable */ 892 * make this, 2 jiffies is a lot more reliable */
193 i2c->algo.bit.timeout = 2; 893 i2c->algo.radeon.bit_data.timeout = 2;
194 i2c->algo.bit.data = i2c; 894 i2c->algo.radeon.bit_data.data = i2c;
895 ret = i2c_bit_add_bus(&i2c->algo.radeon.bit_adapter);
896 if (ret) {
897 DRM_ERROR("Failed to register internal bit i2c %s\n", name);
898 goto out_free;
899 }
900 /* set the radeon i2c adapter */
901 i2c->dev = dev;
195 i2c->rec = *rec; 902 i2c->rec = *rec;
196 ret = i2c_bit_add_bus(&i2c->adapter); 903 i2c->adapter.owner = THIS_MODULE;
904 i2c_set_adapdata(&i2c->adapter, i2c);
905 sprintf(i2c->adapter.name, "Radeon i2c %s", name);
906 i2c->adapter.algo_data = &i2c->algo.radeon;
907 i2c->adapter.algo = &radeon_i2c_algo;
908 ret = i2c_add_adapter(&i2c->adapter);
197 if (ret) { 909 if (ret) {
198 DRM_INFO("Failed to register i2c %s\n", name); 910 DRM_ERROR("Failed to register i2c %s\n", name);
199 goto out_free; 911 goto out_free;
200 } 912 }
201 913
@@ -237,11 +949,19 @@ out_free:
237 949
238} 950}
239 951
240
241void radeon_i2c_destroy(struct radeon_i2c_chan *i2c) 952void radeon_i2c_destroy(struct radeon_i2c_chan *i2c)
242{ 953{
243 if (!i2c) 954 if (!i2c)
244 return; 955 return;
956 i2c_del_adapter(&i2c->algo.radeon.bit_adapter);
957 i2c_del_adapter(&i2c->adapter);
958 kfree(i2c);
959}
960
961void radeon_i2c_destroy_dp(struct radeon_i2c_chan *i2c)
962{
963 if (!i2c)
964 return;
245 965
246 i2c_del_adapter(&i2c->adapter); 966 i2c_del_adapter(&i2c->adapter);
247 kfree(i2c); 967 kfree(i2c);
@@ -252,10 +972,10 @@ struct drm_encoder *radeon_best_encoder(struct drm_connector *connector)
252 return NULL; 972 return NULL;
253} 973}
254 974
255void radeon_i2c_sw_get_byte(struct radeon_i2c_chan *i2c_bus, 975void radeon_i2c_get_byte(struct radeon_i2c_chan *i2c_bus,
256 u8 slave_addr, 976 u8 slave_addr,
257 u8 addr, 977 u8 addr,
258 u8 *val) 978 u8 *val)
259{ 979{
260 u8 out_buf[2]; 980 u8 out_buf[2];
261 u8 in_buf[2]; 981 u8 in_buf[2];
@@ -286,10 +1006,10 @@ void radeon_i2c_sw_get_byte(struct radeon_i2c_chan *i2c_bus,
286 } 1006 }
287} 1007}
288 1008
289void radeon_i2c_sw_put_byte(struct radeon_i2c_chan *i2c_bus, 1009void radeon_i2c_put_byte(struct radeon_i2c_chan *i2c_bus,
290 u8 slave_addr, 1010 u8 slave_addr,
291 u8 addr, 1011 u8 addr,
292 u8 val) 1012 u8 val)
293{ 1013{
294 uint8_t out_buf[2]; 1014 uint8_t out_buf[2];
295 struct i2c_msg msg = { 1015 struct i2c_msg msg = {
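The get-byte helper renamed above (it loses the _sw_ prefix because it now goes through the muxing adapter, which may pick a hardware engine) follows the standard two-message i2c register read: write the register address, then read the byte back. A generic, hedged sketch of that shape; read_reg_byte is an illustrative name, not the driver's:

#include <linux/i2c.h>

static int read_reg_byte(struct i2c_adapter *adap, u8 slave, u8 reg, u8 *val)
{
	u8 out = reg;
	struct i2c_msg msgs[] = {
		{ .addr = slave, .flags = 0,        .len = 1, .buf = &out }, /* set register */
		{ .addr = slave, .flags = I2C_M_RD, .len = 1, .buf = val  }, /* read it back */
	};
	int ret;

	/* i2c_transfer() returns the number of messages processed, or -errno */
	ret = i2c_transfer(adap, msgs, 2);
	if (ret < 0)
		return ret;
	return ret == 2 ? 0 : -EIO;
}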
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index f23b05606eb5..20ec276e7596 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -30,6 +30,8 @@
30#include "radeon.h" 30#include "radeon.h"
31#include "radeon_drm.h" 31#include "radeon_drm.h"
32 32
33#include <linux/vga_switcheroo.h>
34
33int radeon_driver_unload_kms(struct drm_device *dev) 35int radeon_driver_unload_kms(struct drm_device *dev)
34{ 36{
35 struct radeon_device *rdev = dev->dev_private; 37 struct radeon_device *rdev = dev->dev_private;
@@ -136,6 +138,7 @@ int radeon_driver_firstopen_kms(struct drm_device *dev)
136 138
137void radeon_driver_lastclose_kms(struct drm_device *dev) 139void radeon_driver_lastclose_kms(struct drm_device *dev)
138{ 140{
141 vga_switcheroo_process_delayed_switch();
139} 142}
140 143
141int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv) 144int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
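The lastclose hook added above lets vga_switcheroo carry out a GPU switch that was requested while userspace still held the device node. A minimal sketch of the hook shape, assuming the drm headers of this era; my_lastclose is illustrative:

#include "drmP.h"
#include <linux/vga_switcheroo.h>

static void my_lastclose(struct drm_device *dev)
{
	/* perform any switch deferred because clients held the device;
	 * returns 0 or -errno, which lastclose has no way to report */
	vga_switcheroo_process_delayed_switch();
}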
@@ -276,17 +279,17 @@ struct drm_ioctl_desc radeon_ioctls_kms[] = {
276 DRM_IOCTL_DEF(DRM_RADEON_SURF_ALLOC, radeon_surface_alloc_kms, DRM_AUTH), 279 DRM_IOCTL_DEF(DRM_RADEON_SURF_ALLOC, radeon_surface_alloc_kms, DRM_AUTH),
277 DRM_IOCTL_DEF(DRM_RADEON_SURF_FREE, radeon_surface_free_kms, DRM_AUTH), 280 DRM_IOCTL_DEF(DRM_RADEON_SURF_FREE, radeon_surface_free_kms, DRM_AUTH),
278 /* KMS */ 281 /* KMS */
279 DRM_IOCTL_DEF(DRM_RADEON_GEM_INFO, radeon_gem_info_ioctl, DRM_AUTH), 282 DRM_IOCTL_DEF(DRM_RADEON_GEM_INFO, radeon_gem_info_ioctl, DRM_AUTH|DRM_UNLOCKED),
280 DRM_IOCTL_DEF(DRM_RADEON_GEM_CREATE, radeon_gem_create_ioctl, DRM_AUTH), 283 DRM_IOCTL_DEF(DRM_RADEON_GEM_CREATE, radeon_gem_create_ioctl, DRM_AUTH|DRM_UNLOCKED),
281 DRM_IOCTL_DEF(DRM_RADEON_GEM_MMAP, radeon_gem_mmap_ioctl, DRM_AUTH), 284 DRM_IOCTL_DEF(DRM_RADEON_GEM_MMAP, radeon_gem_mmap_ioctl, DRM_AUTH|DRM_UNLOCKED),
282 DRM_IOCTL_DEF(DRM_RADEON_GEM_SET_DOMAIN, radeon_gem_set_domain_ioctl, DRM_AUTH), 285 DRM_IOCTL_DEF(DRM_RADEON_GEM_SET_DOMAIN, radeon_gem_set_domain_ioctl, DRM_AUTH|DRM_UNLOCKED),
283 DRM_IOCTL_DEF(DRM_RADEON_GEM_PREAD, radeon_gem_pread_ioctl, DRM_AUTH), 286 DRM_IOCTL_DEF(DRM_RADEON_GEM_PREAD, radeon_gem_pread_ioctl, DRM_AUTH|DRM_UNLOCKED),
284 DRM_IOCTL_DEF(DRM_RADEON_GEM_PWRITE, radeon_gem_pwrite_ioctl, DRM_AUTH), 287 DRM_IOCTL_DEF(DRM_RADEON_GEM_PWRITE, radeon_gem_pwrite_ioctl, DRM_AUTH|DRM_UNLOCKED),
285 DRM_IOCTL_DEF(DRM_RADEON_GEM_WAIT_IDLE, radeon_gem_wait_idle_ioctl, DRM_AUTH), 288 DRM_IOCTL_DEF(DRM_RADEON_GEM_WAIT_IDLE, radeon_gem_wait_idle_ioctl, DRM_AUTH|DRM_UNLOCKED),
286 DRM_IOCTL_DEF(DRM_RADEON_CS, radeon_cs_ioctl, DRM_AUTH), 289 DRM_IOCTL_DEF(DRM_RADEON_CS, radeon_cs_ioctl, DRM_AUTH|DRM_UNLOCKED),
287 DRM_IOCTL_DEF(DRM_RADEON_INFO, radeon_info_ioctl, DRM_AUTH), 290 DRM_IOCTL_DEF(DRM_RADEON_INFO, radeon_info_ioctl, DRM_AUTH|DRM_UNLOCKED),
288 DRM_IOCTL_DEF(DRM_RADEON_GEM_SET_TILING, radeon_gem_set_tiling_ioctl, DRM_AUTH), 291 DRM_IOCTL_DEF(DRM_RADEON_GEM_SET_TILING, radeon_gem_set_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED),
289 DRM_IOCTL_DEF(DRM_RADEON_GEM_GET_TILING, radeon_gem_get_tiling_ioctl, DRM_AUTH), 292 DRM_IOCTL_DEF(DRM_RADEON_GEM_GET_TILING, radeon_gem_get_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED),
290 DRM_IOCTL_DEF(DRM_RADEON_GEM_BUSY, radeon_gem_busy_ioctl, DRM_AUTH), 293 DRM_IOCTL_DEF(DRM_RADEON_GEM_BUSY, radeon_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED),
291}; 294};
292int radeon_max_kms_ioctl = DRM_ARRAY_SIZE(radeon_ioctls_kms); 295int radeon_max_kms_ioctl = DRM_ARRAY_SIZE(radeon_ioctls_kms);
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
index b6d8081e1246..df23d6a01d02 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -403,7 +403,7 @@ int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y,
403 403
404 /* if scanout was in GTT this really wouldn't work */ 404 /* if scanout was in GTT this really wouldn't work */
405 /* crtc offset is from display base addr not FB location */ 405 /* crtc offset is from display base addr not FB location */
406 radeon_crtc->legacy_display_base_addr = rdev->mc.vram_location; 406 radeon_crtc->legacy_display_base_addr = rdev->mc.vram_start;
407 407
408 base -= radeon_crtc->legacy_display_base_addr; 408 base -= radeon_crtc->legacy_display_base_addr;
409 409
@@ -582,29 +582,6 @@ static bool radeon_set_crtc_timing(struct drm_crtc *crtc, struct drm_display_mod
582 ? RADEON_CRTC_V_SYNC_POL 582 ? RADEON_CRTC_V_SYNC_POL
583 : 0)); 583 : 0));
584 584
585 /* TODO -> Dell Server */
586 if (0) {
587 uint32_t disp_hw_debug = RREG32(RADEON_DISP_HW_DEBUG);
588 uint32_t tv_dac_cntl = RREG32(RADEON_TV_DAC_CNTL);
589 uint32_t dac2_cntl = RREG32(RADEON_DAC_CNTL2);
590 uint32_t crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL);
591
592 dac2_cntl &= ~RADEON_DAC2_DAC_CLK_SEL;
593 dac2_cntl |= RADEON_DAC2_DAC2_CLK_SEL;
594
595 /* For CRT on DAC2, don't turn it on if BIOS didn't
596 enable it, even it's detected.
597 */
598 disp_hw_debug |= RADEON_CRT2_DISP1_SEL;
599 tv_dac_cntl &= ~((1<<2) | (3<<8) | (7<<24) | (0xff<<16));
600 tv_dac_cntl |= (0x03 | (2<<8) | (0x58<<16));
601
602 WREG32(RADEON_TV_DAC_CNTL, tv_dac_cntl);
603 WREG32(RADEON_DISP_HW_DEBUG, disp_hw_debug);
604 WREG32(RADEON_DAC_CNTL2, dac2_cntl);
605 WREG32(RADEON_CRTC2_GEN_CNTL, crtc2_gen_cntl);
606 }
607
608 if (radeon_crtc->crtc_id) { 585 if (radeon_crtc->crtc_id) {
609 uint32_t crtc2_gen_cntl; 586 uint32_t crtc2_gen_cntl;
610 uint32_t disp2_merge_cntl; 587 uint32_t disp2_merge_cntl;
@@ -726,6 +703,10 @@ static void radeon_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
726 pll = &rdev->clock.p1pll; 703 pll = &rdev->clock.p1pll;
727 704
728 pll->flags = RADEON_PLL_LEGACY; 705 pll->flags = RADEON_PLL_LEGACY;
706 if (radeon_new_pll == 1)
707 pll->algo = PLL_ALGO_NEW;
708 else
709 pll->algo = PLL_ALGO_LEGACY;
729 710
730 if (mode->clock > 200000) /* range limits??? */ 711 if (mode->clock > 200000) /* range limits??? */
731 pll->flags |= RADEON_PLL_PREFER_HIGH_FB_DIV; 712 pll->flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
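The hunk above gates the new PLL divider search behind the radeon_new_pll module parameter, defaulting to the legacy algorithm. A hedged sketch of that gating pattern; the parameter name matches the patch, everything else is illustrative:

#include <linux/module.h>

enum pll_algo { PLL_ALGO_LEGACY, PLL_ALGO_NEW };

static int radeon_new_pll = -1;	/* -1 = auto, 0 = legacy, 1 = new */
module_param(radeon_new_pll, int, 0444);
MODULE_PARM_DESC(radeon_new_pll, "Pick which PLL algorithm to use (-1 = auto)");

static enum pll_algo pick_pll_algo(void)
{
	/* an explicit request wins; otherwise keep the conservative default */
	return radeon_new_pll == 1 ? PLL_ALGO_NEW : PLL_ALGO_LEGACY;
}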
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
index 38e45e231ef5..cf389ce50a8a 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
@@ -115,6 +115,9 @@ static void radeon_legacy_lvds_dpms(struct drm_encoder *encoder, int mode)
115 radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false); 115 radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
116 else 116 else
117 radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false); 117 radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
118
119 /* adjust pm to dpms change */
120 radeon_pm_compute_clocks(rdev);
118} 121}
119 122
120static void radeon_legacy_lvds_prepare(struct drm_encoder *encoder) 123static void radeon_legacy_lvds_prepare(struct drm_encoder *encoder)
@@ -214,6 +217,11 @@ static bool radeon_legacy_mode_fixup(struct drm_encoder *encoder,
214 struct drm_display_mode *adjusted_mode) 217 struct drm_display_mode *adjusted_mode)
215{ 218{
216 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 219 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
220 struct drm_device *dev = encoder->dev;
221 struct radeon_device *rdev = dev->dev_private;
222
223 /* adjust pm to upcoming mode change */
224 radeon_pm_compute_clocks(rdev);
217 225
218 /* set the active encoder to connector routing */ 226 /* set the active encoder to connector routing */
219 radeon_encoder_set_active_device(encoder); 227 radeon_encoder_set_active_device(encoder);
@@ -285,6 +293,9 @@ static void radeon_legacy_primary_dac_dpms(struct drm_encoder *encoder, int mode
285 radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false); 293 radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
286 else 294 else
287 radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false); 295 radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
296
297 /* adjust pm to dpms change */
298 radeon_pm_compute_clocks(rdev);
288} 299}
289 300
290static void radeon_legacy_primary_dac_prepare(struct drm_encoder *encoder) 301static void radeon_legacy_primary_dac_prepare(struct drm_encoder *encoder)
@@ -470,6 +481,9 @@ static void radeon_legacy_tmds_int_dpms(struct drm_encoder *encoder, int mode)
470 radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false); 481 radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
471 else 482 else
472 radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false); 483 radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
484
485 /* adjust pm to dpms change */
486 radeon_pm_compute_clocks(rdev);
473} 487}
474 488
475static void radeon_legacy_tmds_int_prepare(struct drm_encoder *encoder) 489static void radeon_legacy_tmds_int_prepare(struct drm_encoder *encoder)
@@ -635,6 +649,9 @@ static void radeon_legacy_tmds_ext_dpms(struct drm_encoder *encoder, int mode)
635 radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false); 649 radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
636 else 650 else
637 radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false); 651 radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
652
653 /* adjust pm to dpms change */
654 radeon_pm_compute_clocks(rdev);
638} 655}
639 656
640static void radeon_legacy_tmds_ext_prepare(struct drm_encoder *encoder) 657static void radeon_legacy_tmds_ext_prepare(struct drm_encoder *encoder)
@@ -842,6 +859,9 @@ static void radeon_legacy_tv_dac_dpms(struct drm_encoder *encoder, int mode)
842 radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false); 859 radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
843 else 860 else
844 radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false); 861 radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
862
863 /* adjust pm to dpms change */
864 radeon_pm_compute_clocks(rdev);
845} 865}
846 866
847static void radeon_legacy_tv_dac_prepare(struct drm_encoder *encoder) 867static void radeon_legacy_tv_dac_prepare(struct drm_encoder *encoder)
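Each of the five dpms hooks touched above gains the same trailing call, so the power code can recount lit CRTCs whenever any output turns on or off. A hedged sketch of the shared hook shape (the encoder/device lookups follow the patch; the body is elided):

static void my_encoder_dpms(struct drm_encoder *encoder, int mode)
{
	struct drm_device *dev = encoder->dev;
	struct radeon_device *rdev = dev->dev_private;

	/* ... existing on/off programming of the encoder ... */

	radeon_pm_compute_clocks(rdev);	/* adjust pm to the dpms change */
}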
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index e81b2aeb6a8f..1702b820aa4d 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -83,6 +83,8 @@ struct radeon_i2c_bus_rec {
83 bool valid; 83 bool valid;
84 /* id used by atom */ 84 /* id used by atom */
85 uint8_t i2c_id; 85 uint8_t i2c_id;
 86 /* hpd id used by atom */
87 uint8_t hpd_id;
86 /* can be used with hw i2c engine */ 88 /* can be used with hw i2c engine */
87 bool hw_capable; 89 bool hw_capable;
88 /* uses multi-media i2c engine */ 90 /* uses multi-media i2c engine */
@@ -113,6 +115,7 @@ struct radeon_tmds_pll {
113 115
114#define RADEON_MAX_BIOS_CONNECTOR 16 116#define RADEON_MAX_BIOS_CONNECTOR 16
115 117
118/* pll flags */
116#define RADEON_PLL_USE_BIOS_DIVS (1 << 0) 119#define RADEON_PLL_USE_BIOS_DIVS (1 << 0)
117#define RADEON_PLL_NO_ODD_POST_DIV (1 << 1) 120#define RADEON_PLL_NO_ODD_POST_DIV (1 << 1)
118#define RADEON_PLL_USE_REF_DIV (1 << 2) 121#define RADEON_PLL_USE_REF_DIV (1 << 2)
@@ -127,6 +130,12 @@ struct radeon_tmds_pll {
127#define RADEON_PLL_PREFER_CLOSEST_LOWER (1 << 11) 130#define RADEON_PLL_PREFER_CLOSEST_LOWER (1 << 11)
128#define RADEON_PLL_USE_POST_DIV (1 << 12) 131#define RADEON_PLL_USE_POST_DIV (1 << 12)
129 132
133/* pll algo */
134enum radeon_pll_algo {
135 PLL_ALGO_LEGACY,
136 PLL_ALGO_NEW
137};
138
130struct radeon_pll { 139struct radeon_pll {
131 /* reference frequency */ 140 /* reference frequency */
132 uint32_t reference_freq; 141 uint32_t reference_freq;
@@ -157,6 +166,13 @@ struct radeon_pll {
157 166
158 /* pll id */ 167 /* pll id */
159 uint32_t id; 168 uint32_t id;
169 /* pll algo */
170 enum radeon_pll_algo algo;
171};
172
173struct i2c_algo_radeon_data {
174 struct i2c_adapter bit_adapter;
175 struct i2c_algo_bit_data bit_data;
160}; 176};
161 177
162struct radeon_i2c_chan { 178struct radeon_i2c_chan {
@@ -164,7 +180,7 @@ struct radeon_i2c_chan {
164 struct drm_device *dev; 180 struct drm_device *dev;
165 union { 181 union {
166 struct i2c_algo_dp_aux_data dp; 182 struct i2c_algo_dp_aux_data dp;
167 struct i2c_algo_bit_data bit; 183 struct i2c_algo_radeon_data radeon;
168 } algo; 184 } algo;
169 struct radeon_i2c_bus_rec rec; 185 struct radeon_i2c_bus_rec rec;
170}; 186};
@@ -193,7 +209,7 @@ struct radeon_mode_info {
193 struct card_info *atom_card_info; 209 struct card_info *atom_card_info;
194 enum radeon_connector_table connector_table; 210 enum radeon_connector_table connector_table;
195 bool mode_config_initialized; 211 bool mode_config_initialized;
196 struct radeon_crtc *crtcs[2]; 212 struct radeon_crtc *crtcs[6];
197 /* DVI-I properties */ 213 /* DVI-I properties */
198 struct drm_property *coherent_mode_property; 214 struct drm_property *coherent_mode_property;
199 /* DAC enable load detect */ 215 /* DAC enable load detect */
@@ -202,7 +218,8 @@ struct radeon_mode_info {
202 struct drm_property *tv_std_property; 218 struct drm_property *tv_std_property;
203 /* legacy TMDS PLL detect */ 219 /* legacy TMDS PLL detect */
204 struct drm_property *tmds_pll_property; 220 struct drm_property *tmds_pll_property;
205 221 /* hardcoded DFP edid from BIOS */
222 struct edid *bios_hardcoded_edid;
206}; 223};
207 224
208#define MAX_H_CODE_TIMING_LEN 32 225#define MAX_H_CODE_TIMING_LEN 32
@@ -237,6 +254,7 @@ struct radeon_crtc {
237 fixed20_12 vsc; 254 fixed20_12 vsc;
238 fixed20_12 hsc; 255 fixed20_12 hsc;
239 struct drm_display_mode native_mode; 256 struct drm_display_mode native_mode;
257 int pll_id;
240}; 258};
241 259
242struct radeon_encoder_primary_dac { 260struct radeon_encoder_primary_dac {
@@ -303,6 +321,7 @@ struct radeon_encoder_atom_dig {
303 /* atom lvds */ 321 /* atom lvds */
304 uint32_t lvds_misc; 322 uint32_t lvds_misc;
305 uint16_t panel_pwr_delay; 323 uint16_t panel_pwr_delay;
324 enum radeon_pll_algo pll_algo;
306 struct radeon_atom_ss *ss; 325 struct radeon_atom_ss *ss;
307 /* panel mode */ 326 /* panel mode */
308 struct drm_display_mode native_mode; 327 struct drm_display_mode native_mode;
@@ -398,6 +417,7 @@ extern void dp_link_train(struct drm_encoder *encoder,
398 struct drm_connector *connector); 417 struct drm_connector *connector);
399extern u8 radeon_dp_getsinktype(struct radeon_connector *radeon_connector); 418extern u8 radeon_dp_getsinktype(struct radeon_connector *radeon_connector);
400extern bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector); 419extern bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector);
420extern void atombios_dig_encoder_setup(struct drm_encoder *encoder, int action);
401extern void atombios_dig_transmitter_setup(struct drm_encoder *encoder, 421extern void atombios_dig_transmitter_setup(struct drm_encoder *encoder,
402 int action, uint8_t lane_num, 422 int action, uint8_t lane_num,
403 uint8_t lane_set); 423 uint8_t lane_set);
@@ -411,14 +431,15 @@ extern struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev,
411 struct radeon_i2c_bus_rec *rec, 431 struct radeon_i2c_bus_rec *rec,
412 const char *name); 432 const char *name);
413extern void radeon_i2c_destroy(struct radeon_i2c_chan *i2c); 433extern void radeon_i2c_destroy(struct radeon_i2c_chan *i2c);
414extern void radeon_i2c_sw_get_byte(struct radeon_i2c_chan *i2c_bus, 434extern void radeon_i2c_destroy_dp(struct radeon_i2c_chan *i2c);
415 u8 slave_addr, 435extern void radeon_i2c_get_byte(struct radeon_i2c_chan *i2c_bus,
416 u8 addr, 436 u8 slave_addr,
417 u8 *val); 437 u8 addr,
418extern void radeon_i2c_sw_put_byte(struct radeon_i2c_chan *i2c, 438 u8 *val);
419 u8 slave_addr, 439extern void radeon_i2c_put_byte(struct radeon_i2c_chan *i2c,
420 u8 addr, 440 u8 slave_addr,
421 u8 val); 441 u8 addr,
442 u8 val);
422extern bool radeon_ddc_probe(struct radeon_connector *radeon_connector); 443extern bool radeon_ddc_probe(struct radeon_connector *radeon_connector);
423extern int radeon_ddc_get_modes(struct radeon_connector *radeon_connector); 444extern int radeon_ddc_get_modes(struct radeon_connector *radeon_connector);
424 445
@@ -432,14 +453,6 @@ extern void radeon_compute_pll(struct radeon_pll *pll,
432 uint32_t *ref_div_p, 453 uint32_t *ref_div_p,
433 uint32_t *post_div_p); 454 uint32_t *post_div_p);
434 455
435extern void radeon_compute_pll_avivo(struct radeon_pll *pll,
436 uint64_t freq,
437 uint32_t *dot_clock_p,
438 uint32_t *fb_div_p,
439 uint32_t *frac_fb_div_p,
440 uint32_t *ref_div_p,
441 uint32_t *post_div_p);
442
443extern void radeon_setup_encoder_clones(struct drm_device *dev); 456extern void radeon_setup_encoder_clones(struct drm_device *dev);
444 457
445struct drm_encoder *radeon_encoder_legacy_lvds_add(struct drm_device *dev, int bios_index); 458struct drm_encoder *radeon_encoder_legacy_lvds_add(struct drm_device *dev, int bios_index);
@@ -473,6 +486,9 @@ extern int radeon_crtc_cursor_set(struct drm_crtc *crtc,
473extern int radeon_crtc_cursor_move(struct drm_crtc *crtc, 486extern int radeon_crtc_cursor_move(struct drm_crtc *crtc,
474 int x, int y); 487 int x, int y);
475 488
489extern bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev);
490extern struct edid *
491radeon_combios_get_hardcoded_edid(struct radeon_device *rdev);
476extern bool radeon_atom_get_clock_info(struct drm_device *dev); 492extern bool radeon_atom_get_clock_info(struct drm_device *dev);
477extern bool radeon_combios_get_clock_info(struct drm_device *dev); 493extern bool radeon_combios_get_clock_info(struct drm_device *dev);
478extern struct radeon_encoder_atom_dig * 494extern struct radeon_encoder_atom_dig *
@@ -531,7 +547,6 @@ void radeon_atombios_init_crtc(struct drm_device *dev,
531 struct radeon_crtc *radeon_crtc); 547 struct radeon_crtc *radeon_crtc);
532void radeon_legacy_init_crtc(struct drm_device *dev, 548void radeon_legacy_init_crtc(struct drm_device *dev,
533 struct radeon_crtc *radeon_crtc); 549 struct radeon_crtc *radeon_crtc);
534extern void radeon_i2c_do_lock(struct radeon_i2c_chan *i2c, int lock_state);
535 550
536void radeon_get_clock_info(struct drm_device *dev); 551void radeon_get_clock_info(struct drm_device *dev);
537 552
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index f1da370928eb..fc9d00ac6b15 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -178,7 +178,6 @@ int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
178{ 178{
179 int r, i; 179 int r, i;
180 180
181 radeon_ttm_placement_from_domain(bo, domain);
182 if (bo->pin_count) { 181 if (bo->pin_count) {
183 bo->pin_count++; 182 bo->pin_count++;
184 if (gpu_addr) 183 if (gpu_addr)
@@ -186,6 +185,8 @@ int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
186 return 0; 185 return 0;
187 } 186 }
188 radeon_ttm_placement_from_domain(bo, domain); 187 radeon_ttm_placement_from_domain(bo, domain);
188 /* force to pin into visible video ram */
189 bo->placement.lpfn = bo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
189 for (i = 0; i < bo->placement.num_placement; i++) 190 for (i = 0; i < bo->placement.num_placement; i++)
190 bo->placements[i] |= TTM_PL_FLAG_NO_EVICT; 191 bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
191 r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); 192 r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
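The pin path above now clamps the allowed placement to the CPU-visible part of VRAM. TTM placements are expressed in pages, and lpfn ("last page frame number") caps the range; keeping it inside the visible aperture means pinned buffers such as scanout surfaces and cursors stay CPU-mappable. A hedged one-liner sketch, assuming the TTM headers of this era:

#include "ttm/ttm_bo_api.h"

static void clamp_to_visible_vram(struct ttm_placement *placement,
				  u64 visible_vram_size)
{
	/* first pfn stays 0; only the upper bound is restricted */
	placement->lpfn = visible_vram_size >> PAGE_SHIFT;
}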
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 8bce64cdc320..d4d1c39a0e99 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -18,21 +18,413 @@
18 * OTHER DEALINGS IN THE SOFTWARE. 18 * OTHER DEALINGS IN THE SOFTWARE.
19 * 19 *
20 * Authors: Rafał Miłecki <zajec5@gmail.com> 20 * Authors: Rafał Miłecki <zajec5@gmail.com>
21 * Alex Deucher <alexdeucher@gmail.com>
21 */ 22 */
22#include "drmP.h" 23#include "drmP.h"
23#include "radeon.h" 24#include "radeon.h"
25#include "avivod.h"
24 26
25int radeon_debugfs_pm_init(struct radeon_device *rdev); 27#define RADEON_IDLE_LOOP_MS 100
28#define RADEON_RECLOCK_DELAY_MS 200
29#define RADEON_WAIT_VBLANK_TIMEOUT 200
30
31static void radeon_pm_set_clocks_locked(struct radeon_device *rdev);
32static void radeon_pm_set_clocks(struct radeon_device *rdev);
33static void radeon_pm_idle_work_handler(struct work_struct *work);
34static int radeon_debugfs_pm_init(struct radeon_device *rdev);
35
36static const char *pm_state_names[4] = {
37 "PM_STATE_DISABLED",
38 "PM_STATE_MINIMUM",
39 "PM_STATE_PAUSED",
40 "PM_STATE_ACTIVE"
41};
42
43static const char *pm_state_types[5] = {
44 "Default",
45 "Powersave",
46 "Battery",
47 "Balanced",
48 "Performance",
49};
50
51static void radeon_print_power_mode_info(struct radeon_device *rdev)
52{
53 int i, j;
54 bool is_default;
55
56 DRM_INFO("%d Power State(s)\n", rdev->pm.num_power_states);
57 for (i = 0; i < rdev->pm.num_power_states; i++) {
58 if (rdev->pm.default_power_state == &rdev->pm.power_state[i])
59 is_default = true;
60 else
61 is_default = false;
62 DRM_INFO("State %d %s %s\n", i,
63 pm_state_types[rdev->pm.power_state[i].type],
64 is_default ? "(default)" : "");
65 if ((rdev->flags & RADEON_IS_PCIE) && !(rdev->flags & RADEON_IS_IGP))
66 DRM_INFO("\t%d PCIE Lanes\n", rdev->pm.power_state[i].non_clock_info.pcie_lanes);
67 DRM_INFO("\t%d Clock Mode(s)\n", rdev->pm.power_state[i].num_clock_modes);
68 for (j = 0; j < rdev->pm.power_state[i].num_clock_modes; j++) {
69 if (rdev->flags & RADEON_IS_IGP)
70 DRM_INFO("\t\t%d engine: %d\n",
71 j,
72 rdev->pm.power_state[i].clock_info[j].sclk * 10);
73 else
74 DRM_INFO("\t\t%d engine/memory: %d/%d\n",
75 j,
76 rdev->pm.power_state[i].clock_info[j].sclk * 10,
77 rdev->pm.power_state[i].clock_info[j].mclk * 10);
78 }
79 }
80}
81
82static struct radeon_power_state * radeon_pick_power_state(struct radeon_device *rdev,
83 enum radeon_pm_state_type type)
84{
85 int i, j;
86 enum radeon_pm_state_type wanted_types[2];
87 int wanted_count;
88
89 switch (type) {
90 case POWER_STATE_TYPE_DEFAULT:
91 default:
92 return rdev->pm.default_power_state;
93 case POWER_STATE_TYPE_POWERSAVE:
94 if (rdev->flags & RADEON_IS_MOBILITY) {
95 wanted_types[0] = POWER_STATE_TYPE_POWERSAVE;
96 wanted_types[1] = POWER_STATE_TYPE_BATTERY;
97 wanted_count = 2;
98 } else {
99 wanted_types[0] = POWER_STATE_TYPE_PERFORMANCE;
100 wanted_count = 1;
101 }
102 break;
103 case POWER_STATE_TYPE_BATTERY:
104 if (rdev->flags & RADEON_IS_MOBILITY) {
105 wanted_types[0] = POWER_STATE_TYPE_BATTERY;
106 wanted_types[1] = POWER_STATE_TYPE_POWERSAVE;
107 wanted_count = 2;
108 } else {
109 wanted_types[0] = POWER_STATE_TYPE_PERFORMANCE;
110 wanted_count = 1;
111 }
112 break;
113 case POWER_STATE_TYPE_BALANCED:
114 case POWER_STATE_TYPE_PERFORMANCE:
115 wanted_types[0] = type;
116 wanted_count = 1;
117 break;
118 }
119
120 for (i = 0; i < wanted_count; i++) {
121 for (j = 0; j < rdev->pm.num_power_states; j++) {
122 if (rdev->pm.power_state[j].type == wanted_types[i])
123 return &rdev->pm.power_state[j];
124 }
125 }
126
127 return rdev->pm.default_power_state;
128}
129
130static struct radeon_pm_clock_info * radeon_pick_clock_mode(struct radeon_device *rdev,
131 struct radeon_power_state *power_state,
132 enum radeon_pm_clock_mode_type type)
133{
134 switch (type) {
135 case POWER_MODE_TYPE_DEFAULT:
136 default:
137 return power_state->default_clock_mode;
138 case POWER_MODE_TYPE_LOW:
139 return &power_state->clock_info[0];
140 case POWER_MODE_TYPE_MID:
141 if (power_state->num_clock_modes > 2)
142 return &power_state->clock_info[1];
143 else
144 return &power_state->clock_info[0];
145 break;
146 case POWER_MODE_TYPE_HIGH:
147 return &power_state->clock_info[power_state->num_clock_modes - 1];
148 }
149
150}
151
152static void radeon_get_power_state(struct radeon_device *rdev,
153 enum radeon_pm_action action)
154{
155 switch (action) {
156 case PM_ACTION_MINIMUM:
157 rdev->pm.requested_power_state = radeon_pick_power_state(rdev, POWER_STATE_TYPE_BATTERY);
158 rdev->pm.requested_clock_mode =
159 radeon_pick_clock_mode(rdev, rdev->pm.requested_power_state, POWER_MODE_TYPE_LOW);
160 break;
161 case PM_ACTION_DOWNCLOCK:
162 rdev->pm.requested_power_state = radeon_pick_power_state(rdev, POWER_STATE_TYPE_POWERSAVE);
163 rdev->pm.requested_clock_mode =
164 radeon_pick_clock_mode(rdev, rdev->pm.requested_power_state, POWER_MODE_TYPE_MID);
165 break;
166 case PM_ACTION_UPCLOCK:
167 rdev->pm.requested_power_state = radeon_pick_power_state(rdev, POWER_STATE_TYPE_DEFAULT);
168 rdev->pm.requested_clock_mode =
169 radeon_pick_clock_mode(rdev, rdev->pm.requested_power_state, POWER_MODE_TYPE_HIGH);
170 break;
171 case PM_ACTION_NONE:
172 default:
173 DRM_ERROR("Requested mode for not defined action\n");
174 return;
175 }
176 DRM_INFO("Requested: e: %d m: %d p: %d\n",
177 rdev->pm.requested_clock_mode->sclk,
178 rdev->pm.requested_clock_mode->mclk,
179 rdev->pm.requested_power_state->non_clock_info.pcie_lanes);
180}
181
182static void radeon_set_power_state(struct radeon_device *rdev)
183{
184 /* if *_clock_mode are the same, *_power_state are as well */
185 if (rdev->pm.requested_clock_mode == rdev->pm.current_clock_mode)
186 return;
187
188 DRM_INFO("Setting: e: %d m: %d p: %d\n",
189 rdev->pm.requested_clock_mode->sclk,
190 rdev->pm.requested_clock_mode->mclk,
191 rdev->pm.requested_power_state->non_clock_info.pcie_lanes);
192 /* set pcie lanes */
193 /* set voltage */
194 /* set engine clock */
195 radeon_set_engine_clock(rdev, rdev->pm.requested_clock_mode->sclk);
196 /* set memory clock */
197
198 rdev->pm.current_power_state = rdev->pm.requested_power_state;
199 rdev->pm.current_clock_mode = rdev->pm.requested_clock_mode;
200}
26 201
27int radeon_pm_init(struct radeon_device *rdev) 202int radeon_pm_init(struct radeon_device *rdev)
28{ 203{
204 rdev->pm.state = PM_STATE_DISABLED;
205 rdev->pm.planned_action = PM_ACTION_NONE;
206 rdev->pm.downclocked = false;
207
208 if (rdev->bios) {
209 if (rdev->is_atom_bios)
210 radeon_atombios_get_power_modes(rdev);
211 else
212 radeon_combios_get_power_modes(rdev);
213 radeon_print_power_mode_info(rdev);
214 }
215
29 if (radeon_debugfs_pm_init(rdev)) { 216 if (radeon_debugfs_pm_init(rdev)) {
30 DRM_ERROR("Failed to register debugfs file for PM!\n"); 217 DRM_ERROR("Failed to register debugfs file for PM!\n");
31 } 218 }
32 219
220 INIT_DELAYED_WORK(&rdev->pm.idle_work, radeon_pm_idle_work_handler);
221
222 if (radeon_dynpm != -1 && radeon_dynpm) {
223 rdev->pm.state = PM_STATE_PAUSED;
224 DRM_INFO("radeon: dynamic power management enabled\n");
225 }
226
227 DRM_INFO("radeon: power management initialized\n");
228
33 return 0; 229 return 0;
34} 230}
35 231
232void radeon_pm_compute_clocks(struct radeon_device *rdev)
233{
234 struct drm_device *ddev = rdev->ddev;
235 struct drm_connector *connector;
236 struct radeon_crtc *radeon_crtc;
237 int count = 0;
238
239 if (rdev->pm.state == PM_STATE_DISABLED)
240 return;
241
242 mutex_lock(&rdev->pm.mutex);
243
244 rdev->pm.active_crtcs = 0;
245 list_for_each_entry(connector,
246 &ddev->mode_config.connector_list, head) {
 247 if (connector->encoder && connector->encoder->crtc &&
 248 connector->dpms != DRM_MODE_DPMS_OFF) {
249 radeon_crtc = to_radeon_crtc(connector->encoder->crtc);
250 rdev->pm.active_crtcs |= (1 << radeon_crtc->crtc_id);
251 ++count;
252 }
253 }
254
255 if (count > 1) {
256 if (rdev->pm.state == PM_STATE_ACTIVE) {
257 cancel_delayed_work(&rdev->pm.idle_work);
258
259 rdev->pm.state = PM_STATE_PAUSED;
260 rdev->pm.planned_action = PM_ACTION_UPCLOCK;
261 if (rdev->pm.downclocked)
262 radeon_pm_set_clocks(rdev);
263
264 DRM_DEBUG("radeon: dynamic power management deactivated\n");
265 }
266 } else if (count == 1) {
267 /* TODO: Increase clocks if needed for current mode */
268
269 if (rdev->pm.state == PM_STATE_MINIMUM) {
270 rdev->pm.state = PM_STATE_ACTIVE;
271 rdev->pm.planned_action = PM_ACTION_UPCLOCK;
272 radeon_pm_set_clocks(rdev);
273
274 queue_delayed_work(rdev->wq, &rdev->pm.idle_work,
275 msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
276 }
277 else if (rdev->pm.state == PM_STATE_PAUSED) {
278 rdev->pm.state = PM_STATE_ACTIVE;
279 queue_delayed_work(rdev->wq, &rdev->pm.idle_work,
280 msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
281 DRM_DEBUG("radeon: dynamic power management activated\n");
282 }
283 }
284 else { /* count == 0 */
285 if (rdev->pm.state != PM_STATE_MINIMUM) {
286 cancel_delayed_work(&rdev->pm.idle_work);
287
288 rdev->pm.state = PM_STATE_MINIMUM;
289 rdev->pm.planned_action = PM_ACTION_MINIMUM;
290 radeon_pm_set_clocks(rdev);
291 }
292 }
293
294 mutex_unlock(&rdev->pm.mutex);
295}
296
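In short, the function above maps "how many CRTCs are lit" onto a PM state: more than one head pauses dynamic reclocking at full clocks, exactly one head enables it (with the idle worker queued), and zero heads drop to minimum clocks. A distilled, hedged version of just that decision, with the enum names taken from the patch:

enum pm_state { PM_STATE_DISABLED, PM_STATE_MINIMUM,
		PM_STATE_PAUSED, PM_STATE_ACTIVE };

static enum pm_state next_pm_state(int active_crtcs)
{
	if (active_crtcs > 1)
		return PM_STATE_PAUSED;		/* full clocks, idle worker cancelled */
	if (active_crtcs == 1)
		return PM_STATE_ACTIVE;		/* idle worker reclocks on demand */
	return PM_STATE_MINIMUM;		/* nothing lit: lowest clocks */
}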
297static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish)
298{
299 u32 stat_crtc1 = 0, stat_crtc2 = 0;
300 bool in_vbl = true;
301
302 if (ASIC_IS_AVIVO(rdev)) {
303 if (rdev->pm.active_crtcs & (1 << 0)) {
304 stat_crtc1 = RREG32(D1CRTC_STATUS);
305 if (!(stat_crtc1 & 1))
306 in_vbl = false;
307 }
308 if (rdev->pm.active_crtcs & (1 << 1)) {
309 stat_crtc2 = RREG32(D2CRTC_STATUS);
310 if (!(stat_crtc2 & 1))
311 in_vbl = false;
312 }
313 }
314 if (in_vbl == false)
315 DRM_INFO("not in vbl for pm change %08x %08x at %s\n", stat_crtc1,
316 stat_crtc2, finish ? "exit" : "entry");
317 return in_vbl;
318}
319static void radeon_pm_set_clocks_locked(struct radeon_device *rdev)
320{
321 /*radeon_fence_wait_last(rdev);*/
322 switch (rdev->pm.planned_action) {
323 case PM_ACTION_UPCLOCK:
324 rdev->pm.downclocked = false;
325 break;
326 case PM_ACTION_DOWNCLOCK:
327 rdev->pm.downclocked = true;
328 break;
329 case PM_ACTION_MINIMUM:
330 break;
331 case PM_ACTION_NONE:
332 DRM_ERROR("%s: PM_ACTION_NONE\n", __func__);
333 break;
334 }
335
336 /* check if we are in vblank */
337 radeon_pm_debug_check_in_vbl(rdev, false);
338 radeon_set_power_state(rdev);
339 radeon_pm_debug_check_in_vbl(rdev, true);
340 rdev->pm.planned_action = PM_ACTION_NONE;
341}
342
343static void radeon_pm_set_clocks(struct radeon_device *rdev)
344{
345 radeon_get_power_state(rdev, rdev->pm.planned_action);
346 mutex_lock(&rdev->cp.mutex);
347
348 if (rdev->pm.active_crtcs & (1 << 0)) {
349 rdev->pm.req_vblank |= (1 << 0);
350 drm_vblank_get(rdev->ddev, 0);
351 }
352 if (rdev->pm.active_crtcs & (1 << 1)) {
353 rdev->pm.req_vblank |= (1 << 1);
354 drm_vblank_get(rdev->ddev, 1);
355 }
356 if (rdev->pm.active_crtcs)
357 wait_event_interruptible_timeout(
358 rdev->irq.vblank_queue, 0,
359 msecs_to_jiffies(RADEON_WAIT_VBLANK_TIMEOUT));
360 if (rdev->pm.req_vblank & (1 << 0)) {
361 rdev->pm.req_vblank &= ~(1 << 0);
362 drm_vblank_put(rdev->ddev, 0);
363 }
364 if (rdev->pm.req_vblank & (1 << 1)) {
365 rdev->pm.req_vblank &= ~(1 << 1);
366 drm_vblank_put(rdev->ddev, 1);
367 }
368
369 radeon_pm_set_clocks_locked(rdev);
370 mutex_unlock(&rdev->cp.mutex);
371}
372
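The reclock path above takes a vblank reference on each active CRTC (keeping the vblank interrupt enabled), sleeps briefly so the clock writes land away from active scanout, then drops the references. A hedged generic sketch of that get/sleep/put pattern; the req_vblank bookkeeping is elided and the queue is passed in:

#include "drmP.h"	/* drm core of this era: drm_vblank_get()/drm_vblank_put() */

#define WAIT_VBLANK_TIMEOUT 200

static void reclock_around_vblank(struct drm_device *dev, u32 active_crtcs,
				  wait_queue_head_t *vblank_queue)
{
	int crtc;

	for (crtc = 0; crtc < 2; crtc++)
		if (active_crtcs & (1 << crtc))
			drm_vblank_get(dev, crtc);

	/* note the always-false condition, as in the patch: this sleeps
	 * (interruptibly) up to the timeout; vblank irq wake-ups re-check it */
	if (active_crtcs)
		wait_event_interruptible_timeout(*vblank_queue, 0,
				msecs_to_jiffies(WAIT_VBLANK_TIMEOUT));

	/* ... program the new clocks here ... */

	for (crtc = 0; crtc < 2; crtc++)
		if (active_crtcs & (1 << crtc))
			drm_vblank_put(dev, crtc);
}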
373static void radeon_pm_idle_work_handler(struct work_struct *work)
374{
375 struct radeon_device *rdev;
376 rdev = container_of(work, struct radeon_device,
377 pm.idle_work.work);
378
379 mutex_lock(&rdev->pm.mutex);
380 if (rdev->pm.state == PM_STATE_ACTIVE) {
381 unsigned long irq_flags;
382 int not_processed = 0;
383
384 read_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
385 if (!list_empty(&rdev->fence_drv.emited)) {
386 struct list_head *ptr;
387 list_for_each(ptr, &rdev->fence_drv.emited) {
 388 /* count up to 3, that's enough info */
389 if (++not_processed >= 3)
390 break;
391 }
392 }
393 read_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
394
395 if (not_processed >= 3) { /* should upclock */
396 if (rdev->pm.planned_action == PM_ACTION_DOWNCLOCK) {
397 rdev->pm.planned_action = PM_ACTION_NONE;
398 } else if (rdev->pm.planned_action == PM_ACTION_NONE &&
399 rdev->pm.downclocked) {
400 rdev->pm.planned_action =
401 PM_ACTION_UPCLOCK;
402 rdev->pm.action_timeout = jiffies +
403 msecs_to_jiffies(RADEON_RECLOCK_DELAY_MS);
404 }
405 } else if (not_processed == 0) { /* should downclock */
406 if (rdev->pm.planned_action == PM_ACTION_UPCLOCK) {
407 rdev->pm.planned_action = PM_ACTION_NONE;
408 } else if (rdev->pm.planned_action == PM_ACTION_NONE &&
409 !rdev->pm.downclocked) {
410 rdev->pm.planned_action =
411 PM_ACTION_DOWNCLOCK;
412 rdev->pm.action_timeout = jiffies +
413 msecs_to_jiffies(RADEON_RECLOCK_DELAY_MS);
414 }
415 }
416
417 if (rdev->pm.planned_action != PM_ACTION_NONE &&
 418 time_after(jiffies, rdev->pm.action_timeout)) {
419 radeon_pm_set_clocks(rdev);
420 }
421 }
422 mutex_unlock(&rdev->pm.mutex);
423
424 queue_delayed_work(rdev->wq, &rdev->pm.idle_work,
425 msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
426}
427
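The idle handler above is a self-rearming delayed work: it samples the fence backlog, plans an up- or down-clock with a hysteresis timeout, and then requeues itself. A hedged sketch of the re-arming pattern (the patch queues on the driver's private rdev->wq; schedule_delayed_work on the shared queue is used here for brevity):

#include <linux/workqueue.h>
#include <linux/jiffies.h>

#define IDLE_LOOP_MS 100

struct pm_ctx {
	struct delayed_work idle_work;
	/* ... clocks, planned action, state ... */
};

static void idle_work_handler(struct work_struct *work)
{
	struct pm_ctx *ctx = container_of(work, struct pm_ctx, idle_work.work);

	/* ... sample GPU business, decide whether to reclock ... */

	/* re-arm: the handler reschedules itself, so teardown must call
	 * cancel_delayed_work_sync() before freeing ctx */
	schedule_delayed_work(&ctx->idle_work, msecs_to_jiffies(IDLE_LOOP_MS));
}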
36/* 428/*
37 * Debugfs info 429 * Debugfs info
38 */ 430 */
@@ -44,11 +436,14 @@ static int radeon_debugfs_pm_info(struct seq_file *m, void *data)
44 struct drm_device *dev = node->minor->dev; 436 struct drm_device *dev = node->minor->dev;
45 struct radeon_device *rdev = dev->dev_private; 437 struct radeon_device *rdev = dev->dev_private;
46 438
439 seq_printf(m, "state: %s\n", pm_state_names[rdev->pm.state]);
47 seq_printf(m, "default engine clock: %u0 kHz\n", rdev->clock.default_sclk); 440 seq_printf(m, "default engine clock: %u0 kHz\n", rdev->clock.default_sclk);
48 seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev)); 441 seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev));
49 seq_printf(m, "default memory clock: %u0 kHz\n", rdev->clock.default_mclk); 442 seq_printf(m, "default memory clock: %u0 kHz\n", rdev->clock.default_mclk);
50 if (rdev->asic->get_memory_clock) 443 if (rdev->asic->get_memory_clock)
51 seq_printf(m, "current memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev)); 444 seq_printf(m, "current memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev));
445 if (rdev->asic->get_pcie_lanes)
446 seq_printf(m, "PCIE lanes: %d\n", radeon_get_pcie_lanes(rdev));
52 447
53 return 0; 448 return 0;
54} 449}
@@ -58,7 +453,7 @@ static struct drm_info_list radeon_pm_info_list[] = {
58}; 453};
59#endif 454#endif
60 455
61int radeon_debugfs_pm_init(struct radeon_device *rdev) 456static int radeon_debugfs_pm_init(struct radeon_device *rdev)
62{ 457{
63#if defined(CONFIG_DEBUG_FS) 458#if defined(CONFIG_DEBUG_FS)
64 return radeon_debugfs_add_files(rdev, radeon_pm_info_list, ARRAY_SIZE(radeon_pm_info_list)); 459 return radeon_debugfs_add_files(rdev, radeon_pm_info_list, ARRAY_SIZE(radeon_pm_info_list));
diff --git a/drivers/gpu/drm/radeon/radeon_reg.h b/drivers/gpu/drm/radeon/radeon_reg.h
index 6d0a009dd4a1..5c0dc082d330 100644
--- a/drivers/gpu/drm/radeon/radeon_reg.h
+++ b/drivers/gpu/drm/radeon/radeon_reg.h
@@ -54,7 +54,7 @@
54#include "r300_reg.h" 54#include "r300_reg.h"
55#include "r500_reg.h" 55#include "r500_reg.h"
56#include "r600_reg.h" 56#include "r600_reg.h"
57 57#include "evergreen_reg.h"
58 58
59#define RADEON_MC_AGP_LOCATION 0x014c 59#define RADEON_MC_AGP_LOCATION 0x014c
60#define RADEON_MC_AGP_START_MASK 0x0000FFFF 60#define RADEON_MC_AGP_START_MASK 0x0000FFFF
@@ -1060,32 +1060,38 @@
1060 1060
1061 /* Multimedia I2C bus */ 1061 /* Multimedia I2C bus */
1062#define RADEON_I2C_CNTL_0 0x0090 1062#define RADEON_I2C_CNTL_0 0x0090
1063#define RADEON_I2C_DONE (1 << 0) 1063# define RADEON_I2C_DONE (1 << 0)
1064#define RADEON_I2C_NACK (1 << 1) 1064# define RADEON_I2C_NACK (1 << 1)
1065#define RADEON_I2C_HALT (1 << 2) 1065# define RADEON_I2C_HALT (1 << 2)
1066#define RADEON_I2C_SOFT_RST (1 << 5) 1066# define RADEON_I2C_SOFT_RST (1 << 5)
1067#define RADEON_I2C_DRIVE_EN (1 << 6) 1067# define RADEON_I2C_DRIVE_EN (1 << 6)
1068#define RADEON_I2C_DRIVE_SEL (1 << 7) 1068# define RADEON_I2C_DRIVE_SEL (1 << 7)
1069#define RADEON_I2C_START (1 << 8) 1069# define RADEON_I2C_START (1 << 8)
1070#define RADEON_I2C_STOP (1 << 9) 1070# define RADEON_I2C_STOP (1 << 9)
1071#define RADEON_I2C_RECEIVE (1 << 10) 1071# define RADEON_I2C_RECEIVE (1 << 10)
1072#define RADEON_I2C_ABORT (1 << 11) 1072# define RADEON_I2C_ABORT (1 << 11)
1073#define RADEON_I2C_GO (1 << 12) 1073# define RADEON_I2C_GO (1 << 12)
1074#define RADEON_I2C_PRESCALE_SHIFT 16 1074# define RADEON_I2C_PRESCALE_SHIFT 16
1075#define RADEON_I2C_CNTL_1 0x0094 1075#define RADEON_I2C_CNTL_1 0x0094
1076#define RADEON_I2C_DATA_COUNT_SHIFT 0 1076# define RADEON_I2C_DATA_COUNT_SHIFT 0
1077#define RADEON_I2C_ADDR_COUNT_SHIFT 4 1077# define RADEON_I2C_ADDR_COUNT_SHIFT 4
1078#define RADEON_I2C_INTRA_BYTE_DELAY_SHIFT 8 1078# define RADEON_I2C_INTRA_BYTE_DELAY_SHIFT 8
1079#define RADEON_I2C_SEL (1 << 16) 1079# define RADEON_I2C_SEL (1 << 16)
1080#define RADEON_I2C_EN (1 << 17) 1080# define RADEON_I2C_EN (1 << 17)
1081#define RADEON_I2C_TIME_LIMIT_SHIFT 24 1081# define RADEON_I2C_TIME_LIMIT_SHIFT 24
1082#define RADEON_I2C_DATA 0x0098 1082#define RADEON_I2C_DATA 0x0098
1083 1083
1084#define RADEON_DVI_I2C_CNTL_0 0x02e0 1084#define RADEON_DVI_I2C_CNTL_0 0x02e0
1085# define R200_DVI_I2C_PIN_SEL(x) ((x) << 3) 1085# define R200_DVI_I2C_PIN_SEL(x) ((x) << 3)
1086# define R200_SEL_DDC1 0 /* 0x60 - VGA_DDC */ 1086# define R200_SEL_DDC1 0 /* depends on asic */
1087# define R200_SEL_DDC2 1 /* 0x64 - DVI_DDC */ 1087# define R200_SEL_DDC2 1 /* depends on asic */
1088# define R200_SEL_DDC3 2 /* 0x68 - MONID_DDC */ 1088# define R200_SEL_DDC3 2 /* depends on asic */
1089# define RADEON_SW_WANTS_TO_USE_DVI_I2C (1 << 13)
1090# define RADEON_SW_CAN_USE_DVI_I2C (1 << 13)
1091# define RADEON_SW_DONE_USING_DVI_I2C (1 << 14)
1092# define RADEON_HW_NEEDS_DVI_I2C (1 << 14)
1093# define RADEON_ABORT_HW_DVI_I2C (1 << 15)
1094# define RADEON_HW_USING_DVI_I2C (1 << 15)
1089#define RADEON_DVI_I2C_CNTL_1 0x02e4 1095#define RADEON_DVI_I2C_CNTL_1 0x02e4
1090#define RADEON_DVI_I2C_DATA 0x02e8 1096#define RADEON_DVI_I2C_DATA 0x02e8
1091 1097
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index 6579eb4c1f28..e50513a62735 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -34,6 +34,36 @@
34 34
35int radeon_debugfs_ib_init(struct radeon_device *rdev); 35int radeon_debugfs_ib_init(struct radeon_device *rdev);
36 36
37void radeon_ib_bogus_cleanup(struct radeon_device *rdev)
38{
39 struct radeon_ib *ib, *n;
40
41 list_for_each_entry_safe(ib, n, &rdev->ib_pool.bogus_ib, list) {
42 list_del(&ib->list);
43 vfree(ib->ptr);
44 kfree(ib);
45 }
46}
47
48void radeon_ib_bogus_add(struct radeon_device *rdev, struct radeon_ib *ib)
49{
50 struct radeon_ib *bib;
51
52 bib = kmalloc(sizeof(*bib), GFP_KERNEL);
53 if (bib == NULL)
54 return;
55 bib->ptr = vmalloc(ib->length_dw * 4);
56 if (bib->ptr == NULL) {
57 kfree(bib);
58 return;
59 }
60 memcpy(bib->ptr, ib->ptr, ib->length_dw * 4);
61 bib->length_dw = ib->length_dw;
62 mutex_lock(&rdev->ib_pool.mutex);
63 list_add_tail(&bib->list, &rdev->ib_pool.bogus_ib);
64 mutex_unlock(&rdev->ib_pool.mutex);
65}
66
37/* 67/*
38 * IB. 68 * IB.
39 */ 69 */
@@ -142,6 +172,7 @@ int radeon_ib_pool_init(struct radeon_device *rdev)
142 172
143 if (rdev->ib_pool.robj) 173 if (rdev->ib_pool.robj)
144 return 0; 174 return 0;
175 INIT_LIST_HEAD(&rdev->ib_pool.bogus_ib);
145 /* Allocate 1M object buffer */ 176 /* Allocate 1M object buffer */
146 r = radeon_bo_create(rdev, NULL, RADEON_IB_POOL_SIZE*64*1024, 177 r = radeon_bo_create(rdev, NULL, RADEON_IB_POOL_SIZE*64*1024,
147 true, RADEON_GEM_DOMAIN_GTT, 178 true, RADEON_GEM_DOMAIN_GTT,
@@ -192,6 +223,8 @@ void radeon_ib_pool_fini(struct radeon_device *rdev)
192 return; 223 return;
193 } 224 }
194 mutex_lock(&rdev->ib_pool.mutex); 225 mutex_lock(&rdev->ib_pool.mutex);
226 radeon_ib_bogus_cleanup(rdev);
227
195 if (rdev->ib_pool.robj) { 228 if (rdev->ib_pool.robj) {
196 r = radeon_bo_reserve(rdev->ib_pool.robj, false); 229 r = radeon_bo_reserve(rdev->ib_pool.robj, false);
197 if (likely(r == 0)) { 230 if (likely(r == 0)) {
@@ -349,15 +382,49 @@ static int radeon_debugfs_ib_info(struct seq_file *m, void *data)
349 return 0; 382 return 0;
350} 383}
351 384
385static int radeon_debugfs_ib_bogus_info(struct seq_file *m, void *data)
386{
387 struct drm_info_node *node = (struct drm_info_node *) m->private;
388 struct radeon_device *rdev = node->info_ent->data;
389 struct radeon_ib *ib;
390 unsigned i;
391
392 mutex_lock(&rdev->ib_pool.mutex);
393 if (list_empty(&rdev->ib_pool.bogus_ib)) {
394 mutex_unlock(&rdev->ib_pool.mutex);
395 seq_printf(m, "no bogus IB recorded\n");
396 return 0;
397 }
398 ib = list_first_entry(&rdev->ib_pool.bogus_ib, struct radeon_ib, list);
399 list_del_init(&ib->list);
400 mutex_unlock(&rdev->ib_pool.mutex);
401 seq_printf(m, "IB size %05u dwords\n", ib->length_dw);
402 for (i = 0; i < ib->length_dw; i++) {
403 seq_printf(m, "[%05u]=0x%08X\n", i, ib->ptr[i]);
404 }
405 vfree(ib->ptr);
406 kfree(ib);
407 return 0;
408}
409
352static struct drm_info_list radeon_debugfs_ib_list[RADEON_IB_POOL_SIZE]; 410static struct drm_info_list radeon_debugfs_ib_list[RADEON_IB_POOL_SIZE];
353static char radeon_debugfs_ib_names[RADEON_IB_POOL_SIZE][32]; 411static char radeon_debugfs_ib_names[RADEON_IB_POOL_SIZE][32];
412
413static struct drm_info_list radeon_debugfs_ib_bogus_info_list[] = {
414 {"radeon_ib_bogus", radeon_debugfs_ib_bogus_info, 0, NULL},
415};
354#endif 416#endif
355 417
356int radeon_debugfs_ib_init(struct radeon_device *rdev) 418int radeon_debugfs_ib_init(struct radeon_device *rdev)
357{ 419{
358#if defined(CONFIG_DEBUG_FS) 420#if defined(CONFIG_DEBUG_FS)
359 unsigned i; 421 unsigned i;
422 int r;
360 423
424 radeon_debugfs_ib_bogus_info_list[0].data = rdev;
425 r = radeon_debugfs_add_files(rdev, radeon_debugfs_ib_bogus_info_list, 1);
426 if (r)
427 return r;
361 for (i = 0; i < RADEON_IB_POOL_SIZE; i++) { 428 for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
362 sprintf(radeon_debugfs_ib_names[i], "radeon_ib_%04u", i); 429 sprintf(radeon_debugfs_ib_names[i], "radeon_ib_%04u", i);
363 radeon_debugfs_ib_list[i].name = radeon_debugfs_ib_names[i]; 430 radeon_debugfs_ib_list[i].name = radeon_debugfs_ib_names[i];
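One detail worth noting in the debugfs reader above: each read of radeon_ib_bogus pops and frees one recorded IB, so reading the file repeatedly drains the backlog of captured buffers. A hedged sketch of that consume-on-read pattern, with illustrative names:

#include <linux/list.h>
#include <linux/mutex.h>

struct snap {
	struct list_head list;
	u32 *ptr;
	unsigned len;
};

static struct snap *pop_snapshot(struct list_head *pool, struct mutex *lock)
{
	struct snap *s = NULL;

	mutex_lock(lock);
	if (!list_empty(pool)) {
		s = list_first_entry(pool, struct snap, list);
		list_del_init(&s->list);	/* caller prints, then frees */
	}
	mutex_unlock(lock);
	return s;
}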
diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
index 067167cb39ca..3c32f840dcd2 100644
--- a/drivers/gpu/drm/radeon/radeon_state.c
+++ b/drivers/gpu/drm/radeon/radeon_state.c
@@ -29,6 +29,7 @@
29 29
30#include "drmP.h" 30#include "drmP.h"
31#include "drm.h" 31#include "drm.h"
32#include "drm_buffer.h"
32#include "drm_sarea.h" 33#include "drm_sarea.h"
33#include "radeon_drm.h" 34#include "radeon_drm.h"
34#include "radeon_drv.h" 35#include "radeon_drv.h"
@@ -91,21 +92,27 @@ static __inline__ int radeon_check_and_fixup_offset(drm_radeon_private_t *
91static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t * 92static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t *
92 dev_priv, 93 dev_priv,
93 struct drm_file *file_priv, 94 struct drm_file *file_priv,
94 int id, u32 *data) 95 int id, struct drm_buffer *buf)
95{ 96{
97 u32 *data;
96 switch (id) { 98 switch (id) {
97 99
98 case RADEON_EMIT_PP_MISC: 100 case RADEON_EMIT_PP_MISC:
99 if (radeon_check_and_fixup_offset(dev_priv, file_priv, 101 data = drm_buffer_pointer_to_dword(buf,
100 &data[(RADEON_RB3D_DEPTHOFFSET - RADEON_PP_MISC) / 4])) { 102 (RADEON_RB3D_DEPTHOFFSET - RADEON_PP_MISC) / 4);
103
104 if (radeon_check_and_fixup_offset(dev_priv, file_priv, data)) {
101 DRM_ERROR("Invalid depth buffer offset\n"); 105 DRM_ERROR("Invalid depth buffer offset\n");
102 return -EINVAL; 106 return -EINVAL;
103 } 107 }
108 dev_priv->have_z_offset = 1;
104 break; 109 break;
105 110
106 case RADEON_EMIT_PP_CNTL: 111 case RADEON_EMIT_PP_CNTL:
107 if (radeon_check_and_fixup_offset(dev_priv, file_priv, 112 data = drm_buffer_pointer_to_dword(buf,
108 &data[(RADEON_RB3D_COLOROFFSET - RADEON_PP_CNTL) / 4])) { 113 (RADEON_RB3D_COLOROFFSET - RADEON_PP_CNTL) / 4);
114
115 if (radeon_check_and_fixup_offset(dev_priv, file_priv, data)) {
109 DRM_ERROR("Invalid colour buffer offset\n"); 116 DRM_ERROR("Invalid colour buffer offset\n");
110 return -EINVAL; 117 return -EINVAL;
111 } 118 }
@@ -117,8 +124,8 @@ static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t *
117 case R200_EMIT_PP_TXOFFSET_3: 124 case R200_EMIT_PP_TXOFFSET_3:
118 case R200_EMIT_PP_TXOFFSET_4: 125 case R200_EMIT_PP_TXOFFSET_4:
119 case R200_EMIT_PP_TXOFFSET_5: 126 case R200_EMIT_PP_TXOFFSET_5:
120 if (radeon_check_and_fixup_offset(dev_priv, file_priv, 127 data = drm_buffer_pointer_to_dword(buf, 0);
121 &data[0])) { 128 if (radeon_check_and_fixup_offset(dev_priv, file_priv, data)) {
122 DRM_ERROR("Invalid R200 texture offset\n"); 129 DRM_ERROR("Invalid R200 texture offset\n");
123 return -EINVAL; 130 return -EINVAL;
124 } 131 }
@@ -127,8 +134,9 @@ static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t *
127 case RADEON_EMIT_PP_TXFILTER_0: 134 case RADEON_EMIT_PP_TXFILTER_0:
128 case RADEON_EMIT_PP_TXFILTER_1: 135 case RADEON_EMIT_PP_TXFILTER_1:
129 case RADEON_EMIT_PP_TXFILTER_2: 136 case RADEON_EMIT_PP_TXFILTER_2:
130 if (radeon_check_and_fixup_offset(dev_priv, file_priv, 137 data = drm_buffer_pointer_to_dword(buf,
131 &data[(RADEON_PP_TXOFFSET_0 - RADEON_PP_TXFILTER_0) / 4])) { 138 (RADEON_PP_TXOFFSET_0 - RADEON_PP_TXFILTER_0) / 4);
139 if (radeon_check_and_fixup_offset(dev_priv, file_priv, data)) {
132 DRM_ERROR("Invalid R100 texture offset\n"); 140 DRM_ERROR("Invalid R100 texture offset\n");
133 return -EINVAL; 141 return -EINVAL;
134 } 142 }
@@ -142,9 +150,10 @@ static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t *
142 case R200_EMIT_PP_CUBIC_OFFSETS_5:{ 150 case R200_EMIT_PP_CUBIC_OFFSETS_5:{
143 int i; 151 int i;
144 for (i = 0; i < 5; i++) { 152 for (i = 0; i < 5; i++) {
153 data = drm_buffer_pointer_to_dword(buf, i);
145 if (radeon_check_and_fixup_offset(dev_priv, 154 if (radeon_check_and_fixup_offset(dev_priv,
146 file_priv, 155 file_priv,
147 &data[i])) { 156 data)) {
148 DRM_ERROR 157 DRM_ERROR
149 ("Invalid R200 cubic texture offset\n"); 158 ("Invalid R200 cubic texture offset\n");
150 return -EINVAL; 159 return -EINVAL;
@@ -158,9 +167,10 @@ static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t *
158 case RADEON_EMIT_PP_CUBIC_OFFSETS_T2:{ 167 case RADEON_EMIT_PP_CUBIC_OFFSETS_T2:{
159 int i; 168 int i;
160 for (i = 0; i < 5; i++) { 169 for (i = 0; i < 5; i++) {
170 data = drm_buffer_pointer_to_dword(buf, i);
161 if (radeon_check_and_fixup_offset(dev_priv, 171 if (radeon_check_and_fixup_offset(dev_priv,
162 file_priv, 172 file_priv,
163 &data[i])) { 173 data)) {
164 DRM_ERROR 174 DRM_ERROR
165 ("Invalid R100 cubic texture offset\n"); 175 ("Invalid R100 cubic texture offset\n");
166 return -EINVAL; 176 return -EINVAL;
@@ -269,23 +279,24 @@ static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t *
269 cmdbuf, 279 cmdbuf,
270 unsigned int *cmdsz) 280 unsigned int *cmdsz)
271{ 281{
272 u32 *cmd = (u32 *) cmdbuf->buf; 282 u32 *cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 0);
273 u32 offset, narrays; 283 u32 offset, narrays;
274 int count, i, k; 284 int count, i, k;
275 285
276 *cmdsz = 2 + ((cmd[0] & RADEON_CP_PACKET_COUNT_MASK) >> 16); 286 count = ((*cmd & RADEON_CP_PACKET_COUNT_MASK) >> 16);
287 *cmdsz = 2 + count;
277 288
278 if ((cmd[0] & 0xc0000000) != RADEON_CP_PACKET3) { 289 if ((*cmd & 0xc0000000) != RADEON_CP_PACKET3) {
279 DRM_ERROR("Not a type 3 packet\n"); 290 DRM_ERROR("Not a type 3 packet\n");
280 return -EINVAL; 291 return -EINVAL;
281 } 292 }
282 293
283 if (4 * *cmdsz > cmdbuf->bufsz) { 294 if (4 * *cmdsz > drm_buffer_unprocessed(cmdbuf->buffer)) {
284 DRM_ERROR("Packet size larger than size of data provided\n"); 295 DRM_ERROR("Packet size larger than size of data provided\n");
285 return -EINVAL; 296 return -EINVAL;
286 } 297 }
287 298
288 switch(cmd[0] & 0xff00) { 299 switch (*cmd & 0xff00) {
289 /* XXX Are there old drivers needing other packets? */ 300 /* XXX Are there old drivers needing other packets? */
290 301
291 case RADEON_3D_DRAW_IMMD: 302 case RADEON_3D_DRAW_IMMD:
@@ -312,7 +323,6 @@ static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t *
312 break; 323 break;
313 324
314 case RADEON_3D_LOAD_VBPNTR: 325 case RADEON_3D_LOAD_VBPNTR:
315 count = (cmd[0] >> 16) & 0x3fff;
316 326
317 if (count > 18) { /* 12 arrays max */ 327 if (count > 18) { /* 12 arrays max */
318 DRM_ERROR("Too large payload in 3D_LOAD_VBPNTR (count=%d)\n", 328 DRM_ERROR("Too large payload in 3D_LOAD_VBPNTR (count=%d)\n",
@@ -321,13 +331,16 @@ static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t *
321 } 331 }
322 332
323 /* carefully check packet contents */ 333 /* carefully check packet contents */
324 narrays = cmd[1] & ~0xc000; 334 cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 1);
335
336 narrays = *cmd & ~0xc000;
325 k = 0; 337 k = 0;
326 i = 2; 338 i = 2;
327 while ((k < narrays) && (i < (count + 2))) { 339 while ((k < narrays) && (i < (count + 2))) {
328 i++; /* skip attribute field */ 340 i++; /* skip attribute field */
341 cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, i);
329 if (radeon_check_and_fixup_offset(dev_priv, file_priv, 342 if (radeon_check_and_fixup_offset(dev_priv, file_priv,
330 &cmd[i])) { 343 cmd)) {
331 DRM_ERROR 344 DRM_ERROR
332 ("Invalid offset (k=%d i=%d) in 3D_LOAD_VBPNTR packet.\n", 345 ("Invalid offset (k=%d i=%d) in 3D_LOAD_VBPNTR packet.\n",
333 k, i); 346 k, i);
@@ -338,8 +351,10 @@ static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t *
338 if (k == narrays) 351 if (k == narrays)
339 break; 352 break;
340 /* have one more to process, they come in pairs */ 353 /* have one more to process, they come in pairs */
354 cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, i);
355
341 if (radeon_check_and_fixup_offset(dev_priv, 356 if (radeon_check_and_fixup_offset(dev_priv,
342 file_priv, &cmd[i])) 357 file_priv, cmd))
343 { 358 {
344 DRM_ERROR 359 DRM_ERROR
345 ("Invalid offset (k=%d i=%d) in 3D_LOAD_VBPNTR packet.\n", 360 ("Invalid offset (k=%d i=%d) in 3D_LOAD_VBPNTR packet.\n",
@@ -363,7 +378,9 @@ static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t *
363 DRM_ERROR("Invalid 3d packet for r200-class chip\n"); 378 DRM_ERROR("Invalid 3d packet for r200-class chip\n");
364 return -EINVAL; 379 return -EINVAL;
365 } 380 }
366 if (radeon_check_and_fixup_offset(dev_priv, file_priv, &cmd[1])) { 381
382 cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 1);
383 if (radeon_check_and_fixup_offset(dev_priv, file_priv, cmd)) {
367 DRM_ERROR("Invalid rndr_gen_indx offset\n"); 384 DRM_ERROR("Invalid rndr_gen_indx offset\n");
368 return -EINVAL; 385 return -EINVAL;
369 } 386 }
@@ -374,12 +391,15 @@ static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t *
374 DRM_ERROR("Invalid 3d packet for r100-class chip\n"); 391 DRM_ERROR("Invalid 3d packet for r100-class chip\n");
375 return -EINVAL; 392 return -EINVAL;
376 } 393 }
377 if ((cmd[1] & 0x8000ffff) != 0x80000810) { 394
378 DRM_ERROR("Invalid indx_buffer reg address %08X\n", cmd[1]); 395 cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 1);
396 if ((*cmd & 0x8000ffff) != 0x80000810) {
397 DRM_ERROR("Invalid indx_buffer reg address %08X\n", *cmd);
379 return -EINVAL; 398 return -EINVAL;
380 } 399 }
381 if (radeon_check_and_fixup_offset(dev_priv, file_priv, &cmd[2])) { 400 cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 2);
382 DRM_ERROR("Invalid indx_buffer offset is %08X\n", cmd[2]); 401 if (radeon_check_and_fixup_offset(dev_priv, file_priv, cmd)) {
402 DRM_ERROR("Invalid indx_buffer offset is %08X\n", *cmd);
383 return -EINVAL; 403 return -EINVAL;
384 } 404 }
385 break; 405 break;
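The pattern repeated through this file: instead of indexing a contiguous user copy (cmd[i]), the checker asks drm_buffer for a pointer to the i-th dword past the current read position, since the copied command stream now lives in page-sized chunks. A hedged sketch of one such in-place fixup; the validation itself is illustrative, not the driver's:

#include "drm_buffer.h"

static int fixup_one_dword(struct drm_buffer *buf, int dword_idx)
{
	/* pointer to the dword at (current position + dword_idx); the
	 * chunked storage keeps aligned dwords within a single page */
	u32 *data = drm_buffer_pointer_to_dword(buf, dword_idx);

	if (*data & 0x3)		/* illustrative sanity check */
		return -EINVAL;
	*data |= 0x10;			/* patch in place, as the real checks do */
	return 0;
}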
@@ -388,31 +408,34 @@ static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t *
388 case RADEON_CNTL_PAINT_MULTI: 408 case RADEON_CNTL_PAINT_MULTI:
389 case RADEON_CNTL_BITBLT_MULTI: 409 case RADEON_CNTL_BITBLT_MULTI:
390 /* MSB of opcode: next DWORD GUI_CNTL */ 410 /* MSB of opcode: next DWORD GUI_CNTL */
391 if (cmd[1] & (RADEON_GMC_SRC_PITCH_OFFSET_CNTL 411 cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 1);
412 if (*cmd & (RADEON_GMC_SRC_PITCH_OFFSET_CNTL
392 | RADEON_GMC_DST_PITCH_OFFSET_CNTL)) { 413 | RADEON_GMC_DST_PITCH_OFFSET_CNTL)) {
393 offset = cmd[2] << 10; 414 u32 *cmd2 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 2);
415 offset = *cmd2 << 10;
394 if (radeon_check_and_fixup_offset 416 if (radeon_check_and_fixup_offset
395 (dev_priv, file_priv, &offset)) { 417 (dev_priv, file_priv, &offset)) {
396 DRM_ERROR("Invalid first packet offset\n"); 418 DRM_ERROR("Invalid first packet offset\n");
397 return -EINVAL; 419 return -EINVAL;
398 } 420 }
399 cmd[2] = (cmd[2] & 0xffc00000) | offset >> 10; 421 *cmd2 = (*cmd2 & 0xffc00000) | offset >> 10;
400 } 422 }
401 423
402 if ((cmd[1] & RADEON_GMC_SRC_PITCH_OFFSET_CNTL) && 424 if ((*cmd & RADEON_GMC_SRC_PITCH_OFFSET_CNTL) &&
403 (cmd[1] & RADEON_GMC_DST_PITCH_OFFSET_CNTL)) { 425 (*cmd & RADEON_GMC_DST_PITCH_OFFSET_CNTL)) {
404 offset = cmd[3] << 10; 426 u32 *cmd3 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 3);
 427 offset = *cmd3 << 10;
405 if (radeon_check_and_fixup_offset 428 if (radeon_check_and_fixup_offset
406 (dev_priv, file_priv, &offset)) { 429 (dev_priv, file_priv, &offset)) {
407 DRM_ERROR("Invalid second packet offset\n"); 430 DRM_ERROR("Invalid second packet offset\n");
408 return -EINVAL; 431 return -EINVAL;
409 } 432 }
410 cmd[3] = (cmd[3] & 0xffc00000) | offset >> 10; 433 *cmd3 = (*cmd3 & 0xffc00000) | offset >> 10;
411 } 434 }
412 break; 435 break;
413 436
414 default: 437 default:
415 DRM_ERROR("Invalid packet type %x\n", cmd[0] & 0xff00); 438 DRM_ERROR("Invalid packet type %x\n", *cmd & 0xff00);
416 return -EINVAL; 439 return -EINVAL;
417 } 440 }
418 441
@@ -876,6 +899,11 @@ static void radeon_cp_dispatch_clear(struct drm_device * dev,
876 if (tmp & RADEON_BACK) 899 if (tmp & RADEON_BACK)
877 flags |= RADEON_FRONT; 900 flags |= RADEON_FRONT;
878 } 901 }
902 if (flags & (RADEON_DEPTH|RADEON_STENCIL)) {
903 if (!dev_priv->have_z_offset)
904 printk_once(KERN_ERR "radeon: illegal depth clear request. Buggy mesa detected - please update.\n");
905 flags &= ~(RADEON_DEPTH | RADEON_STENCIL);
906 }
879 907
880 if (flags & (RADEON_FRONT | RADEON_BACK)) { 908 if (flags & (RADEON_FRONT | RADEON_BACK)) {
881 909
@@ -2611,7 +2639,6 @@ static int radeon_emit_packets(drm_radeon_private_t * dev_priv,
2611{ 2639{
2612 int id = (int)header.packet.packet_id; 2640 int id = (int)header.packet.packet_id;
2613 int sz, reg; 2641 int sz, reg;
2614 int *data = (int *)cmdbuf->buf;
2615 RING_LOCALS; 2642 RING_LOCALS;
2616 2643
2617 if (id >= RADEON_MAX_STATE_PACKETS) 2644 if (id >= RADEON_MAX_STATE_PACKETS)
@@ -2620,23 +2647,22 @@ static int radeon_emit_packets(drm_radeon_private_t * dev_priv,
2620 sz = packet[id].len; 2647 sz = packet[id].len;
2621 reg = packet[id].start; 2648 reg = packet[id].start;
2622 2649
2623 if (sz * sizeof(int) > cmdbuf->bufsz) { 2650 if (sz * sizeof(u32) > drm_buffer_unprocessed(cmdbuf->buffer)) {
2624 DRM_ERROR("Packet size provided larger than data provided\n"); 2651 DRM_ERROR("Packet size provided larger than data provided\n");
2625 return -EINVAL; 2652 return -EINVAL;
2626 } 2653 }
2627 2654
2628 if (radeon_check_and_fixup_packets(dev_priv, file_priv, id, data)) { 2655 if (radeon_check_and_fixup_packets(dev_priv, file_priv, id,
2656 cmdbuf->buffer)) {
2629 DRM_ERROR("Packet verification failed\n"); 2657 DRM_ERROR("Packet verification failed\n");
2630 return -EINVAL; 2658 return -EINVAL;
2631 } 2659 }
2632 2660
2633 BEGIN_RING(sz + 1); 2661 BEGIN_RING(sz + 1);
2634 OUT_RING(CP_PACKET0(reg, (sz - 1))); 2662 OUT_RING(CP_PACKET0(reg, (sz - 1)));
2635 OUT_RING_TABLE(data, sz); 2663 OUT_RING_DRM_BUFFER(cmdbuf->buffer, sz);
2636 ADVANCE_RING(); 2664 ADVANCE_RING();
2637 2665
2638 cmdbuf->buf += sz * sizeof(int);
2639 cmdbuf->bufsz -= sz * sizeof(int);
2640 return 0; 2666 return 0;
2641} 2667}
2642 2668
@@ -2653,10 +2679,8 @@ static __inline__ int radeon_emit_scalars(drm_radeon_private_t *dev_priv,
2653 OUT_RING(CP_PACKET0(RADEON_SE_TCL_SCALAR_INDX_REG, 0)); 2679 OUT_RING(CP_PACKET0(RADEON_SE_TCL_SCALAR_INDX_REG, 0));
2654 OUT_RING(start | (stride << RADEON_SCAL_INDX_DWORD_STRIDE_SHIFT)); 2680 OUT_RING(start | (stride << RADEON_SCAL_INDX_DWORD_STRIDE_SHIFT));
2655 OUT_RING(CP_PACKET0_TABLE(RADEON_SE_TCL_SCALAR_DATA_REG, sz - 1)); 2681 OUT_RING(CP_PACKET0_TABLE(RADEON_SE_TCL_SCALAR_DATA_REG, sz - 1));
2656 OUT_RING_TABLE(cmdbuf->buf, sz); 2682 OUT_RING_DRM_BUFFER(cmdbuf->buffer, sz);
2657 ADVANCE_RING(); 2683 ADVANCE_RING();
2658 cmdbuf->buf += sz * sizeof(int);
2659 cmdbuf->bufsz -= sz * sizeof(int);
2660 return 0; 2684 return 0;
2661} 2685}
2662 2686
@@ -2675,10 +2699,8 @@ static __inline__ int radeon_emit_scalars2(drm_radeon_private_t *dev_priv,
2675 OUT_RING(CP_PACKET0(RADEON_SE_TCL_SCALAR_INDX_REG, 0)); 2699 OUT_RING(CP_PACKET0(RADEON_SE_TCL_SCALAR_INDX_REG, 0));
2676 OUT_RING(start | (stride << RADEON_SCAL_INDX_DWORD_STRIDE_SHIFT)); 2700 OUT_RING(start | (stride << RADEON_SCAL_INDX_DWORD_STRIDE_SHIFT));
2677 OUT_RING(CP_PACKET0_TABLE(RADEON_SE_TCL_SCALAR_DATA_REG, sz - 1)); 2701 OUT_RING(CP_PACKET0_TABLE(RADEON_SE_TCL_SCALAR_DATA_REG, sz - 1));
2678 OUT_RING_TABLE(cmdbuf->buf, sz); 2702 OUT_RING_DRM_BUFFER(cmdbuf->buffer, sz);
2679 ADVANCE_RING(); 2703 ADVANCE_RING();
2680 cmdbuf->buf += sz * sizeof(int);
2681 cmdbuf->bufsz -= sz * sizeof(int);
2682 return 0; 2704 return 0;
2683} 2705}
2684 2706
@@ -2696,11 +2718,9 @@ static __inline__ int radeon_emit_vectors(drm_radeon_private_t *dev_priv,
2696 OUT_RING(CP_PACKET0(RADEON_SE_TCL_VECTOR_INDX_REG, 0)); 2718 OUT_RING(CP_PACKET0(RADEON_SE_TCL_VECTOR_INDX_REG, 0));
2697 OUT_RING(start | (stride << RADEON_VEC_INDX_OCTWORD_STRIDE_SHIFT)); 2719 OUT_RING(start | (stride << RADEON_VEC_INDX_OCTWORD_STRIDE_SHIFT));
2698 OUT_RING(CP_PACKET0_TABLE(RADEON_SE_TCL_VECTOR_DATA_REG, (sz - 1))); 2720 OUT_RING(CP_PACKET0_TABLE(RADEON_SE_TCL_VECTOR_DATA_REG, (sz - 1)));
2699 OUT_RING_TABLE(cmdbuf->buf, sz); 2721 OUT_RING_DRM_BUFFER(cmdbuf->buffer, sz);
2700 ADVANCE_RING(); 2722 ADVANCE_RING();
2701 2723
2702 cmdbuf->buf += sz * sizeof(int);
2703 cmdbuf->bufsz -= sz * sizeof(int);
2704 return 0; 2724 return 0;
2705} 2725}
2706 2726
@@ -2714,7 +2734,7 @@ static __inline__ int radeon_emit_veclinear(drm_radeon_private_t *dev_priv,
2714 2734
2715 if (!sz) 2735 if (!sz)
2716 return 0; 2736 return 0;
2717 if (sz * 4 > cmdbuf->bufsz) 2737 if (sz * 4 > drm_buffer_unprocessed(cmdbuf->buffer))
2718 return -EINVAL; 2738 return -EINVAL;
2719 2739
2720 BEGIN_RING(5 + sz); 2740 BEGIN_RING(5 + sz);
@@ -2722,11 +2742,9 @@ static __inline__ int radeon_emit_veclinear(drm_radeon_private_t *dev_priv,
2722 OUT_RING(CP_PACKET0(RADEON_SE_TCL_VECTOR_INDX_REG, 0)); 2742 OUT_RING(CP_PACKET0(RADEON_SE_TCL_VECTOR_INDX_REG, 0));
2723 OUT_RING(start | (1 << RADEON_VEC_INDX_OCTWORD_STRIDE_SHIFT)); 2743 OUT_RING(start | (1 << RADEON_VEC_INDX_OCTWORD_STRIDE_SHIFT));
2724 OUT_RING(CP_PACKET0_TABLE(RADEON_SE_TCL_VECTOR_DATA_REG, (sz - 1))); 2744 OUT_RING(CP_PACKET0_TABLE(RADEON_SE_TCL_VECTOR_DATA_REG, (sz - 1)));
2725 OUT_RING_TABLE(cmdbuf->buf, sz); 2745 OUT_RING_DRM_BUFFER(cmdbuf->buffer, sz);
2726 ADVANCE_RING(); 2746 ADVANCE_RING();
2727 2747
2728 cmdbuf->buf += sz * sizeof(int);
2729 cmdbuf->bufsz -= sz * sizeof(int);
2730 return 0; 2748 return 0;
2731} 2749}
2732 2750
@@ -2748,11 +2766,9 @@ static int radeon_emit_packet3(struct drm_device * dev,
2748 } 2766 }
2749 2767
2750 BEGIN_RING(cmdsz); 2768 BEGIN_RING(cmdsz);
2751 OUT_RING_TABLE(cmdbuf->buf, cmdsz); 2769 OUT_RING_DRM_BUFFER(cmdbuf->buffer, cmdsz);
2752 ADVANCE_RING(); 2770 ADVANCE_RING();
2753 2771
2754 cmdbuf->buf += cmdsz * 4;
2755 cmdbuf->bufsz -= cmdsz * 4;
2756 return 0; 2772 return 0;
2757} 2773}
2758 2774
@@ -2805,16 +2821,16 @@ static int radeon_emit_packet3_cliprect(struct drm_device *dev,
2805 } 2821 }
2806 2822
2807 BEGIN_RING(cmdsz); 2823 BEGIN_RING(cmdsz);
2808 OUT_RING_TABLE(cmdbuf->buf, cmdsz); 2824 OUT_RING_DRM_BUFFER(cmdbuf->buffer, cmdsz);
2809 ADVANCE_RING(); 2825 ADVANCE_RING();
2810 2826
2811 } while (++i < cmdbuf->nbox); 2827 } while (++i < cmdbuf->nbox);
2812 if (cmdbuf->nbox == 1) 2828 if (cmdbuf->nbox == 1)
2813 cmdbuf->nbox = 0; 2829 cmdbuf->nbox = 0;
2814 2830
2831 return 0;
2815 out: 2832 out:
2816 cmdbuf->buf += cmdsz * 4; 2833 drm_buffer_advance(cmdbuf->buffer, cmdsz * 4);
2817 cmdbuf->bufsz -= cmdsz * 4;
2818 return 0; 2834 return 0;
2819} 2835}
2820 2836
@@ -2847,16 +2863,16 @@ static int radeon_emit_wait(struct drm_device * dev, int flags)
2847 return 0; 2863 return 0;
2848} 2864}
2849 2865
2850static int radeon_cp_cmdbuf(struct drm_device *dev, void *data, struct drm_file *file_priv) 2866static int radeon_cp_cmdbuf(struct drm_device *dev, void *data,
2867 struct drm_file *file_priv)
2851{ 2868{
2852 drm_radeon_private_t *dev_priv = dev->dev_private; 2869 drm_radeon_private_t *dev_priv = dev->dev_private;
2853 struct drm_device_dma *dma = dev->dma; 2870 struct drm_device_dma *dma = dev->dma;
2854 struct drm_buf *buf = NULL; 2871 struct drm_buf *buf = NULL;
2872 drm_radeon_cmd_header_t stack_header;
2855 int idx; 2873 int idx;
2856 drm_radeon_kcmd_buffer_t *cmdbuf = data; 2874 drm_radeon_kcmd_buffer_t *cmdbuf = data;
2857 drm_radeon_cmd_header_t header; 2875 int orig_nbox;
2858 int orig_nbox, orig_bufsz;
2859 char *kbuf = NULL;
2860 2876
2861 LOCK_TEST_WITH_RETURN(dev, file_priv); 2877 LOCK_TEST_WITH_RETURN(dev, file_priv);
2862 2878
@@ -2871,17 +2887,16 @@ static int radeon_cp_cmdbuf(struct drm_device *dev, void *data, struct drm_file
2871 * races between checking values and using those values in other code, 2887 * races between checking values and using those values in other code,
2872 * and simply to avoid a lot of function calls to copy in data. 2888 * and simply to avoid a lot of function calls to copy in data.
2873 */ 2889 */
2874 orig_bufsz = cmdbuf->bufsz; 2890 if (cmdbuf->bufsz != 0) {
2875 if (orig_bufsz != 0) { 2891 int rv;
2876 kbuf = kmalloc(cmdbuf->bufsz, GFP_KERNEL); 2892 void __user *buffer = cmdbuf->buffer;
2877 if (kbuf == NULL) 2893 rv = drm_buffer_alloc(&cmdbuf->buffer, cmdbuf->bufsz);
2878 return -ENOMEM; 2894 if (rv)
2879 if (DRM_COPY_FROM_USER(kbuf, (void __user *)cmdbuf->buf, 2895 return rv;
2880 cmdbuf->bufsz)) { 2896 rv = drm_buffer_copy_from_user(cmdbuf->buffer, buffer,
2881 kfree(kbuf); 2897 cmdbuf->bufsz);
2882 return -EFAULT; 2898 if (rv)
2883 } 2899 return rv;
2884 cmdbuf->buf = kbuf;
2885 } 2900 }
2886 2901
2887 orig_nbox = cmdbuf->nbox; 2902 orig_nbox = cmdbuf->nbox;
@@ -2890,24 +2905,24 @@ static int radeon_cp_cmdbuf(struct drm_device *dev, void *data, struct drm_file
2890 int temp; 2905 int temp;
2891 temp = r300_do_cp_cmdbuf(dev, file_priv, cmdbuf); 2906 temp = r300_do_cp_cmdbuf(dev, file_priv, cmdbuf);
2892 2907
2893 if (orig_bufsz != 0) 2908 if (cmdbuf->bufsz != 0)
2894 kfree(kbuf); 2909 drm_buffer_free(cmdbuf->buffer);
2895 2910
2896 return temp; 2911 return temp;
2897 } 2912 }
2898 2913
2899 /* microcode_version != r300 */ 2914 /* microcode_version != r300 */
2900 while (cmdbuf->bufsz >= sizeof(header)) { 2915 while (drm_buffer_unprocessed(cmdbuf->buffer) >= sizeof(stack_header)) {
2901 2916
2902 header.i = *(int *)cmdbuf->buf; 2917 drm_radeon_cmd_header_t *header;
2903 cmdbuf->buf += sizeof(header); 2918 header = drm_buffer_read_object(cmdbuf->buffer,
2904 cmdbuf->bufsz -= sizeof(header); 2919 sizeof(stack_header), &stack_header);
2905 2920
2906 switch (header.header.cmd_type) { 2921 switch (header->header.cmd_type) {
2907 case RADEON_CMD_PACKET: 2922 case RADEON_CMD_PACKET:
2908 DRM_DEBUG("RADEON_CMD_PACKET\n"); 2923 DRM_DEBUG("RADEON_CMD_PACKET\n");
2909 if (radeon_emit_packets 2924 if (radeon_emit_packets
2910 (dev_priv, file_priv, header, cmdbuf)) { 2925 (dev_priv, file_priv, *header, cmdbuf)) {
2911 DRM_ERROR("radeon_emit_packets failed\n"); 2926 DRM_ERROR("radeon_emit_packets failed\n");
2912 goto err; 2927 goto err;
2913 } 2928 }
@@ -2915,7 +2930,7 @@ static int radeon_cp_cmdbuf(struct drm_device *dev, void *data, struct drm_file
2915 2930
2916 case RADEON_CMD_SCALARS: 2931 case RADEON_CMD_SCALARS:
2917 DRM_DEBUG("RADEON_CMD_SCALARS\n"); 2932 DRM_DEBUG("RADEON_CMD_SCALARS\n");
2918 if (radeon_emit_scalars(dev_priv, header, cmdbuf)) { 2933 if (radeon_emit_scalars(dev_priv, *header, cmdbuf)) {
2919 DRM_ERROR("radeon_emit_scalars failed\n"); 2934 DRM_ERROR("radeon_emit_scalars failed\n");
2920 goto err; 2935 goto err;
2921 } 2936 }
@@ -2923,7 +2938,7 @@ static int radeon_cp_cmdbuf(struct drm_device *dev, void *data, struct drm_file
2923 2938
2924 case RADEON_CMD_VECTORS: 2939 case RADEON_CMD_VECTORS:
2925 DRM_DEBUG("RADEON_CMD_VECTORS\n"); 2940 DRM_DEBUG("RADEON_CMD_VECTORS\n");
2926 if (radeon_emit_vectors(dev_priv, header, cmdbuf)) { 2941 if (radeon_emit_vectors(dev_priv, *header, cmdbuf)) {
2927 DRM_ERROR("radeon_emit_vectors failed\n"); 2942 DRM_ERROR("radeon_emit_vectors failed\n");
2928 goto err; 2943 goto err;
2929 } 2944 }
@@ -2931,7 +2946,7 @@ static int radeon_cp_cmdbuf(struct drm_device *dev, void *data, struct drm_file
2931 2946
2932 case RADEON_CMD_DMA_DISCARD: 2947 case RADEON_CMD_DMA_DISCARD:
2933 DRM_DEBUG("RADEON_CMD_DMA_DISCARD\n"); 2948 DRM_DEBUG("RADEON_CMD_DMA_DISCARD\n");
2934 idx = header.dma.buf_idx; 2949 idx = header->dma.buf_idx;
2935 if (idx < 0 || idx >= dma->buf_count) { 2950 if (idx < 0 || idx >= dma->buf_count) {
2936 DRM_ERROR("buffer index %d (of %d max)\n", 2951 DRM_ERROR("buffer index %d (of %d max)\n",
2937 idx, dma->buf_count - 1); 2952 idx, dma->buf_count - 1);
@@ -2968,7 +2983,7 @@ static int radeon_cp_cmdbuf(struct drm_device *dev, void *data, struct drm_file
2968 2983
2969 case RADEON_CMD_SCALARS2: 2984 case RADEON_CMD_SCALARS2:
2970 DRM_DEBUG("RADEON_CMD_SCALARS2\n"); 2985 DRM_DEBUG("RADEON_CMD_SCALARS2\n");
2971 if (radeon_emit_scalars2(dev_priv, header, cmdbuf)) { 2986 if (radeon_emit_scalars2(dev_priv, *header, cmdbuf)) {
2972 DRM_ERROR("radeon_emit_scalars2 failed\n"); 2987 DRM_ERROR("radeon_emit_scalars2 failed\n");
2973 goto err; 2988 goto err;
2974 } 2989 }
@@ -2976,37 +2991,37 @@ static int radeon_cp_cmdbuf(struct drm_device *dev, void *data, struct drm_file
2976 2991
2977 case RADEON_CMD_WAIT: 2992 case RADEON_CMD_WAIT:
2978 DRM_DEBUG("RADEON_CMD_WAIT\n"); 2993 DRM_DEBUG("RADEON_CMD_WAIT\n");
2979 if (radeon_emit_wait(dev, header.wait.flags)) { 2994 if (radeon_emit_wait(dev, header->wait.flags)) {
2980 DRM_ERROR("radeon_emit_wait failed\n"); 2995 DRM_ERROR("radeon_emit_wait failed\n");
2981 goto err; 2996 goto err;
2982 } 2997 }
2983 break; 2998 break;
2984 case RADEON_CMD_VECLINEAR: 2999 case RADEON_CMD_VECLINEAR:
2985 DRM_DEBUG("RADEON_CMD_VECLINEAR\n"); 3000 DRM_DEBUG("RADEON_CMD_VECLINEAR\n");
2986 if (radeon_emit_veclinear(dev_priv, header, cmdbuf)) { 3001 if (radeon_emit_veclinear(dev_priv, *header, cmdbuf)) {
2987 DRM_ERROR("radeon_emit_veclinear failed\n"); 3002 DRM_ERROR("radeon_emit_veclinear failed\n");
2988 goto err; 3003 goto err;
2989 } 3004 }
2990 break; 3005 break;
2991 3006
2992 default: 3007 default:
2993 DRM_ERROR("bad cmd_type %d at %p\n", 3008 DRM_ERROR("bad cmd_type %d at byte %d\n",
2994 header.header.cmd_type, 3009 header->header.cmd_type,
2995 cmdbuf->buf - sizeof(header)); 3010 cmdbuf->buffer->iterator);
2996 goto err; 3011 goto err;
2997 } 3012 }
2998 } 3013 }
2999 3014
3000 if (orig_bufsz != 0) 3015 if (cmdbuf->bufsz != 0)
3001 kfree(kbuf); 3016 drm_buffer_free(cmdbuf->buffer);
3002 3017
3003 DRM_DEBUG("DONE\n"); 3018 DRM_DEBUG("DONE\n");
3004 COMMIT_RING(); 3019 COMMIT_RING();
3005 return 0; 3020 return 0;
3006 3021
3007 err: 3022 err:
3008 if (orig_bufsz != 0) 3023 if (cmdbuf->bufsz != 0)
3009 kfree(kbuf); 3024 drm_buffer_free(cmdbuf->buffer);
3010 return -EINVAL; 3025 return -EINVAL;
3011} 3026}
3012 3027
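The radeon_state.c hunks above convert the command-stream parser from raw cmd[i] indexing and manual buf/bufsz bookkeeping to the drm_buffer helpers (drm_buffer_pointer_to_dword(), drm_buffer_unprocessed(), drm_buffer_read_object(), drm_buffer_advance(), OUT_RING_DRM_BUFFER()), which let the copied-in stream live in page-sized chunks instead of one large kmalloc. The toy model below sketches that access pattern under a single-page assumption, for orientation only; the real helpers in drm_buffer.c also handle dwords and objects that straddle page boundaries, which this model omits. Note that the success and err: paths both free the buffer via drm_buffer_free(), while the early return after a failed drm_buffer_copy_from_user() appears to leave the freshly allocated buffer unfreed.

#include <stdint.h>
#include <stdio.h>

/* Single-page model of the drm_buffer access pattern, not the kernel code. */
struct toy_buffer {
	uint32_t *data;   /* copied-in command stream */
	int size;         /* total size in bytes */
	int iterator;     /* current read offset in bytes */
};

/* Bytes not yet consumed: the replacement for the old cmdbuf->bufsz. */
static int toy_unprocessed(const struct toy_buffer *buf)
{
	return buf->size - buf->iterator;
}

/* Pointer to the dword 'offset' dwords past the iterator: the
 * replacement for the old cmd[offset] indexing. */
static uint32_t *toy_pointer_to_dword(struct toy_buffer *buf, int offset)
{
	return &buf->data[buf->iterator / 4 + offset];
}

/* Consume bytes: the replacement for cmdbuf->buf += n; cmdbuf->bufsz -= n. */
static void toy_advance(struct toy_buffer *buf, int bytes)
{
	buf->iterator += bytes;
}

int main(void)
{
	uint32_t stream[4] = { 0xC0001000u, 0x80000810u, 0x00400000u, 0 };
	struct toy_buffer buf = { stream, (int)sizeof(stream), 0 };
	uint32_t *cmd = toy_pointer_to_dword(&buf, 1);

	if ((*cmd & 0x8000ffffu) != 0x80000810u)
		fprintf(stderr, "Invalid indx_buffer reg address %08X\n",
			(unsigned)*cmd);
	toy_advance(&buf, 4 * 4);
	printf("unprocessed: %d bytes\n", toy_unprocessed(&buf));
	return 0;
}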
diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c
index 9f5e2f929da9..313c96bc09da 100644
--- a/drivers/gpu/drm/radeon/radeon_test.c
+++ b/drivers/gpu/drm/radeon/radeon_test.c
@@ -186,7 +186,7 @@ void radeon_test_moves(struct radeon_device *rdev)
186 radeon_bo_kunmap(gtt_obj[i]); 186 radeon_bo_kunmap(gtt_obj[i]);
187 187
188 DRM_INFO("Tested GTT->VRAM and VRAM->GTT copy for GTT offset 0x%llx\n", 188 DRM_INFO("Tested GTT->VRAM and VRAM->GTT copy for GTT offset 0x%llx\n",
189 gtt_addr - rdev->mc.gtt_location); 189 gtt_addr - rdev->mc.gtt_start);
190 } 190 }
191 191
192out_cleanup: 192out_cleanup:
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 58b5adf974ca..43c5ab34b634 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -150,7 +150,7 @@ static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
150 man->default_caching = TTM_PL_FLAG_CACHED; 150 man->default_caching = TTM_PL_FLAG_CACHED;
151 break; 151 break;
152 case TTM_PL_TT: 152 case TTM_PL_TT:
153 man->gpu_offset = rdev->mc.gtt_location; 153 man->gpu_offset = rdev->mc.gtt_start;
154 man->available_caching = TTM_PL_MASK_CACHING; 154 man->available_caching = TTM_PL_MASK_CACHING;
155 man->default_caching = TTM_PL_FLAG_CACHED; 155 man->default_caching = TTM_PL_FLAG_CACHED;
156 man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA; 156 man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA;
@@ -180,7 +180,7 @@ static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
180 break; 180 break;
181 case TTM_PL_VRAM: 181 case TTM_PL_VRAM:
182 /* "On-card" video ram */ 182 /* "On-card" video ram */
183 man->gpu_offset = rdev->mc.vram_location; 183 man->gpu_offset = rdev->mc.vram_start;
184 man->flags = TTM_MEMTYPE_FLAG_FIXED | 184 man->flags = TTM_MEMTYPE_FLAG_FIXED |
185 TTM_MEMTYPE_FLAG_NEEDS_IOREMAP | 185 TTM_MEMTYPE_FLAG_NEEDS_IOREMAP |
186 TTM_MEMTYPE_FLAG_MAPPABLE; 186 TTM_MEMTYPE_FLAG_MAPPABLE;
@@ -262,10 +262,10 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
262 262
263 switch (old_mem->mem_type) { 263 switch (old_mem->mem_type) {
264 case TTM_PL_VRAM: 264 case TTM_PL_VRAM:
265 old_start += rdev->mc.vram_location; 265 old_start += rdev->mc.vram_start;
266 break; 266 break;
267 case TTM_PL_TT: 267 case TTM_PL_TT:
268 old_start += rdev->mc.gtt_location; 268 old_start += rdev->mc.gtt_start;
269 break; 269 break;
270 default: 270 default:
271 DRM_ERROR("Unknown placement %d\n", old_mem->mem_type); 271 DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
@@ -273,10 +273,10 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
273 } 273 }
274 switch (new_mem->mem_type) { 274 switch (new_mem->mem_type) {
275 case TTM_PL_VRAM: 275 case TTM_PL_VRAM:
276 new_start += rdev->mc.vram_location; 276 new_start += rdev->mc.vram_start;
277 break; 277 break;
278 case TTM_PL_TT: 278 case TTM_PL_TT:
279 new_start += rdev->mc.gtt_location; 279 new_start += rdev->mc.gtt_start;
280 break; 280 break;
281 default: 281 default:
282 DRM_ERROR("Unknown placement %d\n", old_mem->mem_type); 282 DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
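The radeon_test.c and radeon_ttm.c hunks are mechanical renames from the old single-value vram_location/gtt_location fields to the explicit vram_start/gtt_start naming. A sketch of the bookkeeping the rename reflects, with illustrative field names rather than the kernel's struct radeon_mc:

#include <stdint.h>

/* Illustrative only: the renamed fields describe explicit address
 * ranges, so consumers such as man->gpu_offset read *_start directly. */
struct mc_ranges {
	uint64_t vram_start, vram_size, vram_end;
	uint64_t gtt_start, gtt_size, gtt_end;
};

static void mc_ranges_update(struct mc_ranges *mc)
{
	/* ends are inclusive, matching the gtt_end >> 16 usage in the
	 * rs400.c hunk further down */
	mc->vram_end = mc->vram_start + mc->vram_size - 1;
	mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
}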
diff --git a/drivers/gpu/drm/radeon/reg_srcs/r600 b/drivers/gpu/drm/radeon/reg_srcs/r600
new file mode 100644
index 000000000000..8f414a5f520f
--- /dev/null
+++ b/drivers/gpu/drm/radeon/reg_srcs/r600
@@ -0,0 +1,837 @@
1r600 0x9400
20x000287A0 R7xx_CB_SHADER_CONTROL
30x00028230 R7xx_PA_SC_EDGERULE
40x000286C8 R7xx_SPI_THREAD_GROUPING
50x00008D8C R7xx_SQ_DYN_GPR_CNTL_PS_FLUSH_REQ
60x000088C4 VGT_CACHE_INVALIDATION
70x00028A50 VGT_ENHANCE
80x000088CC VGT_ES_PER_GS
90x00028A2C VGT_GROUP_DECR
100x00028A28 VGT_GROUP_FIRST_DECR
110x00028A24 VGT_GROUP_PRIM_TYPE
120x00028A30 VGT_GROUP_VECT_0_CNTL
130x00028A38 VGT_GROUP_VECT_0_FMT_CNTL
140x00028A34 VGT_GROUP_VECT_1_CNTL
150x00028A3C VGT_GROUP_VECT_1_FMT_CNTL
160x00028A40 VGT_GS_MODE
170x00028A6C VGT_GS_OUT_PRIM_TYPE
180x000088C8 VGT_GS_PER_ES
190x000088E8 VGT_GS_PER_VS
200x000088D4 VGT_GS_VERTEX_REUSE
210x00028A14 VGT_HOS_CNTL
220x00028A18 VGT_HOS_MAX_TESS_LEVEL
230x00028A1C VGT_HOS_MIN_TESS_LEVEL
240x00028A20 VGT_HOS_REUSE_DEPTH
250x0000895C VGT_INDEX_TYPE
260x00028408 VGT_INDX_OFFSET
270x00028AA0 VGT_INSTANCE_STEP_RATE_0
280x00028AA4 VGT_INSTANCE_STEP_RATE_1
290x000088C0 VGT_LAST_COPY_STATE
300x00028400 VGT_MAX_VTX_INDX
310x000088D8 VGT_MC_LAT_CNTL
320x00028404 VGT_MIN_VTX_INDX
330x00028A94 VGT_MULTI_PRIM_IB_RESET_EN
340x0002840C VGT_MULTI_PRIM_IB_RESET_INDX
350x00008970 VGT_NUM_INDICES
360x00008974 VGT_NUM_INSTANCES
370x00028A10 VGT_OUTPUT_PATH_CNTL
380x00028C5C VGT_OUT_DEALLOC_CNTL
390x00028A84 VGT_PRIMITIVEID_EN
400x00008958 VGT_PRIMITIVE_TYPE
410x00028AB4 VGT_REUSE_OFF
420x00028C58 VGT_VERTEX_REUSE_BLOCK_CNTL
430x00028AB8 VGT_VTX_CNT_EN
440x000088B0 VGT_VTX_VECT_EJECT_REG
450x00028810 PA_CL_CLIP_CNTL
460x00008A14 PA_CL_ENHANCE
470x00028C14 PA_CL_GB_HORZ_CLIP_ADJ
480x00028C18 PA_CL_GB_HORZ_DISC_ADJ
490x00028C0C PA_CL_GB_VERT_CLIP_ADJ
500x00028C10 PA_CL_GB_VERT_DISC_ADJ
510x00028820 PA_CL_NANINF_CNTL
520x00028E1C PA_CL_POINT_CULL_RAD
530x00028E18 PA_CL_POINT_SIZE
540x00028E10 PA_CL_POINT_X_RAD
550x00028E14 PA_CL_POINT_Y_RAD
560x00028E2C PA_CL_UCP_0_W
570x00028E3C PA_CL_UCP_1_W
580x00028E4C PA_CL_UCP_2_W
590x00028E5C PA_CL_UCP_3_W
600x00028E6C PA_CL_UCP_4_W
610x00028E7C PA_CL_UCP_5_W
620x00028E20 PA_CL_UCP_0_X
630x00028E30 PA_CL_UCP_1_X
640x00028E40 PA_CL_UCP_2_X
650x00028E50 PA_CL_UCP_3_X
660x00028E60 PA_CL_UCP_4_X
670x00028E70 PA_CL_UCP_5_X
680x00028E24 PA_CL_UCP_0_Y
690x00028E34 PA_CL_UCP_1_Y
700x00028E44 PA_CL_UCP_2_Y
710x00028E54 PA_CL_UCP_3_Y
720x00028E64 PA_CL_UCP_4_Y
730x00028E74 PA_CL_UCP_5_Y
740x00028E28 PA_CL_UCP_0_Z
750x00028E38 PA_CL_UCP_1_Z
760x00028E48 PA_CL_UCP_2_Z
770x00028E58 PA_CL_UCP_3_Z
780x00028E68 PA_CL_UCP_4_Z
790x00028E78 PA_CL_UCP_5_Z
800x00028440 PA_CL_VPORT_XOFFSET_0
810x00028458 PA_CL_VPORT_XOFFSET_1
820x00028470 PA_CL_VPORT_XOFFSET_2
830x00028488 PA_CL_VPORT_XOFFSET_3
840x000284A0 PA_CL_VPORT_XOFFSET_4
850x000284B8 PA_CL_VPORT_XOFFSET_5
860x000284D0 PA_CL_VPORT_XOFFSET_6
870x000284E8 PA_CL_VPORT_XOFFSET_7
880x00028500 PA_CL_VPORT_XOFFSET_8
890x00028518 PA_CL_VPORT_XOFFSET_9
900x00028530 PA_CL_VPORT_XOFFSET_10
910x00028548 PA_CL_VPORT_XOFFSET_11
920x00028560 PA_CL_VPORT_XOFFSET_12
930x00028578 PA_CL_VPORT_XOFFSET_13
940x00028590 PA_CL_VPORT_XOFFSET_14
950x000285A8 PA_CL_VPORT_XOFFSET_15
960x0002843C PA_CL_VPORT_XSCALE_0
970x00028454 PA_CL_VPORT_XSCALE_1
980x0002846C PA_CL_VPORT_XSCALE_2
990x00028484 PA_CL_VPORT_XSCALE_3
1000x0002849C PA_CL_VPORT_XSCALE_4
1010x000284B4 PA_CL_VPORT_XSCALE_5
1020x000284CC PA_CL_VPORT_XSCALE_6
1030x000284E4 PA_CL_VPORT_XSCALE_7
1040x000284FC PA_CL_VPORT_XSCALE_8
1050x00028514 PA_CL_VPORT_XSCALE_9
1060x0002852C PA_CL_VPORT_XSCALE_10
1070x00028544 PA_CL_VPORT_XSCALE_11
1080x0002855C PA_CL_VPORT_XSCALE_12
1090x00028574 PA_CL_VPORT_XSCALE_13
1100x0002858C PA_CL_VPORT_XSCALE_14
1110x000285A4 PA_CL_VPORT_XSCALE_15
1120x00028448 PA_CL_VPORT_YOFFSET_0
1130x00028460 PA_CL_VPORT_YOFFSET_1
1140x00028478 PA_CL_VPORT_YOFFSET_2
1150x00028490 PA_CL_VPORT_YOFFSET_3
1160x000284A8 PA_CL_VPORT_YOFFSET_4
1170x000284C0 PA_CL_VPORT_YOFFSET_5
1180x000284D8 PA_CL_VPORT_YOFFSET_6
1190x000284F0 PA_CL_VPORT_YOFFSET_7
1200x00028508 PA_CL_VPORT_YOFFSET_8
1210x00028520 PA_CL_VPORT_YOFFSET_9
1220x00028538 PA_CL_VPORT_YOFFSET_10
1230x00028550 PA_CL_VPORT_YOFFSET_11
1240x00028568 PA_CL_VPORT_YOFFSET_12
1250x00028580 PA_CL_VPORT_YOFFSET_13
1260x00028598 PA_CL_VPORT_YOFFSET_14
1270x000285B0 PA_CL_VPORT_YOFFSET_15
1280x00028444 PA_CL_VPORT_YSCALE_0
1290x0002845C PA_CL_VPORT_YSCALE_1
1300x00028474 PA_CL_VPORT_YSCALE_2
1310x0002848C PA_CL_VPORT_YSCALE_3
1320x000284A4 PA_CL_VPORT_YSCALE_4
1330x000284BC PA_CL_VPORT_YSCALE_5
1340x000284D4 PA_CL_VPORT_YSCALE_6
1350x000284EC PA_CL_VPORT_YSCALE_7
1360x00028504 PA_CL_VPORT_YSCALE_8
1370x0002851C PA_CL_VPORT_YSCALE_9
1380x00028534 PA_CL_VPORT_YSCALE_10
1390x0002854C PA_CL_VPORT_YSCALE_11
1400x00028564 PA_CL_VPORT_YSCALE_12
1410x0002857C PA_CL_VPORT_YSCALE_13
1420x00028594 PA_CL_VPORT_YSCALE_14
1430x000285AC PA_CL_VPORT_YSCALE_15
1440x00028450 PA_CL_VPORT_ZOFFSET_0
1450x00028468 PA_CL_VPORT_ZOFFSET_1
1460x00028480 PA_CL_VPORT_ZOFFSET_2
1470x00028498 PA_CL_VPORT_ZOFFSET_3
1480x000284B0 PA_CL_VPORT_ZOFFSET_4
1490x000284C8 PA_CL_VPORT_ZOFFSET_5
1500x000284E0 PA_CL_VPORT_ZOFFSET_6
1510x000284F8 PA_CL_VPORT_ZOFFSET_7
1520x00028510 PA_CL_VPORT_ZOFFSET_8
1530x00028528 PA_CL_VPORT_ZOFFSET_9
1540x00028540 PA_CL_VPORT_ZOFFSET_10
1550x00028558 PA_CL_VPORT_ZOFFSET_11
1560x00028570 PA_CL_VPORT_ZOFFSET_12
1570x00028588 PA_CL_VPORT_ZOFFSET_13
1580x000285A0 PA_CL_VPORT_ZOFFSET_14
1590x000285B8 PA_CL_VPORT_ZOFFSET_15
1600x0002844C PA_CL_VPORT_ZSCALE_0
1610x00028464 PA_CL_VPORT_ZSCALE_1
1620x0002847C PA_CL_VPORT_ZSCALE_2
1630x00028494 PA_CL_VPORT_ZSCALE_3
1640x000284AC PA_CL_VPORT_ZSCALE_4
1650x000284C4 PA_CL_VPORT_ZSCALE_5
1660x000284DC PA_CL_VPORT_ZSCALE_6
1670x000284F4 PA_CL_VPORT_ZSCALE_7
1680x0002850C PA_CL_VPORT_ZSCALE_8
1690x00028524 PA_CL_VPORT_ZSCALE_9
1700x0002853C PA_CL_VPORT_ZSCALE_10
1710x00028554 PA_CL_VPORT_ZSCALE_11
1720x0002856C PA_CL_VPORT_ZSCALE_12
1730x00028584 PA_CL_VPORT_ZSCALE_13
1740x0002859C PA_CL_VPORT_ZSCALE_14
1750x000285B4 PA_CL_VPORT_ZSCALE_15
1760x0002881C PA_CL_VS_OUT_CNTL
1770x00028818 PA_CL_VTE_CNTL
1780x00028C48 PA_SC_AA_MASK
1790x00008B40 PA_SC_AA_SAMPLE_LOCS_2S
1800x00008B44 PA_SC_AA_SAMPLE_LOCS_4S
1810x00008B48 PA_SC_AA_SAMPLE_LOCS_8S_WD0
1820x00008B4C PA_SC_AA_SAMPLE_LOCS_8S_WD1
1830x00028C20 PA_SC_AA_SAMPLE_LOCS_8S_WD1_MCTX
1840x00028C1C PA_SC_AA_SAMPLE_LOCS_MCTX
1850x00028214 PA_SC_CLIPRECT_0_BR
1860x0002821C PA_SC_CLIPRECT_1_BR
1870x00028224 PA_SC_CLIPRECT_2_BR
1880x0002822C PA_SC_CLIPRECT_3_BR
1890x00028210 PA_SC_CLIPRECT_0_TL
1900x00028218 PA_SC_CLIPRECT_1_TL
1910x00028220 PA_SC_CLIPRECT_2_TL
1920x00028228 PA_SC_CLIPRECT_3_TL
1930x0002820C PA_SC_CLIPRECT_RULE
1940x00008BF0 PA_SC_ENHANCE
1950x00028244 PA_SC_GENERIC_SCISSOR_BR
1960x00028240 PA_SC_GENERIC_SCISSOR_TL
1970x00028C00 PA_SC_LINE_CNTL
1980x00028A0C PA_SC_LINE_STIPPLE
1990x00008B10 PA_SC_LINE_STIPPLE_STATE
2000x00028A4C PA_SC_MODE_CNTL
2010x00028A48 PA_SC_MPASS_PS_CNTL
2020x00008B20 PA_SC_MULTI_CHIP_CNTL
2030x00028034 PA_SC_SCREEN_SCISSOR_BR
2040x00028030 PA_SC_SCREEN_SCISSOR_TL
2050x00028254 PA_SC_VPORT_SCISSOR_0_BR
2060x0002825C PA_SC_VPORT_SCISSOR_1_BR
2070x00028264 PA_SC_VPORT_SCISSOR_2_BR
2080x0002826C PA_SC_VPORT_SCISSOR_3_BR
2090x00028274 PA_SC_VPORT_SCISSOR_4_BR
2100x0002827C PA_SC_VPORT_SCISSOR_5_BR
2110x00028284 PA_SC_VPORT_SCISSOR_6_BR
2120x0002828C PA_SC_VPORT_SCISSOR_7_BR
2130x00028294 PA_SC_VPORT_SCISSOR_8_BR
2140x0002829C PA_SC_VPORT_SCISSOR_9_BR
2150x000282A4 PA_SC_VPORT_SCISSOR_10_BR
2160x000282AC PA_SC_VPORT_SCISSOR_11_BR
2170x000282B4 PA_SC_VPORT_SCISSOR_12_BR
2180x000282BC PA_SC_VPORT_SCISSOR_13_BR
2190x000282C4 PA_SC_VPORT_SCISSOR_14_BR
2200x000282CC PA_SC_VPORT_SCISSOR_15_BR
2210x00028250 PA_SC_VPORT_SCISSOR_0_TL
2220x00028258 PA_SC_VPORT_SCISSOR_1_TL
2230x00028260 PA_SC_VPORT_SCISSOR_2_TL
2240x00028268 PA_SC_VPORT_SCISSOR_3_TL
2250x00028270 PA_SC_VPORT_SCISSOR_4_TL
2260x00028278 PA_SC_VPORT_SCISSOR_5_TL
2270x00028280 PA_SC_VPORT_SCISSOR_6_TL
2280x00028288 PA_SC_VPORT_SCISSOR_7_TL
2290x00028290 PA_SC_VPORT_SCISSOR_8_TL
2300x00028298 PA_SC_VPORT_SCISSOR_9_TL
2310x000282A0 PA_SC_VPORT_SCISSOR_10_TL
2320x000282A8 PA_SC_VPORT_SCISSOR_11_TL
2330x000282B0 PA_SC_VPORT_SCISSOR_12_TL
2340x000282B8 PA_SC_VPORT_SCISSOR_13_TL
2350x000282C0 PA_SC_VPORT_SCISSOR_14_TL
2360x000282C8 PA_SC_VPORT_SCISSOR_15_TL
2370x000282D4 PA_SC_VPORT_ZMAX_0
2380x000282DC PA_SC_VPORT_ZMAX_1
2390x000282E4 PA_SC_VPORT_ZMAX_2
2400x000282EC PA_SC_VPORT_ZMAX_3
2410x000282F4 PA_SC_VPORT_ZMAX_4
2420x000282FC PA_SC_VPORT_ZMAX_5
2430x00028304 PA_SC_VPORT_ZMAX_6
2440x0002830C PA_SC_VPORT_ZMAX_7
2450x00028314 PA_SC_VPORT_ZMAX_8
2460x0002831C PA_SC_VPORT_ZMAX_9
2470x00028324 PA_SC_VPORT_ZMAX_10
2480x0002832C PA_SC_VPORT_ZMAX_11
2490x00028334 PA_SC_VPORT_ZMAX_12
2500x0002833C PA_SC_VPORT_ZMAX_13
2510x00028344 PA_SC_VPORT_ZMAX_14
2520x0002834C PA_SC_VPORT_ZMAX_15
2530x000282D0 PA_SC_VPORT_ZMIN_0
2540x000282D8 PA_SC_VPORT_ZMIN_1
2550x000282E0 PA_SC_VPORT_ZMIN_2
2560x000282E8 PA_SC_VPORT_ZMIN_3
2570x000282F0 PA_SC_VPORT_ZMIN_4
2580x000282F8 PA_SC_VPORT_ZMIN_5
2590x00028300 PA_SC_VPORT_ZMIN_6
2600x00028308 PA_SC_VPORT_ZMIN_7
2610x00028310 PA_SC_VPORT_ZMIN_8
2620x00028318 PA_SC_VPORT_ZMIN_9
2630x00028320 PA_SC_VPORT_ZMIN_10
2640x00028328 PA_SC_VPORT_ZMIN_11
2650x00028330 PA_SC_VPORT_ZMIN_12
2660x00028338 PA_SC_VPORT_ZMIN_13
2670x00028340 PA_SC_VPORT_ZMIN_14
2680x00028348 PA_SC_VPORT_ZMIN_15
2690x00028200 PA_SC_WINDOW_OFFSET
2700x00028208 PA_SC_WINDOW_SCISSOR_BR
2710x00028204 PA_SC_WINDOW_SCISSOR_TL
2720x00028A08 PA_SU_LINE_CNTL
2730x00028A04 PA_SU_POINT_MINMAX
2740x00028A00 PA_SU_POINT_SIZE
2750x00028E0C PA_SU_POLY_OFFSET_BACK_OFFSET
2760x00028E08 PA_SU_POLY_OFFSET_BACK_SCALE
2770x00028DFC PA_SU_POLY_OFFSET_CLAMP
2780x00028DF8 PA_SU_POLY_OFFSET_DB_FMT_CNTL
2790x00028E04 PA_SU_POLY_OFFSET_FRONT_OFFSET
2800x00028E00 PA_SU_POLY_OFFSET_FRONT_SCALE
2810x00028814 PA_SU_SC_MODE_CNTL
2820x00028C08 PA_SU_VTX_CNTL
2830x00008C00 SQ_CONFIG
2840x00008C04 SQ_GPR_RESOURCE_MGMT_1
2850x00008C08 SQ_GPR_RESOURCE_MGMT_2
2860x00008C10 SQ_STACK_RESOURCE_MGMT_1
2870x00008C14 SQ_STACK_RESOURCE_MGMT_2
2880x00008C0C SQ_THREAD_RESOURCE_MGMT
2890x00028380 SQ_VTX_SEMANTIC_0
2900x00028384 SQ_VTX_SEMANTIC_1
2910x00028388 SQ_VTX_SEMANTIC_2
2920x0002838C SQ_VTX_SEMANTIC_3
2930x00028390 SQ_VTX_SEMANTIC_4
2940x00028394 SQ_VTX_SEMANTIC_5
2950x00028398 SQ_VTX_SEMANTIC_6
2960x0002839C SQ_VTX_SEMANTIC_7
2970x000283A0 SQ_VTX_SEMANTIC_8
2980x000283A4 SQ_VTX_SEMANTIC_9
2990x000283A8 SQ_VTX_SEMANTIC_10
3000x000283AC SQ_VTX_SEMANTIC_11
3010x000283B0 SQ_VTX_SEMANTIC_12
3020x000283B4 SQ_VTX_SEMANTIC_13
3030x000283B8 SQ_VTX_SEMANTIC_14
3040x000283BC SQ_VTX_SEMANTIC_15
3050x000283C0 SQ_VTX_SEMANTIC_16
3060x000283C4 SQ_VTX_SEMANTIC_17
3070x000283C8 SQ_VTX_SEMANTIC_18
3080x000283CC SQ_VTX_SEMANTIC_19
3090x000283D0 SQ_VTX_SEMANTIC_20
3100x000283D4 SQ_VTX_SEMANTIC_21
3110x000283D8 SQ_VTX_SEMANTIC_22
3120x000283DC SQ_VTX_SEMANTIC_23
3130x000283E0 SQ_VTX_SEMANTIC_24
3140x000283E4 SQ_VTX_SEMANTIC_25
3150x000283E8 SQ_VTX_SEMANTIC_26
3160x000283EC SQ_VTX_SEMANTIC_27
3170x000283F0 SQ_VTX_SEMANTIC_28
3180x000283F4 SQ_VTX_SEMANTIC_29
3190x000283F8 SQ_VTX_SEMANTIC_30
3200x000283FC SQ_VTX_SEMANTIC_31
3210x000288E0 SQ_VTX_SEMANTIC_CLEAR
3220x0003CFF4 SQ_VTX_START_INST_LOC
3230x0003C000 SQ_TEX_SAMPLER_WORD0_0
3240x0003C004 SQ_TEX_SAMPLER_WORD1_0
3250x0003C008 SQ_TEX_SAMPLER_WORD2_0
3260x00030000 SQ_ALU_CONSTANT0_0
3270x00030004 SQ_ALU_CONSTANT1_0
3280x00030008 SQ_ALU_CONSTANT2_0
3290x0003000C SQ_ALU_CONSTANT3_0
3300x0003E380 SQ_BOOL_CONST_0
3310x0003E384 SQ_BOOL_CONST_1
3320x0003E388 SQ_BOOL_CONST_2
3330x0003E200 SQ_LOOP_CONST_0
3340x0003E200 SQ_LOOP_CONST_DX10_0
3350x000281C0 SQ_ALU_CONST_BUFFER_SIZE_GS_0
3360x000281C4 SQ_ALU_CONST_BUFFER_SIZE_GS_1
3370x000281C8 SQ_ALU_CONST_BUFFER_SIZE_GS_2
3380x000281CC SQ_ALU_CONST_BUFFER_SIZE_GS_3
3390x000281D0 SQ_ALU_CONST_BUFFER_SIZE_GS_4
3400x000281D4 SQ_ALU_CONST_BUFFER_SIZE_GS_5
3410x000281D8 SQ_ALU_CONST_BUFFER_SIZE_GS_6
3420x000281DC SQ_ALU_CONST_BUFFER_SIZE_GS_7
3430x000281E0 SQ_ALU_CONST_BUFFER_SIZE_GS_8
3440x000281E4 SQ_ALU_CONST_BUFFER_SIZE_GS_9
3450x000281E8 SQ_ALU_CONST_BUFFER_SIZE_GS_10
3460x000281EC SQ_ALU_CONST_BUFFER_SIZE_GS_11
3470x000281F0 SQ_ALU_CONST_BUFFER_SIZE_GS_12
3480x000281F4 SQ_ALU_CONST_BUFFER_SIZE_GS_13
3490x000281F8 SQ_ALU_CONST_BUFFER_SIZE_GS_14
3500x000281FC SQ_ALU_CONST_BUFFER_SIZE_GS_15
3510x00028140 SQ_ALU_CONST_BUFFER_SIZE_PS_0
3520x00028144 SQ_ALU_CONST_BUFFER_SIZE_PS_1
3530x00028148 SQ_ALU_CONST_BUFFER_SIZE_PS_2
3540x0002814C SQ_ALU_CONST_BUFFER_SIZE_PS_3
3550x00028150 SQ_ALU_CONST_BUFFER_SIZE_PS_4
3560x00028154 SQ_ALU_CONST_BUFFER_SIZE_PS_5
3570x00028158 SQ_ALU_CONST_BUFFER_SIZE_PS_6
3580x0002815C SQ_ALU_CONST_BUFFER_SIZE_PS_7
3590x00028160 SQ_ALU_CONST_BUFFER_SIZE_PS_8
3600x00028164 SQ_ALU_CONST_BUFFER_SIZE_PS_9
3610x00028168 SQ_ALU_CONST_BUFFER_SIZE_PS_10
3620x0002816C SQ_ALU_CONST_BUFFER_SIZE_PS_11
3630x00028170 SQ_ALU_CONST_BUFFER_SIZE_PS_12
3640x00028174 SQ_ALU_CONST_BUFFER_SIZE_PS_13
3650x00028178 SQ_ALU_CONST_BUFFER_SIZE_PS_14
3660x0002817C SQ_ALU_CONST_BUFFER_SIZE_PS_15
3670x00028180 SQ_ALU_CONST_BUFFER_SIZE_VS_0
3680x00028184 SQ_ALU_CONST_BUFFER_SIZE_VS_1
3690x00028188 SQ_ALU_CONST_BUFFER_SIZE_VS_2
3700x0002818C SQ_ALU_CONST_BUFFER_SIZE_VS_3
3710x00028190 SQ_ALU_CONST_BUFFER_SIZE_VS_4
3720x00028194 SQ_ALU_CONST_BUFFER_SIZE_VS_5
3730x00028198 SQ_ALU_CONST_BUFFER_SIZE_VS_6
3740x0002819C SQ_ALU_CONST_BUFFER_SIZE_VS_7
3750x000281A0 SQ_ALU_CONST_BUFFER_SIZE_VS_8
3760x000281A4 SQ_ALU_CONST_BUFFER_SIZE_VS_9
3770x000281A8 SQ_ALU_CONST_BUFFER_SIZE_VS_10
3780x000281AC SQ_ALU_CONST_BUFFER_SIZE_VS_11
3790x000281B0 SQ_ALU_CONST_BUFFER_SIZE_VS_12
3800x000281B4 SQ_ALU_CONST_BUFFER_SIZE_VS_13
3810x000281B8 SQ_ALU_CONST_BUFFER_SIZE_VS_14
3820x000281BC SQ_ALU_CONST_BUFFER_SIZE_VS_15
3830x000289C0 SQ_ALU_CONST_CACHE_GS_0
3840x000289C4 SQ_ALU_CONST_CACHE_GS_1
3850x000289C8 SQ_ALU_CONST_CACHE_GS_2
3860x000289CC SQ_ALU_CONST_CACHE_GS_3
3870x000289D0 SQ_ALU_CONST_CACHE_GS_4
3880x000289D4 SQ_ALU_CONST_CACHE_GS_5
3890x000289D8 SQ_ALU_CONST_CACHE_GS_6
3900x000289DC SQ_ALU_CONST_CACHE_GS_7
3910x000289E0 SQ_ALU_CONST_CACHE_GS_8
3920x000289E4 SQ_ALU_CONST_CACHE_GS_9
3930x000289E8 SQ_ALU_CONST_CACHE_GS_10
3940x000289EC SQ_ALU_CONST_CACHE_GS_11
3950x000289F0 SQ_ALU_CONST_CACHE_GS_12
3960x000289F4 SQ_ALU_CONST_CACHE_GS_13
3970x000289F8 SQ_ALU_CONST_CACHE_GS_14
3980x000289FC SQ_ALU_CONST_CACHE_GS_15
3990x00028940 SQ_ALU_CONST_CACHE_PS_0
4000x00028944 SQ_ALU_CONST_CACHE_PS_1
4010x00028948 SQ_ALU_CONST_CACHE_PS_2
4020x0002894C SQ_ALU_CONST_CACHE_PS_3
4030x00028950 SQ_ALU_CONST_CACHE_PS_4
4040x00028954 SQ_ALU_CONST_CACHE_PS_5
4050x00028958 SQ_ALU_CONST_CACHE_PS_6
4060x0002895C SQ_ALU_CONST_CACHE_PS_7
4070x00028960 SQ_ALU_CONST_CACHE_PS_8
4080x00028964 SQ_ALU_CONST_CACHE_PS_9
4090x00028968 SQ_ALU_CONST_CACHE_PS_10
4100x0002896C SQ_ALU_CONST_CACHE_PS_11
4110x00028970 SQ_ALU_CONST_CACHE_PS_12
4120x00028974 SQ_ALU_CONST_CACHE_PS_13
4130x00028978 SQ_ALU_CONST_CACHE_PS_14
4140x0002897C SQ_ALU_CONST_CACHE_PS_15
4150x00028980 SQ_ALU_CONST_CACHE_VS_0
4160x00028984 SQ_ALU_CONST_CACHE_VS_1
4170x00028988 SQ_ALU_CONST_CACHE_VS_2
4180x0002898C SQ_ALU_CONST_CACHE_VS_3
4190x00028990 SQ_ALU_CONST_CACHE_VS_4
4200x00028994 SQ_ALU_CONST_CACHE_VS_5
4210x00028998 SQ_ALU_CONST_CACHE_VS_6
4220x0002899C SQ_ALU_CONST_CACHE_VS_7
4230x000289A0 SQ_ALU_CONST_CACHE_VS_8
4240x000289A4 SQ_ALU_CONST_CACHE_VS_9
4250x000289A8 SQ_ALU_CONST_CACHE_VS_10
4260x000289AC SQ_ALU_CONST_CACHE_VS_11
4270x000289B0 SQ_ALU_CONST_CACHE_VS_12
4280x000289B4 SQ_ALU_CONST_CACHE_VS_13
4290x000289B8 SQ_ALU_CONST_CACHE_VS_14
4300x000289BC SQ_ALU_CONST_CACHE_VS_15
4310x000288D8 SQ_PGM_CF_OFFSET_ES
4320x000288DC SQ_PGM_CF_OFFSET_FS
4330x000288D4 SQ_PGM_CF_OFFSET_GS
4340x000288CC SQ_PGM_CF_OFFSET_PS
4350x000288D0 SQ_PGM_CF_OFFSET_VS
4360x00028854 SQ_PGM_EXPORTS_PS
4370x00028890 SQ_PGM_RESOURCES_ES
4380x000288A4 SQ_PGM_RESOURCES_FS
4390x0002887C SQ_PGM_RESOURCES_GS
4400x00028850 SQ_PGM_RESOURCES_PS
4410x00028868 SQ_PGM_RESOURCES_VS
4420x00009100 SPI_CONFIG_CNTL
4430x0000913C SPI_CONFIG_CNTL_1
4440x000286DC SPI_FOG_CNTL
4450x000286E4 SPI_FOG_FUNC_BIAS
4460x000286E0 SPI_FOG_FUNC_SCALE
4470x000286D8 SPI_INPUT_Z
4480x000286D4 SPI_INTERP_CONTROL_0
4490x00028644 SPI_PS_INPUT_CNTL_0
4500x00028648 SPI_PS_INPUT_CNTL_1
4510x0002864C SPI_PS_INPUT_CNTL_2
4520x00028650 SPI_PS_INPUT_CNTL_3
4530x00028654 SPI_PS_INPUT_CNTL_4
4540x00028658 SPI_PS_INPUT_CNTL_5
4550x0002865C SPI_PS_INPUT_CNTL_6
4560x00028660 SPI_PS_INPUT_CNTL_7
4570x00028664 SPI_PS_INPUT_CNTL_8
4580x00028668 SPI_PS_INPUT_CNTL_9
4590x0002866C SPI_PS_INPUT_CNTL_10
4600x00028670 SPI_PS_INPUT_CNTL_11
4610x00028674 SPI_PS_INPUT_CNTL_12
4620x00028678 SPI_PS_INPUT_CNTL_13
4630x0002867C SPI_PS_INPUT_CNTL_14
4640x00028680 SPI_PS_INPUT_CNTL_15
4650x00028684 SPI_PS_INPUT_CNTL_16
4660x00028688 SPI_PS_INPUT_CNTL_17
4670x0002868C SPI_PS_INPUT_CNTL_18
4680x00028690 SPI_PS_INPUT_CNTL_19
4690x00028694 SPI_PS_INPUT_CNTL_20
4700x00028698 SPI_PS_INPUT_CNTL_21
4710x0002869C SPI_PS_INPUT_CNTL_22
4720x000286A0 SPI_PS_INPUT_CNTL_23
4730x000286A4 SPI_PS_INPUT_CNTL_24
4740x000286A8 SPI_PS_INPUT_CNTL_25
4750x000286AC SPI_PS_INPUT_CNTL_26
4760x000286B0 SPI_PS_INPUT_CNTL_27
4770x000286B4 SPI_PS_INPUT_CNTL_28
4780x000286B8 SPI_PS_INPUT_CNTL_29
4790x000286BC SPI_PS_INPUT_CNTL_30
4800x000286C0 SPI_PS_INPUT_CNTL_31
4810x000286CC SPI_PS_IN_CONTROL_0
4820x000286D0 SPI_PS_IN_CONTROL_1
4830x000286C4 SPI_VS_OUT_CONFIG
4840x00028614 SPI_VS_OUT_ID_0
4850x00028618 SPI_VS_OUT_ID_1
4860x0002861C SPI_VS_OUT_ID_2
4870x00028620 SPI_VS_OUT_ID_3
4880x00028624 SPI_VS_OUT_ID_4
4890x00028628 SPI_VS_OUT_ID_5
4900x0002862C SPI_VS_OUT_ID_6
4910x00028630 SPI_VS_OUT_ID_7
4920x00028634 SPI_VS_OUT_ID_8
4930x00028638 SPI_VS_OUT_ID_9
4940x00028438 SX_ALPHA_REF
4950x00028410 SX_ALPHA_TEST_CONTROL
4960x00028350 SX_MISC
4970x0000A020 SMX_DC_CTL0
4980x0000A024 SMX_DC_CTL1
4990x0000A028 SMX_DC_CTL2
5000x00009608 TC_CNTL
5010x00009604 TC_INVALIDATE
5020x00009490 TD_CNTL
5030x00009400 TD_FILTER4
5040x00009404 TD_FILTER4_1
5050x00009408 TD_FILTER4_2
5060x0000940C TD_FILTER4_3
5070x00009410 TD_FILTER4_4
5080x00009414 TD_FILTER4_5
5090x00009418 TD_FILTER4_6
5100x0000941C TD_FILTER4_7
5110x00009420 TD_FILTER4_8
5120x00009424 TD_FILTER4_9
5130x00009428 TD_FILTER4_10
5140x0000942C TD_FILTER4_11
5150x00009430 TD_FILTER4_12
5160x00009434 TD_FILTER4_13
5170x00009438 TD_FILTER4_14
5180x0000943C TD_FILTER4_15
5190x00009440 TD_FILTER4_16
5200x00009444 TD_FILTER4_17
5210x00009448 TD_FILTER4_18
5220x0000944C TD_FILTER4_19
5230x00009450 TD_FILTER4_20
5240x00009454 TD_FILTER4_21
5250x00009458 TD_FILTER4_22
5260x0000945C TD_FILTER4_23
5270x00009460 TD_FILTER4_24
5280x00009464 TD_FILTER4_25
5290x00009468 TD_FILTER4_26
5300x0000946C TD_FILTER4_27
5310x00009470 TD_FILTER4_28
5320x00009474 TD_FILTER4_29
5330x00009478 TD_FILTER4_30
5340x0000947C TD_FILTER4_31
5350x00009480 TD_FILTER4_32
5360x00009484 TD_FILTER4_33
5370x00009488 TD_FILTER4_34
5380x0000948C TD_FILTER4_35
5390x0000A80C TD_GS_SAMPLER0_BORDER_ALPHA
5400x0000A81C TD_GS_SAMPLER1_BORDER_ALPHA
5410x0000A82C TD_GS_SAMPLER2_BORDER_ALPHA
5420x0000A83C TD_GS_SAMPLER3_BORDER_ALPHA
5430x0000A84C TD_GS_SAMPLER4_BORDER_ALPHA
5440x0000A85C TD_GS_SAMPLER5_BORDER_ALPHA
5450x0000A86C TD_GS_SAMPLER6_BORDER_ALPHA
5460x0000A87C TD_GS_SAMPLER7_BORDER_ALPHA
5470x0000A88C TD_GS_SAMPLER8_BORDER_ALPHA
5480x0000A89C TD_GS_SAMPLER9_BORDER_ALPHA
5490x0000A8AC TD_GS_SAMPLER10_BORDER_ALPHA
5500x0000A8BC TD_GS_SAMPLER11_BORDER_ALPHA
5510x0000A8CC TD_GS_SAMPLER12_BORDER_ALPHA
5520x0000A8DC TD_GS_SAMPLER13_BORDER_ALPHA
5530x0000A8EC TD_GS_SAMPLER14_BORDER_ALPHA
5540x0000A8FC TD_GS_SAMPLER15_BORDER_ALPHA
5550x0000A90C TD_GS_SAMPLER16_BORDER_ALPHA
5560x0000A91C TD_GS_SAMPLER17_BORDER_ALPHA
5570x0000A808 TD_GS_SAMPLER0_BORDER_BLUE
5580x0000A818 TD_GS_SAMPLER1_BORDER_BLUE
5590x0000A828 TD_GS_SAMPLER2_BORDER_BLUE
5600x0000A838 TD_GS_SAMPLER3_BORDER_BLUE
5610x0000A848 TD_GS_SAMPLER4_BORDER_BLUE
5620x0000A858 TD_GS_SAMPLER5_BORDER_BLUE
5630x0000A868 TD_GS_SAMPLER6_BORDER_BLUE
5640x0000A878 TD_GS_SAMPLER7_BORDER_BLUE
5650x0000A888 TD_GS_SAMPLER8_BORDER_BLUE
5660x0000A898 TD_GS_SAMPLER9_BORDER_BLUE
5670x0000A8A8 TD_GS_SAMPLER10_BORDER_BLUE
5680x0000A8B8 TD_GS_SAMPLER11_BORDER_BLUE
5690x0000A8C8 TD_GS_SAMPLER12_BORDER_BLUE
5700x0000A8D8 TD_GS_SAMPLER13_BORDER_BLUE
5710x0000A8E8 TD_GS_SAMPLER14_BORDER_BLUE
5720x0000A8F8 TD_GS_SAMPLER15_BORDER_BLUE
5730x0000A908 TD_GS_SAMPLER16_BORDER_BLUE
5740x0000A918 TD_GS_SAMPLER17_BORDER_BLUE
5750x0000A804 TD_GS_SAMPLER0_BORDER_GREEN
5760x0000A814 TD_GS_SAMPLER1_BORDER_GREEN
5770x0000A824 TD_GS_SAMPLER2_BORDER_GREEN
5780x0000A834 TD_GS_SAMPLER3_BORDER_GREEN
5790x0000A844 TD_GS_SAMPLER4_BORDER_GREEN
5800x0000A854 TD_GS_SAMPLER5_BORDER_GREEN
5810x0000A864 TD_GS_SAMPLER6_BORDER_GREEN
5820x0000A874 TD_GS_SAMPLER7_BORDER_GREEN
5830x0000A884 TD_GS_SAMPLER8_BORDER_GREEN
5840x0000A894 TD_GS_SAMPLER9_BORDER_GREEN
5850x0000A8A4 TD_GS_SAMPLER10_BORDER_GREEN
5860x0000A8B4 TD_GS_SAMPLER11_BORDER_GREEN
5870x0000A8C4 TD_GS_SAMPLER12_BORDER_GREEN
5880x0000A8D4 TD_GS_SAMPLER13_BORDER_GREEN
5890x0000A8E4 TD_GS_SAMPLER14_BORDER_GREEN
5900x0000A8F4 TD_GS_SAMPLER15_BORDER_GREEN
5910x0000A904 TD_GS_SAMPLER16_BORDER_GREEN
5920x0000A914 TD_GS_SAMPLER17_BORDER_GREEN
5930x0000A800 TD_GS_SAMPLER0_BORDER_RED
5940x0000A810 TD_GS_SAMPLER1_BORDER_RED
5950x0000A820 TD_GS_SAMPLER2_BORDER_RED
5960x0000A830 TD_GS_SAMPLER3_BORDER_RED
5970x0000A840 TD_GS_SAMPLER4_BORDER_RED
5980x0000A850 TD_GS_SAMPLER5_BORDER_RED
5990x0000A860 TD_GS_SAMPLER6_BORDER_RED
6000x0000A870 TD_GS_SAMPLER7_BORDER_RED
6010x0000A880 TD_GS_SAMPLER8_BORDER_RED
6020x0000A890 TD_GS_SAMPLER9_BORDER_RED
6030x0000A8A0 TD_GS_SAMPLER10_BORDER_RED
6040x0000A8B0 TD_GS_SAMPLER11_BORDER_RED
6050x0000A8C0 TD_GS_SAMPLER12_BORDER_RED
6060x0000A8D0 TD_GS_SAMPLER13_BORDER_RED
6070x0000A8E0 TD_GS_SAMPLER14_BORDER_RED
6080x0000A8F0 TD_GS_SAMPLER15_BORDER_RED
6090x0000A900 TD_GS_SAMPLER16_BORDER_RED
6100x0000A910 TD_GS_SAMPLER17_BORDER_RED
6110x0000A40C TD_PS_SAMPLER0_BORDER_ALPHA
6120x0000A41C TD_PS_SAMPLER1_BORDER_ALPHA
6130x0000A42C TD_PS_SAMPLER2_BORDER_ALPHA
6140x0000A43C TD_PS_SAMPLER3_BORDER_ALPHA
6150x0000A44C TD_PS_SAMPLER4_BORDER_ALPHA
6160x0000A45C TD_PS_SAMPLER5_BORDER_ALPHA
6170x0000A46C TD_PS_SAMPLER6_BORDER_ALPHA
6180x0000A47C TD_PS_SAMPLER7_BORDER_ALPHA
6190x0000A48C TD_PS_SAMPLER8_BORDER_ALPHA
6200x0000A49C TD_PS_SAMPLER9_BORDER_ALPHA
6210x0000A4AC TD_PS_SAMPLER10_BORDER_ALPHA
6220x0000A4BC TD_PS_SAMPLER11_BORDER_ALPHA
6230x0000A4CC TD_PS_SAMPLER12_BORDER_ALPHA
6240x0000A4DC TD_PS_SAMPLER13_BORDER_ALPHA
6250x0000A4EC TD_PS_SAMPLER14_BORDER_ALPHA
6260x0000A4FC TD_PS_SAMPLER15_BORDER_ALPHA
6270x0000A50C TD_PS_SAMPLER16_BORDER_ALPHA
6280x0000A51C TD_PS_SAMPLER17_BORDER_ALPHA
6290x0000A408 TD_PS_SAMPLER0_BORDER_BLUE
6300x0000A418 TD_PS_SAMPLER1_BORDER_BLUE
6310x0000A428 TD_PS_SAMPLER2_BORDER_BLUE
6320x0000A438 TD_PS_SAMPLER3_BORDER_BLUE
6330x0000A448 TD_PS_SAMPLER4_BORDER_BLUE
6340x0000A458 TD_PS_SAMPLER5_BORDER_BLUE
6350x0000A468 TD_PS_SAMPLER6_BORDER_BLUE
6360x0000A478 TD_PS_SAMPLER7_BORDER_BLUE
6370x0000A488 TD_PS_SAMPLER8_BORDER_BLUE
6380x0000A498 TD_PS_SAMPLER9_BORDER_BLUE
6390x0000A4A8 TD_PS_SAMPLER10_BORDER_BLUE
6400x0000A4B8 TD_PS_SAMPLER11_BORDER_BLUE
6410x0000A4C8 TD_PS_SAMPLER12_BORDER_BLUE
6420x0000A4D8 TD_PS_SAMPLER13_BORDER_BLUE
6430x0000A4E8 TD_PS_SAMPLER14_BORDER_BLUE
6440x0000A4F8 TD_PS_SAMPLER15_BORDER_BLUE
6450x0000A508 TD_PS_SAMPLER16_BORDER_BLUE
6460x0000A518 TD_PS_SAMPLER17_BORDER_BLUE
6470x0000A404 TD_PS_SAMPLER0_BORDER_GREEN
6480x0000A414 TD_PS_SAMPLER1_BORDER_GREEN
6490x0000A424 TD_PS_SAMPLER2_BORDER_GREEN
6500x0000A434 TD_PS_SAMPLER3_BORDER_GREEN
6510x0000A444 TD_PS_SAMPLER4_BORDER_GREEN
6520x0000A454 TD_PS_SAMPLER5_BORDER_GREEN
6530x0000A464 TD_PS_SAMPLER6_BORDER_GREEN
6540x0000A474 TD_PS_SAMPLER7_BORDER_GREEN
6550x0000A484 TD_PS_SAMPLER8_BORDER_GREEN
6560x0000A494 TD_PS_SAMPLER9_BORDER_GREEN
6570x0000A4A4 TD_PS_SAMPLER10_BORDER_GREEN
6580x0000A4B4 TD_PS_SAMPLER11_BORDER_GREEN
6590x0000A4C4 TD_PS_SAMPLER12_BORDER_GREEN
6600x0000A4D4 TD_PS_SAMPLER13_BORDER_GREEN
6610x0000A4E4 TD_PS_SAMPLER14_BORDER_GREEN
6620x0000A4F4 TD_PS_SAMPLER15_BORDER_GREEN
6630x0000A504 TD_PS_SAMPLER16_BORDER_GREEN
6640x0000A514 TD_PS_SAMPLER17_BORDER_GREEN
6650x0000A400 TD_PS_SAMPLER0_BORDER_RED
6660x0000A410 TD_PS_SAMPLER1_BORDER_RED
6670x0000A420 TD_PS_SAMPLER2_BORDER_RED
6680x0000A430 TD_PS_SAMPLER3_BORDER_RED
6690x0000A440 TD_PS_SAMPLER4_BORDER_RED
6700x0000A450 TD_PS_SAMPLER5_BORDER_RED
6710x0000A460 TD_PS_SAMPLER6_BORDER_RED
6720x0000A470 TD_PS_SAMPLER7_BORDER_RED
6730x0000A480 TD_PS_SAMPLER8_BORDER_RED
6740x0000A490 TD_PS_SAMPLER9_BORDER_RED
6750x0000A4A0 TD_PS_SAMPLER10_BORDER_RED
6760x0000A4B0 TD_PS_SAMPLER11_BORDER_RED
6770x0000A4C0 TD_PS_SAMPLER12_BORDER_RED
6780x0000A4D0 TD_PS_SAMPLER13_BORDER_RED
6790x0000A4E0 TD_PS_SAMPLER14_BORDER_RED
6800x0000A4F0 TD_PS_SAMPLER15_BORDER_RED
6810x0000A500 TD_PS_SAMPLER16_BORDER_RED
6820x0000A510 TD_PS_SAMPLER17_BORDER_RED
6830x0000AA00 TD_PS_SAMPLER0_CLEARTYPE_KERNEL
6840x0000AA04 TD_PS_SAMPLER1_CLEARTYPE_KERNEL
6850x0000AA08 TD_PS_SAMPLER2_CLEARTYPE_KERNEL
6860x0000AA0C TD_PS_SAMPLER3_CLEARTYPE_KERNEL
6870x0000AA10 TD_PS_SAMPLER4_CLEARTYPE_KERNEL
6880x0000AA14 TD_PS_SAMPLER5_CLEARTYPE_KERNEL
6890x0000AA18 TD_PS_SAMPLER6_CLEARTYPE_KERNEL
6900x0000AA1C TD_PS_SAMPLER7_CLEARTYPE_KERNEL
6910x0000AA20 TD_PS_SAMPLER8_CLEARTYPE_KERNEL
6920x0000AA24 TD_PS_SAMPLER9_CLEARTYPE_KERNEL
6930x0000AA28 TD_PS_SAMPLER10_CLEARTYPE_KERNEL
6940x0000AA2C TD_PS_SAMPLER11_CLEARTYPE_KERNEL
6950x0000AA30 TD_PS_SAMPLER12_CLEARTYPE_KERNEL
6960x0000AA34 TD_PS_SAMPLER13_CLEARTYPE_KERNEL
6970x0000AA38 TD_PS_SAMPLER14_CLEARTYPE_KERNEL
6980x0000AA3C TD_PS_SAMPLER15_CLEARTYPE_KERNEL
6990x0000AA40 TD_PS_SAMPLER16_CLEARTYPE_KERNEL
7000x0000AA44 TD_PS_SAMPLER17_CLEARTYPE_KERNEL
7010x0000A60C TD_VS_SAMPLER0_BORDER_ALPHA
7020x0000A61C TD_VS_SAMPLER1_BORDER_ALPHA
7030x0000A62C TD_VS_SAMPLER2_BORDER_ALPHA
7040x0000A63C TD_VS_SAMPLER3_BORDER_ALPHA
7050x0000A64C TD_VS_SAMPLER4_BORDER_ALPHA
7060x0000A65C TD_VS_SAMPLER5_BORDER_ALPHA
7070x0000A66C TD_VS_SAMPLER6_BORDER_ALPHA
7080x0000A67C TD_VS_SAMPLER7_BORDER_ALPHA
7090x0000A68C TD_VS_SAMPLER8_BORDER_ALPHA
7100x0000A69C TD_VS_SAMPLER9_BORDER_ALPHA
7110x0000A6AC TD_VS_SAMPLER10_BORDER_ALPHA
7120x0000A6BC TD_VS_SAMPLER11_BORDER_ALPHA
7130x0000A6CC TD_VS_SAMPLER12_BORDER_ALPHA
7140x0000A6DC TD_VS_SAMPLER13_BORDER_ALPHA
7150x0000A6EC TD_VS_SAMPLER14_BORDER_ALPHA
7160x0000A6FC TD_VS_SAMPLER15_BORDER_ALPHA
7170x0000A70C TD_VS_SAMPLER16_BORDER_ALPHA
7180x0000A71C TD_VS_SAMPLER17_BORDER_ALPHA
7190x0000A608 TD_VS_SAMPLER0_BORDER_BLUE
7200x0000A618 TD_VS_SAMPLER1_BORDER_BLUE
7210x0000A628 TD_VS_SAMPLER2_BORDER_BLUE
7220x0000A638 TD_VS_SAMPLER3_BORDER_BLUE
7230x0000A648 TD_VS_SAMPLER4_BORDER_BLUE
7240x0000A658 TD_VS_SAMPLER5_BORDER_BLUE
7250x0000A668 TD_VS_SAMPLER6_BORDER_BLUE
7260x0000A678 TD_VS_SAMPLER7_BORDER_BLUE
7270x0000A688 TD_VS_SAMPLER8_BORDER_BLUE
7280x0000A698 TD_VS_SAMPLER9_BORDER_BLUE
7290x0000A6A8 TD_VS_SAMPLER10_BORDER_BLUE
7300x0000A6B8 TD_VS_SAMPLER11_BORDER_BLUE
7310x0000A6C8 TD_VS_SAMPLER12_BORDER_BLUE
7320x0000A6D8 TD_VS_SAMPLER13_BORDER_BLUE
7330x0000A6E8 TD_VS_SAMPLER14_BORDER_BLUE
7340x0000A6F8 TD_VS_SAMPLER15_BORDER_BLUE
7350x0000A708 TD_VS_SAMPLER16_BORDER_BLUE
7360x0000A718 TD_VS_SAMPLER17_BORDER_BLUE
7370x0000A604 TD_VS_SAMPLER0_BORDER_GREEN
7380x0000A614 TD_VS_SAMPLER1_BORDER_GREEN
7390x0000A624 TD_VS_SAMPLER2_BORDER_GREEN
7400x0000A634 TD_VS_SAMPLER3_BORDER_GREEN
7410x0000A644 TD_VS_SAMPLER4_BORDER_GREEN
7420x0000A654 TD_VS_SAMPLER5_BORDER_GREEN
7430x0000A664 TD_VS_SAMPLER6_BORDER_GREEN
7440x0000A674 TD_VS_SAMPLER7_BORDER_GREEN
7450x0000A684 TD_VS_SAMPLER8_BORDER_GREEN
7460x0000A694 TD_VS_SAMPLER9_BORDER_GREEN
7470x0000A6A4 TD_VS_SAMPLER10_BORDER_GREEN
7480x0000A6B4 TD_VS_SAMPLER11_BORDER_GREEN
7490x0000A6C4 TD_VS_SAMPLER12_BORDER_GREEN
7500x0000A6D4 TD_VS_SAMPLER13_BORDER_GREEN
7510x0000A6E4 TD_VS_SAMPLER14_BORDER_GREEN
7520x0000A6F4 TD_VS_SAMPLER15_BORDER_GREEN
7530x0000A704 TD_VS_SAMPLER16_BORDER_GREEN
7540x0000A714 TD_VS_SAMPLER17_BORDER_GREEN
7550x0000A600 TD_VS_SAMPLER0_BORDER_RED
7560x0000A610 TD_VS_SAMPLER1_BORDER_RED
7570x0000A620 TD_VS_SAMPLER2_BORDER_RED
7580x0000A630 TD_VS_SAMPLER3_BORDER_RED
7590x0000A640 TD_VS_SAMPLER4_BORDER_RED
7600x0000A650 TD_VS_SAMPLER5_BORDER_RED
7610x0000A660 TD_VS_SAMPLER6_BORDER_RED
7620x0000A670 TD_VS_SAMPLER7_BORDER_RED
7630x0000A680 TD_VS_SAMPLER8_BORDER_RED
7640x0000A690 TD_VS_SAMPLER9_BORDER_RED
7650x0000A6A0 TD_VS_SAMPLER10_BORDER_RED
7660x0000A6B0 TD_VS_SAMPLER11_BORDER_RED
7670x0000A6C0 TD_VS_SAMPLER12_BORDER_RED
7680x0000A6D0 TD_VS_SAMPLER13_BORDER_RED
7690x0000A6E0 TD_VS_SAMPLER14_BORDER_RED
7700x0000A6F0 TD_VS_SAMPLER15_BORDER_RED
7710x0000A700 TD_VS_SAMPLER16_BORDER_RED
7720x0000A710 TD_VS_SAMPLER17_BORDER_RED
7730x00009508 TA_CNTL_AUX
7740x0002802C DB_DEPTH_CLEAR
7750x00028D24 DB_HTILE_SURFACE
7760x00028D34 DB_PREFETCH_LIMIT
7770x00028D30 DB_PRELOAD_CONTROL
7780x00028D0C DB_RENDER_CONTROL
7790x00028D10 DB_RENDER_OVERRIDE
7800x0002880C DB_SHADER_CONTROL
7810x00028D2C DB_SRESULTS_COMPARE_STATE1
7820x00028430 DB_STENCILREFMASK
7830x00028434 DB_STENCILREFMASK_BF
7840x00028028 DB_STENCIL_CLEAR
7850x00028780 CB_BLEND0_CONTROL
7860x00028784 CB_BLEND1_CONTROL
7870x00028788 CB_BLEND2_CONTROL
7880x0002878C CB_BLEND3_CONTROL
7890x00028790 CB_BLEND4_CONTROL
7900x00028794 CB_BLEND5_CONTROL
7910x00028798 CB_BLEND6_CONTROL
7920x0002879C CB_BLEND7_CONTROL
7930x00028804 CB_BLEND_CONTROL
7940x00028420 CB_BLEND_ALPHA
7950x0002841C CB_BLEND_BLUE
7960x00028418 CB_BLEND_GREEN
7970x00028414 CB_BLEND_RED
7980x0002812C CB_CLEAR_ALPHA
7990x00028128 CB_CLEAR_BLUE
8000x00028124 CB_CLEAR_GREEN
8010x00028120 CB_CLEAR_RED
8020x00028C30 CB_CLRCMP_CONTROL
8030x00028C38 CB_CLRCMP_DST
8040x00028C3C CB_CLRCMP_MSK
8050x00028C34 CB_CLRCMP_SRC
8060x00028100 CB_COLOR0_MASK
8070x00028104 CB_COLOR1_MASK
8080x00028108 CB_COLOR2_MASK
8090x0002810C CB_COLOR3_MASK
8100x00028110 CB_COLOR4_MASK
8110x00028114 CB_COLOR5_MASK
8120x00028118 CB_COLOR6_MASK
8130x0002811C CB_COLOR7_MASK
8140x00028080 CB_COLOR0_VIEW
8150x00028084 CB_COLOR1_VIEW
8160x00028088 CB_COLOR2_VIEW
8170x0002808C CB_COLOR3_VIEW
8180x00028090 CB_COLOR4_VIEW
8190x00028094 CB_COLOR5_VIEW
8200x00028098 CB_COLOR6_VIEW
8210x0002809C CB_COLOR7_VIEW
8220x00028808 CB_COLOR_CONTROL
8230x0002842C CB_FOG_BLUE
8240x00028428 CB_FOG_GREEN
8250x00028424 CB_FOG_RED
8260x00008040 WAIT_UNTIL
8270x00008950 CC_GC_SHADER_PIPE_CONFIG
8280x00008954 GC_USER_SHADER_PIPE_CONFIG
8290x00009714 VC_ENHANCE
8300x00009830 DB_DEBUG
8310x00009838 DB_WATERMARKS
8320x00028D28 DB_SRESULTS_COMPARE_STATE0
8330x00028D44 DB_ALPHA_TO_MASK
8340x00009504 TA_CNTL
8350x00009700 VC_CNTL
8360x00009718 VC_CONFIG
8370x0000A02C SMX_DC_MC_INTF_CTL
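The new reg_srcs/r600 file is a two-column "offset name" allow-list of registers that user-space command streams may touch. The radeon build feeds such reg_srcs files through a generator (mkregtable) that emits a bitmap for the command-stream checker; the sketch below shows the idea of that lookup, with illustrative names and an assumed range constant, not the kernel's generated code.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_MAX_REG 0x40000u             /* assumed upper bound */
#define BM_WORDS (EXAMPLE_MAX_REG / 4 / 32)  /* one bit per dword register */

static uint32_t reg_safe_bm[BM_WORDS];

static void allow_reg(uint32_t offset)       /* one call per table line */
{
	uint32_t slot = offset >> 2;

	reg_safe_bm[slot >> 5] |= 1u << (slot & 31);
}

static bool reg_is_safe(uint32_t offset)     /* checker-side lookup */
{
	uint32_t slot = offset >> 2;

	if (offset >= EXAMPLE_MAX_REG || (offset & 3))
		return false;                /* misaligned or out of range */
	return reg_safe_bm[slot >> 5] & (1u << (slot & 31));
}

int main(void)
{
	allow_reg(0x000287A0);               /* R7xx_CB_SHADER_CONTROL */
	printf("0x287A0 safe: %d, 0x287A4 safe: %d\n",
	       reg_is_safe(0x000287A0), reg_is_safe(0x000287A4));
	return 0;
}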
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index 287fcebfb4e6..626d51891ee9 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -113,6 +113,7 @@ int rs400_gart_enable(struct radeon_device *rdev)
113 uint32_t size_reg; 113 uint32_t size_reg;
114 uint32_t tmp; 114 uint32_t tmp;
115 115
116 radeon_gart_restore(rdev);
116 tmp = RREG32_MC(RS690_AIC_CTRL_SCRATCH); 117 tmp = RREG32_MC(RS690_AIC_CTRL_SCRATCH);
117 tmp |= RS690_DIS_OUT_OF_PCI_GART_ACCESS; 118 tmp |= RS690_DIS_OUT_OF_PCI_GART_ACCESS;
118 WREG32_MC(RS690_AIC_CTRL_SCRATCH, tmp); 119 WREG32_MC(RS690_AIC_CTRL_SCRATCH, tmp);
@@ -150,9 +151,8 @@ int rs400_gart_enable(struct radeon_device *rdev)
150 WREG32(RADEON_AGP_BASE, 0xFFFFFFFF); 151 WREG32(RADEON_AGP_BASE, 0xFFFFFFFF);
151 WREG32(RS480_AGP_BASE_2, 0); 152 WREG32(RS480_AGP_BASE_2, 0);
152 } 153 }
153 tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1; 154 tmp = REG_SET(RS690_MC_AGP_TOP, rdev->mc.gtt_end >> 16);
154 tmp = REG_SET(RS690_MC_AGP_TOP, tmp >> 16); 155 tmp |= REG_SET(RS690_MC_AGP_START, rdev->mc.gtt_start >> 16);
155 tmp |= REG_SET(RS690_MC_AGP_START, rdev->mc.gtt_location >> 16);
156 if ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740)) { 156 if ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740)) {
157 WREG32_MC(RS690_MCCFG_AGP_LOCATION, tmp); 157 WREG32_MC(RS690_MCCFG_AGP_LOCATION, tmp);
158 tmp = RREG32(RADEON_BUS_CNTL) & ~RS600_BUS_MASTER_DIS; 158 tmp = RREG32(RADEON_BUS_CNTL) & ~RS600_BUS_MASTER_DIS;
@@ -251,14 +251,19 @@ void rs400_gpu_init(struct radeon_device *rdev)
251 } 251 }
252} 252}
253 253
254void rs400_vram_info(struct radeon_device *rdev) 254void rs400_mc_init(struct radeon_device *rdev)
255{ 255{
256 u64 base;
257
256 rs400_gart_adjust_size(rdev); 258 rs400_gart_adjust_size(rdev);
259 rdev->mc.igp_sideport_enabled = radeon_combios_sideport_present(rdev);
257 /* DDR for all card after R300 & IGP */ 260 /* DDR for all card after R300 & IGP */
258 rdev->mc.vram_is_ddr = true; 261 rdev->mc.vram_is_ddr = true;
259 rdev->mc.vram_width = 128; 262 rdev->mc.vram_width = 128;
260
261 r100_vram_init_sizes(rdev); 263 r100_vram_init_sizes(rdev);
264 base = (RREG32(RADEON_NB_TOM) & 0xffff) << 16;
265 radeon_vram_location(rdev, &rdev->mc, base);
266 radeon_gtt_location(rdev, &rdev->mc);
262} 267}
263 268
264uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg) 269uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg)
@@ -362,22 +367,6 @@ static int rs400_debugfs_pcie_gart_info_init(struct radeon_device *rdev)
362#endif 367#endif
363} 368}
364 369
365static int rs400_mc_init(struct radeon_device *rdev)
366{
367 int r;
368 u32 tmp;
369
370 /* Setup GPU memory space */
371 tmp = RREG32(R_00015C_NB_TOM);
372 rdev->mc.vram_location = G_00015C_MC_FB_START(tmp) << 16;
373 rdev->mc.gtt_location = 0xFFFFFFFFUL;
374 r = radeon_mc_setup(rdev);
375 rdev->mc.igp_sideport_enabled = radeon_combios_sideport_present(rdev);
376 if (r)
377 return r;
378 return 0;
379}
380
381void rs400_mc_program(struct radeon_device *rdev) 370void rs400_mc_program(struct radeon_device *rdev)
382{ 371{
383 struct r100_mc_save save; 372 struct r100_mc_save save;
@@ -516,12 +505,8 @@ int rs400_init(struct radeon_device *rdev)
516 radeon_get_clock_info(rdev->ddev); 505 radeon_get_clock_info(rdev->ddev);
517 /* Initialize power management */ 506 /* Initialize power management */
518 radeon_pm_init(rdev); 507 radeon_pm_init(rdev);
519 /* Get vram informations */ 508 /* initialize memory controller */
520 rs400_vram_info(rdev); 509 rs400_mc_init(rdev);
521 /* Initialize memory controller (also test AGP) */
522 r = rs400_mc_init(rdev);
523 if (r)
524 return r;
525 /* Fence driver */ 510 /* Fence driver */
526 r = radeon_fence_driver_init(rdev); 511 r = radeon_fence_driver_init(rdev);
527 if (r) 512 if (r)
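The rs400.c hunks fold rs400_vram_info() and the old rs400_mc_init() into a single rs400_mc_init() that reads the on-die framebuffer base from RADEON_NB_TOM and delegates placement to the shared radeon_vram_location()/radeon_gtt_location() helpers, replacing the per-ASIC radeon_mc_setup() path. A hedged sketch of the placement policy those helpers centralize (not their actual code):

#include <stdint.h>

struct mc {
	uint64_t vram_start, vram_end, vram_size;
	uint64_t gtt_start, gtt_end, gtt_size;
};

/* Assumption: VRAM sits at the base the hardware reports... */
static void example_vram_location(struct mc *mc, uint64_t base)
{
	mc->vram_start = base;
	mc->vram_end = base + mc->vram_size - 1;
}

/* ...and the GTT aperture is placed after it, aligned to its own size
 * (assumes a power-of-two GTT size). */
static void example_gtt_location(struct mc *mc)
{
	uint64_t start = mc->vram_end + 1;

	start = (start + mc->gtt_size - 1) & ~(mc->gtt_size - 1);
	mc->gtt_start = start;
	mc->gtt_end = start + mc->gtt_size - 1;
}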
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index c3818562a13e..47f046b78c6b 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -45,23 +45,6 @@
45void rs600_gpu_init(struct radeon_device *rdev); 45void rs600_gpu_init(struct radeon_device *rdev);
46int rs600_mc_wait_for_idle(struct radeon_device *rdev); 46int rs600_mc_wait_for_idle(struct radeon_device *rdev);
47 47
48int rs600_mc_init(struct radeon_device *rdev)
49{
50 /* read back the MC value from the hw */
51 int r;
52 u32 tmp;
53
54 /* Setup GPU memory space */
55 tmp = RREG32_MC(R_000004_MC_FB_LOCATION);
56 rdev->mc.vram_location = G_000004_MC_FB_START(tmp) << 16;
57 rdev->mc.gtt_location = 0xffffffffUL;
58 r = radeon_mc_setup(rdev);
59 rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
60 if (r)
61 return r;
62 return 0;
63}
64
65/* hpd for digital panel detect/disconnect */ 48/* hpd for digital panel detect/disconnect */
66bool rs600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd) 49bool rs600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
67{ 50{
@@ -213,6 +196,7 @@ int rs600_gart_enable(struct radeon_device *rdev)
213 r = radeon_gart_table_vram_pin(rdev); 196 r = radeon_gart_table_vram_pin(rdev);
214 if (r) 197 if (r)
215 return r; 198 return r;
199 radeon_gart_restore(rdev);
216 /* Enable bus master */ 200 /* Enable bus master */
217 tmp = RREG32(R_00004C_BUS_CNTL) & C_00004C_BUS_MASTER_DIS; 201 tmp = RREG32(R_00004C_BUS_CNTL) & C_00004C_BUS_MASTER_DIS;
218 WREG32(R_00004C_BUS_CNTL, tmp); 202 WREG32(R_00004C_BUS_CNTL, tmp);
@@ -406,10 +390,14 @@ int rs600_irq_process(struct radeon_device *rdev)
406 if (G_000044_SW_INT(status)) 390 if (G_000044_SW_INT(status))
407 radeon_fence_process(rdev); 391 radeon_fence_process(rdev);
408 /* Vertical blank interrupts */ 392 /* Vertical blank interrupts */
409 if (G_007EDC_LB_D1_VBLANK_INTERRUPT(r500_disp_int)) 393 if (G_007EDC_LB_D1_VBLANK_INTERRUPT(r500_disp_int)) {
410 drm_handle_vblank(rdev->ddev, 0); 394 drm_handle_vblank(rdev->ddev, 0);
411 if (G_007EDC_LB_D2_VBLANK_INTERRUPT(r500_disp_int)) 395 wake_up(&rdev->irq.vblank_queue);
396 }
397 if (G_007EDC_LB_D2_VBLANK_INTERRUPT(r500_disp_int)) {
412 drm_handle_vblank(rdev->ddev, 1); 398 drm_handle_vblank(rdev->ddev, 1);
399 wake_up(&rdev->irq.vblank_queue);
400 }
413 if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(r500_disp_int)) { 401 if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(r500_disp_int)) {
414 queue_hotplug = true; 402 queue_hotplug = true;
415 DRM_DEBUG("HPD1\n"); 403 DRM_DEBUG("HPD1\n");
@@ -470,22 +458,22 @@ void rs600_gpu_init(struct radeon_device *rdev)
470 dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n"); 458 dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n");
471} 459}
472 460
473void rs600_vram_info(struct radeon_device *rdev) 461void rs600_mc_init(struct radeon_device *rdev)
474{ 462{
463 u64 base;
464
465 rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
466 rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
475 rdev->mc.vram_is_ddr = true; 467 rdev->mc.vram_is_ddr = true;
476 rdev->mc.vram_width = 128; 468 rdev->mc.vram_width = 128;
477
478 rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE); 469 rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
479 rdev->mc.mc_vram_size = rdev->mc.real_vram_size; 470 rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
480 471 rdev->mc.visible_vram_size = rdev->mc.aper_size;
481 rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0); 472 rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
482 rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0); 473 base = RREG32_MC(R_000004_MC_FB_LOCATION);
483 474 base = G_000004_MC_FB_START(base) << 16;
484 if (rdev->mc.mc_vram_size > rdev->mc.aper_size) 475 radeon_vram_location(rdev, &rdev->mc, base);
485 rdev->mc.mc_vram_size = rdev->mc.aper_size; 476 radeon_gtt_location(rdev, &rdev->mc);
486
487 if (rdev->mc.real_vram_size > rdev->mc.aper_size)
488 rdev->mc.real_vram_size = rdev->mc.aper_size;
489} 477}
490 478
491void rs600_bandwidth_update(struct radeon_device *rdev) 479void rs600_bandwidth_update(struct radeon_device *rdev)
@@ -661,12 +649,8 @@ int rs600_init(struct radeon_device *rdev)
661 radeon_get_clock_info(rdev->ddev); 649 radeon_get_clock_info(rdev->ddev);
662 /* Initialize power management */ 650 /* Initialize power management */
663 radeon_pm_init(rdev); 651 radeon_pm_init(rdev);
664 /* Get vram informations */ 652 /* initialize memory controller */
665 rs600_vram_info(rdev); 653 rs600_mc_init(rdev);
666 /* Initialize memory controller (also test AGP) */
667 r = rs600_mc_init(rdev);
668 if (r)
669 return r;
670 rs600_debugfs(rdev); 654 rs600_debugfs(rdev);
671 /* Fence driver */ 655 /* Fence driver */
672 r = radeon_fence_driver_init(rdev); 656 r = radeon_fence_driver_init(rdev);
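This hunk, like the matching rs690 and rv515 hunks below, folds each ASIC's split vram_info()/mc_init() pair into a single *_mc_init() that reads the frame-buffer base out of the MC registers and delegates placement to the shared radeon_vram_location()/radeon_gtt_location() helpers. The standalone sketch below illustrates the placement arithmetic such helpers perform; the struct layout, the power-of-two GTT assumption, and the example values are simplifications for illustration, not the kernel's actual implementation.

#include <stdint.h>
#include <stdio.h>

struct mc {
        uint64_t vram_start, vram_end;  /* VRAM aperture in the GPU address space */
        uint64_t gtt_start, gtt_end;    /* GTT aperture in the GPU address space */
        uint64_t mc_vram_size;          /* bytes of VRAM behind the MC */
        uint64_t gtt_size;              /* bytes of GTT, assumed a power of two */
};

/* Keep VRAM where the MC already maps it instead of forcing base 0. */
static void vram_location(struct mc *mc, uint64_t base)
{
        mc->vram_start = base;
        mc->vram_end = base + mc->mc_vram_size - 1;
}

/* Place the GTT after VRAM, rounded up to its own (power-of-two) size. */
static void gtt_location(struct mc *mc)
{
        uint64_t base = (mc->vram_end + mc->gtt_size) & ~(mc->gtt_size - 1);

        mc->gtt_start = base;
        mc->gtt_end = base + mc->gtt_size - 1;
}

int main(void)
{
        struct mc mc = { .mc_vram_size = 256ULL << 20, .gtt_size = 512ULL << 20 };

        /* base as derived in the hunk above: G_000004_MC_FB_START(tmp) << 16 */
        vram_location(&mc, 0xC0000000ULL);
        gtt_location(&mc);
        printf("VRAM %#llx-%#llx, GTT %#llx-%#llx\n",
               (unsigned long long)mc.vram_start, (unsigned long long)mc.vram_end,
               (unsigned long long)mc.gtt_start, (unsigned long long)mc.gtt_end);
        return 0;
}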
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index 06e2771aee5a..83b9174f76f2 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -129,27 +129,21 @@ void rs690_pm_info(struct radeon_device *rdev)
129 rdev->pm.sideport_bandwidth.full = rfixed_div(rdev->pm.sideport_bandwidth, tmp); 129 rdev->pm.sideport_bandwidth.full = rfixed_div(rdev->pm.sideport_bandwidth, tmp);
130} 130}
131 131
132void rs690_vram_info(struct radeon_device *rdev) 132void rs690_mc_init(struct radeon_device *rdev)
133{ 133{
134 fixed20_12 a; 134 fixed20_12 a;
135 u64 base;
135 136
136 rs400_gart_adjust_size(rdev); 137 rs400_gart_adjust_size(rdev);
137
138 rdev->mc.vram_is_ddr = true; 138 rdev->mc.vram_is_ddr = true;
139 rdev->mc.vram_width = 128; 139 rdev->mc.vram_width = 128;
140
141 rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE); 140 rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
142 rdev->mc.mc_vram_size = rdev->mc.real_vram_size; 141 rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
143
144 rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0); 142 rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
145 rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0); 143 rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
146 144 rdev->mc.visible_vram_size = rdev->mc.aper_size;
147 if (rdev->mc.mc_vram_size > rdev->mc.aper_size) 145 base = RREG32_MC(R_000100_MCCFG_FB_LOCATION);
148 rdev->mc.mc_vram_size = rdev->mc.aper_size; 146 base = G_000100_MC_FB_START(base) << 16;
149
150 if (rdev->mc.real_vram_size > rdev->mc.aper_size)
151 rdev->mc.real_vram_size = rdev->mc.aper_size;
152
153 rs690_pm_info(rdev); 147 rs690_pm_info(rdev);
154 /* FIXME: we should enforce default clock in case GPU is not in 148 /* FIXME: we should enforce default clock in case GPU is not in
155 * default setup 149 * default setup
@@ -160,22 +154,9 @@ void rs690_vram_info(struct radeon_device *rdev)
160 a.full = rfixed_const(16); 154 a.full = rfixed_const(16);
161 /* core_bandwidth = sclk(Mhz) * 16 */ 155 /* core_bandwidth = sclk(Mhz) * 16 */
162 rdev->pm.core_bandwidth.full = rfixed_div(rdev->pm.sclk, a); 156 rdev->pm.core_bandwidth.full = rfixed_div(rdev->pm.sclk, a);
163}
164
165static int rs690_mc_init(struct radeon_device *rdev)
166{
167 int r;
168 u32 tmp;
169
170 /* Setup GPU memory space */
171 tmp = RREG32_MC(R_000100_MCCFG_FB_LOCATION);
172 rdev->mc.vram_location = G_000100_MC_FB_START(tmp) << 16;
173 rdev->mc.gtt_location = 0xFFFFFFFFUL;
174 r = radeon_mc_setup(rdev);
175 rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev); 157 rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
176 if (r) 158 radeon_vram_location(rdev, &rdev->mc, base);
177 return r; 159 radeon_gtt_location(rdev, &rdev->mc);
178 return 0;
179} 160}
180 161
181void rs690_line_buffer_adjust(struct radeon_device *rdev, 162void rs690_line_buffer_adjust(struct radeon_device *rdev,
@@ -728,12 +709,8 @@ int rs690_init(struct radeon_device *rdev)
728 radeon_get_clock_info(rdev->ddev); 709 radeon_get_clock_info(rdev->ddev);
729 /* Initialize power management */ 710 /* Initialize power management */
730 radeon_pm_init(rdev); 711 radeon_pm_init(rdev);
731 /* Get vram informations */ 712 /* initialize memory controller */
732 rs690_vram_info(rdev); 713 rs690_mc_init(rdev);
733 /* Initialize memory controller (also test AGP) */
734 r = rs690_mc_init(rdev);
735 if (r)
736 return r;
737 rv515_debugfs(rdev); 714 rv515_debugfs(rdev);
738 /* Fence driver */ 715 /* Fence driver */
739 r = radeon_fence_driver_init(rdev); 716 r = radeon_fence_driver_init(rdev);
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index 0e1e6b8632b8..bea747da123f 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -277,13 +277,15 @@ static void rv515_vram_get_type(struct radeon_device *rdev)
277 } 277 }
278} 278}
279 279
280void rv515_vram_info(struct radeon_device *rdev) 280void rv515_mc_init(struct radeon_device *rdev)
281{ 281{
282 fixed20_12 a; 282 fixed20_12 a;
283 283
284 rv515_vram_get_type(rdev); 284 rv515_vram_get_type(rdev);
285
286 r100_vram_init_sizes(rdev); 285 r100_vram_init_sizes(rdev);
286 radeon_vram_location(rdev, &rdev->mc, 0);
287 if (!(rdev->flags & RADEON_IS_AGP))
288 radeon_gtt_location(rdev, &rdev->mc);
287 /* FIXME: we should enforce default clock in case GPU is not in 289 /* FIXME: we should enforce default clock in case GPU is not in
288 * default setup 290 * default setup
289 */ 291 */
@@ -587,12 +589,15 @@ int rv515_init(struct radeon_device *rdev)
587 radeon_get_clock_info(rdev->ddev); 589 radeon_get_clock_info(rdev->ddev);
588 /* Initialize power management */ 590 /* Initialize power management */
589 radeon_pm_init(rdev); 591 radeon_pm_init(rdev);
590 /* Get vram informations */ 592 /* initialize AGP */
591 rv515_vram_info(rdev); 593 if (rdev->flags & RADEON_IS_AGP) {
592 /* Initialize memory controller (also test AGP) */ 594 r = radeon_agp_init(rdev);
593 r = r420_mc_init(rdev); 595 if (r) {
594 if (r) 596 radeon_agp_disable(rdev);
595 return r; 597 }
598 }
599 /* initialize memory controller */
600 rv515_mc_init(rdev);
596 rv515_debugfs(rdev); 601 rv515_debugfs(rdev);
597 /* Fence driver */ 602 /* Fence driver */
598 r = radeon_fence_driver_init(rdev); 603 r = radeon_fence_driver_init(rdev);
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 03021674d097..37887dee12af 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -56,6 +56,7 @@ int rv770_pcie_gart_enable(struct radeon_device *rdev)
56 r = radeon_gart_table_vram_pin(rdev); 56 r = radeon_gart_table_vram_pin(rdev);
57 if (r) 57 if (r)
58 return r; 58 return r;
59 radeon_gart_restore(rdev);
59 /* Setup L2 cache */ 60 /* Setup L2 cache */
60 WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING | 61 WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
61 ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE | 62 ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
@@ -273,9 +274,10 @@ static int rv770_cp_load_microcode(struct radeon_device *rdev)
273/* 274/*
274 * Core functions 275 * Core functions
275 */ 276 */
276static u32 r700_get_tile_pipe_to_backend_map(u32 num_tile_pipes, 277static u32 r700_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
277 u32 num_backends, 278 u32 num_tile_pipes,
278 u32 backend_disable_mask) 279 u32 num_backends,
280 u32 backend_disable_mask)
279{ 281{
280 u32 backend_map = 0; 282 u32 backend_map = 0;
281 u32 enabled_backends_mask; 283 u32 enabled_backends_mask;
@@ -284,6 +286,7 @@ static u32 r700_get_tile_pipe_to_backend_map(u32 num_tile_pipes,
284 u32 swizzle_pipe[R7XX_MAX_PIPES]; 286 u32 swizzle_pipe[R7XX_MAX_PIPES];
285 u32 cur_backend; 287 u32 cur_backend;
286 u32 i; 288 u32 i;
289 bool force_no_swizzle;
287 290
288 if (num_tile_pipes > R7XX_MAX_PIPES) 291 if (num_tile_pipes > R7XX_MAX_PIPES)
289 num_tile_pipes = R7XX_MAX_PIPES; 292 num_tile_pipes = R7XX_MAX_PIPES;
@@ -313,6 +316,18 @@ static u32 r700_get_tile_pipe_to_backend_map(u32 num_tile_pipes,
313 if (enabled_backends_count != num_backends) 316 if (enabled_backends_count != num_backends)
314 num_backends = enabled_backends_count; 317 num_backends = enabled_backends_count;
315 318
319 switch (rdev->family) {
320 case CHIP_RV770:
321 case CHIP_RV730:
322 force_no_swizzle = false;
323 break;
324 case CHIP_RV710:
325 case CHIP_RV740:
326 default:
327 force_no_swizzle = true;
328 break;
329 }
330
316 memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * R7XX_MAX_PIPES); 331 memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * R7XX_MAX_PIPES);
317 switch (num_tile_pipes) { 332 switch (num_tile_pipes) {
318 case 1: 333 case 1:
@@ -323,49 +338,100 @@ static u32 r700_get_tile_pipe_to_backend_map(u32 num_tile_pipes,
323 swizzle_pipe[1] = 1; 338 swizzle_pipe[1] = 1;
324 break; 339 break;
325 case 3: 340 case 3:
326 swizzle_pipe[0] = 0; 341 if (force_no_swizzle) {
327 swizzle_pipe[1] = 2; 342 swizzle_pipe[0] = 0;
328 swizzle_pipe[2] = 1; 343 swizzle_pipe[1] = 1;
344 swizzle_pipe[2] = 2;
345 } else {
346 swizzle_pipe[0] = 0;
347 swizzle_pipe[1] = 2;
348 swizzle_pipe[2] = 1;
349 }
329 break; 350 break;
330 case 4: 351 case 4:
331 swizzle_pipe[0] = 0; 352 if (force_no_swizzle) {
332 swizzle_pipe[1] = 2; 353 swizzle_pipe[0] = 0;
333 swizzle_pipe[2] = 3; 354 swizzle_pipe[1] = 1;
334 swizzle_pipe[3] = 1; 355 swizzle_pipe[2] = 2;
356 swizzle_pipe[3] = 3;
357 } else {
358 swizzle_pipe[0] = 0;
359 swizzle_pipe[1] = 2;
360 swizzle_pipe[2] = 3;
361 swizzle_pipe[3] = 1;
362 }
335 break; 363 break;
336 case 5: 364 case 5:
337 swizzle_pipe[0] = 0; 365 if (force_no_swizzle) {
338 swizzle_pipe[1] = 2; 366 swizzle_pipe[0] = 0;
339 swizzle_pipe[2] = 4; 367 swizzle_pipe[1] = 1;
340 swizzle_pipe[3] = 1; 368 swizzle_pipe[2] = 2;
341 swizzle_pipe[4] = 3; 369 swizzle_pipe[3] = 3;
370 swizzle_pipe[4] = 4;
371 } else {
372 swizzle_pipe[0] = 0;
373 swizzle_pipe[1] = 2;
374 swizzle_pipe[2] = 4;
375 swizzle_pipe[3] = 1;
376 swizzle_pipe[4] = 3;
377 }
342 break; 378 break;
343 case 6: 379 case 6:
344 swizzle_pipe[0] = 0; 380 if (force_no_swizzle) {
345 swizzle_pipe[1] = 2; 381 swizzle_pipe[0] = 0;
346 swizzle_pipe[2] = 4; 382 swizzle_pipe[1] = 1;
347 swizzle_pipe[3] = 5; 383 swizzle_pipe[2] = 2;
348 swizzle_pipe[4] = 3; 384 swizzle_pipe[3] = 3;
349 swizzle_pipe[5] = 1; 385 swizzle_pipe[4] = 4;
386 swizzle_pipe[5] = 5;
387 } else {
388 swizzle_pipe[0] = 0;
389 swizzle_pipe[1] = 2;
390 swizzle_pipe[2] = 4;
391 swizzle_pipe[3] = 5;
392 swizzle_pipe[4] = 3;
393 swizzle_pipe[5] = 1;
394 }
350 break; 395 break;
351 case 7: 396 case 7:
352 swizzle_pipe[0] = 0; 397 if (force_no_swizzle) {
353 swizzle_pipe[1] = 2; 398 swizzle_pipe[0] = 0;
354 swizzle_pipe[2] = 4; 399 swizzle_pipe[1] = 1;
355 swizzle_pipe[3] = 6; 400 swizzle_pipe[2] = 2;
356 swizzle_pipe[4] = 3; 401 swizzle_pipe[3] = 3;
357 swizzle_pipe[5] = 1; 402 swizzle_pipe[4] = 4;
358 swizzle_pipe[6] = 5; 403 swizzle_pipe[5] = 5;
404 swizzle_pipe[6] = 6;
405 } else {
406 swizzle_pipe[0] = 0;
407 swizzle_pipe[1] = 2;
408 swizzle_pipe[2] = 4;
409 swizzle_pipe[3] = 6;
410 swizzle_pipe[4] = 3;
411 swizzle_pipe[5] = 1;
412 swizzle_pipe[6] = 5;
413 }
359 break; 414 break;
360 case 8: 415 case 8:
361 swizzle_pipe[0] = 0; 416 if (force_no_swizzle) {
362 swizzle_pipe[1] = 2; 417 swizzle_pipe[0] = 0;
363 swizzle_pipe[2] = 4; 418 swizzle_pipe[1] = 1;
364 swizzle_pipe[3] = 6; 419 swizzle_pipe[2] = 2;
365 swizzle_pipe[4] = 3; 420 swizzle_pipe[3] = 3;
366 swizzle_pipe[5] = 1; 421 swizzle_pipe[4] = 4;
367 swizzle_pipe[6] = 7; 422 swizzle_pipe[5] = 5;
368 swizzle_pipe[7] = 5; 423 swizzle_pipe[6] = 6;
424 swizzle_pipe[7] = 7;
425 } else {
426 swizzle_pipe[0] = 0;
427 swizzle_pipe[1] = 2;
428 swizzle_pipe[2] = 4;
429 swizzle_pipe[3] = 6;
430 swizzle_pipe[4] = 3;
431 swizzle_pipe[5] = 1;
432 swizzle_pipe[6] = 7;
433 swizzle_pipe[7] = 5;
434 }
369 break; 435 break;
370 } 436 }
371 437
@@ -385,8 +451,10 @@ static u32 r700_get_tile_pipe_to_backend_map(u32 num_tile_pipes,
385static void rv770_gpu_init(struct radeon_device *rdev) 451static void rv770_gpu_init(struct radeon_device *rdev)
386{ 452{
387 int i, j, num_qd_pipes; 453 int i, j, num_qd_pipes;
454 u32 ta_aux_cntl;
388 u32 sx_debug_1; 455 u32 sx_debug_1;
389 u32 smx_dc_ctl0; 456 u32 smx_dc_ctl0;
457 u32 db_debug3;
390 u32 num_gs_verts_per_thread; 458 u32 num_gs_verts_per_thread;
391 u32 vgt_gs_per_es; 459 u32 vgt_gs_per_es;
392 u32 gs_prim_buffer_depth = 0; 460 u32 gs_prim_buffer_depth = 0;
@@ -515,6 +583,7 @@ static void rv770_gpu_init(struct radeon_device *rdev)
515 583
516 switch (rdev->config.rv770.max_tile_pipes) { 584 switch (rdev->config.rv770.max_tile_pipes) {
517 case 1: 585 case 1:
586 default:
518 gb_tiling_config |= PIPE_TILING(0); 587 gb_tiling_config |= PIPE_TILING(0);
519 break; 588 break;
520 case 2: 589 case 2:
@@ -526,16 +595,17 @@ static void rv770_gpu_init(struct radeon_device *rdev)
526 case 8: 595 case 8:
527 gb_tiling_config |= PIPE_TILING(3); 596 gb_tiling_config |= PIPE_TILING(3);
528 break; 597 break;
529 default:
530 break;
531 } 598 }
599 rdev->config.rv770.tiling_npipes = rdev->config.rv770.max_tile_pipes;
532 600
533 if (rdev->family == CHIP_RV770) 601 if (rdev->family == CHIP_RV770)
534 gb_tiling_config |= BANK_TILING(1); 602 gb_tiling_config |= BANK_TILING(1);
535 else 603 else
536 gb_tiling_config |= BANK_TILING((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT); 604 gb_tiling_config |= BANK_TILING((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
605 rdev->config.rv770.tiling_nbanks = 4 << ((gb_tiling_config >> 4) & 0x3);
537 606
538 gb_tiling_config |= GROUP_SIZE(0); 607 gb_tiling_config |= GROUP_SIZE(0);
608 rdev->config.rv770.tiling_group_size = 256;
539 609
540 if (((mc_arb_ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT) > 3) { 610 if (((mc_arb_ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT) > 3) {
541 gb_tiling_config |= ROW_TILING(3); 611 gb_tiling_config |= ROW_TILING(3);
@@ -549,21 +619,27 @@ static void rv770_gpu_init(struct radeon_device *rdev)
549 619
550 gb_tiling_config |= BANK_SWAPS(1); 620 gb_tiling_config |= BANK_SWAPS(1);
551 621
552 if (rdev->family == CHIP_RV740) 622 cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000;
553 backend_map = 0x28; 623 cc_rb_backend_disable |=
554 else 624 BACKEND_DISABLE((R7XX_MAX_BACKENDS_MASK << rdev->config.rv770.max_backends) & R7XX_MAX_BACKENDS_MASK);
555 backend_map = r700_get_tile_pipe_to_backend_map(rdev->config.rv770.max_tile_pipes,
556 rdev->config.rv770.max_backends,
557 (0xff << rdev->config.rv770.max_backends) & 0xff);
558 gb_tiling_config |= BACKEND_MAP(backend_map);
559 625
560 cc_gc_shader_pipe_config = 626 cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffffff00;
627 cc_gc_shader_pipe_config |=
561 INACTIVE_QD_PIPES((R7XX_MAX_PIPES_MASK << rdev->config.rv770.max_pipes) & R7XX_MAX_PIPES_MASK); 628 INACTIVE_QD_PIPES((R7XX_MAX_PIPES_MASK << rdev->config.rv770.max_pipes) & R7XX_MAX_PIPES_MASK);
562 cc_gc_shader_pipe_config |= 629 cc_gc_shader_pipe_config |=
563 INACTIVE_SIMDS((R7XX_MAX_SIMDS_MASK << rdev->config.rv770.max_simds) & R7XX_MAX_SIMDS_MASK); 630 INACTIVE_SIMDS((R7XX_MAX_SIMDS_MASK << rdev->config.rv770.max_simds) & R7XX_MAX_SIMDS_MASK);
564 631
565 cc_rb_backend_disable = 632 if (rdev->family == CHIP_RV740)
566 BACKEND_DISABLE((R7XX_MAX_BACKENDS_MASK << rdev->config.rv770.max_backends) & R7XX_MAX_BACKENDS_MASK); 633 backend_map = 0x28;
634 else
635 backend_map = r700_get_tile_pipe_to_backend_map(rdev,
636 rdev->config.rv770.max_tile_pipes,
637 (R7XX_MAX_BACKENDS -
638 r600_count_pipe_bits((cc_rb_backend_disable &
639 R7XX_MAX_BACKENDS_MASK) >> 16)),
640 (cc_rb_backend_disable >> 16));
641 gb_tiling_config |= BACKEND_MAP(backend_map);
642
567 643
568 WREG32(GB_TILING_CONFIG, gb_tiling_config); 644 WREG32(GB_TILING_CONFIG, gb_tiling_config);
569 WREG32(DCP_TILING_CONFIG, (gb_tiling_config & 0xffff)); 645 WREG32(DCP_TILING_CONFIG, (gb_tiling_config & 0xffff));
@@ -571,16 +647,13 @@ static void rv770_gpu_init(struct radeon_device *rdev)
571 647
572 WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable); 648 WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
573 WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config); 649 WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
574 WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config); 650 WREG32(CC_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable);
575 651
576 WREG32(CC_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable);
577 WREG32(CGTS_SYS_TCC_DISABLE, 0); 652 WREG32(CGTS_SYS_TCC_DISABLE, 0);
578 WREG32(CGTS_TCC_DISABLE, 0); 653 WREG32(CGTS_TCC_DISABLE, 0);
579 WREG32(CGTS_USER_SYS_TCC_DISABLE, 0);
580 WREG32(CGTS_USER_TCC_DISABLE, 0);
581 654
582 num_qd_pipes = 655 num_qd_pipes =
583 R7XX_MAX_BACKENDS - r600_count_pipe_bits(cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK); 656 R7XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);
584 WREG32(VGT_OUT_DEALLOC_CNTL, (num_qd_pipes * 4) & DEALLOC_DIST_MASK); 657 WREG32(VGT_OUT_DEALLOC_CNTL, (num_qd_pipes * 4) & DEALLOC_DIST_MASK);
585 WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((num_qd_pipes * 4) - 2) & VTX_REUSE_DEPTH_MASK); 658 WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((num_qd_pipes * 4) - 2) & VTX_REUSE_DEPTH_MASK);
586 659
@@ -590,10 +663,8 @@ static void rv770_gpu_init(struct radeon_device *rdev)
590 663
591 WREG32(CP_MEQ_THRESHOLDS, STQ_SPLIT(0x30)); 664 WREG32(CP_MEQ_THRESHOLDS, STQ_SPLIT(0x30));
592 665
593 WREG32(TA_CNTL_AUX, (DISABLE_CUBE_ANISO | 666 ta_aux_cntl = RREG32(TA_CNTL_AUX);
594 SYNC_GRADIENT | 667 WREG32(TA_CNTL_AUX, ta_aux_cntl | DISABLE_CUBE_ANISO);
595 SYNC_WALKER |
596 SYNC_ALIGNER));
597 668
598 sx_debug_1 = RREG32(SX_DEBUG_1); 669 sx_debug_1 = RREG32(SX_DEBUG_1);
599 sx_debug_1 |= ENABLE_NEW_SMX_ADDRESS; 670 sx_debug_1 |= ENABLE_NEW_SMX_ADDRESS;
@@ -604,14 +675,28 @@ static void rv770_gpu_init(struct radeon_device *rdev)
604 smx_dc_ctl0 |= CACHE_DEPTH((rdev->config.rv770.sx_num_of_sets * 64) - 1); 675 smx_dc_ctl0 |= CACHE_DEPTH((rdev->config.rv770.sx_num_of_sets * 64) - 1);
605 WREG32(SMX_DC_CTL0, smx_dc_ctl0); 676 WREG32(SMX_DC_CTL0, smx_dc_ctl0);
606 677
607 WREG32(SMX_EVENT_CTL, (ES_FLUSH_CTL(4) | 678 if (rdev->family != CHIP_RV740)
608 GS_FLUSH_CTL(4) | 679 WREG32(SMX_EVENT_CTL, (ES_FLUSH_CTL(4) |
609 ACK_FLUSH_CTL(3) | 680 GS_FLUSH_CTL(4) |
610 SYNC_FLUSH_CTL)); 681 ACK_FLUSH_CTL(3) |
682 SYNC_FLUSH_CTL));
611 683
612 if (rdev->family == CHIP_RV770) 684 db_debug3 = RREG32(DB_DEBUG3);
613 WREG32(DB_DEBUG3, DB_CLK_OFF_DELAY(0x1f)); 685 db_debug3 &= ~DB_CLK_OFF_DELAY(0x1f);
614 else { 686 switch (rdev->family) {
687 case CHIP_RV770:
688 case CHIP_RV740:
689 db_debug3 |= DB_CLK_OFF_DELAY(0x1f);
690 break;
691 case CHIP_RV710:
692 case CHIP_RV730:
693 default:
694 db_debug3 |= DB_CLK_OFF_DELAY(2);
695 break;
696 }
697 WREG32(DB_DEBUG3, db_debug3);
698
699 if (rdev->family != CHIP_RV770) {
615 db_debug4 = RREG32(DB_DEBUG4); 700 db_debug4 = RREG32(DB_DEBUG4);
616 db_debug4 |= DISABLE_TILE_COVERED_FOR_PS_ITER; 701 db_debug4 |= DISABLE_TILE_COVERED_FOR_PS_ITER;
617 WREG32(DB_DEBUG4, db_debug4); 702 WREG32(DB_DEBUG4, db_debug4);
@@ -640,10 +725,10 @@ static void rv770_gpu_init(struct radeon_device *rdev)
640 ALU_UPDATE_FIFO_HIWATER(0x8)); 725 ALU_UPDATE_FIFO_HIWATER(0x8));
641 switch (rdev->family) { 726 switch (rdev->family) {
642 case CHIP_RV770: 727 case CHIP_RV770:
643 sq_ms_fifo_sizes |= FETCH_FIFO_HIWATER(0x1);
644 break;
645 case CHIP_RV730: 728 case CHIP_RV730:
646 case CHIP_RV710: 729 case CHIP_RV710:
730 sq_ms_fifo_sizes |= FETCH_FIFO_HIWATER(0x1);
731 break;
647 case CHIP_RV740: 732 case CHIP_RV740:
648 default: 733 default:
649 sq_ms_fifo_sizes |= FETCH_FIFO_HIWATER(0x4); 734 sq_ms_fifo_sizes |= FETCH_FIFO_HIWATER(0x4);
@@ -816,45 +901,13 @@ int rv770_mc_init(struct radeon_device *rdev)
816 /* Setup GPU memory space */ 901 /* Setup GPU memory space */
817 rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE); 902 rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
818 rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE); 903 rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
819 904 rdev->mc.visible_vram_size = rdev->mc.aper_size;
820 if (rdev->mc.mc_vram_size > rdev->mc.aper_size) 905 /* FIXME remove this once we support unmappable VRAM */
906 if (rdev->mc.mc_vram_size > rdev->mc.aper_size) {
821 rdev->mc.mc_vram_size = rdev->mc.aper_size; 907 rdev->mc.mc_vram_size = rdev->mc.aper_size;
822
823 if (rdev->mc.real_vram_size > rdev->mc.aper_size)
824 rdev->mc.real_vram_size = rdev->mc.aper_size; 908 rdev->mc.real_vram_size = rdev->mc.aper_size;
825
826 if (rdev->flags & RADEON_IS_AGP) {
827 /* gtt_size is setup by radeon_agp_init */
828 rdev->mc.gtt_location = rdev->mc.agp_base;
829 tmp = 0xFFFFFFFFUL - rdev->mc.agp_base - rdev->mc.gtt_size;
830 /* Try to put vram before or after AGP because we
831 * we want SYSTEM_APERTURE to cover both VRAM and
832 * AGP so that GPU can catch out of VRAM/AGP access
833 */
834 if (rdev->mc.gtt_location > rdev->mc.mc_vram_size) {
835 /* Enough place before */
836 rdev->mc.vram_location = rdev->mc.gtt_location -
837 rdev->mc.mc_vram_size;
838 } else if (tmp > rdev->mc.mc_vram_size) {
839 /* Enough place after */
840 rdev->mc.vram_location = rdev->mc.gtt_location +
841 rdev->mc.gtt_size;
842 } else {
843 /* Try to setup VRAM then AGP might not
844 * not work on some card
845 */
846 rdev->mc.vram_location = 0x00000000UL;
847 rdev->mc.gtt_location = rdev->mc.mc_vram_size;
848 }
849 } else {
850 rdev->mc.vram_location = 0x00000000UL;
851 rdev->mc.gtt_location = rdev->mc.mc_vram_size;
852 rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
853 } 909 }
854 rdev->mc.vram_start = rdev->mc.vram_location; 910 r600_vram_gtt_location(rdev, &rdev->mc);
855 rdev->mc.vram_end = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1;
856 rdev->mc.gtt_start = rdev->mc.gtt_location;
857 rdev->mc.gtt_end = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
858 /* FIXME: we should enforce default clock in case GPU is not in 911 /* FIXME: we should enforce default clock in case GPU is not in
859 * default setup 912 * default setup
860 */ 913 */
@@ -863,6 +916,7 @@ int rv770_mc_init(struct radeon_device *rdev)
863 rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a); 916 rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
864 return 0; 917 return 0;
865} 918}
919
866int rv770_gpu_reset(struct radeon_device *rdev) 920int rv770_gpu_reset(struct radeon_device *rdev)
867{ 921{
868 /* FIXME: implement any rv770 specific bits */ 922 /* FIXME: implement any rv770 specific bits */
@@ -1038,6 +1092,7 @@ int rv770_init(struct radeon_device *rdev)
1038 r = radeon_fence_driver_init(rdev); 1092 r = radeon_fence_driver_init(rdev);
1039 if (r) 1093 if (r)
1040 return r; 1094 return r;
1095 /* initialize AGP */
1041 if (rdev->flags & RADEON_IS_AGP) { 1096 if (rdev->flags & RADEON_IS_AGP) {
1042 r = radeon_agp_init(rdev); 1097 r = radeon_agp_init(rdev);
1043 if (r) 1098 if (r)
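In the rv770_gpu_init() hunks above, the backend map is now derived from the fuse/disable register rather than a hardcoded count: R7XX_MAX_BACKENDS minus r600_count_pipe_bits() applied to the masked disable bits. That helper is essentially a population count; a minimal standalone illustration follows, where the MAX_BACKENDS* constants and the sample register value are assumptions standing in for the real definitions.

#include <stdint.h>
#include <stdio.h>

#define MAX_BACKENDS            8               /* stand-in for R7XX_MAX_BACKENDS */
#define MAX_BACKENDS_MASK       0x00ff0000      /* assumed layout of the disable bits */

/* Population count, i.e. what a helper like r600_count_pipe_bits() computes. */
static unsigned int count_pipe_bits(uint32_t val)
{
        unsigned int n = 0;

        for (; val; val >>= 1)
                n += val & 1;
        return n;
}

int main(void)
{
        uint32_t cc_rb_backend_disable = 0x00c00000;    /* e.g. two backends fused off */
        unsigned int backends = MAX_BACKENDS -
                count_pipe_bits((cc_rb_backend_disable & MAX_BACKENDS_MASK) >> 16);

        printf("usable backends: %u\n", backends);      /* prints 6 */
        return 0;
}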
diff --git a/drivers/gpu/drm/radeon/rv770d.h b/drivers/gpu/drm/radeon/rv770d.h
index a1367ab6f261..9506f8cb99e0 100644
--- a/drivers/gpu/drm/radeon/rv770d.h
+++ b/drivers/gpu/drm/radeon/rv770d.h
@@ -343,4 +343,6 @@
343 343
344#define WAIT_UNTIL 0x8040 344#define WAIT_UNTIL 0x8040
345 345
346#define SRBM_STATUS 0x0E50
347
346#endif 348#endif
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index 3d47a2c12322..a759170763bb 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -480,7 +480,7 @@ static int ttm_tt_swapin(struct ttm_tt *ttm)
480 void *from_virtual; 480 void *from_virtual;
481 void *to_virtual; 481 void *to_virtual;
482 int i; 482 int i;
483 int ret; 483 int ret = -ENOMEM;
484 484
485 if (ttm->page_flags & TTM_PAGE_FLAG_USER) { 485 if (ttm->page_flags & TTM_PAGE_FLAG_USER) {
486 ret = ttm_tt_set_user(ttm, ttm->tsk, ttm->start, 486 ret = ttm_tt_set_user(ttm, ttm->tsk, ttm->start,
@@ -499,8 +499,10 @@ static int ttm_tt_swapin(struct ttm_tt *ttm)
499 499
500 for (i = 0; i < ttm->num_pages; ++i) { 500 for (i = 0; i < ttm->num_pages; ++i) {
501 from_page = read_mapping_page(swap_space, i, NULL); 501 from_page = read_mapping_page(swap_space, i, NULL);
502 if (IS_ERR(from_page)) 502 if (IS_ERR(from_page)) {
503 ret = PTR_ERR(from_page);
503 goto out_err; 504 goto out_err;
505 }
504 to_page = __ttm_tt_get_page(ttm, i); 506 to_page = __ttm_tt_get_page(ttm, i);
505 if (unlikely(to_page == NULL)) 507 if (unlikely(to_page == NULL))
506 goto out_err; 508 goto out_err;
@@ -523,7 +525,7 @@ static int ttm_tt_swapin(struct ttm_tt *ttm)
523 return 0; 525 return 0;
524out_err: 526out_err:
525 ttm_tt_free_alloced_pages(ttm); 527 ttm_tt_free_alloced_pages(ttm);
526 return -ENOMEM; 528 return ret;
527} 529}
528 530
529int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistant_swap_storage) 531int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistant_swap_storage)
@@ -535,6 +537,7 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistant_swap_storage)
535 void *from_virtual; 537 void *from_virtual;
536 void *to_virtual; 538 void *to_virtual;
537 int i; 539 int i;
540 int ret = -ENOMEM;
538 541
539 BUG_ON(ttm->state != tt_unbound && ttm->state != tt_unpopulated); 542 BUG_ON(ttm->state != tt_unbound && ttm->state != tt_unpopulated);
540 BUG_ON(ttm->caching_state != tt_cached); 543 BUG_ON(ttm->caching_state != tt_cached);
@@ -557,7 +560,7 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistant_swap_storage)
557 0); 560 0);
558 if (unlikely(IS_ERR(swap_storage))) { 561 if (unlikely(IS_ERR(swap_storage))) {
559 printk(KERN_ERR "Failed allocating swap storage.\n"); 562 printk(KERN_ERR "Failed allocating swap storage.\n");
560 return -ENOMEM; 563 return PTR_ERR(swap_storage);
561 } 564 }
562 } else 565 } else
563 swap_storage = persistant_swap_storage; 566 swap_storage = persistant_swap_storage;
@@ -569,9 +572,10 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistant_swap_storage)
569 if (unlikely(from_page == NULL)) 572 if (unlikely(from_page == NULL))
570 continue; 573 continue;
571 to_page = read_mapping_page(swap_space, i, NULL); 574 to_page = read_mapping_page(swap_space, i, NULL);
572 if (unlikely(to_page == NULL)) 575 if (unlikely(IS_ERR(to_page))) {
576 ret = PTR_ERR(to_page);
573 goto out_err; 577 goto out_err;
574 578 }
575 preempt_disable(); 579 preempt_disable();
576 from_virtual = kmap_atomic(from_page, KM_USER0); 580 from_virtual = kmap_atomic(from_page, KM_USER0);
577 to_virtual = kmap_atomic(to_page, KM_USER1); 581 to_virtual = kmap_atomic(to_page, KM_USER1);
@@ -595,5 +599,5 @@ out_err:
595 if (!persistant_swap_storage) 599 if (!persistant_swap_storage)
596 fput(swap_storage); 600 fput(swap_storage);
597 601
598 return -ENOMEM; 602 return ret;
599} 603}
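The ttm_tt.c changes above stop collapsing every failure into -ENOMEM and instead propagate the errno encoded in the error pointers returned by read_mapping_page() and shmem_file_setup(). Below is a standalone sketch of the IS_ERR()/PTR_ERR() convention this relies on; the macro bodies are simplified stand-ins for the kernel's versions.

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO       4095
#define ERR_PTR(err)    ((void *)(long)(err))
#define IS_ERR(ptr)     ((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)
#define PTR_ERR(ptr)    ((long)(ptr))

/* Stand-in for read_mapping_page(): a valid page pointer on success,
 * an encoded errno (not NULL, and not necessarily -ENOMEM) on failure. */
static void *fake_read_page(int fail)
{
        static char page[4096];

        return fail ? ERR_PTR(-EIO) : page;
}

int main(void)
{
        void *page = fake_read_page(1);

        if (IS_ERR(page)) {
                /* Propagate the real errno, as the patch now does. */
                printf("read failed: %ld\n", PTR_ERR(page));
                return 1;
        }
        return 0;
}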
diff --git a/drivers/gpu/vga/Kconfig b/drivers/gpu/vga/Kconfig
index 0920492cea0a..61ab4daf0bbb 100644
--- a/drivers/gpu/vga/Kconfig
+++ b/drivers/gpu/vga/Kconfig
@@ -16,3 +16,14 @@ config VGA_ARB_MAX_GPUS
16 help 16 help
17 Reserves space in the kernel to maintain resource locking for 17 Reserves space in the kernel to maintain resource locking for
18 multiple GPUs. The overhead for each GPU is very small. 18 multiple GPUs. The overhead for each GPU is very small.
19
20config VGA_SWITCHEROO
21 bool "Laptop Hybrid Graphics - GPU switching support"
22 depends on X86
23 depends on ACPI
24 help
25 Many laptops released in 2008/9/10 have two GPUs with a multiplexer
26 to switch between them. This adds support for dynamic switching when
27 X isn't running and delayed switching until the next logoff. This
28 feature is called hybrid graphics, ATI PowerXpress, and Nvidia
29 HybridPower.
diff --git a/drivers/gpu/vga/Makefile b/drivers/gpu/vga/Makefile
index 7cc8c1ed645b..14ca30b75d0a 100644
--- a/drivers/gpu/vga/Makefile
+++ b/drivers/gpu/vga/Makefile
@@ -1 +1,2 @@
1obj-$(CONFIG_VGA_ARB) += vgaarb.o 1obj-$(CONFIG_VGA_ARB) += vgaarb.o
2obj-$(CONFIG_VGA_SWITCHEROO) += vga_switcheroo.o
diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c
new file mode 100644
index 000000000000..d6d1149d525d
--- /dev/null
+++ b/drivers/gpu/vga/vga_switcheroo.c
@@ -0,0 +1,450 @@
1/*
2 * Copyright (c) 2010 Red Hat Inc.
3 * Author : Dave Airlie <airlied@redhat.com>
4 *
5 *
6 * Licensed under GPLv2
7 *
8 * vga_switcheroo.c - Support for laptops with dual GPUs using one set of outputs
9
10 Switcher interface - methods required for ATPX and DCM
11 - switchto - this throws the output MUX switch
12 - discrete_set_power - sets the power state for the discrete card
13
14 GPU driver interface
15 - set_gpu_state - this should do the equivalent of suspend/resume for the card
16 - this should *not* set the discrete power state
17 - can_switch - check if the device is in a position to switch now
18 */
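For orientation, here is a hypothetical GPU-driver-side hookup against the two callbacks this comment describes. The callback signatures match vga_switcheroo_register_client() below; the mydrv_* driver, its device struct, and its suspend/resume helpers are invented purely for illustration.

#include <linux/pci.h>
#include <linux/vga_switcheroo.h>

struct mydrv_device { int open_count; };

static void mydrv_suspend(struct pci_dev *pdev) { /* stop engines, save state */ }
static void mydrv_resume(struct pci_dev *pdev)  { /* restore state, restart engines */ }

/* Suspend/resume-like state flip; must not touch the discrete power rails. */
static void mydrv_set_gpu_state(struct pci_dev *pdev,
                                enum vga_switcheroo_state state)
{
        if (state == VGA_SWITCHEROO_ON)
                mydrv_resume(pdev);
        else
                mydrv_suspend(pdev);
}

/* Refuse a switch while userspace still holds the device open. */
static bool mydrv_can_switch(struct pci_dev *pdev)
{
        struct mydrv_device *mdev = pci_get_drvdata(pdev);

        return mdev->open_count == 0;
}

/* In the driver's probe path:
 *      vga_switcheroo_register_client(pdev, mydrv_set_gpu_state,
 *                                     mydrv_can_switch);
 */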
19
20#include <linux/module.h>
21#include <linux/dmi.h>
22#include <linux/seq_file.h>
23#include <linux/uaccess.h>
24#include <linux/fs.h>
25#include <linux/debugfs.h>
26#include <linux/fb.h>
27
28#include <linux/pci.h>
29#include <linux/vga_switcheroo.h>
30
31struct vga_switcheroo_client {
32 struct pci_dev *pdev;
33 struct fb_info *fb_info;
34 int pwr_state;
35 void (*set_gpu_state)(struct pci_dev *pdev, enum vga_switcheroo_state);
36 bool (*can_switch)(struct pci_dev *pdev);
37 int id;
38 bool active;
39};
40
41static DEFINE_MUTEX(vgasr_mutex);
42
43struct vgasr_priv {
44
45 bool active;
46 bool delayed_switch_active;
47 enum vga_switcheroo_client_id delayed_client_id;
48
49 struct dentry *debugfs_root;
50 struct dentry *switch_file;
51
52 int registered_clients;
53 struct vga_switcheroo_client clients[VGA_SWITCHEROO_MAX_CLIENTS];
54
55 struct vga_switcheroo_handler *handler;
56};
57
58static int vga_switcheroo_debugfs_init(struct vgasr_priv *priv);
59static void vga_switcheroo_debugfs_fini(struct vgasr_priv *priv);
60
61/* only one switcheroo per system */
62static struct vgasr_priv vgasr_priv;
63
64int vga_switcheroo_register_handler(struct vga_switcheroo_handler *handler)
65{
66 mutex_lock(&vgasr_mutex);
67 if (vgasr_priv.handler) {
68 mutex_unlock(&vgasr_mutex);
69 return -EINVAL;
70 }
71
72 vgasr_priv.handler = handler;
73 mutex_unlock(&vgasr_mutex);
74 return 0;
75}
76EXPORT_SYMBOL(vga_switcheroo_register_handler);
77
78void vga_switcheroo_unregister_handler(void)
79{
80 mutex_lock(&vgasr_mutex);
81 vgasr_priv.handler = NULL;
82 mutex_unlock(&vgasr_mutex);
83}
84EXPORT_SYMBOL(vga_switcheroo_unregister_handler);
85
86static void vga_switcheroo_enable(void)
87{
88 int i;
89 int ret;
90 /* call the handler to init */
91 vgasr_priv.handler->init();
92
93 for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) {
94 ret = vgasr_priv.handler->get_client_id(vgasr_priv.clients[i].pdev);
95 if (ret < 0)
96 return;
97
98 vgasr_priv.clients[i].id = ret;
99 }
100 vga_switcheroo_debugfs_init(&vgasr_priv);
101 vgasr_priv.active = true;
102}
103
104int vga_switcheroo_register_client(struct pci_dev *pdev,
105 void (*set_gpu_state)(struct pci_dev *pdev, enum vga_switcheroo_state),
106 bool (*can_switch)(struct pci_dev *pdev))
107{
108 int index;
109
110 mutex_lock(&vgasr_mutex);
111 /* don't do IGD vs DIS here */
112 if (vgasr_priv.registered_clients & 1)
113 index = 1;
114 else
115 index = 0;
116
117 vgasr_priv.clients[index].pwr_state = VGA_SWITCHEROO_ON;
118 vgasr_priv.clients[index].pdev = pdev;
119 vgasr_priv.clients[index].set_gpu_state = set_gpu_state;
120 vgasr_priv.clients[index].can_switch = can_switch;
121 vgasr_priv.clients[index].id = -1;
122 if (pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW)
123 vgasr_priv.clients[index].active = true;
124
125 vgasr_priv.registered_clients |= (1 << index);
126
127 /* if we get two clients + handler */
128 if (vgasr_priv.registered_clients == 0x3 && vgasr_priv.handler) {
129 printk(KERN_INFO "vga_switcheroo: enabled\n");
130 vga_switcheroo_enable();
131 }
132 mutex_unlock(&vgasr_mutex);
133 return 0;
134}
135EXPORT_SYMBOL(vga_switcheroo_register_client);
136
137void vga_switcheroo_unregister_client(struct pci_dev *pdev)
138{
139 int i;
140
141 mutex_lock(&vgasr_mutex);
142 for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) {
143 if (vgasr_priv.clients[i].pdev == pdev) {
144 vgasr_priv.registered_clients &= ~(1 << i);
145 break;
146 }
147 }
148
149 printk(KERN_INFO "vga_switcheroo: disabled\n");
150 vga_switcheroo_debugfs_fini(&vgasr_priv);
151 vgasr_priv.active = false;
152 mutex_unlock(&vgasr_mutex);
153}
154EXPORT_SYMBOL(vga_switcheroo_unregister_client);
155
156void vga_switcheroo_client_fb_set(struct pci_dev *pdev,
157 struct fb_info *info)
158{
159 int i;
160
161 mutex_lock(&vgasr_mutex);
162 for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) {
163 if (vgasr_priv.clients[i].pdev == pdev) {
164 vgasr_priv.clients[i].fb_info = info;
165 break;
166 }
167 }
168 mutex_unlock(&vgasr_mutex);
169}
170EXPORT_SYMBOL(vga_switcheroo_client_fb_set);
171
172static int vga_switcheroo_show(struct seq_file *m, void *v)
173{
174 int i;
175 mutex_lock(&vgasr_mutex);
176 for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) {
177 seq_printf(m, "%d:%c:%s:%s\n", i,
178 vgasr_priv.clients[i].active ? '+' : ' ',
179 vgasr_priv.clients[i].pwr_state ? "Pwr" : "Off",
180 pci_name(vgasr_priv.clients[i].pdev));
181 }
182 mutex_unlock(&vgasr_mutex);
183 return 0;
184}
185
186static int vga_switcheroo_debugfs_open(struct inode *inode, struct file *file)
187{
188 return single_open(file, vga_switcheroo_show, NULL);
189}
190
191static int vga_switchon(struct vga_switcheroo_client *client)
192{
193 int ret;
194
195 ret = vgasr_priv.handler->power_state(client->id, VGA_SWITCHEROO_ON);
196 /* call the driver callback to turn on device */
197 client->set_gpu_state(client->pdev, VGA_SWITCHEROO_ON);
198 client->pwr_state = VGA_SWITCHEROO_ON;
199 return 0;
200}
201
202static int vga_switchoff(struct vga_switcheroo_client *client)
203{
204 /* call the driver callback to turn off device */
205 client->set_gpu_state(client->pdev, VGA_SWITCHEROO_OFF);
206 vgasr_priv.handler->power_state(client->id, VGA_SWITCHEROO_OFF);
207 client->pwr_state = VGA_SWITCHEROO_OFF;
208 return 0;
209}
210
211static int vga_switchto(struct vga_switcheroo_client *new_client)
212{
213 int ret;
214 int i;
215 struct vga_switcheroo_client *active = NULL;
216
217 if (new_client->active == true)
218 return 0;
219
220 for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) {
221 if (vgasr_priv.clients[i].active == true) {
222 active = &vgasr_priv.clients[i];
223 break;
224 }
225 }
226 if (!active)
227 return 0;
228
229 /* power up the first device */
230 ret = pci_enable_device(new_client->pdev);
231 if (ret)
232 return ret;
233
234 if (new_client->pwr_state == VGA_SWITCHEROO_OFF)
235 vga_switchon(new_client);
236
237 /* swap shadow resource to denote boot VGA device has changed so X starts on new device */
238 active->active = false;
239
240 active->pdev->resource[PCI_ROM_RESOURCE].flags &= ~IORESOURCE_ROM_SHADOW;
241 new_client->pdev->resource[PCI_ROM_RESOURCE].flags |= IORESOURCE_ROM_SHADOW;
242
243 if (new_client->fb_info) {
244 struct fb_event event;
245 event.info = new_client->fb_info;
246 fb_notifier_call_chain(FB_EVENT_REMAP_ALL_CONSOLE, &event);
247 }
248
249 ret = vgasr_priv.handler->switchto(new_client->id);
250 if (ret)
251 return ret;
252
253 if (active->pwr_state == VGA_SWITCHEROO_ON)
254 vga_switchoff(active);
255
256 new_client->active = true;
257 return 0;
258}
259
260static ssize_t
261vga_switcheroo_debugfs_write(struct file *filp, const char __user *ubuf,
262 size_t cnt, loff_t *ppos)
263{
264 char usercmd[64];
265 const char *pdev_name;
266 int i, ret;
267 bool delay = false, can_switch;
268 int client_id = -1;
269 struct vga_switcheroo_client *client = NULL;
270
271 if (cnt > 63)
272 cnt = 63;
273
274 if (copy_from_user(usercmd, ubuf, cnt))
275 return -EFAULT;
276
277 mutex_lock(&vgasr_mutex);
278
279 if (!vgasr_priv.active) {
280 mutex_unlock(&vgasr_mutex); return -EINVAL; }
281
282 /* pwr off the device not in use */
283 if (strncmp(usercmd, "OFF", 3) == 0) {
284 for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) {
285 if (vgasr_priv.clients[i].active)
286 continue;
287 if (vgasr_priv.clients[i].pwr_state == VGA_SWITCHEROO_ON)
288 vga_switchoff(&vgasr_priv.clients[i]);
289 }
290 goto out;
291 }
292 /* pwr on the device not in use */
293 if (strncmp(usercmd, "ON", 2) == 0) {
294 for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) {
295 if (vgasr_priv.clients[i].active)
296 continue;
297 if (vgasr_priv.clients[i].pwr_state == VGA_SWITCHEROO_OFF)
298 vga_switchon(&vgasr_priv.clients[i]);
299 }
300 goto out;
301 }
302
303 /* request a delayed switch - test whether we can switch now */
304 if (strncmp(usercmd, "DIGD", 4) == 0) {
305 client_id = VGA_SWITCHEROO_IGD;
306 delay = true;
307 }
308
309 if (strncmp(usercmd, "DDIS", 4) == 0) {
310 client_id = VGA_SWITCHEROO_DIS;
311 delay = true;
312 }
313
314 if (strncmp(usercmd, "IGD", 3) == 0)
315 client_id = VGA_SWITCHEROO_IGD;
316
317 if (strncmp(usercmd, "DIS", 3) == 0)
318 client_id = VGA_SWITCHEROO_DIS;
319
320 if (client_id == -1)
321 goto out;
322
323 for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) {
324 if (vgasr_priv.clients[i].id == client_id) {
325 client = &vgasr_priv.clients[i];
326 break;
327 }
328 }
329
330 vgasr_priv.delayed_switch_active = false;
331 /* okay we want a switch - test if devices are willing to switch */
332 can_switch = true;
333 for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) {
334 can_switch = vgasr_priv.clients[i].can_switch(vgasr_priv.clients[i].pdev);
335 if (can_switch == false) {
336 printk(KERN_ERR "vga_switcheroo: client %d refused switch\n", i);
337 break;
338 }
339 }
340
341 if (can_switch == false && delay == false)
342 goto out;
343
344 if (can_switch == true) {
345 pdev_name = pci_name(client->pdev);
346 ret = vga_switchto(client);
347 if (ret)
348 printk(KERN_ERR "vga_switcheroo: switching failed %d\n", ret);
349 } else {
350 printk(KERN_INFO "vga_switcheroo: setting delayed switch to client %d\n", client->id);
351 vgasr_priv.delayed_switch_active = true;
352 vgasr_priv.delayed_client_id = client_id;
353
354 /* we should at least power up the card to
355 make the switch faster */
356 if (client->pwr_state == VGA_SWITCHEROO_OFF)
357 vga_switchon(client);
358 }
359
360out:
361 mutex_unlock(&vgasr_mutex);
362 return cnt;
363}
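A minimal user-space sketch exercising the parser above. The command strings (ON, OFF, IGD, DIS, DIGD, DDIS) are exactly the ones vga_switcheroo_debugfs_write() accepts; the path assumes debugfs is mounted at /sys/kernel/debug.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(int argc, char **argv)
{
        const char *cmd = argc > 1 ? argv[1] : "DDIS";  /* delayed switch to discrete */
        int fd = open("/sys/kernel/debug/vgaswitcheroo/switch", O_WRONLY);

        if (fd < 0) {
                perror("open");
                return 1;
        }
        if (write(fd, cmd, strlen(cmd)) < 0)
                perror("write");        /* e.g. EINVAL if switcheroo is not active */
        close(fd);
        return 0;
}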
364
365static const struct file_operations vga_switcheroo_debugfs_fops = {
366 .owner = THIS_MODULE,
367 .open = vga_switcheroo_debugfs_open,
368 .write = vga_switcheroo_debugfs_write,
369 .read = seq_read,
370 .llseek = seq_lseek,
371 .release = single_release,
372};
373
374static void vga_switcheroo_debugfs_fini(struct vgasr_priv *priv)
375{
376 if (priv->switch_file) {
377 debugfs_remove(priv->switch_file);
378 priv->switch_file = NULL;
379 }
380 if (priv->debugfs_root) {
381 debugfs_remove(priv->debugfs_root);
382 priv->debugfs_root = NULL;
383 }
384}
385
386static int vga_switcheroo_debugfs_init(struct vgasr_priv *priv)
387{
388 /* already initialised */
389 if (priv->debugfs_root)
390 return 0;
391 priv->debugfs_root = debugfs_create_dir("vgaswitcheroo", NULL);
392
393 if (!priv->debugfs_root) {
394 printk(KERN_ERR "vga_switcheroo: Cannot create /sys/kernel/debug/vgaswitcheroo\n");
395 goto fail;
396 }
397
398 priv->switch_file = debugfs_create_file("switch", 0644,
399 priv->debugfs_root, NULL, &vga_switcheroo_debugfs_fops);
400 if (!priv->switch_file) {
401 printk(KERN_ERR "vga_switcheroo: cannot create /sys/kernel/debug/vgaswitcheroo/switch\n");
402 goto fail;
403 }
404 return 0;
405fail:
406 vga_switcheroo_debugfs_fini(priv);
407 return -1;
408}
409
410int vga_switcheroo_process_delayed_switch(void)
411{
412 struct vga_switcheroo_client *client = NULL;
413 const char *pdev_name;
414 bool can_switch = true;
415 int i;
416 int ret;
417 int err = -EINVAL;
418
419 mutex_lock(&vgasr_mutex);
420 if (!vgasr_priv.delayed_switch_active)
421 goto err;
422
423 printk(KERN_INFO "vga_switcheroo: processing delayed switch to %d\n", vgasr_priv.delayed_client_id);
424
425 for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) {
426 if (vgasr_priv.clients[i].id == vgasr_priv.delayed_client_id)
427 client = &vgasr_priv.clients[i];
428 can_switch = vgasr_priv.clients[i].can_switch(vgasr_priv.clients[i].pdev);
429 if (can_switch == false) {
430 printk(KERN_ERR "vga_switcheroo: client %d refused switch\n", i);
431 break;
432 }
433 }
434
435 if (can_switch == false || client == NULL)
436 goto err;
437
438 pdev_name = pci_name(client->pdev);
439 ret = vga_switchto(client);
440 if (ret)
441 printk(KERN_ERR "vga_switcheroo: delayed switching failed %d\n", ret);
442
443 vgasr_priv.delayed_switch_active = false;
444 err = 0;
445err:
446 mutex_unlock(&vgasr_mutex);
447 return err;
448}
449EXPORT_SYMBOL(vga_switcheroo_process_delayed_switch);
450
diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
index 867e08433e4b..433602aed468 100644
--- a/drivers/hid/usbhid/hiddev.c
+++ b/drivers/hid/usbhid/hiddev.c
@@ -265,9 +265,10 @@ static int hiddev_release(struct inode * inode, struct file * file)
265static int hiddev_open(struct inode *inode, struct file *file) 265static int hiddev_open(struct inode *inode, struct file *file)
266{ 266{
267 struct hiddev_list *list; 267 struct hiddev_list *list;
268 int res; 268 int res, i;
269 269
270 int i = iminor(inode) - HIDDEV_MINOR_BASE; 270 lock_kernel();
271 i = iminor(inode) - HIDDEV_MINOR_BASE;
271 272
272 if (i >= HIDDEV_MINORS || i < 0 || !hiddev_table[i]) 273 if (i >= HIDDEV_MINORS || i < 0 || !hiddev_table[i])
273 return -ENODEV; 274 return -ENODEV;
@@ -313,10 +314,12 @@ static int hiddev_open(struct inode *inode, struct file *file)
313 usbhid_open(hid); 314 usbhid_open(hid);
314 } 315 }
315 316
317 unlock_kernel();
316 return 0; 318 return 0;
317bail: 319bail:
318 file->private_data = NULL; 320 file->private_data = NULL;
319 kfree(list); 321 kfree(list);
322 unlock_kernel();
320 return res; 323 return res;
321} 324}
322 325
diff --git a/drivers/i2c/Kconfig b/drivers/i2c/Kconfig
index 8d8a00e5a30e..02ce9cff5fcf 100644
--- a/drivers/i2c/Kconfig
+++ b/drivers/i2c/Kconfig
@@ -61,6 +61,16 @@ config I2C_HELPER_AUTO
61 61
62 In doubt, say Y. 62 In doubt, say Y.
63 63
64config I2C_SMBUS
65 tristate "SMBus-specific protocols" if !I2C_HELPER_AUTO
66 help
67 Say Y here if you want support for SMBus extensions to the I2C
68 specification. At the moment, the only supported extension is
69 the SMBus alert protocol.
70
71 This support is also available as a module. If so, the module
72 will be called i2c-smbus.
73
64source drivers/i2c/algos/Kconfig 74source drivers/i2c/algos/Kconfig
65source drivers/i2c/busses/Kconfig 75source drivers/i2c/busses/Kconfig
66source drivers/i2c/chips/Kconfig 76source drivers/i2c/chips/Kconfig
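To show what selecting I2C_SMBUS buys a bus driver, here is a hypothetical adapter-side hookup of the SMBus alert protocol. The i2c_smbus_alert_setup structure and the i2c_setup_smbus_alert() call are taken from the i2c-parport changes further down; the surrounding mydrv_* function is invented for illustration.

#include <linux/i2c.h>
#include <linux/i2c-smbus.h>

static struct i2c_client *ara;  /* Alert Response Address client */

/* Called once the adapter is registered and its alert IRQ is known. */
static int mydrv_enable_alert(struct i2c_adapter *adap, int irq)
{
        static struct i2c_smbus_alert_setup setup = {
                .alert_edge_triggered = 1,      /* assumption: edge-triggered line */
        };

        setup.irq = irq;
        ara = i2c_setup_smbus_alert(adap, &setup);
        return ara ? 0 : -ENODEV;
}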
diff --git a/drivers/i2c/Makefile b/drivers/i2c/Makefile
index ba26e6cbe74e..acd0250c16a0 100644
--- a/drivers/i2c/Makefile
+++ b/drivers/i2c/Makefile
@@ -4,6 +4,7 @@
4 4
5obj-$(CONFIG_I2C_BOARDINFO) += i2c-boardinfo.o 5obj-$(CONFIG_I2C_BOARDINFO) += i2c-boardinfo.o
6obj-$(CONFIG_I2C) += i2c-core.o 6obj-$(CONFIG_I2C) += i2c-core.o
7obj-$(CONFIG_I2C_SMBUS) += i2c-smbus.o
7obj-$(CONFIG_I2C_CHARDEV) += i2c-dev.o 8obj-$(CONFIG_I2C_CHARDEV) += i2c-dev.o
8obj-y += busses/ chips/ algos/ 9obj-y += busses/ chips/ algos/
9 10
diff --git a/drivers/i2c/algos/i2c-algo-pca.c b/drivers/i2c/algos/i2c-algo-pca.c
index 78d42aae0089..dcdaf8e675bf 100644
--- a/drivers/i2c/algos/i2c-algo-pca.c
+++ b/drivers/i2c/algos/i2c-algo-pca.c
@@ -453,8 +453,6 @@ static int pca_init(struct i2c_adapter *adap)
453 */ 453 */
454 int raise_fall_time; 454 int raise_fall_time;
455 455
456 struct i2c_algo_pca_data *pca_data = adap->algo_data;
457
458 /* Ignore the reset function from the module, 456 /* Ignore the reset function from the module,
459 * we can use the parallel bus reset 457 * we can use the parallel bus reset
460 */ 458 */
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 737f05200b1d..4cc3807bd31c 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -77,7 +77,7 @@ config I2C_AMD8111
77 will be called i2c-amd8111. 77 will be called i2c-amd8111.
78 78
79config I2C_I801 79config I2C_I801
80 tristate "Intel 82801 (ICH)" 80 tristate "Intel 82801 (ICH/PCH)"
81 depends on PCI 81 depends on PCI
82 help 82 help
83 If you say yes to this option, support will be included for the Intel 83 If you say yes to this option, support will be included for the Intel
@@ -97,7 +97,8 @@ config I2C_I801
97 ICH9 97 ICH9
98 Tolapai 98 Tolapai
99 ICH10 99 ICH10
100 PCH 100 3400/5 Series (PCH)
101 Cougar Point (PCH)
101 102
102 This driver can also be built as a module. If so, the module 103 This driver can also be built as a module. If so, the module
103 will be called i2c-i801. 104 will be called i2c-i801.
@@ -580,6 +581,7 @@ config I2C_PARPORT
580 tristate "Parallel port adapter" 581 tristate "Parallel port adapter"
581 depends on PARPORT 582 depends on PARPORT
582 select I2C_ALGOBIT 583 select I2C_ALGOBIT
584 select I2C_SMBUS
583 help 585 help
584 This supports parallel port I2C adapters such as the ones made by 586 This supports parallel port I2C adapters such as the ones made by
585 Philips or Velleman, Analog Devices evaluation boards, and more. 587 Philips or Velleman, Analog Devices evaluation boards, and more.
@@ -603,6 +605,7 @@ config I2C_PARPORT
603config I2C_PARPORT_LIGHT 605config I2C_PARPORT_LIGHT
604 tristate "Parallel port adapter (light)" 606 tristate "Parallel port adapter (light)"
605 select I2C_ALGOBIT 607 select I2C_ALGOBIT
608 select I2C_SMBUS
606 help 609 help
607 This supports parallel port I2C adapters such as the ones made by 610 This supports parallel port I2C adapters such as the ones made by
608 Philips or Velleman, Analog Devices evaluation boards, and more. 611 Philips or Velleman, Analog Devices evaluation boards, and more.
diff --git a/drivers/i2c/busses/i2c-ali1535.c b/drivers/i2c/busses/i2c-ali1535.c
index 8de7d7b87bb0..bd8f1e4d9e6c 100644
--- a/drivers/i2c/busses/i2c-ali1535.c
+++ b/drivers/i2c/busses/i2c-ali1535.c
@@ -480,7 +480,7 @@ static struct i2c_adapter ali1535_adapter = {
480 .algo = &smbus_algorithm, 480 .algo = &smbus_algorithm,
481}; 481};
482 482
483static struct pci_device_id ali1535_ids[] = { 483static const struct pci_device_id ali1535_ids[] = {
484 { PCI_DEVICE(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M7101) }, 484 { PCI_DEVICE(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M7101) },
485 { }, 485 { },
486}; 486};
diff --git a/drivers/i2c/busses/i2c-ali1563.c b/drivers/i2c/busses/i2c-ali1563.c
index 4687af40dd50..a409cfcf0629 100644
--- a/drivers/i2c/busses/i2c-ali1563.c
+++ b/drivers/i2c/busses/i2c-ali1563.c
@@ -417,7 +417,7 @@ static void __devexit ali1563_remove(struct pci_dev * dev)
417 ali1563_shutdown(dev); 417 ali1563_shutdown(dev);
418} 418}
419 419
420static struct pci_device_id __devinitdata ali1563_id_table[] = { 420static const struct pci_device_id ali1563_id_table[] __devinitconst = {
421 { PCI_DEVICE(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1563) }, 421 { PCI_DEVICE(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1563) },
422 {}, 422 {},
423}; 423};
diff --git a/drivers/i2c/busses/i2c-ali15x3.c b/drivers/i2c/busses/i2c-ali15x3.c
index e7e3205f1286..659f63f5e4af 100644
--- a/drivers/i2c/busses/i2c-ali15x3.c
+++ b/drivers/i2c/busses/i2c-ali15x3.c
@@ -477,7 +477,7 @@ static struct i2c_adapter ali15x3_adapter = {
477 .algo = &smbus_algorithm, 477 .algo = &smbus_algorithm,
478}; 478};
479 479
480static struct pci_device_id ali15x3_ids[] = { 480static const struct pci_device_id ali15x3_ids[] = {
481 { PCI_DEVICE(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M7101) }, 481 { PCI_DEVICE(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M7101) },
482 { 0, } 482 { 0, }
483}; 483};
diff --git a/drivers/i2c/busses/i2c-amd756.c b/drivers/i2c/busses/i2c-amd756.c
index 8f0b90ef8c76..c5a9fa488e7f 100644
--- a/drivers/i2c/busses/i2c-amd756.c
+++ b/drivers/i2c/busses/i2c-amd756.c
@@ -308,7 +308,7 @@ static const char* chipname[] = {
308 "nVidia nForce", "AMD8111", 308 "nVidia nForce", "AMD8111",
309}; 309};
310 310
311static struct pci_device_id amd756_ids[] = { 311static const struct pci_device_id amd756_ids[] = {
312 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_VIPER_740B), 312 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_VIPER_740B),
313 .driver_data = AMD756 }, 313 .driver_data = AMD756 },
314 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_VIPER_7413), 314 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_VIPER_7413),
diff --git a/drivers/i2c/busses/i2c-amd8111.c b/drivers/i2c/busses/i2c-amd8111.c
index 5b4ad86ca166..d0dc970d7370 100644
--- a/drivers/i2c/busses/i2c-amd8111.c
+++ b/drivers/i2c/busses/i2c-amd8111.c
@@ -351,7 +351,7 @@ static const struct i2c_algorithm smbus_algorithm = {
351}; 351};
352 352
353 353
354static struct pci_device_id amd8111_ids[] = { 354static const struct pci_device_id amd8111_ids[] = {
355 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8111_SMBUS2) }, 355 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8111_SMBUS2) },
356 { 0, } 356 { 0, }
357}; 357};
diff --git a/drivers/i2c/busses/i2c-hydra.c b/drivers/i2c/busses/i2c-hydra.c
index bec9b845dd16..c767295ad1fb 100644
--- a/drivers/i2c/busses/i2c-hydra.c
+++ b/drivers/i2c/busses/i2c-hydra.c
@@ -105,7 +105,7 @@ static struct i2c_adapter hydra_adap = {
105 .algo_data = &hydra_bit_data, 105 .algo_data = &hydra_bit_data,
106}; 106};
107 107
108static struct pci_device_id hydra_ids[] = { 108static const struct pci_device_id hydra_ids[] = {
109 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_HYDRA) }, 109 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_HYDRA) },
110 { 0, } 110 { 0, }
111}; 111};
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index df6ab553f975..9da5b05cdb52 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -41,7 +41,8 @@
41 Tolapai 0x5032 32 hard yes yes yes 41 Tolapai 0x5032 32 hard yes yes yes
42 ICH10 0x3a30 32 hard yes yes yes 42 ICH10 0x3a30 32 hard yes yes yes
43 ICH10 0x3a60 32 hard yes yes yes 43 ICH10 0x3a60 32 hard yes yes yes
44 PCH 0x3b30 32 hard yes yes yes 44 3400/5 Series (PCH) 0x3b30 32 hard yes yes yes
45 Cougar Point (PCH) 0x1c22 32 hard yes yes yes
45 46
46 Features supported by this driver: 47 Features supported by this driver:
47 Software PEC no 48 Software PEC no
@@ -561,7 +562,7 @@ static struct i2c_adapter i801_adapter = {
561 .algo = &smbus_algorithm, 562 .algo = &smbus_algorithm,
562}; 563};
563 564
564static struct pci_device_id i801_ids[] = { 565static const struct pci_device_id i801_ids[] = {
565 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_3) }, 566 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_3) },
566 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_3) }, 567 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_3) },
567 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_2) }, 568 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_2) },
@@ -578,6 +579,7 @@ static struct pci_device_id i801_ids[] = {
578 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH10_4) }, 579 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH10_4) },
579 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH10_5) }, 580 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH10_5) },
580 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PCH_SMBUS) }, 581 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PCH_SMBUS) },
582 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CPT_SMBUS) },
581 { 0, } 583 { 0, }
582}; 584};
583 585
@@ -707,6 +709,7 @@ static int __devinit i801_probe(struct pci_dev *dev, const struct pci_device_id
707 case PCI_DEVICE_ID_INTEL_ICH10_4: 709 case PCI_DEVICE_ID_INTEL_ICH10_4:
708 case PCI_DEVICE_ID_INTEL_ICH10_5: 710 case PCI_DEVICE_ID_INTEL_ICH10_5:
709 case PCI_DEVICE_ID_INTEL_PCH_SMBUS: 711 case PCI_DEVICE_ID_INTEL_PCH_SMBUS:
712 case PCI_DEVICE_ID_INTEL_CPT_SMBUS:
710 i801_features |= FEATURE_I2C_BLOCK_READ; 713 i801_features |= FEATURE_I2C_BLOCK_READ;
711 /* fall through */ 714 /* fall through */
712 case PCI_DEVICE_ID_INTEL_82801DB_3: 715 case PCI_DEVICE_ID_INTEL_82801DB_3:
diff --git a/drivers/i2c/busses/i2c-isch.c b/drivers/i2c/busses/i2c-isch.c
index dba6eb053e2f..69c22f79f231 100644
--- a/drivers/i2c/busses/i2c-isch.c
+++ b/drivers/i2c/busses/i2c-isch.c
@@ -256,7 +256,7 @@ static struct i2c_adapter sch_adapter = {
256 .algo = &smbus_algorithm, 256 .algo = &smbus_algorithm,
257}; 257};
258 258
259static struct pci_device_id sch_ids[] = { 259static const struct pci_device_id sch_ids[] = {
260 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SCH_LPC) }, 260 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SCH_LPC) },
261 { 0, } 261 { 0, }
262}; 262};
diff --git a/drivers/i2c/busses/i2c-nforce2.c b/drivers/i2c/busses/i2c-nforce2.c
index ec11d1c4e77b..4a700587ef18 100644
--- a/drivers/i2c/busses/i2c-nforce2.c
+++ b/drivers/i2c/busses/i2c-nforce2.c
@@ -308,7 +308,7 @@ static struct i2c_algorithm smbus_algorithm = {
308}; 308};
309 309
310 310
311static struct pci_device_id nforce2_ids[] = { 311static const struct pci_device_id nforce2_ids[] = {
312 { PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2_SMBUS) }, 312 { PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2_SMBUS) },
313 { PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_SMBUS) }, 313 { PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_SMBUS) },
314 { PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3_SMBUS) }, 314 { PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3_SMBUS) },
diff --git a/drivers/i2c/busses/i2c-parport-light.c b/drivers/i2c/busses/i2c-parport-light.c
index 322c5691e38e..5f41ec0f72d2 100644
--- a/drivers/i2c/busses/i2c-parport-light.c
+++ b/drivers/i2c/busses/i2c-parport-light.c
@@ -1,7 +1,7 @@
1/* ------------------------------------------------------------------------ * 1/* ------------------------------------------------------------------------ *
2 * i2c-parport-light.c I2C bus over parallel port * 2 * i2c-parport-light.c I2C bus over parallel port *
3 * ------------------------------------------------------------------------ * 3 * ------------------------------------------------------------------------ *
4 Copyright (C) 2003-2007 Jean Delvare <khali@linux-fr.org> 4 Copyright (C) 2003-2010 Jean Delvare <khali@linux-fr.org>
5 5
6 Based on older i2c-velleman.c driver 6 Based on older i2c-velleman.c driver
7 Copyright (C) 1995-2000 Simon G. Vogl 7 Copyright (C) 1995-2000 Simon G. Vogl
@@ -27,10 +27,12 @@
27#include <linux/kernel.h> 27#include <linux/kernel.h>
28#include <linux/module.h> 28#include <linux/module.h>
29#include <linux/init.h> 29#include <linux/init.h>
30#include <linux/delay.h>
30#include <linux/platform_device.h> 31#include <linux/platform_device.h>
31#include <linux/ioport.h> 32#include <linux/ioport.h>
32#include <linux/i2c.h> 33#include <linux/i2c.h>
33#include <linux/i2c-algo-bit.h> 34#include <linux/i2c-algo-bit.h>
35#include <linux/i2c-smbus.h>
34#include <asm/io.h> 36#include <asm/io.h>
35#include "i2c-parport.h" 37#include "i2c-parport.h"
36 38
@@ -43,6 +45,10 @@ static u16 base;
43module_param(base, ushort, 0); 45module_param(base, ushort, 0);
44MODULE_PARM_DESC(base, "Base I/O address"); 46MODULE_PARM_DESC(base, "Base I/O address");
45 47
48static int irq;
49module_param(irq, int, 0);
50MODULE_PARM_DESC(irq, "IRQ (optional)");
51
46/* ----- Low-level parallel port access ----------------------------------- */ 52/* ----- Low-level parallel port access ----------------------------------- */
47 53
48static inline void port_write(unsigned char p, unsigned char d) 54static inline void port_write(unsigned char p, unsigned char d)
@@ -119,6 +125,16 @@ static struct i2c_adapter parport_adapter = {
119 .name = "Parallel port adapter (light)", 125 .name = "Parallel port adapter (light)",
120}; 126};
121 127
128/* SMBus alert support */
129static struct i2c_smbus_alert_setup alert_data = {
130 .alert_edge_triggered = 1,
131};
132static struct i2c_client *ara;
133static struct lineop parport_ctrl_irq = {
134 .val = (1 << 4),
135 .port = CTRL,
136};
137
122static int __devinit i2c_parport_probe(struct platform_device *pdev) 138static int __devinit i2c_parport_probe(struct platform_device *pdev)
123{ 139{
124 int err; 140 int err;
@@ -127,18 +143,39 @@ static int __devinit i2c_parport_probe(struct platform_device *pdev)
127 parport_setsda(NULL, 1); 143 parport_setsda(NULL, 1);
128 parport_setscl(NULL, 1); 144 parport_setscl(NULL, 1);
129 /* Other init if needed (power on...) */ 145 /* Other init if needed (power on...) */
130 if (adapter_parm[type].init.val) 146 if (adapter_parm[type].init.val) {
131 line_set(1, &adapter_parm[type].init); 147 line_set(1, &adapter_parm[type].init);
148 /* Give powered devices some time to settle */
149 msleep(100);
150 }
132 151
133 parport_adapter.dev.parent = &pdev->dev; 152 parport_adapter.dev.parent = &pdev->dev;
134 err = i2c_bit_add_bus(&parport_adapter); 153 err = i2c_bit_add_bus(&parport_adapter);
135 if (err) 154 if (err) {
136 dev_err(&pdev->dev, "Unable to register with I2C\n"); 155 dev_err(&pdev->dev, "Unable to register with I2C\n");
137 return err; 156 return err;
157 }
158
 159 /* Set up SMBus alert if supported */
160 if (adapter_parm[type].smbus_alert && irq) {
161 alert_data.irq = irq;
162 ara = i2c_setup_smbus_alert(&parport_adapter, &alert_data);
163 if (ara)
164 line_set(1, &parport_ctrl_irq);
165 else
166 dev_warn(&pdev->dev, "Failed to register ARA client\n");
167 }
168
169 return 0;
138} 170}
139 171
140static int __devexit i2c_parport_remove(struct platform_device *pdev) 172static int __devexit i2c_parport_remove(struct platform_device *pdev)
141{ 173{
174 if (ara) {
175 line_set(0, &parport_ctrl_irq);
176 i2c_unregister_device(ara);
177 ara = NULL;
178 }
142 i2c_del_adapter(&parport_adapter); 179 i2c_del_adapter(&parport_adapter);
143 180
144 /* Un-init if needed (power off...) */ 181 /* Un-init if needed (power off...) */
@@ -205,6 +242,9 @@ static int __init i2c_parport_init(void)
205 if (!request_region(base, 3, DRVNAME)) 242 if (!request_region(base, 3, DRVNAME))
206 return -EBUSY; 243 return -EBUSY;
207 244
245 if (irq != 0)
246 pr_info(DRVNAME ": using irq %d\n", irq);
247
208 if (!adapter_parm[type].getscl.val) 248 if (!adapter_parm[type].getscl.val)
209 parport_algo_data.getscl = NULL; 249 parport_algo_data.getscl = NULL;
210 250
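
As a hedged aside on the hunks above: i2c-parport-light passes a raw IRQ number to the new i2c-smbus helper and lets that module request the interrupt itself. A minimal sketch of the same mode follows; my_enable_smbus_alert, my_adapter and my_irq are hypothetical names, only the i2c-smbus API is taken from the patch.

#include <linux/i2c.h>
#include <linux/i2c-smbus.h>

static struct i2c_client *my_ara;

static int my_enable_smbus_alert(struct i2c_adapter *my_adapter, int my_irq)
{
	/* Must outlive this call: it is handed on as platform_data */
	static struct i2c_smbus_alert_setup setup;

	setup.alert_edge_triggered = 1;	/* edge: no IRQ masking needed */
	setup.irq = my_irq;		/* i2c-smbus requests this IRQ itself */

	my_ara = i2c_setup_smbus_alert(my_adapter, &setup);
	return my_ara ? 0 : -ENODEV;
}
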
diff --git a/drivers/i2c/busses/i2c-parport.c b/drivers/i2c/busses/i2c-parport.c
index 0d8998610c74..220fca7f23a6 100644
--- a/drivers/i2c/busses/i2c-parport.c
+++ b/drivers/i2c/busses/i2c-parport.c
@@ -1,7 +1,7 @@
1/* ------------------------------------------------------------------------ * 1/* ------------------------------------------------------------------------ *
2 * i2c-parport.c I2C bus over parallel port * 2 * i2c-parport.c I2C bus over parallel port *
3 * ------------------------------------------------------------------------ * 3 * ------------------------------------------------------------------------ *
4 Copyright (C) 2003-2007 Jean Delvare <khali@linux-fr.org> 4 Copyright (C) 2003-2010 Jean Delvare <khali@linux-fr.org>
5 5
6 Based on older i2c-philips-par.c driver 6 Based on older i2c-philips-par.c driver
7 Copyright (C) 1995-2000 Simon G. Vogl 7 Copyright (C) 1995-2000 Simon G. Vogl
@@ -27,9 +27,11 @@
27#include <linux/kernel.h> 27#include <linux/kernel.h>
28#include <linux/module.h> 28#include <linux/module.h>
29#include <linux/init.h> 29#include <linux/init.h>
30#include <linux/delay.h>
30#include <linux/parport.h> 31#include <linux/parport.h>
31#include <linux/i2c.h> 32#include <linux/i2c.h>
32#include <linux/i2c-algo-bit.h> 33#include <linux/i2c-algo-bit.h>
34#include <linux/i2c-smbus.h>
33#include "i2c-parport.h" 35#include "i2c-parport.h"
34 36
35/* ----- Device list ------------------------------------------------------ */ 37/* ----- Device list ------------------------------------------------------ */
@@ -38,6 +40,8 @@ struct i2c_par {
38 struct pardevice *pdev; 40 struct pardevice *pdev;
39 struct i2c_adapter adapter; 41 struct i2c_adapter adapter;
40 struct i2c_algo_bit_data algo_data; 42 struct i2c_algo_bit_data algo_data;
43 struct i2c_smbus_alert_setup alert_data;
44 struct i2c_client *ara;
41 struct i2c_par *next; 45 struct i2c_par *next;
42}; 46};
43 47
@@ -143,6 +147,19 @@ static struct i2c_algo_bit_data parport_algo_data = {
143 147
144/* ----- I2c and parallel port call-back functions and structures --------- */ 148/* ----- I2c and parallel port call-back functions and structures --------- */
145 149
150void i2c_parport_irq(void *data)
151{
152 struct i2c_par *adapter = data;
153 struct i2c_client *ara = adapter->ara;
154
155 if (ara) {
156 dev_dbg(&ara->dev, "SMBus alert received\n");
157 i2c_handle_smbus_alert(ara);
158 } else
159 dev_dbg(&adapter->adapter.dev,
160 "SMBus alert received but no ARA client!\n");
161}
162
146static void i2c_parport_attach (struct parport *port) 163static void i2c_parport_attach (struct parport *port)
147{ 164{
148 struct i2c_par *adapter; 165 struct i2c_par *adapter;
@@ -154,8 +171,9 @@ static void i2c_parport_attach (struct parport *port)
154 } 171 }
155 172
156 pr_debug("i2c-parport: attaching to %s\n", port->name); 173 pr_debug("i2c-parport: attaching to %s\n", port->name);
174 parport_disable_irq(port);
157 adapter->pdev = parport_register_device(port, "i2c-parport", 175 adapter->pdev = parport_register_device(port, "i2c-parport",
158 NULL, NULL, NULL, PARPORT_FLAG_EXCL, NULL); 176 NULL, NULL, i2c_parport_irq, PARPORT_FLAG_EXCL, adapter);
159 if (!adapter->pdev) { 177 if (!adapter->pdev) {
160 printk(KERN_ERR "i2c-parport: Unable to register with parport\n"); 178 printk(KERN_ERR "i2c-parport: Unable to register with parport\n");
161 goto ERROR0; 179 goto ERROR0;
@@ -185,14 +203,29 @@ static void i2c_parport_attach (struct parport *port)
185 parport_setsda(port, 1); 203 parport_setsda(port, 1);
186 parport_setscl(port, 1); 204 parport_setscl(port, 1);
187 /* Other init if needed (power on...) */ 205 /* Other init if needed (power on...) */
188 if (adapter_parm[type].init.val) 206 if (adapter_parm[type].init.val) {
189 line_set(port, 1, &adapter_parm[type].init); 207 line_set(port, 1, &adapter_parm[type].init);
208 /* Give powered devices some time to settle */
209 msleep(100);
210 }
190 211
191 if (i2c_bit_add_bus(&adapter->adapter) < 0) { 212 if (i2c_bit_add_bus(&adapter->adapter) < 0) {
192 printk(KERN_ERR "i2c-parport: Unable to register with I2C\n"); 213 printk(KERN_ERR "i2c-parport: Unable to register with I2C\n");
193 goto ERROR1; 214 goto ERROR1;
194 } 215 }
195 216
 217 /* Set up SMBus alert if supported */
218 if (adapter_parm[type].smbus_alert) {
219 adapter->alert_data.alert_edge_triggered = 1;
220 adapter->ara = i2c_setup_smbus_alert(&adapter->adapter,
221 &adapter->alert_data);
222 if (adapter->ara)
223 parport_enable_irq(port);
224 else
225 printk(KERN_WARNING "i2c-parport: Failed to register "
226 "ARA client\n");
227 }
228
196 /* Add the new adapter to the list */ 229 /* Add the new adapter to the list */
197 adapter->next = adapter_list; 230 adapter->next = adapter_list;
198 adapter_list = adapter; 231 adapter_list = adapter;
@@ -213,6 +246,10 @@ static void i2c_parport_detach (struct parport *port)
213 for (prev = NULL, adapter = adapter_list; adapter; 246 for (prev = NULL, adapter = adapter_list; adapter;
214 prev = adapter, adapter = adapter->next) { 247 prev = adapter, adapter = adapter->next) {
215 if (adapter->pdev->port == port) { 248 if (adapter->pdev->port == port) {
249 if (adapter->ara) {
250 parport_disable_irq(port);
251 i2c_unregister_device(adapter->ara);
252 }
216 i2c_del_adapter(&adapter->adapter); 253 i2c_del_adapter(&adapter->adapter);
217 254
218 /* Un-init if needed (power off...) */ 255 /* Un-init if needed (power off...) */
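
The i2c-parport variant above shows the second supported mode: the bus driver keeps ownership of the interrupt and merely forwards it. i2c_handle_smbus_alert() only schedules the alert work, so calling it from non-sleeping (IRQ) context is safe. A stripped-down sketch, with my_priv and my_irq_handler as hypothetical names:

#include <linux/i2c.h>
#include <linux/i2c-smbus.h>

struct my_priv {
	struct i2c_adapter adapter;
	struct i2c_client *ara;	/* from i2c_setup_smbus_alert(), .irq = 0 */
};

static void my_irq_handler(void *data)
{
	struct my_priv *priv = data;

	if (priv->ara)		/* only if ARA registration succeeded */
		i2c_handle_smbus_alert(priv->ara);
}
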
diff --git a/drivers/i2c/busses/i2c-parport.h b/drivers/i2c/busses/i2c-parport.h
index ed69d846cb95..a9f66816546c 100644
--- a/drivers/i2c/busses/i2c-parport.h
+++ b/drivers/i2c/busses/i2c-parport.h
@@ -1,7 +1,7 @@
1/* ------------------------------------------------------------------------ * 1/* ------------------------------------------------------------------------ *
2 * i2c-parport.h I2C bus over parallel port * 2 * i2c-parport.h I2C bus over parallel port *
3 * ------------------------------------------------------------------------ * 3 * ------------------------------------------------------------------------ *
4 Copyright (C) 2003-2004 Jean Delvare <khali@linux-fr.org> 4 Copyright (C) 2003-2010 Jean Delvare <khali@linux-fr.org>
5 5
6 This program is free software; you can redistribute it and/or modify 6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by 7 it under the terms of the GNU General Public License as published by
@@ -38,6 +38,7 @@ struct adapter_parm {
38 struct lineop getsda; 38 struct lineop getsda;
39 struct lineop getscl; 39 struct lineop getscl;
40 struct lineop init; 40 struct lineop init;
41 unsigned int smbus_alert:1;
41}; 42};
42 43
43static struct adapter_parm adapter_parm[] = { 44static struct adapter_parm adapter_parm[] = {
@@ -73,6 +74,7 @@ static struct adapter_parm adapter_parm[] = {
73 .setscl = { 0x01, DATA, 1 }, 74 .setscl = { 0x01, DATA, 1 },
74 .getsda = { 0x10, STAT, 1 }, 75 .getsda = { 0x10, STAT, 1 },
75 .init = { 0xf0, DATA, 0 }, 76 .init = { 0xf0, DATA, 0 },
77 .smbus_alert = 1,
76 }, 78 },
77 /* type 5: ADM1025, ADM1030 and ADM1031 evaluation boards */ 79 /* type 5: ADM1025, ADM1030 and ADM1031 evaluation boards */
78 { 80 {
diff --git a/drivers/i2c/busses/i2c-pasemi.c b/drivers/i2c/busses/i2c-pasemi.c
index adf0fbb902f0..0d20ff46a518 100644
--- a/drivers/i2c/busses/i2c-pasemi.c
+++ b/drivers/i2c/busses/i2c-pasemi.c
@@ -400,7 +400,7 @@ static void __devexit pasemi_smb_remove(struct pci_dev *dev)
400 kfree(smbus); 400 kfree(smbus);
401} 401}
402 402
403static struct pci_device_id pasemi_smb_ids[] = { 403static const struct pci_device_id pasemi_smb_ids[] = {
404 { PCI_DEVICE(0x1959, 0xa003) }, 404 { PCI_DEVICE(0x1959, 0xa003) },
405 { 0, } 405 { 0, }
406}; 406};
diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c
index e56e4b6823ca..ee9da6fcf69a 100644
--- a/drivers/i2c/busses/i2c-piix4.c
+++ b/drivers/i2c/busses/i2c-piix4.c
@@ -472,7 +472,7 @@ static struct i2c_adapter piix4_adapter = {
472 .algo = &smbus_algorithm, 472 .algo = &smbus_algorithm,
473}; 473};
474 474
475static struct pci_device_id piix4_ids[] = { 475static const struct pci_device_id piix4_ids[] = {
476 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB_3) }, 476 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB_3) },
477 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443MX_3) }, 477 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443MX_3) },
478 { PCI_DEVICE(PCI_VENDOR_ID_EFAR, PCI_DEVICE_ID_EFAR_SLC90E66_3) }, 478 { PCI_DEVICE(PCI_VENDOR_ID_EFAR, PCI_DEVICE_ID_EFAR_SLC90E66_3) },
diff --git a/drivers/i2c/busses/i2c-sis5595.c b/drivers/i2c/busses/i2c-sis5595.c
index 844569f7d8b7..55a71370c79b 100644
--- a/drivers/i2c/busses/i2c-sis5595.c
+++ b/drivers/i2c/busses/i2c-sis5595.c
@@ -369,7 +369,7 @@ static struct i2c_adapter sis5595_adapter = {
369 .algo = &smbus_algorithm, 369 .algo = &smbus_algorithm,
370}; 370};
371 371
372static struct pci_device_id sis5595_ids[] __devinitdata = { 372static const struct pci_device_id sis5595_ids[] __devinitconst = {
373 { PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_503) }, 373 { PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_503) },
374 { 0, } 374 { 0, }
375}; 375};
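
The sis5595 change above (repeated for sis630 and via below) is the constification pattern for PCI ID tables: the table is never written, so it can be const, and __devinitdata becomes __devinitconst so the data still lands in a discardable init section when hotplug support is compiled out. A generic sketch with a hypothetical table name:

static const struct pci_device_id my_ids[] __devinitconst = {
	{ PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_503) },
	{ 0, }	/* terminating entry */
};
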
diff --git a/drivers/i2c/busses/i2c-sis630.c b/drivers/i2c/busses/i2c-sis630.c
index 68cff7af7013..2309c7f1bde2 100644
--- a/drivers/i2c/busses/i2c-sis630.c
+++ b/drivers/i2c/busses/i2c-sis630.c
@@ -468,7 +468,7 @@ static struct i2c_adapter sis630_adapter = {
468 .algo = &smbus_algorithm, 468 .algo = &smbus_algorithm,
469}; 469};
470 470
471static struct pci_device_id sis630_ids[] __devinitdata = { 471static const struct pci_device_id sis630_ids[] __devinitconst = {
472 { PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_503) }, 472 { PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_503) },
473 { PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_LPC) }, 473 { PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_LPC) },
474 { 0, } 474 { 0, }
diff --git a/drivers/i2c/busses/i2c-sis96x.c b/drivers/i2c/busses/i2c-sis96x.c
index 1649963b00dc..d43d8f8943dd 100644
--- a/drivers/i2c/busses/i2c-sis96x.c
+++ b/drivers/i2c/busses/i2c-sis96x.c
@@ -245,7 +245,7 @@ static struct i2c_adapter sis96x_adapter = {
245 .algo = &smbus_algorithm, 245 .algo = &smbus_algorithm,
246}; 246};
247 247
248static struct pci_device_id sis96x_ids[] = { 248static const struct pci_device_id sis96x_ids[] = {
249 { PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_SMBUS) }, 249 { PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_SMBUS) },
250 { 0, } 250 { 0, }
251}; 251};
diff --git a/drivers/i2c/busses/i2c-tiny-usb.c b/drivers/i2c/busses/i2c-tiny-usb.c
index e29b6d5ba8ef..b5b1bbf37d3c 100644
--- a/drivers/i2c/busses/i2c-tiny-usb.c
+++ b/drivers/i2c/busses/i2c-tiny-usb.c
@@ -31,11 +31,13 @@
31#define CMD_I2C_IO_BEGIN (1<<0) 31#define CMD_I2C_IO_BEGIN (1<<0)
32#define CMD_I2C_IO_END (1<<1) 32#define CMD_I2C_IO_END (1<<1)
33 33
34/* i2c bit delay, default is 10us -> 100kHz */ 34/* i2c bit delay, default is 10us -> 100kHz max
35 (in practice, due to additional delays in the i2c bitbanging
 36 code, this results in an i2c clock of about 50kHz) */
35static unsigned short delay = 10; 37static unsigned short delay = 10;
36module_param(delay, ushort, 0); 38module_param(delay, ushort, 0);
37MODULE_PARM_DESC(delay, "bit delay in microseconds, " 39MODULE_PARM_DESC(delay, "bit delay in microseconds "
38 "e.g. 10 for 100kHz (default is 100kHz)"); 40 "(default is 10us for 100kHz max)");
39 41
40static int usb_read(struct i2c_adapter *adapter, int cmd, 42static int usb_read(struct i2c_adapter *adapter, int cmd,
41 int value, int index, void *data, int len); 43 int value, int index, void *data, int len);
@@ -137,7 +139,7 @@ static const struct i2c_algorithm usb_algorithm = {
137 * Future Technology Devices International Ltd., later a pair was 139 * Future Technology Devices International Ltd., later a pair was
138 * bought from EZPrototypes 140 * bought from EZPrototypes
139 */ 141 */
140static struct usb_device_id i2c_tiny_usb_table [] = { 142static const struct usb_device_id i2c_tiny_usb_table[] = {
141 { USB_DEVICE(0x0403, 0xc631) }, /* FTDI */ 143 { USB_DEVICE(0x0403, 0xc631) }, /* FTDI */
142 { USB_DEVICE(0x1c40, 0x0534) }, /* EZPrototypes */ 144 { USB_DEVICE(0x1c40, 0x0534) }, /* EZPrototypes */
143 { } /* Terminating entry */ 145 { } /* Terminating entry */
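
The reworded delay comment above encodes simple arithmetic: one bit takes "delay" microseconds, so the theoretical SCL ceiling is 1e6 / delay Hz (10 us -> 100 kHz), and the extra delays in the bitbanging code roughly halve that in practice. A sketch of the ceiling, with scl_khz_max as a hypothetical helper:

/* Theoretical upper bound on the SCL clock, in kHz (assumes delay_us > 0) */
static unsigned int scl_khz_max(unsigned short delay_us)
{
	return 1000 / delay_us;	/* e.g. 10 us -> 100 kHz */
}
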
diff --git a/drivers/i2c/busses/i2c-via.c b/drivers/i2c/busses/i2c-via.c
index 8b24f192103a..de78283bddbe 100644
--- a/drivers/i2c/busses/i2c-via.c
+++ b/drivers/i2c/busses/i2c-via.c
@@ -89,7 +89,7 @@ static struct i2c_adapter vt586b_adapter = {
89}; 89};
90 90
91 91
92static struct pci_device_id vt586b_ids[] __devinitdata = { 92static const struct pci_device_id vt586b_ids[] __devinitconst = {
93 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_3) }, 93 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_3) },
94 { 0, } 94 { 0, }
95}; 95};
diff --git a/drivers/i2c/busses/i2c-viapro.c b/drivers/i2c/busses/i2c-viapro.c
index a84a909e1234..d57292e5dae0 100644
--- a/drivers/i2c/busses/i2c-viapro.c
+++ b/drivers/i2c/busses/i2c-viapro.c
@@ -444,7 +444,7 @@ release_region:
444 return error; 444 return error;
445} 445}
446 446
447static struct pci_device_id vt596_ids[] = { 447static const struct pci_device_id vt596_ids[] = {
448 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C596_3), 448 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C596_3),
449 .driver_data = SMBBA1 }, 449 .driver_data = SMBBA1 },
450 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C596B_3), 450 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C596B_3),
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index 10be7b5fbe97..3202a86f420e 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -34,6 +34,7 @@
34#include <linux/hardirq.h> 34#include <linux/hardirq.h>
35#include <linux/irqflags.h> 35#include <linux/irqflags.h>
36#include <linux/rwsem.h> 36#include <linux/rwsem.h>
37#include <linux/pm_runtime.h>
37#include <asm/uaccess.h> 38#include <asm/uaccess.h>
38 39
39#include "i2c-core.h" 40#include "i2c-core.h"
@@ -184,6 +185,52 @@ static int i2c_device_pm_resume(struct device *dev)
184#define i2c_device_pm_resume NULL 185#define i2c_device_pm_resume NULL
185#endif 186#endif
186 187
188#ifdef CONFIG_PM_RUNTIME
189static int i2c_device_runtime_suspend(struct device *dev)
190{
191 const struct dev_pm_ops *pm;
192
193 if (!dev->driver)
194 return 0;
195 pm = dev->driver->pm;
196 if (!pm || !pm->runtime_suspend)
197 return 0;
198 return pm->runtime_suspend(dev);
199}
200
201static int i2c_device_runtime_resume(struct device *dev)
202{
203 const struct dev_pm_ops *pm;
204
205 if (!dev->driver)
206 return 0;
207 pm = dev->driver->pm;
208 if (!pm || !pm->runtime_resume)
209 return 0;
210 return pm->runtime_resume(dev);
211}
212
213static int i2c_device_runtime_idle(struct device *dev)
214{
215 const struct dev_pm_ops *pm = NULL;
216 int ret;
217
218 if (dev->driver)
219 pm = dev->driver->pm;
220 if (pm && pm->runtime_idle) {
221 ret = pm->runtime_idle(dev);
222 if (ret)
223 return ret;
224 }
225
226 return pm_runtime_suspend(dev);
227}
228#else
229#define i2c_device_runtime_suspend NULL
230#define i2c_device_runtime_resume NULL
231#define i2c_device_runtime_idle NULL
232#endif
233
187static int i2c_device_suspend(struct device *dev, pm_message_t mesg) 234static int i2c_device_suspend(struct device *dev, pm_message_t mesg)
188{ 235{
189 struct i2c_client *client = i2c_verify_client(dev); 236 struct i2c_client *client = i2c_verify_client(dev);
@@ -251,6 +298,9 @@ static const struct attribute_group *i2c_dev_attr_groups[] = {
251static const struct dev_pm_ops i2c_device_pm_ops = { 298static const struct dev_pm_ops i2c_device_pm_ops = {
252 .suspend = i2c_device_pm_suspend, 299 .suspend = i2c_device_pm_suspend,
253 .resume = i2c_device_pm_resume, 300 .resume = i2c_device_pm_resume,
301 .runtime_suspend = i2c_device_runtime_suspend,
302 .runtime_resume = i2c_device_runtime_resume,
303 .runtime_idle = i2c_device_runtime_idle,
254}; 304};
255 305
256struct bus_type i2c_bus_type = { 306struct bus_type i2c_bus_type = {
@@ -1133,7 +1183,7 @@ EXPORT_SYMBOL(i2c_transfer);
1133 * i2c_master_send - issue a single I2C message in master transmit mode 1183 * i2c_master_send - issue a single I2C message in master transmit mode
1134 * @client: Handle to slave device 1184 * @client: Handle to slave device
1135 * @buf: Data that will be written to the slave 1185 * @buf: Data that will be written to the slave
1136 * @count: How many bytes to write 1186 * @count: How many bytes to write, must be less than 64k since msg.len is u16
1137 * 1187 *
1138 * Returns negative errno, or else the number of bytes written. 1188 * Returns negative errno, or else the number of bytes written.
1139 */ 1189 */
@@ -1160,7 +1210,7 @@ EXPORT_SYMBOL(i2c_master_send);
1160 * i2c_master_recv - issue a single I2C message in master receive mode 1210 * i2c_master_recv - issue a single I2C message in master receive mode
1161 * @client: Handle to slave device 1211 * @client: Handle to slave device
1162 * @buf: Where to store data read from slave 1212 * @buf: Where to store data read from slave
1163 * @count: How many bytes to read 1213 * @count: How many bytes to read, must be less than 64k since msg.len is u16
1164 * 1214 *
1165 * Returns negative errno, or else the number of bytes read. 1215 * Returns negative errno, or else the number of bytes read.
1166 */ 1216 */
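
The i2c-core hunks above only dispatch: for runtime suspend/resume they forward to whatever dev_pm_ops the client driver published, and the idle callback falls back to pm_runtime_suspend(). A sketch of the client side these callbacks reach; every "foo" name is hypothetical:

#include <linux/i2c.h>
#include <linux/pm_runtime.h>

static int foo_runtime_suspend(struct device *dev)
{
	/* put the chip into its low-power state */
	return 0;
}

static int foo_runtime_resume(struct device *dev)
{
	/* restore full power */
	return 0;
}

static const struct dev_pm_ops foo_pm_ops = {
	.runtime_suspend = foo_runtime_suspend,
	.runtime_resume	 = foo_runtime_resume,
};

static struct i2c_driver foo_driver = {
	.driver = {
		.name	= "foo",
		.pm	= &foo_pm_ops,	/* found via dev->driver->pm above */
	},
	/* .probe, .remove, .id_table, ... */
};
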
diff --git a/drivers/i2c/i2c-smbus.c b/drivers/i2c/i2c-smbus.c
new file mode 100644
index 000000000000..421278221243
--- /dev/null
+++ b/drivers/i2c/i2c-smbus.c
@@ -0,0 +1,263 @@
1/*
2 * i2c-smbus.c - SMBus extensions to the I2C protocol
3 *
4 * Copyright (C) 2008 David Brownell
5 * Copyright (C) 2010 Jean Delvare <khali@linux-fr.org>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20 */
21
22#include <linux/kernel.h>
23#include <linux/module.h>
24#include <linux/device.h>
25#include <linux/semaphore.h>
26#include <linux/interrupt.h>
27#include <linux/workqueue.h>
28#include <linux/i2c.h>
29#include <linux/i2c-smbus.h>
30
31struct i2c_smbus_alert {
32 unsigned int alert_edge_triggered:1;
33 int irq;
34 struct work_struct alert;
35 struct i2c_client *ara; /* Alert response address */
36};
37
38struct alert_data {
39 unsigned short addr;
40 u8 flag:1;
41};
42
43/* If this is the alerting device, notify its driver */
44static int smbus_do_alert(struct device *dev, void *addrp)
45{
46 struct i2c_client *client = i2c_verify_client(dev);
47 struct alert_data *data = addrp;
48
49 if (!client || client->addr != data->addr)
50 return 0;
51 if (client->flags & I2C_CLIENT_TEN)
52 return 0;
53
54 /*
55 * Drivers should either disable alerts, or provide at least
56 * a minimal handler. Lock so client->driver won't change.
57 */
58 down(&dev->sem);
59 if (client->driver) {
60 if (client->driver->alert)
61 client->driver->alert(client, data->flag);
62 else
63 dev_warn(&client->dev, "no driver alert()!\n");
64 } else
65 dev_dbg(&client->dev, "alert with no driver\n");
66 up(&dev->sem);
67
68 /* Stop iterating after we find the device */
69 return -EBUSY;
70}
71
72/*
73 * The alert IRQ handler needs to hand work off to a task which can issue
74 * SMBus calls, because those sleeping calls can't be made in IRQ context.
75 */
76static void smbus_alert(struct work_struct *work)
77{
78 struct i2c_smbus_alert *alert;
79 struct i2c_client *ara;
80 unsigned short prev_addr = 0; /* Not a valid address */
81
82 alert = container_of(work, struct i2c_smbus_alert, alert);
83 ara = alert->ara;
84
85 for (;;) {
86 s32 status;
87 struct alert_data data;
88
89 /*
90 * Devices with pending alerts reply in address order, low
91 * to high, because of slave transmit arbitration. After
92 * responding, an SMBus device stops asserting SMBALERT#.
93 *
 94 * Note that SMBus 2.0 reserves 10-bit addresses for future
95 * use. We neither handle them, nor try to use PEC here.
96 */
97 status = i2c_smbus_read_byte(ara);
98 if (status < 0)
99 break;
100
101 data.flag = status & 1;
102 data.addr = status >> 1;
103
104 if (data.addr == prev_addr) {
105 dev_warn(&ara->dev, "Duplicate SMBALERT# from dev "
106 "0x%02x, skipping\n", data.addr);
107 break;
108 }
109 dev_dbg(&ara->dev, "SMBALERT# from dev 0x%02x, flag %d\n",
110 data.addr, data.flag);
111
112 /* Notify driver for the device which issued the alert */
113 device_for_each_child(&ara->adapter->dev, &data,
114 smbus_do_alert);
115 prev_addr = data.addr;
116 }
117
118 /* We handled all alerts; re-enable level-triggered IRQs */
119 if (!alert->alert_edge_triggered)
120 enable_irq(alert->irq);
121}
122
123static irqreturn_t smbalert_irq(int irq, void *d)
124{
125 struct i2c_smbus_alert *alert = d;
126
127 /* Disable level-triggered IRQs until we handle them */
128 if (!alert->alert_edge_triggered)
129 disable_irq_nosync(irq);
130
131 schedule_work(&alert->alert);
132 return IRQ_HANDLED;
133}
134
135/* Setup SMBALERT# infrastructure */
136static int smbalert_probe(struct i2c_client *ara,
137 const struct i2c_device_id *id)
138{
139 struct i2c_smbus_alert_setup *setup = ara->dev.platform_data;
140 struct i2c_smbus_alert *alert;
141 struct i2c_adapter *adapter = ara->adapter;
142 int res;
143
144 alert = kzalloc(sizeof(struct i2c_smbus_alert), GFP_KERNEL);
145 if (!alert)
146 return -ENOMEM;
147
148 alert->alert_edge_triggered = setup->alert_edge_triggered;
149 alert->irq = setup->irq;
150 INIT_WORK(&alert->alert, smbus_alert);
151 alert->ara = ara;
152
153 if (setup->irq > 0) {
154 res = devm_request_irq(&ara->dev, setup->irq, smbalert_irq,
155 0, "smbus_alert", alert);
156 if (res) {
157 kfree(alert);
158 return res;
159 }
160 }
161
162 i2c_set_clientdata(ara, alert);
163 dev_info(&adapter->dev, "supports SMBALERT#, %s trigger\n",
164 setup->alert_edge_triggered ? "edge" : "level");
165
166 return 0;
167}
168
169/* IRQ resource is managed so it is freed automatically */
170static int smbalert_remove(struct i2c_client *ara)
171{
172 struct i2c_smbus_alert *alert = i2c_get_clientdata(ara);
173
174 cancel_work_sync(&alert->alert);
175
176 i2c_set_clientdata(ara, NULL);
177 kfree(alert);
178 return 0;
179}
180
181static const struct i2c_device_id smbalert_ids[] = {
182 { "smbus_alert", 0 },
183 { /* LIST END */ }
184};
185MODULE_DEVICE_TABLE(i2c, smbalert_ids);
186
187static struct i2c_driver smbalert_driver = {
188 .driver = {
189 .name = "smbus_alert",
190 },
191 .probe = smbalert_probe,
192 .remove = smbalert_remove,
193 .id_table = smbalert_ids,
194};
195
196/**
197 * i2c_setup_smbus_alert - Setup SMBus alert support
198 * @adapter: the target adapter
199 * @setup: setup data for the SMBus alert handler
200 * Context: can sleep
201 *
 202 * Set up handling of the SMBus alert protocol on a given I2C bus segment.
203 *
204 * Handling can be done either through our IRQ handler, or by the
205 * adapter (from its handler, periodic polling, or whatever).
206 *
207 * NOTE that if we manage the IRQ, we *MUST* know if it's level or
208 * edge triggered in order to hand it to the workqueue correctly.
209 * If triggering the alert seems to wedge the system, you probably
210 * should have said it's level triggered.
211 *
212 * This returns the ara client, which should be saved for later use with
213 * i2c_handle_smbus_alert() and ultimately i2c_unregister_device(); or NULL
214 * to indicate an error.
215 */
216struct i2c_client *i2c_setup_smbus_alert(struct i2c_adapter *adapter,
217 struct i2c_smbus_alert_setup *setup)
218{
219 struct i2c_board_info ara_board_info = {
220 I2C_BOARD_INFO("smbus_alert", 0x0c),
221 .platform_data = setup,
222 };
223
224 return i2c_new_device(adapter, &ara_board_info);
225}
226EXPORT_SYMBOL_GPL(i2c_setup_smbus_alert);
227
228/**
229 * i2c_handle_smbus_alert - Handle an SMBus alert
230 * @ara: the ARA client on the relevant adapter
231 * Context: can't sleep
232 *
233 * Helper function to be called from an I2C bus driver's interrupt
234 * handler. It will schedule the alert work, in turn calling the
235 * corresponding I2C device driver's alert function.
236 *
237 * It is assumed that ara is a valid i2c client previously returned by
238 * i2c_setup_smbus_alert().
239 */
240int i2c_handle_smbus_alert(struct i2c_client *ara)
241{
242 struct i2c_smbus_alert *alert = i2c_get_clientdata(ara);
243
244 return schedule_work(&alert->alert);
245}
246EXPORT_SYMBOL_GPL(i2c_handle_smbus_alert);
247
248static int __init i2c_smbus_init(void)
249{
250 return i2c_add_driver(&smbalert_driver);
251}
252
253static void __exit i2c_smbus_exit(void)
254{
255 i2c_del_driver(&smbalert_driver);
256}
257
258module_init(i2c_smbus_init);
259module_exit(i2c_smbus_exit);
260
261MODULE_AUTHOR("Jean Delvare <khali@linux-fr.org>");
262MODULE_DESCRIPTION("SMBus protocol extensions support");
263MODULE_LICENSE("GPL");
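
On the consumer side of this new module, smbus_do_alert() hands the alert to the client driver's alert() method together with the flag bit read from the Alert Response Address. A hedged sketch of such a handler; the "bar" names are hypothetical:

static void bar_alert(struct i2c_client *client, unsigned int flag)
{
	dev_info(&client->dev, "SMBALERT#, flag %u\n", flag);
	/* read the chip's status registers and clear the alert source,
	   otherwise a level-triggered SMBALERT# will fire again */
}

static struct i2c_driver bar_driver = {
	.driver	= { .name = "bar" },
	.alert	= bar_alert,
	/* .probe, .remove, .id_table, ... */
};
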
diff --git a/drivers/ide/aec62xx.c b/drivers/ide/aec62xx.c
index 878f8ec6dbe1..57d00caefc86 100644
--- a/drivers/ide/aec62xx.c
+++ b/drivers/ide/aec62xx.c
@@ -81,15 +81,15 @@ static u8 pci_bus_clock_list_ultra (u8 speed, struct chipset_bus_clock_list_entr
81 return chipset_table->ultra_settings; 81 return chipset_table->ultra_settings;
82} 82}
83 83
84static void aec6210_set_mode(ide_drive_t *drive, const u8 speed) 84static void aec6210_set_mode(ide_hwif_t *hwif, ide_drive_t *drive)
85{ 85{
86 ide_hwif_t *hwif = drive->hwif;
87 struct pci_dev *dev = to_pci_dev(hwif->dev); 86 struct pci_dev *dev = to_pci_dev(hwif->dev);
88 struct ide_host *host = pci_get_drvdata(dev); 87 struct ide_host *host = pci_get_drvdata(dev);
89 struct chipset_bus_clock_list_entry *bus_clock = host->host_priv; 88 struct chipset_bus_clock_list_entry *bus_clock = host->host_priv;
90 u16 d_conf = 0; 89 u16 d_conf = 0;
91 u8 ultra = 0, ultra_conf = 0; 90 u8 ultra = 0, ultra_conf = 0;
92 u8 tmp0 = 0, tmp1 = 0, tmp2 = 0; 91 u8 tmp0 = 0, tmp1 = 0, tmp2 = 0;
92 const u8 speed = drive->dma_mode;
93 unsigned long flags; 93 unsigned long flags;
94 94
95 local_irq_save(flags); 95 local_irq_save(flags);
@@ -109,15 +109,15 @@ static void aec6210_set_mode(ide_drive_t *drive, const u8 speed)
109 local_irq_restore(flags); 109 local_irq_restore(flags);
110} 110}
111 111
112static void aec6260_set_mode(ide_drive_t *drive, const u8 speed) 112static void aec6260_set_mode(ide_hwif_t *hwif, ide_drive_t *drive)
113{ 113{
114 ide_hwif_t *hwif = drive->hwif;
115 struct pci_dev *dev = to_pci_dev(hwif->dev); 114 struct pci_dev *dev = to_pci_dev(hwif->dev);
116 struct ide_host *host = pci_get_drvdata(dev); 115 struct ide_host *host = pci_get_drvdata(dev);
117 struct chipset_bus_clock_list_entry *bus_clock = host->host_priv; 116 struct chipset_bus_clock_list_entry *bus_clock = host->host_priv;
118 u8 unit = drive->dn & 1; 117 u8 unit = drive->dn & 1;
119 u8 tmp1 = 0, tmp2 = 0; 118 u8 tmp1 = 0, tmp2 = 0;
120 u8 ultra = 0, drive_conf = 0, ultra_conf = 0; 119 u8 ultra = 0, drive_conf = 0, ultra_conf = 0;
120 const u8 speed = drive->dma_mode;
121 unsigned long flags; 121 unsigned long flags;
122 122
123 local_irq_save(flags); 123 local_irq_save(flags);
@@ -134,9 +134,10 @@ static void aec6260_set_mode(ide_drive_t *drive, const u8 speed)
134 local_irq_restore(flags); 134 local_irq_restore(flags);
135} 135}
136 136
137static void aec_set_pio_mode(ide_drive_t *drive, const u8 pio) 137static void aec_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
138{ 138{
139 drive->hwif->port_ops->set_dma_mode(drive, pio + XFER_PIO_0); 139 drive->dma_mode = drive->pio_mode;
140 hwif->port_ops->set_dma_mode(hwif, drive);
140} 141}
141 142
142static int init_chipset_aec62xx(struct pci_dev *dev) 143static int init_chipset_aec62xx(struct pci_dev *dev)
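
The aec62xx hunks above are the first instance of an interface change repeated across the IDE drivers that follow: set_pio_mode() and set_dma_mode() now receive the port explicitly and read the target mode from the drive instead of taking it as a parameter. The shape of the new convention, with hypothetical my_* names:

static void my_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
	const u8 speed = drive->dma_mode;	/* formerly the "speed" argument */

	/* program hwif's timing registers for "speed" ... */
}

static void my_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
	const u8 pio = drive->pio_mode - XFER_PIO_0;	/* formerly "pio" */

	/* program hwif's timing registers for PIO mode "pio" ... */
}
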
diff --git a/drivers/ide/ali14xx.c b/drivers/ide/ali14xx.c
index 90da1f953ed0..25b9fe3a9f8e 100644
--- a/drivers/ide/ali14xx.c
+++ b/drivers/ide/ali14xx.c
@@ -109,13 +109,14 @@ static DEFINE_SPINLOCK(ali14xx_lock);
109 * This function computes timing parameters 109 * This function computes timing parameters
110 * and sets controller registers accordingly. 110 * and sets controller registers accordingly.
111 */ 111 */
112static void ali14xx_set_pio_mode(ide_drive_t *drive, const u8 pio) 112static void ali14xx_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
113{ 113{
114 int driveNum; 114 int driveNum;
115 int time1, time2; 115 int time1, time2;
116 u8 param1, param2, param3, param4; 116 u8 param1, param2, param3, param4;
117 unsigned long flags; 117 unsigned long flags;
118 int bus_speed = ide_vlb_clk ? ide_vlb_clk : 50; 118 int bus_speed = ide_vlb_clk ? ide_vlb_clk : 50;
119 const u8 pio = drive->pio_mode - XFER_PIO_0;
119 struct ide_timing *t = ide_timing_find_mode(XFER_PIO_0 + pio); 120 struct ide_timing *t = ide_timing_find_mode(XFER_PIO_0 + pio);
120 121
121 /* calculate timing, according to PIO mode */ 122 /* calculate timing, according to PIO mode */
diff --git a/drivers/ide/alim15x3.c b/drivers/ide/alim15x3.c
index 0abc43f3101e..2c8016ad0e26 100644
--- a/drivers/ide/alim15x3.c
+++ b/drivers/ide/alim15x3.c
@@ -8,7 +8,7 @@
8 * Copyright (C) 2002 Alan Cox 8 * Copyright (C) 2002 Alan Cox
9 * ALi (now ULi M5228) support by Clear Zhang <Clear.Zhang@ali.com.tw> 9 * ALi (now ULi M5228) support by Clear Zhang <Clear.Zhang@ali.com.tw>
10 * Copyright (C) 2007 MontaVista Software, Inc. <source@mvista.com> 10 * Copyright (C) 2007 MontaVista Software, Inc. <source@mvista.com>
11 * Copyright (C) 2007 Bartlomiej Zolnierkiewicz <bzolnier@gmail.com> 11 * Copyright (C) 2007-2010 Bartlomiej Zolnierkiewicz
12 * 12 *
13 * (U)DMA capable version of ali 1533/1543(C), 1535(D) 13 * (U)DMA capable version of ali 1533/1543(C), 1535(D)
14 * 14 *
@@ -48,61 +48,84 @@ static u8 m5229_revision;
48static u8 chip_is_1543c_e; 48static u8 chip_is_1543c_e;
49static struct pci_dev *isa_dev; 49static struct pci_dev *isa_dev;
50 50
51static void ali_fifo_control(ide_hwif_t *hwif, ide_drive_t *drive, int on)
52{
53 struct pci_dev *pdev = to_pci_dev(hwif->dev);
54 int pio_fifo = 0x54 + hwif->channel;
55 u8 fifo;
56 int shift = 4 * (drive->dn & 1);
57
58 pci_read_config_byte(pdev, pio_fifo, &fifo);
59 fifo &= ~(0x0F << shift);
60 fifo |= (on << shift);
61 pci_write_config_byte(pdev, pio_fifo, fifo);
62}
63
64static void ali_program_timings(ide_hwif_t *hwif, ide_drive_t *drive,
65 struct ide_timing *t, u8 ultra)
66{
67 struct pci_dev *dev = to_pci_dev(hwif->dev);
68 int port = hwif->channel ? 0x5c : 0x58;
69 int udmat = 0x56 + hwif->channel;
70 u8 unit = drive->dn & 1, udma;
71 int shift = 4 * unit;
72
73 /* Set up the UDMA */
74 pci_read_config_byte(dev, udmat, &udma);
75 udma &= ~(0x0F << shift);
76 udma |= ultra << shift;
77 pci_write_config_byte(dev, udmat, udma);
78
79 if (t == NULL)
80 return;
81
82 t->setup = clamp_val(t->setup, 1, 8) & 7;
83 t->act8b = clamp_val(t->act8b, 1, 8) & 7;
84 t->rec8b = clamp_val(t->rec8b, 1, 16) & 15;
85 t->active = clamp_val(t->active, 1, 8) & 7;
86 t->recover = clamp_val(t->recover, 1, 16) & 15;
87
88 pci_write_config_byte(dev, port, t->setup);
89 pci_write_config_byte(dev, port + 1, (t->act8b << 4) | t->rec8b);
90 pci_write_config_byte(dev, port + unit + 2,
91 (t->active << 4) | t->recover);
92}
93
51/** 94/**
52 * ali_set_pio_mode - set host controller for PIO mode 95 * ali_set_pio_mode - set host controller for PIO mode
96 * @hwif: port
53 * @drive: drive 97 * @drive: drive
54 * @pio: PIO mode number
55 * 98 *
56 * Program the controller for the given PIO mode. 99 * Program the controller for the given PIO mode.
57 */ 100 */
58 101
59static void ali_set_pio_mode(ide_drive_t *drive, const u8 pio) 102static void ali_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
60{ 103{
61 ide_hwif_t *hwif = drive->hwif; 104 ide_drive_t *pair = ide_get_pair_dev(drive);
62 struct pci_dev *dev = to_pci_dev(hwif->dev);
63 struct ide_timing *t = ide_timing_find_mode(XFER_PIO_0 + pio);
64 int s_time = t->setup, a_time = t->active, c_time = t->cycle;
65 u8 s_clc, a_clc, r_clc;
66 unsigned long flags;
67 int bus_speed = ide_pci_clk ? ide_pci_clk : 33; 105 int bus_speed = ide_pci_clk ? ide_pci_clk : 33;
68 int port = hwif->channel ? 0x5c : 0x58; 106 unsigned long T = 1000000 / bus_speed; /* PCI clock based */
69 int portFIFO = hwif->channel ? 0x55 : 0x54; 107 struct ide_timing t;
70 u8 cd_dma_fifo = 0, unit = drive->dn & 1; 108
71 109 ide_timing_compute(drive, drive->pio_mode, &t, T, 1);
72 if ((s_clc = (s_time * bus_speed + 999) / 1000) >= 8) 110 if (pair) {
73 s_clc = 0; 111 struct ide_timing p;
74 if ((a_clc = (a_time * bus_speed + 999) / 1000) >= 8) 112
75 a_clc = 0; 113 ide_timing_compute(pair, pair->pio_mode, &p, T, 1);
76 114 ide_timing_merge(&p, &t, &t,
77 if (!(r_clc = (c_time * bus_speed + 999) / 1000 - a_clc - s_clc)) { 115 IDE_TIMING_SETUP | IDE_TIMING_8BIT);
78 r_clc = 1; 116 if (pair->dma_mode) {
79 } else { 117 ide_timing_compute(pair, pair->dma_mode, &p, T, 1);
80 if (r_clc >= 16) 118 ide_timing_merge(&p, &t, &t,
81 r_clc = 0; 119 IDE_TIMING_SETUP | IDE_TIMING_8BIT);
120 }
82 } 121 }
83 local_irq_save(flags); 122
84
85 /* 123 /*
86 * PIO mode => ATA FIFO on, ATAPI FIFO off 124 * PIO mode => ATA FIFO on, ATAPI FIFO off
87 */ 125 */
88 pci_read_config_byte(dev, portFIFO, &cd_dma_fifo); 126 ali_fifo_control(hwif, drive, (drive->media == ide_disk) ? 0x05 : 0x00);
89 if (drive->media==ide_disk) { 127
90 if (unit) { 128 ali_program_timings(hwif, drive, &t, 0);
91 pci_write_config_byte(dev, portFIFO, (cd_dma_fifo & 0x0F) | 0x50);
92 } else {
93 pci_write_config_byte(dev, portFIFO, (cd_dma_fifo & 0xF0) | 0x05);
94 }
95 } else {
96 if (unit) {
97 pci_write_config_byte(dev, portFIFO, cd_dma_fifo & 0x0F);
98 } else {
99 pci_write_config_byte(dev, portFIFO, cd_dma_fifo & 0xF0);
100 }
101 }
102
103 pci_write_config_byte(dev, port, s_clc);
104 pci_write_config_byte(dev, port + unit + 2, (a_clc << 4) | r_clc);
105 local_irq_restore(flags);
106} 129}
107 130
108/** 131/**
@@ -132,44 +155,42 @@ static u8 ali_udma_filter(ide_drive_t *drive)
132 155
133/** 156/**
134 * ali_set_dma_mode - set host controller for DMA mode 157 * ali_set_dma_mode - set host controller for DMA mode
158 * @hwif: port
135 * @drive: drive 159 * @drive: drive
136 * @speed: DMA mode
137 * 160 *
138 * Configure the hardware for the desired IDE transfer mode. 161 * Configure the hardware for the desired IDE transfer mode.
139 */ 162 */
140 163
141static void ali_set_dma_mode(ide_drive_t *drive, const u8 speed) 164static void ali_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
142{ 165{
143 ide_hwif_t *hwif = drive->hwif; 166 static u8 udma_timing[7] = { 0xC, 0xB, 0xA, 0x9, 0x8, 0xF, 0xD };
144 struct pci_dev *dev = to_pci_dev(hwif->dev); 167 struct pci_dev *dev = to_pci_dev(hwif->dev);
145 u8 speed1 = speed; 168 ide_drive_t *pair = ide_get_pair_dev(drive);
146 u8 unit = drive->dn & 1; 169 int bus_speed = ide_pci_clk ? ide_pci_clk : 33;
170 unsigned long T = 1000000 / bus_speed; /* PCI clock based */
171 const u8 speed = drive->dma_mode;
147 u8 tmpbyte = 0x00; 172 u8 tmpbyte = 0x00;
148 int m5229_udma = (hwif->channel) ? 0x57 : 0x56; 173 struct ide_timing t;
149
150 if (speed == XFER_UDMA_6)
151 speed1 = 0x47;
152 174
153 if (speed < XFER_UDMA_0) { 175 if (speed < XFER_UDMA_0) {
154 u8 ultra_enable = (unit) ? 0x7f : 0xf7; 176 ide_timing_compute(drive, drive->dma_mode, &t, T, 1);
155 /* 177 if (pair) {
156 * clear "ultra enable" bit 178 struct ide_timing p;
157 */ 179
158 pci_read_config_byte(dev, m5229_udma, &tmpbyte); 180 ide_timing_compute(pair, pair->pio_mode, &p, T, 1);
159 tmpbyte &= ultra_enable; 181 ide_timing_merge(&p, &t, &t,
160 pci_write_config_byte(dev, m5229_udma, tmpbyte); 182 IDE_TIMING_SETUP | IDE_TIMING_8BIT);
161 183 if (pair->dma_mode) {
162 /* 184 ide_timing_compute(pair, pair->dma_mode,
163 * FIXME: Oh, my... DMA timings are never set. 185 &p, T, 1);
164 */ 186 ide_timing_merge(&p, &t, &t,
187 IDE_TIMING_SETUP | IDE_TIMING_8BIT);
188 }
189 }
190 ali_program_timings(hwif, drive, &t, 0);
165 } else { 191 } else {
166 pci_read_config_byte(dev, m5229_udma, &tmpbyte); 192 ali_program_timings(hwif, drive, NULL,
167 tmpbyte &= (0x0f << ((1-unit) << 2)); 193 udma_timing[speed - XFER_UDMA_0]);
168 /*
169 * enable ultra dma and set timing
170 */
171 tmpbyte |= ((0x08 | ((4-speed1)&0x07)) << (unit << 2));
172 pci_write_config_byte(dev, m5229_udma, tmpbyte);
173 if (speed >= XFER_UDMA_3) { 194 if (speed >= XFER_UDMA_3) {
174 pci_read_config_byte(dev, 0x4b, &tmpbyte); 195 pci_read_config_byte(dev, 0x4b, &tmpbyte);
175 tmpbyte |= 1; 196 tmpbyte |= 1;
@@ -355,19 +376,13 @@ static int ali_cable_override(struct pci_dev *pdev)
355 * 376 *
356 * This checks if the controller and the cable are capable 377 * This checks if the controller and the cable are capable
357 * of UDMA66 transfers. It doesn't check the drives. 378 * of UDMA66 transfers. It doesn't check the drives.
358 * But see note 2 below!
359 *
360 * FIXME: frobs bits that are not defined on newer ALi devicea
361 */ 379 */
362 380
363static u8 ali_cable_detect(ide_hwif_t *hwif) 381static u8 ali_cable_detect(ide_hwif_t *hwif)
364{ 382{
365 struct pci_dev *dev = to_pci_dev(hwif->dev); 383 struct pci_dev *dev = to_pci_dev(hwif->dev);
366 unsigned long flags;
367 u8 cbl = ATA_CBL_PATA40, tmpbyte; 384 u8 cbl = ATA_CBL_PATA40, tmpbyte;
368 385
369 local_irq_save(flags);
370
371 if (m5229_revision >= 0xC2) { 386 if (m5229_revision >= 0xC2) {
372 /* 387 /*
373 * m5229 80-pin cable detection (from Host View) 388 * m5229 80-pin cable detection (from Host View)
@@ -387,8 +402,6 @@ static u8 ali_cable_detect(ide_hwif_t *hwif)
387 } 402 }
388 } 403 }
389 404
390 local_irq_restore(flags);
391
392 return cbl; 405 return cbl;
393} 406}
394 407
@@ -584,6 +597,6 @@ static void __exit ali15x3_ide_exit(void)
584module_init(ali15x3_ide_init); 597module_init(ali15x3_ide_init);
585module_exit(ali15x3_ide_exit); 598module_exit(ali15x3_ide_exit);
586 599
587MODULE_AUTHOR("Michael Aubry, Andrzej Krzysztofowicz, CJ, Andre Hedrick, Alan Cox"); 600MODULE_AUTHOR("Michael Aubry, Andrzej Krzysztofowicz, CJ, Andre Hedrick, Alan Cox, Bartlomiej Zolnierkiewicz");
588MODULE_DESCRIPTION("PCI driver module for ALi 15x3 IDE"); 601MODULE_DESCRIPTION("PCI driver module for ALi 15x3 IDE");
589MODULE_LICENSE("GPL"); 602MODULE_LICENSE("GPL");
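
The alim15x3 rework above (echoed in amd74xx and cmd64x below) replaces hand-rolled clock math with the generic timing helpers: compute the timing for the drive, then merge in the pair drive's constraints for whatever fields the two drives share on this controller, keeping the slower value. A condensed sketch; my_set_timings and my_program are hypothetical:

static void my_set_timings(ide_hwif_t *hwif, ide_drive_t *drive, u8 mode)
{
	ide_drive_t *pair = ide_get_pair_dev(drive);
	unsigned long T = 1000000 / (ide_pci_clk ? ide_pci_clk : 33);
	struct ide_timing t, p;

	ide_timing_compute(drive, mode, &t, T, 1);
	if (pair) {
		ide_timing_compute(pair, pair->pio_mode, &p, T, 1);
		/* shared setup/8-bit registers: keep the slower timing */
		ide_timing_merge(&p, &t, &t, IDE_TIMING_SETUP | IDE_TIMING_8BIT);
	}
	/* my_program(hwif, drive, &t): write t into the chipset */
}
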
diff --git a/drivers/ide/amd74xx.c b/drivers/ide/amd74xx.c
index 628cd2e5fed8..3747b2561f09 100644
--- a/drivers/ide/amd74xx.c
+++ b/drivers/ide/amd74xx.c
@@ -3,7 +3,7 @@
3 * IDE driver for Linux. 3 * IDE driver for Linux.
4 * 4 *
5 * Copyright (c) 2000-2002 Vojtech Pavlik 5 * Copyright (c) 2000-2002 Vojtech Pavlik
6 * Copyright (c) 2007-2008 Bartlomiej Zolnierkiewicz 6 * Copyright (c) 2007-2010 Bartlomiej Zolnierkiewicz
7 * 7 *
8 * Based on the work of: 8 * Based on the work of:
9 * Andre Hedrick 9 * Andre Hedrick
@@ -70,7 +70,8 @@ static void amd_set_speed(struct pci_dev *dev, u8 dn, u8 udma_mask,
70 default: return; 70 default: return;
71 } 71 }
72 72
73 pci_write_config_byte(dev, AMD_UDMA_TIMING + offset + (3 - dn), t); 73 if (timing->udma)
74 pci_write_config_byte(dev, AMD_UDMA_TIMING + offset + 3 - dn, t);
74} 75}
75 76
76/* 77/*
@@ -78,14 +79,14 @@ static void amd_set_speed(struct pci_dev *dev, u8 dn, u8 udma_mask,
78 * to a desired transfer mode. It also can be called by upper layers. 79 * to a desired transfer mode. It also can be called by upper layers.
79 */ 80 */
80 81
81static void amd_set_drive(ide_drive_t *drive, const u8 speed) 82static void amd_set_drive(ide_hwif_t *hwif, ide_drive_t *drive)
82{ 83{
83 ide_hwif_t *hwif = drive->hwif;
84 struct pci_dev *dev = to_pci_dev(hwif->dev); 84 struct pci_dev *dev = to_pci_dev(hwif->dev);
85 ide_drive_t *peer = ide_get_pair_dev(drive); 85 ide_drive_t *peer = ide_get_pair_dev(drive);
86 struct ide_timing t, p; 86 struct ide_timing t, p;
87 int T, UT; 87 int T, UT;
88 u8 udma_mask = hwif->ultra_mask; 88 u8 udma_mask = hwif->ultra_mask;
89 const u8 speed = drive->dma_mode;
89 90
90 T = 1000000000 / amd_clock; 91 T = 1000000000 / amd_clock;
91 UT = (udma_mask == ATA_UDMA2) ? T : (T / 2); 92 UT = (udma_mask == ATA_UDMA2) ? T : (T / 2);
@@ -93,7 +94,7 @@ static void amd_set_drive(ide_drive_t *drive, const u8 speed)
93 ide_timing_compute(drive, speed, &t, T, UT); 94 ide_timing_compute(drive, speed, &t, T, UT);
94 95
95 if (peer) { 96 if (peer) {
96 ide_timing_compute(peer, peer->current_speed, &p, T, UT); 97 ide_timing_compute(peer, peer->pio_mode, &p, T, UT);
97 ide_timing_merge(&p, &t, &t, IDE_TIMING_8BIT); 98 ide_timing_merge(&p, &t, &t, IDE_TIMING_8BIT);
98 } 99 }
99 100
@@ -107,9 +108,10 @@ static void amd_set_drive(ide_drive_t *drive, const u8 speed)
107 * amd_set_pio_mode() is a callback from upper layers for PIO-only tuning. 108 * amd_set_pio_mode() is a callback from upper layers for PIO-only tuning.
108 */ 109 */
109 110
110static void amd_set_pio_mode(ide_drive_t *drive, const u8 pio) 111static void amd_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
111{ 112{
112 amd_set_drive(drive, XFER_PIO_0 + pio); 113 drive->dma_mode = drive->pio_mode;
114 amd_set_drive(hwif, drive);
113} 115}
114 116
115static void amd7409_cable_detect(struct pci_dev *dev) 117static void amd7409_cable_detect(struct pci_dev *dev)
@@ -340,6 +342,6 @@ static void __exit amd74xx_ide_exit(void)
340module_init(amd74xx_ide_init); 342module_init(amd74xx_ide_init);
341module_exit(amd74xx_ide_exit); 343module_exit(amd74xx_ide_exit);
342 344
343MODULE_AUTHOR("Vojtech Pavlik"); 345MODULE_AUTHOR("Vojtech Pavlik, Bartlomiej Zolnierkiewicz");
344MODULE_DESCRIPTION("AMD PCI IDE driver"); 346MODULE_DESCRIPTION("AMD PCI IDE driver");
345MODULE_LICENSE("GPL"); 347MODULE_LICENSE("GPL");
diff --git a/drivers/ide/at91_ide.c b/drivers/ide/at91_ide.c
index 248219a89a68..000a78e5246c 100644
--- a/drivers/ide/at91_ide.c
+++ b/drivers/ide/at91_ide.c
@@ -172,11 +172,12 @@ static void at91_ide_output_data(ide_drive_t *drive, struct ide_cmd *cmd,
172 leave_16bit(chipselect, mode); 172 leave_16bit(chipselect, mode);
173} 173}
174 174
175static void at91_ide_set_pio_mode(ide_drive_t *drive, const u8 pio) 175static void at91_ide_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
176{ 176{
177 struct ide_timing *timing; 177 struct ide_timing *timing;
178 u8 chipselect = drive->hwif->select_data; 178 u8 chipselect = hwif->select_data;
179 int use_iordy = 0; 179 int use_iordy = 0;
180 const u8 pio = drive->pio_mode - XFER_PIO_0;
180 181
181 pdbg("chipselect %u pio %u\n", chipselect, pio); 182 pdbg("chipselect %u pio %u\n", chipselect, pio);
182 183
diff --git a/drivers/ide/atiixp.c b/drivers/ide/atiixp.c
index 837322b10a4c..15f0ead89f5c 100644
--- a/drivers/ide/atiixp.c
+++ b/drivers/ide/atiixp.c
@@ -42,19 +42,20 @@ static DEFINE_SPINLOCK(atiixp_lock);
42 42
43/** 43/**
44 * atiixp_set_pio_mode - set host controller for PIO mode 44 * atiixp_set_pio_mode - set host controller for PIO mode
45 * @hwif: port
45 * @drive: drive 46 * @drive: drive
46 * @pio: PIO mode number
47 * 47 *
48 * Set the interface PIO mode. 48 * Set the interface PIO mode.
49 */ 49 */
50 50
51static void atiixp_set_pio_mode(ide_drive_t *drive, const u8 pio) 51static void atiixp_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
52{ 52{
53 struct pci_dev *dev = to_pci_dev(drive->hwif->dev); 53 struct pci_dev *dev = to_pci_dev(hwif->dev);
54 unsigned long flags; 54 unsigned long flags;
55 int timing_shift = (drive->dn ^ 1) * 8; 55 int timing_shift = (drive->dn ^ 1) * 8;
56 u32 pio_timing_data; 56 u32 pio_timing_data;
57 u16 pio_mode_data; 57 u16 pio_mode_data;
58 const u8 pio = drive->pio_mode - XFER_PIO_0;
58 59
59 spin_lock_irqsave(&atiixp_lock, flags); 60 spin_lock_irqsave(&atiixp_lock, flags);
60 61
@@ -74,21 +75,22 @@ static void atiixp_set_pio_mode(ide_drive_t *drive, const u8 pio)
74 75
75/** 76/**
76 * atiixp_set_dma_mode - set host controller for DMA mode 77 * atiixp_set_dma_mode - set host controller for DMA mode
78 * @hwif: port
77 * @drive: drive 79 * @drive: drive
78 * @speed: DMA mode
79 * 80 *
80 * Set a ATIIXP host controller to the desired DMA mode. This involves 81 * Set a ATIIXP host controller to the desired DMA mode. This involves
81 * programming the right timing data into the PCI configuration space. 82 * programming the right timing data into the PCI configuration space.
82 */ 83 */
83 84
84static void atiixp_set_dma_mode(ide_drive_t *drive, const u8 speed) 85static void atiixp_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
85{ 86{
86 struct pci_dev *dev = to_pci_dev(drive->hwif->dev); 87 struct pci_dev *dev = to_pci_dev(hwif->dev);
87 unsigned long flags; 88 unsigned long flags;
88 int timing_shift = (drive->dn ^ 1) * 8; 89 int timing_shift = (drive->dn ^ 1) * 8;
89 u32 tmp32; 90 u32 tmp32;
90 u16 tmp16; 91 u16 tmp16;
91 u16 udma_ctl = 0; 92 u16 udma_ctl = 0;
93 const u8 speed = drive->dma_mode;
92 94
93 spin_lock_irqsave(&atiixp_lock, flags); 95 spin_lock_irqsave(&atiixp_lock, flags);
94 96
diff --git a/drivers/ide/au1xxx-ide.c b/drivers/ide/au1xxx-ide.c
index 349a67bf1a36..b26c23416fa7 100644
--- a/drivers/ide/au1xxx-ide.c
+++ b/drivers/ide/au1xxx-ide.c
@@ -99,12 +99,11 @@ static void au1xxx_output_data(ide_drive_t *drive, struct ide_cmd *cmd,
99} 99}
100#endif 100#endif
101 101
102static void au1xxx_set_pio_mode(ide_drive_t *drive, const u8 pio) 102static void au1xxx_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
103{ 103{
104 int mem_sttime = 0, mem_stcfg = au_readl(MEM_STCFG2); 104 int mem_sttime = 0, mem_stcfg = au_readl(MEM_STCFG2);
105 105
106 /* set pio mode! */ 106 switch (drive->pio_mode - XFER_PIO_0) {
107 switch(pio) {
108 case 0: 107 case 0:
109 mem_sttime = SBC_IDE_TIMING(PIO0); 108 mem_sttime = SBC_IDE_TIMING(PIO0);
110 109
@@ -161,11 +160,11 @@ static void au1xxx_set_pio_mode(ide_drive_t *drive, const u8 pio)
161 au_writel(mem_stcfg,MEM_STCFG2); 160 au_writel(mem_stcfg,MEM_STCFG2);
162} 161}
163 162
164static void auide_set_dma_mode(ide_drive_t *drive, const u8 speed) 163static void auide_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
165{ 164{
166 int mem_sttime = 0, mem_stcfg = au_readl(MEM_STCFG2); 165 int mem_sttime = 0, mem_stcfg = au_readl(MEM_STCFG2);
167 166
168 switch(speed) { 167 switch (drive->dma_mode) {
169#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA 168#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
170 case XFER_MW_DMA_2: 169 case XFER_MW_DMA_2:
171 mem_sttime = SBC_IDE_TIMING(MDMA2); 170 mem_sttime = SBC_IDE_TIMING(MDMA2);
@@ -297,8 +296,8 @@ static int auide_dma_test_irq(ide_drive_t *drive)
297 */ 296 */
298 drive->waiting_for_dma++; 297 drive->waiting_for_dma++;
299 if (drive->waiting_for_dma >= DMA_WAIT_TIMEOUT) { 298 if (drive->waiting_for_dma >= DMA_WAIT_TIMEOUT) {
300 printk(KERN_WARNING "%s: timeout waiting for ddma to \ 299 printk(KERN_WARNING "%s: timeout waiting for ddma to complete\n",
301 complete\n", drive->name); 300 drive->name);
302 return 1; 301 return 1;
303 } 302 }
304 udelay(10); 303 udelay(10);
diff --git a/drivers/ide/cmd640.c b/drivers/ide/cmd640.c
index 1a32d62ed86b..d2b8b272bc27 100644
--- a/drivers/ide/cmd640.c
+++ b/drivers/ide/cmd640.c
@@ -572,9 +572,10 @@ static void cmd640_set_mode(ide_drive_t *drive, unsigned int index,
572 program_drive_counts(drive, index); 572 program_drive_counts(drive, index);
573} 573}
574 574
575static void cmd640_set_pio_mode(ide_drive_t *drive, const u8 pio) 575static void cmd640_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
576{ 576{
577 unsigned int index = 0, cycle_time; 577 unsigned int index = 0, cycle_time;
578 const u8 pio = drive->pio_mode - XFER_PIO_0;
578 u8 b; 579 u8 b;
579 580
580 switch (pio) { 581 switch (pio) {
@@ -605,7 +606,7 @@ static void cmd640_set_pio_mode(ide_drive_t *drive, const u8 pio)
605} 606}
606#endif /* CONFIG_BLK_DEV_CMD640_ENHANCED */ 607#endif /* CONFIG_BLK_DEV_CMD640_ENHANCED */
607 608
608static void cmd640_init_dev(ide_drive_t *drive) 609static void __init cmd640_init_dev(ide_drive_t *drive)
609{ 610{
610 unsigned int i = drive->hwif->channel * 2 + (drive->dn & 1); 611 unsigned int i = drive->hwif->channel * 2 + (drive->dn & 1);
611 612
diff --git a/drivers/ide/cmd64x.c b/drivers/ide/cmd64x.c
index f2500c8826bb..5f80312e636b 100644
--- a/drivers/ide/cmd64x.c
+++ b/drivers/ide/cmd64x.c
@@ -7,6 +7,7 @@
7 * Copyright (C) 1998 David S. Miller (davem@redhat.com) 7 * Copyright (C) 1998 David S. Miller (davem@redhat.com)
8 * 8 *
9 * Copyright (C) 1999-2002 Andre Hedrick <andre@linux-ide.org> 9 * Copyright (C) 1999-2002 Andre Hedrick <andre@linux-ide.org>
10 * Copyright (C) 2007-2010 Bartlomiej Zolnierkiewicz
10 * Copyright (C) 2007,2009 MontaVista Software, Inc. <source@mvista.com> 11 * Copyright (C) 2007,2009 MontaVista Software, Inc. <source@mvista.com>
11 */ 12 */
12 13
@@ -50,72 +51,42 @@
50#define UDIDETCR1 0x7B 51#define UDIDETCR1 0x7B
51#define DTPR1 0x7C 52#define DTPR1 0x7C
52 53
53static u8 quantize_timing(int timing, int quant) 54static void cmd64x_program_timings(ide_drive_t *drive, u8 mode)
54{
55 return (timing + quant - 1) / quant;
56}
57
58/*
59 * This routine calculates active/recovery counts and then writes them into
60 * the chipset registers.
61 */
62static void program_cycle_times (ide_drive_t *drive, int cycle_time, int active_time)
63{ 55{
56 ide_hwif_t *hwif = drive->hwif;
64 struct pci_dev *dev = to_pci_dev(drive->hwif->dev); 57 struct pci_dev *dev = to_pci_dev(drive->hwif->dev);
65 int clock_time = 1000 / (ide_pci_clk ? ide_pci_clk : 33); 58 int bus_speed = ide_pci_clk ? ide_pci_clk : 33;
66 u8 cycle_count, active_count, recovery_count, drwtim; 59 const unsigned long T = 1000000 / bus_speed;
67 static const u8 recovery_values[] = 60 static const u8 recovery_values[] =
68 {15, 15, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 0}; 61 {15, 15, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 0};
62 static const u8 setup_values[] = {0x40, 0x40, 0x40, 0x80, 0, 0xc0};
63 static const u8 arttim_regs[4] = {ARTTIM0, ARTTIM1, ARTTIM23, ARTTIM23};
69 static const u8 drwtim_regs[4] = {DRWTIM0, DRWTIM1, DRWTIM2, DRWTIM3}; 64 static const u8 drwtim_regs[4] = {DRWTIM0, DRWTIM1, DRWTIM2, DRWTIM3};
65 struct ide_timing t;
66 u8 arttim = 0;
70 67
71 cycle_count = quantize_timing( cycle_time, clock_time); 68 ide_timing_compute(drive, mode, &t, T, 0);
72 active_count = quantize_timing(active_time, clock_time);
73 recovery_count = cycle_count - active_count;
74 69
75 /* 70 /*
76 * In case we've got too long recovery phase, try to lengthen 71 * In case we've got too long recovery phase, try to lengthen
77 * the active phase 72 * the active phase
78 */ 73 */
79 if (recovery_count > 16) { 74 if (t.recover > 16) {
80 active_count += recovery_count - 16; 75 t.active += t.recover - 16;
81 recovery_count = 16; 76 t.recover = 16;
82 } 77 }
83 if (active_count > 16) /* shouldn't actually happen... */ 78 if (t.active > 16) /* shouldn't actually happen... */
84 active_count = 16; 79 t.active = 16;
85 80
86 /* 81 /*
87 * Convert values to internal chipset representation 82 * Convert values to internal chipset representation
88 */ 83 */
89 recovery_count = recovery_values[recovery_count]; 84 t.recover = recovery_values[t.recover];
90 active_count &= 0x0f; 85 t.active &= 0x0f;
91 86
92 /* Program the active/recovery counts into the DRWTIM register */ 87 /* Program the active/recovery counts into the DRWTIM register */
93 drwtim = (active_count << 4) | recovery_count; 88 pci_write_config_byte(dev, drwtim_regs[drive->dn],
94 (void) pci_write_config_byte(dev, drwtim_regs[drive->dn], drwtim); 89 (t.active << 4) | t.recover);
95}
96
97/*
98 * This routine writes into the chipset registers
99 * PIO setup/active/recovery timings.
100 */
101static void cmd64x_tune_pio(ide_drive_t *drive, const u8 pio)
102{
103 ide_hwif_t *hwif = drive->hwif;
104 struct pci_dev *dev = to_pci_dev(hwif->dev);
105 struct ide_timing *t = ide_timing_find_mode(XFER_PIO_0 + pio);
106 unsigned long setup_count;
107 unsigned int cycle_time;
108 u8 arttim = 0;
109
110 static const u8 setup_values[] = {0x40, 0x40, 0x40, 0x80, 0, 0xc0};
111 static const u8 arttim_regs[4] = {ARTTIM0, ARTTIM1, ARTTIM23, ARTTIM23};
112
113 cycle_time = ide_pio_cycle_time(drive, pio);
114
115 program_cycle_times(drive, cycle_time, t->active);
116
117 setup_count = quantize_timing(t->setup,
118 1000 / (ide_pci_clk ? ide_pci_clk : 33));
119 90
120 /* 91 /*
121 * The primary channel has individual address setup timing registers 92 * The primary channel has individual address setup timing registers
@@ -126,15 +97,21 @@ static void cmd64x_tune_pio(ide_drive_t *drive, const u8 pio)
126 if (hwif->channel) { 97 if (hwif->channel) {
127 ide_drive_t *pair = ide_get_pair_dev(drive); 98 ide_drive_t *pair = ide_get_pair_dev(drive);
128 99
129 ide_set_drivedata(drive, (void *)setup_count); 100 if (pair) {
101 struct ide_timing tp;
130 102
131 if (pair) 103 ide_timing_compute(pair, pair->pio_mode, &tp, T, 0);
132 setup_count = max_t(u8, setup_count, 104 ide_timing_merge(&t, &tp, &t, IDE_TIMING_SETUP);
133 (unsigned long)ide_get_drivedata(pair)); 105 if (pair->dma_mode) {
106 ide_timing_compute(pair, pair->dma_mode,
107 &tp, T, 0);
108 ide_timing_merge(&tp, &t, &t, IDE_TIMING_SETUP);
109 }
110 }
134 } 111 }
135 112
136 if (setup_count > 5) /* shouldn't actually happen... */ 113 if (t.setup > 5) /* shouldn't actually happen... */
137 setup_count = 5; 114 t.setup = 5;
138 115
139 /* 116 /*
140 * Program the address setup clocks into the ARTTIM registers. 117 * Program the address setup clocks into the ARTTIM registers.
@@ -144,7 +121,7 @@ static void cmd64x_tune_pio(ide_drive_t *drive, const u8 pio)
144 if (hwif->channel) 121 if (hwif->channel)
145 arttim &= ~ARTTIM23_INTR_CH1; 122 arttim &= ~ARTTIM23_INTR_CH1;
146 arttim &= ~0xc0; 123 arttim &= ~0xc0;
147 arttim |= setup_values[setup_count]; 124 arttim |= setup_values[t.setup];
148 (void) pci_write_config_byte(dev, arttim_regs[drive->dn], arttim); 125 (void) pci_write_config_byte(dev, arttim_regs[drive->dn], arttim);
149} 126}
150 127
@@ -153,8 +130,10 @@ static void cmd64x_tune_pio(ide_drive_t *drive, const u8 pio)
153 * Special cases are 8: prefetch off, 9: prefetch on (both never worked) 130 * Special cases are 8: prefetch off, 9: prefetch on (both never worked)
154 */ 131 */
155 132
156static void cmd64x_set_pio_mode(ide_drive_t *drive, const u8 pio) 133static void cmd64x_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
157{ 134{
135 const u8 pio = drive->pio_mode - XFER_PIO_0;
136
158 /* 137 /*
159 * Filter out the prefetch control values 138 * Filter out the prefetch control values
160 * to prevent PIO5 from being programmed 139 * to prevent PIO5 from being programmed
@@ -162,20 +141,18 @@ static void cmd64x_set_pio_mode(ide_drive_t *drive, const u8 pio)
162 if (pio == 8 || pio == 9) 141 if (pio == 8 || pio == 9)
163 return; 142 return;
164 143
165 cmd64x_tune_pio(drive, pio); 144 cmd64x_program_timings(drive, XFER_PIO_0 + pio);
166} 145}
167 146
168static void cmd64x_set_dma_mode(ide_drive_t *drive, const u8 speed) 147static void cmd64x_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
169{ 148{
170 ide_hwif_t *hwif = drive->hwif;
171 struct pci_dev *dev = to_pci_dev(hwif->dev); 149 struct pci_dev *dev = to_pci_dev(hwif->dev);
172 u8 unit = drive->dn & 0x01; 150 u8 unit = drive->dn & 0x01;
173 u8 regU = 0, pciU = hwif->channel ? UDIDETCR1 : UDIDETCR0; 151 u8 regU = 0, pciU = hwif->channel ? UDIDETCR1 : UDIDETCR0;
152 const u8 speed = drive->dma_mode;
174 153
175 if (speed >= XFER_SW_DMA_0) { 154 pci_read_config_byte(dev, pciU, &regU);
176 (void) pci_read_config_byte(dev, pciU, &regU); 155 regU &= ~(unit ? 0xCA : 0x35);
177 regU &= ~(unit ? 0xCA : 0x35);
178 }
179 156
180 switch(speed) { 157 switch(speed) {
181 case XFER_UDMA_5: 158 case XFER_UDMA_5:
@@ -197,18 +174,13 @@ static void cmd64x_set_dma_mode(ide_drive_t *drive, const u8 speed)
197 regU |= unit ? 0xC2 : 0x31; 174 regU |= unit ? 0xC2 : 0x31;
198 break; 175 break;
199 case XFER_MW_DMA_2: 176 case XFER_MW_DMA_2:
200 program_cycle_times(drive, 120, 70);
201 break;
202 case XFER_MW_DMA_1: 177 case XFER_MW_DMA_1:
203 program_cycle_times(drive, 150, 80);
204 break;
205 case XFER_MW_DMA_0: 178 case XFER_MW_DMA_0:
206 program_cycle_times(drive, 480, 215); 179 cmd64x_program_timings(drive, speed);
207 break; 180 break;
208 } 181 }
209 182
210 if (speed >= XFER_SW_DMA_0) 183 pci_write_config_byte(dev, pciU, regU);
211 (void) pci_write_config_byte(dev, pciU, regU);
212} 184}
213 185
214static void cmd648_clear_irq(ide_drive_t *drive) 186static void cmd648_clear_irq(ide_drive_t *drive)
@@ -471,6 +443,6 @@ static void __exit cmd64x_ide_exit(void)
471module_init(cmd64x_ide_init); 443module_init(cmd64x_ide_init);
472module_exit(cmd64x_ide_exit); 444module_exit(cmd64x_ide_exit);
473 445
474MODULE_AUTHOR("Eddie Dost, David Miller, Andre Hedrick"); 446MODULE_AUTHOR("Eddie Dost, David Miller, Andre Hedrick, Bartlomiej Zolnierkiewicz");
475MODULE_DESCRIPTION("PCI driver module for CMD64x IDE"); 447MODULE_DESCRIPTION("PCI driver module for CMD64x IDE");
476MODULE_LICENSE("GPL"); 448MODULE_LICENSE("GPL");
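
The cmd64x hunks above replace the driver-private setup_count bookkeeping with the generic ide_timing_compute()/ide_timing_merge() helpers: both drives on the second channel share a single address-setup field in ARTTIM23, so the slower (larger) setup value of the pair has to win. A minimal user-space sketch of that merge rule, using simplified stand-in types rather than the kernel's struct ide_timing:

    #include <stdio.h>

    struct timing { unsigned setup, active, recover; };

    static unsigned max_u(unsigned a, unsigned b) { return a > b ? a : b; }

    /* mirrors the IDE_TIMING_SETUP part of ide_timing_merge() */
    static void merge_setup(const struct timing *a, const struct timing *b,
                            struct timing *m)
    {
        m->setup = max_u(a->setup, b->setup);
    }

    int main(void)
    {
        struct timing drive  = { .setup = 2, .active = 6, .recover = 4 };
        struct timing pair   = { .setup = 4, .active = 5, .recover = 5 };
        struct timing merged = drive;

        merge_setup(&drive, &pair, &merged);
        printf("programmed setup clocks: %u\n", merged.setup); /* prints 4 */
        return 0;
    }
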
diff --git a/drivers/ide/cs5520.c b/drivers/ide/cs5520.c
index 09f98ed0731f..2c1e5f7cd261 100644
--- a/drivers/ide/cs5520.c
+++ b/drivers/ide/cs5520.c
@@ -57,11 +57,11 @@ static struct pio_clocks cs5520_pio_clocks[]={
57 {1, 2, 1} 57 {1, 2, 1}
58}; 58};
59 59
60static void cs5520_set_pio_mode(ide_drive_t *drive, const u8 pio) 60static void cs5520_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
61{ 61{
62 ide_hwif_t *hwif = drive->hwif;
63 struct pci_dev *pdev = to_pci_dev(hwif->dev); 62 struct pci_dev *pdev = to_pci_dev(hwif->dev);
64 int controller = drive->dn > 1 ? 1 : 0; 63 int controller = drive->dn > 1 ? 1 : 0;
64 const u8 pio = drive->pio_mode - XFER_PIO_0;
65 65
66 /* 8bit CAT/CRT - 8bit command timing for channel */ 66 /* 8bit CAT/CRT - 8bit command timing for channel */
67 pci_write_config_byte(pdev, 0x62 + controller, 67 pci_write_config_byte(pdev, 0x62 + controller,
@@ -81,11 +81,12 @@ static void cs5520_set_pio_mode(ide_drive_t *drive, const u8 pio)
81 (cs5520_pio_clocks[pio].assert)); 81 (cs5520_pio_clocks[pio].assert));
82} 82}
83 83
84static void cs5520_set_dma_mode(ide_drive_t *drive, const u8 speed) 84static void cs5520_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
85{ 85{
86 printk(KERN_ERR "cs55x0: bad ide timing.\n"); 86 printk(KERN_ERR "cs55x0: bad ide timing.\n");
87 87
88 cs5520_set_pio_mode(drive, 0); 88 drive->pio_mode = XFER_PIO_0 + 0;
89 cs5520_set_pio_mode(hwif, drive);
89} 90}
90 91
91static const struct ide_port_ops cs5520_port_ops = { 92static const struct ide_port_ops cs5520_port_ops = {
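
The cs5520 hunk is the simplest instance of the interface change running through this series: ->set_pio_mode() and ->set_dma_mode() now take (hwif, drive) and read the requested mode from drive->pio_mode/drive->dma_mode instead of a separate argument, so one hook can chain into another just by storing a mode first. A rough user-space model of the convention (all names here are simplified stand-ins; XFER_PIO_0 is 0x08 as in <linux/ata.h>):

    #include <stdio.h>

    enum { XFER_PIO_0 = 0x08 };

    struct drive { unsigned char pio_mode; };
    struct hwif  { const char *name; };

    static void set_pio_mode(struct hwif *hwif, struct drive *drive)
    {
        unsigned pio = drive->pio_mode - XFER_PIO_0;

        printf("%s: programming PIO%u\n", hwif->name, pio);
    }

    /* cs5520-style fallback: DMA tuning is refused, force PIO0 instead */
    static void set_dma_mode(struct hwif *hwif, struct drive *drive)
    {
        drive->pio_mode = XFER_PIO_0 + 0;
        set_pio_mode(hwif, drive);
    }

    int main(void)
    {
        struct hwif h = { "ide0" };
        struct drive d = { 0 };

        set_dma_mode(&h, &d);
        return 0;
    }
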
diff --git a/drivers/ide/cs5530.c b/drivers/ide/cs5530.c
index 40bf05eddf6e..4dc4eb92b076 100644
--- a/drivers/ide/cs5530.c
+++ b/drivers/ide/cs5530.c
@@ -41,8 +41,8 @@ static unsigned int cs5530_pio_timings[2][5] = {
41 41
42/** 42/**
43 * cs5530_set_pio_mode - set host controller for PIO mode 43 * cs5530_set_pio_mode - set host controller for PIO mode
44 * @hwif: port
44 * @drive: drive 45 * @drive: drive
45 * @pio: PIO mode number
46 * 46 *
47 * Handles setting of PIO mode for the chipset. 47 * Handles setting of PIO mode for the chipset.
48 * 48 *
@@ -50,10 +50,11 @@ static unsigned int cs5530_pio_timings[2][5] = {
50 * will have valid default PIO timings set up before we get here. 50 * will have valid default PIO timings set up before we get here.
51 */ 51 */
52 52
53static void cs5530_set_pio_mode(ide_drive_t *drive, const u8 pio) 53static void cs5530_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
54{ 54{
55 unsigned long basereg = CS5530_BASEREG(drive->hwif); 55 unsigned long basereg = CS5530_BASEREG(hwif);
56 unsigned int format = (inl(basereg + 4) >> 31) & 1; 56 unsigned int format = (inl(basereg + 4) >> 31) & 1;
57 const u8 pio = drive->pio_mode - XFER_PIO_0;
57 58
58 outl(cs5530_pio_timings[format][pio], basereg + ((drive->dn & 1)<<3)); 59 outl(cs5530_pio_timings[format][pio], basereg + ((drive->dn & 1)<<3));
59} 60}
@@ -99,12 +100,12 @@ out:
99 return mask; 100 return mask;
100} 101}
101 102
102static void cs5530_set_dma_mode(ide_drive_t *drive, const u8 mode) 103static void cs5530_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
103{ 104{
104 unsigned long basereg; 105 unsigned long basereg;
105 unsigned int reg, timings = 0; 106 unsigned int reg, timings = 0;
106 107
107 switch (mode) { 108 switch (drive->dma_mode) {
108 case XFER_UDMA_0: timings = 0x00921250; break; 109 case XFER_UDMA_0: timings = 0x00921250; break;
109 case XFER_UDMA_1: timings = 0x00911140; break; 110 case XFER_UDMA_1: timings = 0x00911140; break;
110 case XFER_UDMA_2: timings = 0x00911030; break; 111 case XFER_UDMA_2: timings = 0x00911030; break;
@@ -112,7 +113,7 @@ static void cs5530_set_dma_mode(ide_drive_t *drive, const u8 mode)
112 case XFER_MW_DMA_1: timings = 0x00012121; break; 113 case XFER_MW_DMA_1: timings = 0x00012121; break;
113 case XFER_MW_DMA_2: timings = 0x00002020; break; 114 case XFER_MW_DMA_2: timings = 0x00002020; break;
114 } 115 }
115 basereg = CS5530_BASEREG(drive->hwif); 116 basereg = CS5530_BASEREG(hwif);
116 reg = inl(basereg + 4); /* get drive0 config register */ 117 reg = inl(basereg + 4); /* get drive0 config register */
117 timings |= reg & 0x80000000; /* preserve PIO format bit */ 118 timings |= reg & 0x80000000; /* preserve PIO format bit */
118 if ((drive-> dn & 1) == 0) { /* are we configuring drive0? */ 119 if ((drive-> dn & 1) == 0) { /* are we configuring drive0? */
diff --git a/drivers/ide/cs5535.c b/drivers/ide/cs5535.c
index b883838adc24..5059fafadf29 100644
--- a/drivers/ide/cs5535.c
+++ b/drivers/ide/cs5535.c
@@ -86,7 +86,7 @@ static void cs5535_set_speed(ide_drive_t *drive, const u8 speed)
86 cmd = pioa = speed - XFER_PIO_0; 86 cmd = pioa = speed - XFER_PIO_0;
87 87
88 if (pair) { 88 if (pair) {
89 u8 piob = ide_get_best_pio_mode(pair, 255, 4); 89 u8 piob = pair->pio_mode - XFER_PIO_0;
90 90
91 if (piob < cmd) 91 if (piob < cmd)
92 cmd = piob; 92 cmd = piob;
@@ -129,28 +129,28 @@ static void cs5535_set_speed(ide_drive_t *drive, const u8 speed)
129 129
130/** 130/**
131 * cs5535_set_dma_mode - set host controller for DMA mode 131 * cs5535_set_dma_mode - set host controller for DMA mode
132 * @hwif: port
132 * @drive: drive 133 * @drive: drive
133 * @speed: DMA mode
134 * 134 *
135 * Programs the chipset for DMA mode. 135 * Programs the chipset for DMA mode.
136 */ 136 */
137 137
138static void cs5535_set_dma_mode(ide_drive_t *drive, const u8 speed) 138static void cs5535_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
139{ 139{
140 cs5535_set_speed(drive, speed); 140 cs5535_set_speed(drive, drive->dma_mode);
141} 141}
142 142
143/** 143/**
144 * cs5535_set_pio_mode - set host controller for PIO mode 144 * cs5535_set_pio_mode - set host controller for PIO mode
145 * @hwif: port
145 * @drive: drive 146 * @drive: drive
146 * @pio: PIO mode number
147 * 147 *
148 * A callback from the upper layers for PIO-only tuning. 148 * A callback from the upper layers for PIO-only tuning.
149 */ 149 */
150 150
151static void cs5535_set_pio_mode(ide_drive_t *drive, const u8 pio) 151static void cs5535_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
152{ 152{
153 cs5535_set_speed(drive, XFER_PIO_0 + pio); 153 cs5535_set_speed(drive, drive->pio_mode);
154} 154}
155 155
156static u8 cs5535_cable_detect(ide_hwif_t *hwif) 156static u8 cs5535_cable_detect(ide_hwif_t *hwif)
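
In cs5535 (and likewise in cs5536 and it821x below) the pair device's mode is now read straight from pair->pio_mode rather than recomputed with ide_get_best_pio_mode(), which this series makes static. The pair matters because command (task-file) timings are shared per channel and must be trimmed to the slower drive. A hedged, user-space-only sketch of that trim:

    #include <stdio.h>

    enum { XFER_PIO_0 = 0x08 };

    struct drive { unsigned char pio_mode; };

    static unsigned char command_pio(const struct drive *d,
                                     const struct drive *pair)
    {
        unsigned char cmd = d->pio_mode - XFER_PIO_0;

        if (pair) {
            unsigned char piob = pair->pio_mode - XFER_PIO_0;

            if (piob < cmd)
                cmd = piob;   /* trim to the slower device */
        }
        return cmd;
    }

    int main(void)
    {
        struct drive a = { XFER_PIO_0 + 4 }, b = { XFER_PIO_0 + 2 };

        printf("command timing uses PIO%u\n",
               (unsigned)command_pio(&a, &b));  /* PIO2 */
        return 0;
    }
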
diff --git a/drivers/ide/cs5536.c b/drivers/ide/cs5536.c
index 9623b852c616..24214ab60ac0 100644
--- a/drivers/ide/cs5536.c
+++ b/drivers/ide/cs5536.c
@@ -125,11 +125,11 @@ static u8 cs5536_cable_detect(ide_hwif_t *hwif)
125 125
126/** 126/**
127 * cs5536_set_pio_mode - PIO timing setup 127 * cs5536_set_pio_mode - PIO timing setup
128 * @hwif: ATA port
128 * @drive: ATA device 129 * @drive: ATA device
129 * @pio: PIO mode number
130 */ 130 */
131 131
132static void cs5536_set_pio_mode(ide_drive_t *drive, const u8 pio) 132static void cs5536_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
133{ 133{
134 static const u8 drv_timings[5] = { 134 static const u8 drv_timings[5] = {
135 0x98, 0x55, 0x32, 0x21, 0x20, 135 0x98, 0x55, 0x32, 0x21, 0x20,
@@ -143,15 +143,16 @@ static void cs5536_set_pio_mode(ide_drive_t *drive, const u8 pio)
143 0x99, 0x92, 0x90, 0x22, 0x20, 143 0x99, 0x92, 0x90, 0x22, 0x20,
144 }; 144 };
145 145
146 struct pci_dev *pdev = to_pci_dev(drive->hwif->dev); 146 struct pci_dev *pdev = to_pci_dev(hwif->dev);
147 ide_drive_t *pair = ide_get_pair_dev(drive); 147 ide_drive_t *pair = ide_get_pair_dev(drive);
148 int cshift = (drive->dn & 1) ? IDE_CAST_D1_SHIFT : IDE_CAST_D0_SHIFT; 148 int cshift = (drive->dn & 1) ? IDE_CAST_D1_SHIFT : IDE_CAST_D0_SHIFT;
149 unsigned long timings = (unsigned long)ide_get_drivedata(drive); 149 unsigned long timings = (unsigned long)ide_get_drivedata(drive);
150 u32 cast; 150 u32 cast;
151 const u8 pio = drive->pio_mode - XFER_PIO_0;
151 u8 cmd_pio = pio; 152 u8 cmd_pio = pio;
152 153
153 if (pair) 154 if (pair)
154 cmd_pio = min(pio, ide_get_best_pio_mode(pair, 255, 4)); 155 cmd_pio = min_t(u8, pio, pair->pio_mode - XFER_PIO_0);
155 156
156 timings &= (IDE_DRV_MASK << 8); 157 timings &= (IDE_DRV_MASK << 8);
157 timings |= drv_timings[pio]; 158 timings |= drv_timings[pio];
@@ -172,11 +173,11 @@ static void cs5536_set_pio_mode(ide_drive_t *drive, const u8 pio)
172 173
173/** 174/**
174 * cs5536_set_dma_mode - DMA timing setup 175 * cs5536_set_dma_mode - DMA timing setup
176 * @hwif: ATA port
175 * @drive: ATA device 177 * @drive: ATA device
176 * @mode: DMA mode
177 */ 178 */
178 179
179static void cs5536_set_dma_mode(ide_drive_t *drive, const u8 mode) 180static void cs5536_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
180{ 181{
181 static const u8 udma_timings[6] = { 182 static const u8 udma_timings[6] = {
182 0xc2, 0xc1, 0xc0, 0xc4, 0xc5, 0xc6, 183 0xc2, 0xc1, 0xc0, 0xc4, 0xc5, 0xc6,
@@ -186,10 +187,11 @@ static void cs5536_set_dma_mode(ide_drive_t *drive, const u8 mode)
186 0x67, 0x21, 0x20, 187 0x67, 0x21, 0x20,
187 }; 188 };
188 189
189 struct pci_dev *pdev = to_pci_dev(drive->hwif->dev); 190 struct pci_dev *pdev = to_pci_dev(hwif->dev);
190 int dshift = (drive->dn & 1) ? IDE_D1_SHIFT : IDE_D0_SHIFT; 191 int dshift = (drive->dn & 1) ? IDE_D1_SHIFT : IDE_D0_SHIFT;
191 unsigned long timings = (unsigned long)ide_get_drivedata(drive); 192 unsigned long timings = (unsigned long)ide_get_drivedata(drive);
192 u32 etc; 193 u32 etc;
194 const u8 mode = drive->dma_mode;
193 195
194 cs5536_read(pdev, ETC, &etc); 196 cs5536_read(pdev, ETC, &etc);
195 197
diff --git a/drivers/ide/cy82c693.c b/drivers/ide/cy82c693.c
index d6e2cbbc53a0..9383f67deae1 100644
--- a/drivers/ide/cy82c693.c
+++ b/drivers/ide/cy82c693.c
@@ -1,43 +1,11 @@
1/* 1/*
2 * Copyright (C) 1998-2000 Andreas S. Krebs (akrebs@altavista.net), Maintainer 2 * Copyright (C) 1998-2000 Andreas S. Krebs (akrebs@altavista.net), Maintainer
3 * Copyright (C) 1998-2002 Andre Hedrick <andre@linux-ide.org>, Integrator 3 * Copyright (C) 1998-2002 Andre Hedrick <andre@linux-ide.org>, Integrator
4 * Copyright (C) 2007-2010 Bartlomiej Zolnierkiewicz
4 * 5 *
5 * CYPRESS CY82C693 chipset IDE controller 6 * CYPRESS CY82C693 chipset IDE controller
6 * 7 *
7 * The CY82C693 chipset is used on Digital's PC-Alpha 164SX boards. 8 * The CY82C693 chipset is used on Digital's PC-Alpha 164SX boards.
8 * Writing the driver was quite simple, since most of the job is
9 * done by the generic pci-ide support.
10 * The hard part was finding the CY82C693's datasheet on Cypress's
11 * web page :-(. But Altavista solved this problem :-).
12 *
13 *
14 * Notes:
15 * - I recently got a 16.8G IBM DTTA, so I was able to test it with
16 * a large and fast disk - the results look great, so I'd say the
17 * driver is working fine :-)
18 * hdparm -t reports 8.17 MB/sec at about 6% CPU usage for the DTTA
19 * - this is my first linux driver, so there's probably a lot of room
20 * for optimizations and bug fixing, so feel free to do it.
21 * - if using PIO mode it's a good idea to set the PIO mode and
22 * 32-bit I/O support (if possible), e.g. hdparm -p2 -c1 /dev/hda
23 * - I had some problems with my IBM DHEA with PIO modes < 2
24 * (lost interrupts) ?????
25 * - first tests with DMA look okay, they seem to work, but there is a
26 * problem with sound - the BusMaster IDE TimeOut should fixed this
27 *
28 * Ancient History:
29 * AMH@1999-08-24: v0.34 init_cy82c693_chip moved to pci_init_cy82c693
30 * ASK@1999-01-23: v0.33 made a few minor code clean ups
31 * removed DMA clock speed setting by default
32 * added boot message
33 * ASK@1998-11-01: v0.32 added support to set BusMaster IDE TimeOut
34 * added support to set DMA Controller Clock Speed
35 * ASK@1998-10-31: v0.31 fixed problem with setting to high DMA modes
36 * on some drives.
37 * ASK@1998-10-29: v0.3 added support to set DMA modes
38 * ASK@1998-10-28: v0.2 added support to set PIO modes
39 * ASK@1998-10-27: v0.1 first version - chipset detection
40 *
41 */ 9 */
42 10
43#include <linux/module.h> 11#include <linux/module.h>
@@ -81,87 +49,13 @@
81#define CY82_INDEX_CHANNEL1 0x31 49#define CY82_INDEX_CHANNEL1 0x31
82#define CY82_INDEX_TIMEOUT 0x32 50#define CY82_INDEX_TIMEOUT 0x32
83 51
84/* the min and max PCI bus speed in MHz - from datasheet */
85#define CY82C963_MIN_BUS_SPEED 25
86#define CY82C963_MAX_BUS_SPEED 33
87
88/* the struct for the PIO mode timings */
89typedef struct pio_clocks_s {
90 u8 address_time; /* Address setup (clocks) */
91 u8 time_16r; /* clocks for 16bit IOR (0xF0=Active/data, 0x0F=Recovery) */
92 u8 time_16w; /* clocks for 16bit IOW (0xF0=Active/data, 0x0F=Recovery) */
93 u8 time_8; /* clocks for 8bit (0xF0=Active/data, 0x0F=Recovery) */
94} pio_clocks_t;
95
96/*
97 * calc clocks using bus_speed
98 * returns (rounded up) time in bus clocks for time in ns
99 */
100static int calc_clk(int time, int bus_speed)
101{
102 int clocks;
103
104 clocks = (time*bus_speed+999)/1000 - 1;
105
106 if (clocks < 0)
107 clocks = 0;
108
109 if (clocks > 0x0F)
110 clocks = 0x0F;
111
112 return clocks;
113}
114
115/*
116 * compute the values for the clock registers for PIO
117 * mode and pci_clk [MHz] speed
118 *
119 * NOTE: for mode 0,1 and 2 drives 8-bit IDE command control registers are used
120 * for mode 3 and 4 drives 8 and 16-bit timings are the same
121 *
122 */
123static void compute_clocks(u8 pio, pio_clocks_t *p_pclk)
124{
125 struct ide_timing *t = ide_timing_find_mode(XFER_PIO_0 + pio);
126 int clk1, clk2;
127 int bus_speed = ide_pci_clk ? ide_pci_clk : 33;
128
129 /* we don't check against CY82C693's min and max speed,
130 * so you can play with the idebus=xx parameter
131 */
132
133 /* let's calc the address setup time clocks */
134 p_pclk->address_time = (u8)calc_clk(t->setup, bus_speed);
135
136 /* let's calc the active and recovery time clocks */
137 clk1 = calc_clk(t->active, bus_speed);
138
139 /* calc recovery timing */
140 clk2 = t->cycle - t->active - t->setup;
141
142 clk2 = calc_clk(clk2, bus_speed);
143
144 clk1 = (clk1<<4)|clk2; /* combine active and recovery clocks */
145
146 /* note: we use the same values for 16bit IOR and IOW
147 * those are all the same, since I don't have other
148 * timings than those from ide-lib.c
149 */
150
151 p_pclk->time_16r = (u8)clk1;
152 p_pclk->time_16w = (u8)clk1;
153
154 /* what are good values for 8bit ?? */
155 p_pclk->time_8 = (u8)clk1;
156}
157
158/* 52/*
159 * set DMA mode a specific channel for CY82C693 53 * set DMA mode a specific channel for CY82C693
160 */ 54 */
161 55
162static void cy82c693_set_dma_mode(ide_drive_t *drive, const u8 mode) 56static void cy82c693_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
163{ 57{
164 ide_hwif_t *hwif = drive->hwif; 58 const u8 mode = drive->dma_mode;
165 u8 single = (mode & 0x10) >> 4, index = 0, data = 0; 59 u8 single = (mode & 0x10) >> 4, index = 0, data = 0;
166 60
167 index = hwif->channel ? CY82_INDEX_CHANNEL1 : CY82_INDEX_CHANNEL0; 61 index = hwif->channel ? CY82_INDEX_CHANNEL1 : CY82_INDEX_CHANNEL0;
@@ -186,12 +80,14 @@ static void cy82c693_set_dma_mode(ide_drive_t *drive, const u8 mode)
186 outb(data, CY82_DATA_PORT); 80 outb(data, CY82_DATA_PORT);
187} 81}
188 82
189static void cy82c693_set_pio_mode(ide_drive_t *drive, const u8 pio) 83static void cy82c693_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
190{ 84{
191 ide_hwif_t *hwif = drive->hwif;
192 struct pci_dev *dev = to_pci_dev(hwif->dev); 85 struct pci_dev *dev = to_pci_dev(hwif->dev);
193 pio_clocks_t pclk; 86 int bus_speed = ide_pci_clk ? ide_pci_clk : 33;
87 const unsigned long T = 1000000 / bus_speed;
194 unsigned int addrCtrl; 88 unsigned int addrCtrl;
89 struct ide_timing t;
90 u8 time_16, time_8;
195 91
196 /* select primary or secondary channel */ 92 /* select primary or secondary channel */
197 if (hwif->index > 0) { /* drive is on the secondary channel */ 93 if (hwif->index > 0) { /* drive is on the secondary channel */
@@ -204,8 +100,12 @@ static void cy82c693_set_pio_mode(ide_drive_t *drive, const u8 pio)
204 } 100 }
205 } 101 }
206 102
207 /* let's calc the values for this PIO mode */ 103 ide_timing_compute(drive, drive->pio_mode, &t, T, 1);
208 compute_clocks(pio, &pclk); 104
105 time_16 = clamp_val(t.recover - 1, 0, 15) |
106 (clamp_val(t.active - 1, 0, 15) << 4);
107 time_8 = clamp_val(t.act8b - 1, 0, 15) |
108 (clamp_val(t.rec8b - 1, 0, 15) << 4);
209 109
210 /* now let's write the clocks registers */ 110 /* now let's write the clocks registers */
211 if ((drive->dn & 1) == 0) { 111 if ((drive->dn & 1) == 0) {
@@ -217,13 +117,13 @@ static void cy82c693_set_pio_mode(ide_drive_t *drive, const u8 pio)
217 pci_read_config_dword(dev, CY82_IDE_ADDRSETUP, &addrCtrl); 117 pci_read_config_dword(dev, CY82_IDE_ADDRSETUP, &addrCtrl);
218 118
219 addrCtrl &= (~0xF); 119 addrCtrl &= (~0xF);
220 addrCtrl |= (unsigned int)pclk.address_time; 120 addrCtrl |= clamp_val(t.setup - 1, 0, 15);
221 pci_write_config_dword(dev, CY82_IDE_ADDRSETUP, addrCtrl); 121 pci_write_config_dword(dev, CY82_IDE_ADDRSETUP, addrCtrl);
222 122
223 /* now let's set the remaining registers */ 123 /* now let's set the remaining registers */
224 pci_write_config_byte(dev, CY82_IDE_MASTER_IOR, pclk.time_16r); 124 pci_write_config_byte(dev, CY82_IDE_MASTER_IOR, time_16);
225 pci_write_config_byte(dev, CY82_IDE_MASTER_IOW, pclk.time_16w); 125 pci_write_config_byte(dev, CY82_IDE_MASTER_IOW, time_16);
226 pci_write_config_byte(dev, CY82_IDE_MASTER_8BIT, pclk.time_8); 126 pci_write_config_byte(dev, CY82_IDE_MASTER_8BIT, time_8);
227 } else { 127 } else {
228 /* 128 /*
229 * set slave drive 129 * set slave drive
@@ -233,13 +133,13 @@ static void cy82c693_set_pio_mode(ide_drive_t *drive, const u8 pio)
233 pci_read_config_dword(dev, CY82_IDE_ADDRSETUP, &addrCtrl); 133 pci_read_config_dword(dev, CY82_IDE_ADDRSETUP, &addrCtrl);
234 134
235 addrCtrl &= (~0xF0); 135 addrCtrl &= (~0xF0);
236 addrCtrl |= ((unsigned int)pclk.address_time<<4); 136 addrCtrl |= (clamp_val(t.setup - 1, 0, 15) << 4);
237 pci_write_config_dword(dev, CY82_IDE_ADDRSETUP, addrCtrl); 137 pci_write_config_dword(dev, CY82_IDE_ADDRSETUP, addrCtrl);
238 138
239 /* now let's set the remaining registers */ 139 /* now let's set the remaining registers */
240 pci_write_config_byte(dev, CY82_IDE_SLAVE_IOR, pclk.time_16r); 140 pci_write_config_byte(dev, CY82_IDE_SLAVE_IOR, time_16);
241 pci_write_config_byte(dev, CY82_IDE_SLAVE_IOW, pclk.time_16w); 141 pci_write_config_byte(dev, CY82_IDE_SLAVE_IOW, time_16);
242 pci_write_config_byte(dev, CY82_IDE_SLAVE_8BIT, pclk.time_8); 142 pci_write_config_byte(dev, CY82_IDE_SLAVE_8BIT, time_8);
243 } 143 }
244} 144}
245 145
@@ -325,6 +225,6 @@ static void __exit cy82c693_ide_exit(void)
325module_init(cy82c693_ide_init); 225module_init(cy82c693_ide_init);
326module_exit(cy82c693_ide_exit); 226module_exit(cy82c693_ide_exit);
327 227
328MODULE_AUTHOR("Andreas Krebs, Andre Hedrick"); 228MODULE_AUTHOR("Andreas Krebs, Andre Hedrick, Bartlomiej Zolnierkiewicz");
329MODULE_DESCRIPTION("PCI driver module for the Cypress CY82C693 IDE"); 229MODULE_DESCRIPTION("PCI driver module for the Cypress CY82C693 IDE");
330MODULE_LICENSE("GPL"); 230MODULE_LICENSE("GPL");
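
The cy82c693 rewrite above deletes the hand-rolled calc_clk()/compute_clocks() pair and instead derives clocks from ide_timing_compute(), packing each value as (clocks - 1) clamped to a nibble: recovery in bits 0-3, active in bits 4-7. A self-contained model of that packing, where clamp_u() stands in for the kernel's clamp_val() macro:

    #include <stdio.h>

    static unsigned clamp_u(int v, int lo, int hi)
    {
        return v < lo ? lo : (v > hi ? hi : v);
    }

    static unsigned char pack_clocks(int active, int recover)
    {
        return clamp_u(recover - 1, 0, 15) |
               (clamp_u(active - 1, 0, 15) << 4);
    }

    int main(void)
    {
        /* e.g. 6 active clocks, 4 recovery clocks -> 0x53 */
        printf("0x%02x\n", (unsigned)pack_clocks(6, 4));
        return 0;
    }
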
diff --git a/drivers/ide/dtc2278.c b/drivers/ide/dtc2278.c
index c6b138122981..6929f7fce93a 100644
--- a/drivers/ide/dtc2278.c
+++ b/drivers/ide/dtc2278.c
@@ -68,11 +68,11 @@ static void sub22 (char b, char c)
68 68
69static DEFINE_SPINLOCK(dtc2278_lock); 69static DEFINE_SPINLOCK(dtc2278_lock);
70 70
71static void dtc2278_set_pio_mode(ide_drive_t *drive, const u8 pio) 71static void dtc2278_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
72{ 72{
73 unsigned long flags; 73 unsigned long flags;
74 74
75 if (pio >= 3) { 75 if (drive->pio_mode >= XFER_PIO_3) {
76 spin_lock_irqsave(&dtc2278_lock, flags); 76 spin_lock_irqsave(&dtc2278_lock, flags);
77 /* 77 /*
78 * This enables PIO mode4 (3?) on the first interface 78 * This enables PIO mode4 (3?) on the first interface
diff --git a/drivers/ide/hpt366.c b/drivers/ide/hpt366.c
index 4d90ac2dbb1b..b885c1d548f5 100644
--- a/drivers/ide/hpt366.c
+++ b/drivers/ide/hpt366.c
@@ -627,14 +627,14 @@ static u32 get_speed_setting(u8 speed, struct hpt_info *info)
627 return info->timings->clock_table[info->clock][i]; 627 return info->timings->clock_table[info->clock][i];
628} 628}
629 629
630static void hpt3xx_set_mode(ide_drive_t *drive, const u8 speed) 630static void hpt3xx_set_mode(ide_hwif_t *hwif, ide_drive_t *drive)
631{ 631{
632 ide_hwif_t *hwif = drive->hwif;
633 struct pci_dev *dev = to_pci_dev(hwif->dev); 632 struct pci_dev *dev = to_pci_dev(hwif->dev);
634 struct hpt_info *info = hpt3xx_get_info(hwif->dev); 633 struct hpt_info *info = hpt3xx_get_info(hwif->dev);
635 struct hpt_timings *t = info->timings; 634 struct hpt_timings *t = info->timings;
636 u8 itr_addr = 0x40 + (drive->dn * 4); 635 u8 itr_addr = 0x40 + (drive->dn * 4);
637 u32 old_itr = 0; 636 u32 old_itr = 0;
637 const u8 speed = drive->dma_mode;
638 u32 new_itr = get_speed_setting(speed, info); 638 u32 new_itr = get_speed_setting(speed, info);
639 u32 itr_mask = speed < XFER_MW_DMA_0 ? t->pio_mask : 639 u32 itr_mask = speed < XFER_MW_DMA_0 ? t->pio_mask :
640 (speed < XFER_UDMA_0 ? t->dma_mask : 640 (speed < XFER_UDMA_0 ? t->dma_mask :
@@ -651,9 +651,10 @@ static void hpt3xx_set_mode(ide_drive_t *drive, const u8 speed)
651 pci_write_config_dword(dev, itr_addr, new_itr); 651 pci_write_config_dword(dev, itr_addr, new_itr);
652} 652}
653 653
654static void hpt3xx_set_pio_mode(ide_drive_t *drive, const u8 pio) 654static void hpt3xx_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
655{ 655{
656 hpt3xx_set_mode(drive, XFER_PIO_0 + pio); 656 drive->dma_mode = drive->pio_mode;
657 hpt3xx_set_mode(hwif, drive);
657} 658}
658 659
659static void hpt3xx_maskproc(ide_drive_t *drive, int mask) 660static void hpt3xx_maskproc(ide_drive_t *drive, int mask)
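
hpt3xx_set_pio_mode() above shows an idiom several drivers adopt under the new convention: one routine keyed off a single transfer-mode byte handles PIO, MWDMA and UDMA, and the PIO hook simply parks the PIO mode in drive->dma_mode before delegating. A toy model, assuming the usual XFER_* base values from <linux/ata.h> (0x08, 0x20, 0x40):

    #include <stdio.h>

    enum { XFER_PIO_0 = 0x08, XFER_MW_DMA_0 = 0x20, XFER_UDMA_0 = 0x40 };

    struct drive { unsigned char pio_mode, dma_mode; };

    static void set_mode(struct drive *d)
    {
        unsigned speed = d->dma_mode;

        if (speed < XFER_MW_DMA_0)
            printf("PIO timing for mode 0x%02x\n", speed);
        else if (speed < XFER_UDMA_0)
            printf("MWDMA timing for mode 0x%02x\n", speed);
        else
            printf("UDMA timing for mode 0x%02x\n", speed);
    }

    static void set_pio_mode(struct drive *d)
    {
        d->dma_mode = d->pio_mode;   /* reuse the common path */
        set_mode(d);
    }

    int main(void)
    {
        struct drive d = { .pio_mode = XFER_PIO_0 + 4 };

        set_pio_mode(&d);
        return 0;
    }
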
diff --git a/drivers/ide/ht6560b.c b/drivers/ide/ht6560b.c
index aafed8060e17..d81e49680c3f 100644
--- a/drivers/ide/ht6560b.c
+++ b/drivers/ide/ht6560b.c
@@ -279,9 +279,10 @@ static void ht_set_prefetch(ide_drive_t *drive, u8 state)
279#endif 279#endif
280} 280}
281 281
282static void ht6560b_set_pio_mode(ide_drive_t *drive, const u8 pio) 282static void ht6560b_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
283{ 283{
284 unsigned long flags, config; 284 unsigned long flags, config;
285 const u8 pio = drive->pio_mode - XFER_PIO_0;
285 u8 timing; 286 u8 timing;
286 287
287 switch (pio) { 288 switch (pio) {
diff --git a/drivers/ide/icside.c b/drivers/ide/icside.c
index 0f67f1abbbd3..4a697a238e28 100644
--- a/drivers/ide/icside.c
+++ b/drivers/ide/icside.c
@@ -65,6 +65,8 @@ static struct cardinfo icside_cardinfo_v6_2 = {
65}; 65};
66 66
67struct icside_state { 67struct icside_state {
68 unsigned int channel;
69 unsigned int enabled;
68 void __iomem *irq_port; 70 void __iomem *irq_port;
69 void __iomem *ioc_base; 71 void __iomem *ioc_base;
70 unsigned int sel; 72 unsigned int sel;
@@ -114,11 +116,18 @@ static void icside_irqenable_arcin_v6 (struct expansion_card *ec, int irqnr)
114 struct icside_state *state = ec->irq_data; 116 struct icside_state *state = ec->irq_data;
115 void __iomem *base = state->irq_port; 117 void __iomem *base = state->irq_port;
116 118
117 writeb(0, base + ICS_ARCIN_V6_INTROFFSET_1); 119 state->enabled = 1;
118 readb(base + ICS_ARCIN_V6_INTROFFSET_2);
119 120
120 writeb(0, base + ICS_ARCIN_V6_INTROFFSET_2); 121 switch (state->channel) {
121 readb(base + ICS_ARCIN_V6_INTROFFSET_1); 122 case 0:
123 writeb(0, base + ICS_ARCIN_V6_INTROFFSET_1);
124 readb(base + ICS_ARCIN_V6_INTROFFSET_2);
125 break;
126 case 1:
127 writeb(0, base + ICS_ARCIN_V6_INTROFFSET_2);
128 readb(base + ICS_ARCIN_V6_INTROFFSET_1);
129 break;
130 }
122} 131}
123 132
124/* Prototype: icside_irqdisable_arcin_v6 (struct expansion_card *ec, int irqnr) 133/* Prototype: icside_irqdisable_arcin_v6 (struct expansion_card *ec, int irqnr)
@@ -128,6 +137,8 @@ static void icside_irqdisable_arcin_v6 (struct expansion_card *ec, int irqnr)
128{ 137{
129 struct icside_state *state = ec->irq_data; 138 struct icside_state *state = ec->irq_data;
130 139
140 state->enabled = 0;
141
131 readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_1); 142 readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_1);
132 readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_2); 143 readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_2);
133} 144}
@@ -149,6 +160,44 @@ static const expansioncard_ops_t icside_ops_arcin_v6 = {
149 .irqpending = icside_irqpending_arcin_v6, 160 .irqpending = icside_irqpending_arcin_v6,
150}; 161};
151 162
163/*
164 * Handle routing of interrupts. This is called before
165 * we write the command to the drive.
166 */
167static void icside_maskproc(ide_drive_t *drive, int mask)
168{
169 ide_hwif_t *hwif = drive->hwif;
170 struct expansion_card *ec = ECARD_DEV(hwif->dev);
171 struct icside_state *state = ecard_get_drvdata(ec);
172 unsigned long flags;
173
174 local_irq_save(flags);
175
176 state->channel = hwif->channel;
177
178 if (state->enabled && !mask) {
179 switch (hwif->channel) {
180 case 0:
181 writeb(0, state->irq_port + ICS_ARCIN_V6_INTROFFSET_1);
182 readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_2);
183 break;
184 case 1:
185 writeb(0, state->irq_port + ICS_ARCIN_V6_INTROFFSET_2);
186 readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_1);
187 break;
188 }
189 } else {
190 readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_2);
191 readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_1);
192 }
193
194 local_irq_restore(flags);
195}
196
197static const struct ide_port_ops icside_v6_no_dma_port_ops = {
198 .maskproc = icside_maskproc,
199};
200
152#ifdef CONFIG_BLK_DEV_IDEDMA_ICS 201#ifdef CONFIG_BLK_DEV_IDEDMA_ICS
153/* 202/*
154 * SG-DMA support. 203 * SG-DMA support.
@@ -185,10 +234,11 @@ static const expansioncard_ops_t icside_ops_arcin_v6 = {
185 * MW1 80 50 50 150 C 234 * MW1 80 50 50 150 C
186 * MW2 70 25 25 120 C 235 * MW2 70 25 25 120 C
187 */ 236 */
188static void icside_set_dma_mode(ide_drive_t *drive, const u8 xfer_mode) 237static void icside_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
189{ 238{
190 unsigned long cycle_time; 239 unsigned long cycle_time;
191 int use_dma_info = 0; 240 int use_dma_info = 0;
241 const u8 xfer_mode = drive->dma_mode;
192 242
193 switch (xfer_mode) { 243 switch (xfer_mode) {
194 case XFER_MW_DMA_2: 244 case XFER_MW_DMA_2:
@@ -228,6 +278,7 @@ static void icside_set_dma_mode(ide_drive_t *drive, const u8 xfer_mode)
228 278
229static const struct ide_port_ops icside_v6_port_ops = { 279static const struct ide_port_ops icside_v6_port_ops = {
230 .set_dma_mode = icside_set_dma_mode, 280 .set_dma_mode = icside_set_dma_mode,
281 .maskproc = icside_maskproc,
231}; 282};
232 283
233static void icside_dma_host_set(ide_drive_t *drive, int on) 284static void icside_dma_host_set(ide_drive_t *drive, int on)
@@ -272,6 +323,11 @@ static int icside_dma_setup(ide_drive_t *drive, struct ide_cmd *cmd)
272 BUG_ON(dma_channel_active(ec->dma)); 323 BUG_ON(dma_channel_active(ec->dma));
273 324
274 /* 325 /*
326 * Ensure that we have the right interrupt routed.
327 */
328 icside_maskproc(drive, 0);
329
330 /*
275 * Route the DMA signals to the correct interface. 331 * Route the DMA signals to the correct interface.
276 */ 332 */
277 writeb(state->sel | hwif->channel, state->ioc_base); 333 writeb(state->sel | hwif->channel, state->ioc_base);
@@ -399,6 +455,7 @@ err_free:
399 455
400static const struct ide_port_info icside_v6_port_info __initdata = { 456static const struct ide_port_info icside_v6_port_info __initdata = {
401 .init_dma = icside_dma_off_init, 457 .init_dma = icside_dma_off_init,
458 .port_ops = &icside_v6_no_dma_port_ops,
402 .dma_ops = &icside_v6_dma_ops, 459 .dma_ops = &icside_v6_dma_ops,
403 .host_flags = IDE_HFLAG_SERIALIZE | IDE_HFLAG_MMIO, 460 .host_flags = IDE_HFLAG_SERIALIZE | IDE_HFLAG_MMIO,
404 .mwdma_mask = ATA_MWDMA2, 461 .mwdma_mask = ATA_MWDMA2,
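
The icside changes add per-channel IRQ steering: the v6 card routes only one channel's interrupt at a time, so the new maskproc records the active channel in icside_state and re-enables just that side before a command is issued (and again from icside_dma_setup() before DMA starts). A rough user-space model of the decision logic, with printf standing in for the INTROFFSET register accesses:

    #include <stdio.h>

    struct icside_state { unsigned channel, enabled; };

    static void route_irq(struct icside_state *s, unsigned channel, int mask)
    {
        s->channel = channel;   /* remember which side is current */

        if (s->enabled && !mask)
            printf("enable IRQ routing for channel %u only\n", s->channel);
        else
            printf("IRQs quiesced on both channels\n");
    }

    int main(void)
    {
        struct icside_state s = { .enabled = 1 };

        route_irq(&s, 0, 0);   /* command to channel 0: route its IRQ */
        route_irq(&s, 1, 0);   /* switch routing to channel 1 */
        route_irq(&s, 1, 1);   /* masked: neither side delivers */
        return 0;
    }
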
diff --git a/drivers/ide/ide-cs.c b/drivers/ide/ide-cs.c
index dd6396384c25..ab87e4f7cec9 100644
--- a/drivers/ide/ide-cs.c
+++ b/drivers/ide/ide-cs.c
@@ -121,19 +121,11 @@ static int ide_probe(struct pcmcia_device *link)
121static void ide_detach(struct pcmcia_device *link) 121static void ide_detach(struct pcmcia_device *link)
122{ 122{
123 ide_info_t *info = link->priv; 123 ide_info_t *info = link->priv;
124 ide_hwif_t *hwif = info->host->ports[0];
125 unsigned long data_addr, ctl_addr;
126 124
127 dev_dbg(&link->dev, "ide_detach(0x%p)\n", link); 125 dev_dbg(&link->dev, "ide_detach(0x%p)\n", link);
128 126
129 data_addr = hwif->io_ports.data_addr;
130 ctl_addr = hwif->io_ports.ctl_addr;
131
132 ide_release(link); 127 ide_release(link);
133 128
134 release_region(ctl_addr, 1);
135 release_region(data_addr, 8);
136
137 kfree(info); 129 kfree(info);
138} /* ide_detach */ 130} /* ide_detach */
139 131
@@ -354,12 +346,19 @@ static void ide_release(struct pcmcia_device *link)
354 346
355 dev_dbg(&link->dev, "ide_release(0x%p)\n", link); 347 dev_dbg(&link->dev, "ide_release(0x%p)\n", link);
356 348
357 if (info->ndev) 349 if (info->ndev) {
358 /* FIXME: if this fails we need to queue the cleanup somehow 350 ide_hwif_t *hwif = host->ports[0];
359 -- need to investigate the required PCMCIA magic */ 351 unsigned long data_addr, ctl_addr;
352
353 data_addr = hwif->io_ports.data_addr;
354 ctl_addr = hwif->io_ports.ctl_addr;
355
360 ide_host_remove(host); 356 ide_host_remove(host);
357 info->ndev = 0;
361 358
362 info->ndev = 0; 359 release_region(ctl_addr, 1);
360 release_region(data_addr, 8);
361 }
363 362
364 pcmcia_disable_device(link); 363 pcmcia_disable_device(link);
365} /* ide_release */ 364} /* ide_release */
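
The ide-cs change is an ordering fix: the I/O region addresses must be captured from the port while the host is still alive, the host removed, and only then the regions released, and all of it only when a device was actually registered (info->ndev). A sketch of the corrected ordering, with printf in place of ide_host_remove()/release_region():

    #include <stdio.h>

    struct port { unsigned long data_addr, ctl_addr; };
    struct host { struct port port0; int ndev; };

    static void teardown(struct host *h)
    {
        if (h->ndev) {
            unsigned long data = h->port0.data_addr;  /* save first */
            unsigned long ctl  = h->port0.ctl_addr;

            printf("remove host\n");                  /* port0 gone after this */
            h->ndev = 0;

            printf("release_region(0x%lx, 1)\n", ctl);
            printf("release_region(0x%lx, 8)\n", data);
        }
    }

    int main(void)
    {
        struct host h = { { 0x1f0, 0x3f6 }, 1 };

        teardown(&h);
        return 0;
    }
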
diff --git a/drivers/ide/ide-devsets.c b/drivers/ide/ide-devsets.c
index 1099bf7cf968..c6935c78757c 100644
--- a/drivers/ide/ide-devsets.c
+++ b/drivers/ide/ide-devsets.c
@@ -105,15 +105,17 @@ static int set_pio_mode(ide_drive_t *drive, int arg)
105 return -ENOSYS; 105 return -ENOSYS;
106 106
107 if (set_pio_mode_abuse(drive->hwif, arg)) { 107 if (set_pio_mode_abuse(drive->hwif, arg)) {
108 drive->pio_mode = arg + XFER_PIO_0;
109
108 if (arg == 8 || arg == 9) { 110 if (arg == 8 || arg == 9) {
109 unsigned long flags; 111 unsigned long flags;
110 112
111 /* take lock for IDE_DFLAG_[NO_]UNMASK/[NO_]IO_32BIT */ 113 /* take lock for IDE_DFLAG_[NO_]UNMASK/[NO_]IO_32BIT */
112 spin_lock_irqsave(&hwif->lock, flags); 114 spin_lock_irqsave(&hwif->lock, flags);
113 port_ops->set_pio_mode(drive, arg); 115 port_ops->set_pio_mode(hwif, drive);
114 spin_unlock_irqrestore(&hwif->lock, flags); 116 spin_unlock_irqrestore(&hwif->lock, flags);
115 } else 117 } else
116 port_ops->set_pio_mode(drive, arg); 118 port_ops->set_pio_mode(hwif, drive);
117 } else { 119 } else {
118 int keep_dma = !!(drive->dev_flags & IDE_DFLAG_USING_DMA); 120 int keep_dma = !!(drive->dev_flags & IDE_DFLAG_USING_DMA);
119 121
diff --git a/drivers/ide/ide-iops.c b/drivers/ide/ide-iops.c
index 222c1ef65fb9..376f2dc410c5 100644
--- a/drivers/ide/ide-iops.c
+++ b/drivers/ide/ide-iops.c
@@ -231,7 +231,7 @@ u8 eighty_ninty_three(ide_drive_t *drive)
231 u16 *id = drive->id; 231 u16 *id = drive->id;
232 int ivb = ide_in_drive_list(id, ivb_list); 232 int ivb = ide_in_drive_list(id, ivb_list);
233 233
234 if (hwif->cbl == ATA_CBL_PATA40_SHORT) 234 if (hwif->cbl == ATA_CBL_SATA || hwif->cbl == ATA_CBL_PATA40_SHORT)
235 return 1; 235 return 1;
236 236
237 if (ivb) 237 if (ivb)
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
index f8c1ae6ad74c..fbedd35feb44 100644
--- a/drivers/ide/ide-probe.c
+++ b/drivers/ide/ide-probe.c
@@ -1042,6 +1042,8 @@ static void ide_port_init_devices(ide_hwif_t *hwif)
1042 if (hwif->host_flags & IDE_HFLAG_NO_UNMASK_IRQS) 1042 if (hwif->host_flags & IDE_HFLAG_NO_UNMASK_IRQS)
1043 drive->dev_flags |= IDE_DFLAG_NO_UNMASK; 1043 drive->dev_flags |= IDE_DFLAG_NO_UNMASK;
1044 1044
1045 drive->pio_mode = XFER_PIO_0;
1046
1045 if (port_ops && port_ops->init_dev) 1047 if (port_ops && port_ops->init_dev)
1046 port_ops->init_dev(drive); 1048 port_ops->init_dev(drive);
1047 } 1049 }
diff --git a/drivers/ide/ide-tape.c b/drivers/ide/ide-tape.c
index 6a0e62542167..b07232880ec9 100644
--- a/drivers/ide/ide-tape.c
+++ b/drivers/ide/ide-tape.c
@@ -1365,7 +1365,7 @@ static int idetape_mtioctop(ide_drive_t *drive, short mt_op, int mt_count)
1365 * supported here, and not in the corresponding block interface. Our own 1365 * supported here, and not in the corresponding block interface. Our own
1366 * ide-tape ioctls are supported on both interfaces. 1366 * ide-tape ioctls are supported on both interfaces.
1367 */ 1367 */
1368static int idetape_chrdev_ioctl(struct inode *inode, struct file *file, 1368static long do_idetape_chrdev_ioctl(struct file *file,
1369 unsigned int cmd, unsigned long arg) 1369 unsigned int cmd, unsigned long arg)
1370{ 1370{
1371 struct ide_tape_obj *tape = file->private_data; 1371 struct ide_tape_obj *tape = file->private_data;
@@ -1420,6 +1420,16 @@ static int idetape_chrdev_ioctl(struct inode *inode, struct file *file,
1420 } 1420 }
1421} 1421}
1422 1422
1423static long idetape_chrdev_ioctl(struct file *file,
1424 unsigned int cmd, unsigned long arg)
1425{
1426 long ret;
1427 lock_kernel();
1428 ret = do_idetape_chrdev_ioctl(file, cmd, arg);
1429 unlock_kernel();
1430 return ret;
1431}
1432
1423/* 1433/*
1424 * Do a mode sense page 0 with block descriptor and if it succeeds set the tape 1434 * Do a mode sense page 0 with block descriptor and if it succeeds set the tape
1425 * block size with the reported value. 1435 * block size with the reported value.
@@ -1888,7 +1898,7 @@ static const struct file_operations idetape_fops = {
1888 .owner = THIS_MODULE, 1898 .owner = THIS_MODULE,
1889 .read = idetape_chrdev_read, 1899 .read = idetape_chrdev_read,
1890 .write = idetape_chrdev_write, 1900 .write = idetape_chrdev_write,
1891 .ioctl = idetape_chrdev_ioctl, 1901 .unlocked_ioctl = idetape_chrdev_ioctl,
1892 .open = idetape_chrdev_open, 1902 .open = idetape_chrdev_open,
1893 .release = idetape_chrdev_release, 1903 .release = idetape_chrdev_release,
1894}; 1904};
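
ide-tape moves from the old .ioctl slot to .unlocked_ioctl, the standard big-kernel-lock push-down: the VFS no longer takes the BKL on the driver's behalf, so the handler wraps the real work in explicit lock_kernel()/unlock_kernel() calls with unchanged semantics. A sketch of the wrapper pattern, with a pthread mutex standing in for the BKL (build with -lpthread):

    #include <stdio.h>
    #include <pthread.h>

    static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;

    static long do_ioctl(unsigned int cmd, unsigned long arg)
    {
        printf("cmd=%u arg=%lu\n", cmd, arg);
        return 0;
    }

    static long unlocked_ioctl(unsigned int cmd, unsigned long arg)
    {
        long ret;

        pthread_mutex_lock(&big_lock);    /* was: lock_kernel() */
        ret = do_ioctl(cmd, arg);
        pthread_mutex_unlock(&big_lock);  /* was: unlock_kernel() */
        return ret;
    }

    int main(void)
    {
        return (int)unlocked_ioctl(1, 2);
    }
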
diff --git a/drivers/ide/ide-timings.c b/drivers/ide/ide-timings.c
index 001a56365be5..0e05f75934c9 100644
--- a/drivers/ide/ide-timings.c
+++ b/drivers/ide/ide-timings.c
@@ -166,12 +166,13 @@ int ide_timing_compute(ide_drive_t *drive, u8 speed,
166 if (id[ATA_ID_FIELD_VALID] & 2) { /* EIDE drive */ 166 if (id[ATA_ID_FIELD_VALID] & 2) { /* EIDE drive */
167 memset(&p, 0, sizeof(p)); 167 memset(&p, 0, sizeof(p));
168 168
169 if (speed <= XFER_PIO_2) 169 if (speed >= XFER_PIO_0 && speed < XFER_SW_DMA_0) {
170 p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO]; 170 if (speed <= XFER_PIO_2)
171 else if ((speed <= XFER_PIO_4) || 171 p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO];
172 (speed == XFER_PIO_5 && !ata_id_is_cfa(id))) 172 else if ((speed <= XFER_PIO_4) ||
173 p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO_IORDY]; 173 (speed == XFER_PIO_5 && !ata_id_is_cfa(id)))
174 else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) 174 p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO_IORDY];
175 } else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2)
175 p.cycle = id[ATA_ID_EIDE_DMA_MIN]; 176 p.cycle = id[ATA_ID_EIDE_DMA_MIN];
176 177
177 ide_timing_merge(&p, t, t, IDE_TIMING_CYCLE | IDE_TIMING_CYC8B); 178 ide_timing_merge(&p, t, t, IDE_TIMING_CYCLE | IDE_TIMING_CYC8B);
@@ -185,11 +186,10 @@ int ide_timing_compute(ide_drive_t *drive, u8 speed,
185 /* 186 /*
186 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY, 187 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
187 * S.M.A.R.T and some other commands. We have to ensure that the 188 * S.M.A.R.T and some other commands. We have to ensure that the
188 * DMA cycle timing is slower/equal than the fastest PIO timing. 189 * DMA cycle timing is slower/equal than the current PIO timing.
189 */ 190 */
190 if (speed >= XFER_SW_DMA_0) { 191 if (speed >= XFER_SW_DMA_0) {
191 u8 pio = ide_get_best_pio_mode(drive, 255, 5); 192 ide_timing_compute(drive, drive->pio_mode, &p, T, UT);
192 ide_timing_compute(drive, XFER_PIO_0 + pio, &p, T, UT);
193 ide_timing_merge(&p, t, t, IDE_TIMING_ALL); 193 ide_timing_merge(&p, t, t, IDE_TIMING_ALL);
194 } 194 }
195 195
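
The ide_timing_compute() hunks tighten two things: drive-reported EIDE cycle times are now applied only to the mode classes they actually describe, and DMA timings are merged against the drive's currently selected PIO mode (drive->pio_mode) rather than a recomputed "best" mode, since IDENTIFY/SMART register access still runs in PIO while DMA is in use. A toy model of that merge, with fields following struct ide_timing loosely:

    #include <stdio.h>

    struct timing { unsigned cycle, active; };

    static unsigned max_u(unsigned a, unsigned b) { return a > b ? a : b; }

    /* DMA timings may never be faster than the current PIO timings */
    static void timing_merge(const struct timing *pio, struct timing *dma)
    {
        dma->cycle  = max_u(dma->cycle,  pio->cycle);
        dma->active = max_u(dma->active, pio->active);
    }

    int main(void)
    {
        struct timing pio = { .cycle = 240, .active = 120 }; /* current PIO */
        struct timing dma = { .cycle = 120, .active = 70 };  /* MWDMA2 */

        timing_merge(&pio, &dma);
        printf("effective cycle %u ns, active %u ns\n",
               dma.cycle, dma.active);
        return 0;
    }
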
diff --git a/drivers/ide/ide-xfer-mode.c b/drivers/ide/ide-xfer-mode.c
index 46d203ce60cc..5fc8d5c17de9 100644
--- a/drivers/ide/ide-xfer-mode.c
+++ b/drivers/ide/ide-xfer-mode.c
@@ -58,7 +58,7 @@ EXPORT_SYMBOL(ide_xfer_verbose);
58 * This is used by most chipset support modules when "auto-tuning". 58 * This is used by most chipset support modules when "auto-tuning".
59 */ 59 */
60 60
61u8 ide_get_best_pio_mode(ide_drive_t *drive, u8 mode_wanted, u8 max_mode) 61static u8 ide_get_best_pio_mode(ide_drive_t *drive, u8 mode_wanted, u8 max_mode)
62{ 62{
63 u16 *id = drive->id; 63 u16 *id = drive->id;
64 int pio_mode = -1, overridden = 0; 64 int pio_mode = -1, overridden = 0;
@@ -105,7 +105,6 @@ u8 ide_get_best_pio_mode(ide_drive_t *drive, u8 mode_wanted, u8 max_mode)
105 105
106 return pio_mode; 106 return pio_mode;
107} 107}
108EXPORT_SYMBOL_GPL(ide_get_best_pio_mode);
109 108
110int ide_pio_need_iordy(ide_drive_t *drive, const u8 pio) 109int ide_pio_need_iordy(ide_drive_t *drive, const u8 pio)
111{ 110{
@@ -135,17 +134,20 @@ int ide_set_pio_mode(ide_drive_t *drive, const u8 mode)
135 * set transfer mode on the device in ->set_pio_mode method... 134 * set transfer mode on the device in ->set_pio_mode method...
136 */ 135 */
137 if (port_ops->set_dma_mode == NULL) { 136 if (port_ops->set_dma_mode == NULL) {
138 port_ops->set_pio_mode(drive, mode - XFER_PIO_0); 137 drive->pio_mode = mode;
138 port_ops->set_pio_mode(hwif, drive);
139 return 0; 139 return 0;
140 } 140 }
141 141
142 if (hwif->host_flags & IDE_HFLAG_POST_SET_MODE) { 142 if (hwif->host_flags & IDE_HFLAG_POST_SET_MODE) {
143 if (ide_config_drive_speed(drive, mode)) 143 if (ide_config_drive_speed(drive, mode))
144 return -1; 144 return -1;
145 port_ops->set_pio_mode(drive, mode - XFER_PIO_0); 145 drive->pio_mode = mode;
146 port_ops->set_pio_mode(hwif, drive);
146 return 0; 147 return 0;
147 } else { 148 } else {
148 port_ops->set_pio_mode(drive, mode - XFER_PIO_0); 149 drive->pio_mode = mode;
150 port_ops->set_pio_mode(hwif, drive);
149 return ide_config_drive_speed(drive, mode); 151 return ide_config_drive_speed(drive, mode);
150 } 152 }
151} 153}
@@ -164,10 +166,12 @@ int ide_set_dma_mode(ide_drive_t *drive, const u8 mode)
164 if (hwif->host_flags & IDE_HFLAG_POST_SET_MODE) { 166 if (hwif->host_flags & IDE_HFLAG_POST_SET_MODE) {
165 if (ide_config_drive_speed(drive, mode)) 167 if (ide_config_drive_speed(drive, mode))
166 return -1; 168 return -1;
167 port_ops->set_dma_mode(drive, mode); 169 drive->dma_mode = mode;
170 port_ops->set_dma_mode(hwif, drive);
168 return 0; 171 return 0;
169 } else { 172 } else {
170 port_ops->set_dma_mode(drive, mode); 173 drive->dma_mode = mode;
174 port_ops->set_dma_mode(hwif, drive);
171 return ide_config_drive_speed(drive, mode); 175 return ide_config_drive_speed(drive, mode);
172 } 176 }
173} 177}
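
ide_set_pio_mode()/ide_set_dma_mode() now always store the mode on the device before invoking the port hook; IDE_HFLAG_POST_SET_MODE only decides whether the device or the host controller gets programmed first. A simplified stand-in showing the two orderings:

    #include <stdio.h>

    enum { XFER_PIO_0 = 0x08 };

    struct drive { unsigned char pio_mode; };

    static int config_drive_speed(struct drive *d, unsigned char mode)
    {
        printf("SETFEATURES_XFER 0x%02x to device\n", mode);
        return 0;
    }

    static void port_set_pio_mode(struct drive *d)
    {
        printf("host timings for mode 0x%02x\n", d->pio_mode);
    }

    static int set_pio_mode(struct drive *d, unsigned char mode,
                            int post_set_mode)
    {
        if (post_set_mode) {
            if (config_drive_speed(d, mode))  /* device first */
                return -1;
            d->pio_mode = mode;
            port_set_pio_mode(d);
            return 0;
        }
        d->pio_mode = mode;                   /* host first */
        port_set_pio_mode(d);
        return config_drive_speed(d, mode);
    }

    int main(void)
    {
        struct drive d = { 0 };

        return set_pio_mode(&d, XFER_PIO_0 + 4, 0);
    }
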
diff --git a/drivers/ide/it8172.c b/drivers/ide/it8172.c
index 0d266a5b524d..560e66d07659 100644
--- a/drivers/ide/it8172.c
+++ b/drivers/ide/it8172.c
@@ -37,12 +37,12 @@
37 37
38#define DRV_NAME "IT8172" 38#define DRV_NAME "IT8172"
39 39
40static void it8172_set_pio_mode(ide_drive_t *drive, const u8 pio) 40static void it8172_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
41{ 41{
42 ide_hwif_t *hwif = drive->hwif;
43 struct pci_dev *dev = to_pci_dev(hwif->dev); 42 struct pci_dev *dev = to_pci_dev(hwif->dev);
44 u16 drive_enables; 43 u16 drive_enables;
45 u32 drive_timing; 44 u32 drive_timing;
45 const u8 pio = drive->pio_mode - XFER_PIO_0;
46 46
47 /* 47 /*
48 * The highest value of DIOR/DIOW pulse width and recovery time 48 * The highest value of DIOR/DIOW pulse width and recovery time
@@ -77,14 +77,14 @@ static void it8172_set_pio_mode(ide_drive_t *drive, const u8 pio)
77 pci_write_config_dword(dev, 0x44, drive_timing); 77 pci_write_config_dword(dev, 0x44, drive_timing);
78} 78}
79 79
80static void it8172_set_dma_mode(ide_drive_t *drive, const u8 speed) 80static void it8172_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
81{ 81{
82 ide_hwif_t *hwif = drive->hwif;
83 struct pci_dev *dev = to_pci_dev(hwif->dev); 82 struct pci_dev *dev = to_pci_dev(hwif->dev);
84 int a_speed = 3 << (drive->dn * 4); 83 int a_speed = 3 << (drive->dn * 4);
85 int u_flag = 1 << drive->dn; 84 int u_flag = 1 << drive->dn;
86 int u_speed = 0; 85 int u_speed = 0;
87 u8 reg48, reg4a; 86 u8 reg48, reg4a;
87 const u8 speed = drive->dma_mode;
88 88
89 pci_read_config_byte(dev, 0x48, &reg48); 89 pci_read_config_byte(dev, 0x48, &reg48);
90 pci_read_config_byte(dev, 0x4a, &reg4a); 90 pci_read_config_byte(dev, 0x4a, &reg4a);
@@ -98,14 +98,14 @@ static void it8172_set_dma_mode(ide_drive_t *drive, const u8 speed)
98 pci_write_config_byte(dev, 0x4a, reg4a | u_speed); 98 pci_write_config_byte(dev, 0x4a, reg4a | u_speed);
99 } else { 99 } else {
100 const u8 mwdma_to_pio[] = { 0, 3, 4 }; 100 const u8 mwdma_to_pio[] = { 0, 3, 4 };
101 u8 pio;
102 101
103 pci_write_config_byte(dev, 0x48, reg48 & ~u_flag); 102 pci_write_config_byte(dev, 0x48, reg48 & ~u_flag);
104 pci_write_config_byte(dev, 0x4a, reg4a & ~a_speed); 103 pci_write_config_byte(dev, 0x4a, reg4a & ~a_speed);
105 104
106 pio = mwdma_to_pio[speed - XFER_MW_DMA_0]; 105 drive->pio_mode =
106 mwdma_to_pio[speed - XFER_MW_DMA_0] + XFER_PIO_0;
107 107
108 it8172_set_pio_mode(drive, pio); 108 it8172_set_pio_mode(hwif, drive);
109 } 109 }
110} 110}
111 111
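
it8172 (and it8213 just below) keep their MWDMA-to-PIO fallback, but under the new convention it works by storing the equivalent PIO mode in drive->pio_mode and re-entering the PIO hook. The mapping itself is unchanged: MWDMA0/1/2 are timed like PIO0/3/4. A small runnable check of that mapping:

    #include <stdio.h>

    enum { XFER_PIO_0 = 0x08, XFER_MW_DMA_0 = 0x20 };

    static unsigned char mwdma_equiv_pio(unsigned char speed)
    {
        static const unsigned char mwdma_to_pio[] = { 0, 3, 4 };

        return mwdma_to_pio[speed - XFER_MW_DMA_0] + XFER_PIO_0;
    }

    int main(void)
    {
        /* MWDMA2 is timed like PIO4 -> 0x0c */
        printf("0x%02x\n", (unsigned)mwdma_equiv_pio(XFER_MW_DMA_0 + 2));
        return 0;
    }
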
diff --git a/drivers/ide/it8213.c b/drivers/ide/it8213.c
index 47976167796a..46816ba26416 100644
--- a/drivers/ide/it8213.c
+++ b/drivers/ide/it8213.c
@@ -17,15 +17,14 @@
17 17
18/** 18/**
19 * it8213_set_pio_mode - set host controller for PIO mode 19 * it8213_set_pio_mode - set host controller for PIO mode
20 * @hwif: port
20 * @drive: drive 21 * @drive: drive
21 * @pio: PIO mode number
22 * 22 *
23 * Set the interface PIO mode. 23 * Set the interface PIO mode.
24 */ 24 */
25 25
26static void it8213_set_pio_mode(ide_drive_t *drive, const u8 pio) 26static void it8213_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
27{ 27{
28 ide_hwif_t *hwif = drive->hwif;
29 struct pci_dev *dev = to_pci_dev(hwif->dev); 28 struct pci_dev *dev = to_pci_dev(hwif->dev);
30 int is_slave = drive->dn & 1; 29 int is_slave = drive->dn & 1;
31 int master_port = 0x40; 30 int master_port = 0x40;
@@ -35,6 +34,7 @@ static void it8213_set_pio_mode(ide_drive_t *drive, const u8 pio)
35 u8 slave_data; 34 u8 slave_data;
36 static DEFINE_SPINLOCK(tune_lock); 35 static DEFINE_SPINLOCK(tune_lock);
37 int control = 0; 36 int control = 0;
37 const u8 pio = drive->pio_mode - XFER_PIO_0;
38 38
39 static const u8 timings[][2] = { 39 static const u8 timings[][2] = {
40 { 0, 0 }, 40 { 0, 0 },
@@ -74,15 +74,14 @@ static void it8213_set_pio_mode(ide_drive_t *drive, const u8 pio)
74 74
75/** 75/**
76 * it8213_set_dma_mode - set host controller for DMA mode 76 * it8213_set_dma_mode - set host controller for DMA mode
77 * @hwif: port
77 * @drive: drive 78 * @drive: drive
78 * @speed: DMA mode
79 * 79 *
80 * Tune the ITE chipset for the DMA mode. 80 * Tune the ITE chipset for the DMA mode.
81 */ 81 */
82 82
83static void it8213_set_dma_mode(ide_drive_t *drive, const u8 speed) 83static void it8213_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
84{ 84{
85 ide_hwif_t *hwif = drive->hwif;
86 struct pci_dev *dev = to_pci_dev(hwif->dev); 85 struct pci_dev *dev = to_pci_dev(hwif->dev);
87 u8 maslave = 0x40; 86 u8 maslave = 0x40;
88 int a_speed = 3 << (drive->dn * 4); 87 int a_speed = 3 << (drive->dn * 4);
@@ -92,6 +91,7 @@ static void it8213_set_dma_mode(ide_drive_t *drive, const u8 speed)
92 int u_speed = 0; 91 int u_speed = 0;
93 u16 reg4042, reg4a; 92 u16 reg4042, reg4a;
94 u8 reg48, reg54, reg55; 93 u8 reg48, reg54, reg55;
94 const u8 speed = drive->dma_mode;
95 95
96 pci_read_config_word(dev, maslave, &reg4042); 96 pci_read_config_word(dev, maslave, &reg4042);
97 pci_read_config_byte(dev, 0x48, &reg48); 97 pci_read_config_byte(dev, 0x48, &reg48);
@@ -120,7 +120,6 @@ static void it8213_set_dma_mode(ide_drive_t *drive, const u8 speed)
120 pci_write_config_byte(dev, 0x54, reg54 & ~v_flag); 120 pci_write_config_byte(dev, 0x54, reg54 & ~v_flag);
121 } else { 121 } else {
122 const u8 mwdma_to_pio[] = { 0, 3, 4 }; 122 const u8 mwdma_to_pio[] = { 0, 3, 4 };
123 u8 pio;
124 123
125 if (reg48 & u_flag) 124 if (reg48 & u_flag)
126 pci_write_config_byte(dev, 0x48, reg48 & ~u_flag); 125 pci_write_config_byte(dev, 0x48, reg48 & ~u_flag);
@@ -132,11 +131,12 @@ static void it8213_set_dma_mode(ide_drive_t *drive, const u8 speed)
132 pci_write_config_byte(dev, 0x55, (u8) reg55 & ~w_flag); 131 pci_write_config_byte(dev, 0x55, (u8) reg55 & ~w_flag);
133 132
134 if (speed >= XFER_MW_DMA_0) 133 if (speed >= XFER_MW_DMA_0)
135 pio = mwdma_to_pio[speed - XFER_MW_DMA_0]; 134 drive->pio_mode =
135 mwdma_to_pio[speed - XFER_MW_DMA_0] + XFER_PIO_0;
136 else 136 else
137 pio = 2; /* only SWDMA2 is allowed */ 137 drive->pio_mode = XFER_PIO_2; /* for SWDMA2 */
138 138
139 it8213_set_pio_mode(drive, pio); 139 it8213_set_pio_mode(hwif, drive);
140 } 140 }
141} 141}
142 142
diff --git a/drivers/ide/it821x.c b/drivers/ide/it821x.c
index 51aa745246dc..b2709c733485 100644
--- a/drivers/ide/it821x.c
+++ b/drivers/ide/it821x.c
@@ -228,18 +228,18 @@ static void it821x_clock_strategy(ide_drive_t *drive)
228 228
229/** 229/**
230 * it821x_set_pio_mode - set host controller for PIO mode 230 * it821x_set_pio_mode - set host controller for PIO mode
231 * @hwif: port
231 * @drive: drive 232 * @drive: drive
232 * @pio: PIO mode number
233 * 233 *
234 * Tune the host to the desired PIO mode taking into the consideration 234 * Tune the host to the desired PIO mode taking into the consideration
235 * the maximum PIO mode supported by the other device on the cable. 235 * the maximum PIO mode supported by the other device on the cable.
236 */ 236 */
237 237
238static void it821x_set_pio_mode(ide_drive_t *drive, const u8 pio) 238static void it821x_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
239{ 239{
240 ide_hwif_t *hwif = drive->hwif;
241 struct it821x_dev *itdev = ide_get_hwifdata(hwif); 240 struct it821x_dev *itdev = ide_get_hwifdata(hwif);
242 ide_drive_t *pair = ide_get_pair_dev(drive); 241 ide_drive_t *pair = ide_get_pair_dev(drive);
242 const u8 pio = drive->pio_mode - XFER_PIO_0;
243 u8 unit = drive->dn & 1, set_pio = pio; 243 u8 unit = drive->dn & 1, set_pio = pio;
244 244
245 /* Spec says 89 ref driver uses 88 */ 245 /* Spec says 89 ref driver uses 88 */
@@ -252,7 +252,7 @@ static void it821x_set_pio_mode(ide_drive_t *drive, const u8 pio)
252 * on the cable. 252 * on the cable.
253 */ 253 */
254 if (pair) { 254 if (pair) {
255 u8 pair_pio = ide_get_best_pio_mode(pair, 255, 4); 255 u8 pair_pio = pair->pio_mode - XFER_PIO_0;
256 /* trim PIO to the slowest of the master/slave */ 256 /* trim PIO to the slowest of the master/slave */
257 if (pair_pio < set_pio) 257 if (pair_pio < set_pio)
258 set_pio = pair_pio; 258 set_pio = pair_pio;
@@ -393,14 +393,16 @@ static int it821x_dma_end(ide_drive_t *drive)
393 393
394/** 394/**
395 * it821x_set_dma_mode - set host controller for DMA mode 395 * it821x_set_dma_mode - set host controller for DMA mode
396 * @hwif: port
396 * @drive: drive 397 * @drive: drive
397 * @speed: DMA mode
398 * 398 *
399 * Tune the ITE chipset for the desired DMA mode. 399 * Tune the ITE chipset for the desired DMA mode.
400 */ 400 */
401 401
402static void it821x_set_dma_mode(ide_drive_t *drive, const u8 speed) 402static void it821x_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
403{ 403{
404 const u8 speed = drive->dma_mode;
405
404 /* 406 /*
405 * MWDMA tuning is really hard because our MWDMA and PIO 407 * MWDMA tuning is really hard because our MWDMA and PIO
406 * timings are kept in the same place. We can switch in the 408 * timings are kept in the same place. We can switch in the
diff --git a/drivers/ide/jmicron.c b/drivers/ide/jmicron.c
index bf2be6431b20..74c2c4a6d909 100644
--- a/drivers/ide/jmicron.c
+++ b/drivers/ide/jmicron.c
@@ -80,19 +80,19 @@ static u8 jmicron_cable_detect(ide_hwif_t *hwif)
80 return ATA_CBL_PATA80; 80 return ATA_CBL_PATA80;
81} 81}
82 82
83static void jmicron_set_pio_mode(ide_drive_t *drive, const u8 pio) 83static void jmicron_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
84{ 84{
85} 85}
86 86
87/** 87/**
88 * jmicron_set_dma_mode - set host controller for DMA mode 88 * jmicron_set_dma_mode - set host controller for DMA mode
89 * @hwif: port
89 * @drive: drive 90 * @drive: drive
90 * @mode: DMA mode
91 * 91 *
92 * As the JMicron snoops for timings we don't need to do anything here. 92 * As the JMicron snoops for timings we don't need to do anything here.
93 */ 93 */
94 94
95static void jmicron_set_dma_mode(ide_drive_t *drive, const u8 mode) 95static void jmicron_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
96{ 96{
97} 97}
98 98
diff --git a/drivers/ide/opti621.c b/drivers/ide/opti621.c
index f1d70d6630fe..1a53a4c375ed 100644
--- a/drivers/ide/opti621.c
+++ b/drivers/ide/opti621.c
@@ -8,77 +8,6 @@
8 * Jan Harkes <jaharkes@cwi.nl>, 8 * Jan Harkes <jaharkes@cwi.nl>,
9 * Mark Lord <mlord@pobox.com> 9 * Mark Lord <mlord@pobox.com>
10 * Some parts of code are from ali14xx.c and from rz1000.c. 10 * Some parts of code are from ali14xx.c and from rz1000.c.
11 *
12 * OPTi is trademark of OPTi, Octek is trademark of Octek.
13 *
14 * I used docs from OPTi databook, from ftp.opti.com, file 9123-0002.ps
15 * and disassembled/traced setupvic.exe (DOS program).
16 * It increases kernel code about 2 kB.
17 * I don't have this card no more, but I hope I can get some in case
18 * of needed development.
19 * My card is Octek PIDE 1.01 (on card) or OPTiViC (program).
20 * It has a place for a secondary connector in circuit, but nothing
21 * is there. Also BIOS says no address for
22 * secondary controller (see bellow in ide_init_opti621).
23 * I've only tested this on my system, which only has one disk.
24 * It's Western Digital WDAC2850, with PIO mode 3. The PCI bus
25 * is at 20 MHz (I have DX2/80, I tried PCI at 40, but I got random
26 * lockups). I tried the OCTEK double speed CD-ROM and
27 * it does not work! But I can't boot DOS also, so it's probably
28 * hardware fault. I have connected Conner 80MB, the Seagate 850MB (no
29 * problems) and Seagate 1GB (as slave, WD as master). My experiences
30 * with the third, 1GB drive: I got 3MB/s (hdparm), but sometimes
31 * it slows to about 100kB/s! I don't know why and I have
32 * not this drive now, so I can't try it again.
33 * I write this driver because I lost the paper ("manual") with
34 * settings of jumpers on the card and I have to boot Linux with
35 * Loadlin except LILO, cause I have to run the setupvic.exe program
36 * already or I get disk errors (my test: rpm -Vf
37 * /usr/X11R6/bin/XF86_SVGA - or any big file).
38 * Some numbers from hdparm -t /dev/hda:
39 * Timing buffer-cache reads: 32 MB in 3.02 seconds =10.60 MB/sec
40 * Timing buffered disk reads: 16 MB in 5.52 seconds = 2.90 MB/sec
41 * I have 4 Megs/s before, but I don't know why (maybe changes
42 * in hdparm test).
43 * After release of 0.1, I got some successful reports, so it might work.
44 *
45 * The main problem with OPTi is that some timings for master
46 * and slave must be the same. For example, if you have master
47 * PIO 3 and slave PIO 0, driver have to set some timings of
48 * master for PIO 0. Second problem is that opti621_set_pio_mode
49 * got only one drive to set, but have to set both drives.
50 * This is solved in compute_pios. If you don't set
51 * the second drive, compute_pios use ide_get_best_pio_mode
52 * for autoselect mode (you can change it to PIO 0, if you want).
53 * If you then set the second drive to another PIO, the old value
54 * (automatically selected) will be overrided by yours.
55 * There is a 25/33MHz switch in configuration
56 * register, but driver is written for use at any frequency.
57 *
58 * Version 0.1, Nov 8, 1996
59 * by Jaromir Koutek, for 2.1.8.
60 * Initial version of driver.
61 *
62 * Version 0.2
63 * Number 0.2 skipped.
64 *
65 * Version 0.3, Nov 29, 1997
66 * by Mark Lord (probably), for 2.1.68
67 * Updates for use with new IDE block driver.
68 *
69 * Version 0.4, Dec 14, 1997
70 * by Jan Harkes
71 * Fixed some errors and cleaned the code.
72 *
73 * Version 0.5, Jan 2, 1998
74 * by Jaromir Koutek
75 * Updates for use with (again) new IDE block driver.
76 * Update of documentation.
77 *
78 * Version 0.6, Jan 2, 1999
79 * by Jaromir Koutek
80 * Reversed to version 0.3 of the driver, because
81 * 0.5 doesn't work.
82 */ 11 */
83 12
84#include <linux/types.h> 13#include <linux/types.h>
@@ -133,12 +62,12 @@ static u8 read_reg(int reg)
133 return ret; 62 return ret;
134} 63}
135 64
136static void opti621_set_pio_mode(ide_drive_t *drive, const u8 pio) 65static void opti621_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
137{ 66{
138 ide_hwif_t *hwif = drive->hwif;
139 ide_drive_t *pair = ide_get_pair_dev(drive); 67 ide_drive_t *pair = ide_get_pair_dev(drive);
140 unsigned long flags; 68 unsigned long flags;
141 unsigned long mode = XFER_PIO_0 + pio, pair_mode; 69 unsigned long mode = drive->pio_mode, pair_mode;
70 const u8 pio = mode - XFER_PIO_0;
142 u8 tim, misc, addr_pio = pio, clk; 71 u8 tim, misc, addr_pio = pio, clk;
143 72
144 /* DRDY is default 2 (by OPTi Databook) */ 73 /* DRDY is default 2 (by OPTi Databook) */
diff --git a/drivers/ide/palm_bk3710.c b/drivers/ide/palm_bk3710.c
index f8eddf05ecb8..9e8f4e1b0cc9 100644
--- a/drivers/ide/palm_bk3710.c
+++ b/drivers/ide/palm_bk3710.c
@@ -166,7 +166,7 @@ static void palm_bk3710_setpiomode(void __iomem *base, ide_drive_t *mate,
166 writel(val32, base + BK3710_DATRCVR); 166 writel(val32, base + BK3710_DATRCVR);
167 167
168 if (mate) { 168 if (mate) {
169 u8 mode2 = ide_get_best_pio_mode(mate, 255, 4); 169 u8 mode2 = mate->pio_mode - XFER_PIO_0;
170 170
171 if (mode2 < mode) 171 if (mode2 < mode)
172 mode = mode2; 172 mode = mode2;
@@ -188,10 +188,11 @@ static void palm_bk3710_setpiomode(void __iomem *base, ide_drive_t *mate,
188 writel(val32, base + BK3710_REGRCVR); 188 writel(val32, base + BK3710_REGRCVR);
189} 189}
190 190
191static void palm_bk3710_set_dma_mode(ide_drive_t *drive, u8 xferspeed) 191static void palm_bk3710_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
192{ 192{
193 int is_slave = drive->dn & 1; 193 int is_slave = drive->dn & 1;
194 void __iomem *base = (void *)drive->hwif->dma_base; 194 void __iomem *base = (void *)hwif->dma_base;
195 const u8 xferspeed = drive->dma_mode;
195 196
196 if (xferspeed >= XFER_UDMA_0) { 197 if (xferspeed >= XFER_UDMA_0) {
197 palm_bk3710_setudmamode(base, is_slave, 198 palm_bk3710_setudmamode(base, is_slave,
@@ -203,12 +204,13 @@ static void palm_bk3710_set_dma_mode(ide_drive_t *drive, u8 xferspeed)
203 } 204 }
204} 205}
205 206
206static void palm_bk3710_set_pio_mode(ide_drive_t *drive, u8 pio) 207static void palm_bk3710_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
207{ 208{
208 unsigned int cycle_time; 209 unsigned int cycle_time;
209 int is_slave = drive->dn & 1; 210 int is_slave = drive->dn & 1;
210 ide_drive_t *mate; 211 ide_drive_t *mate;
211 void __iomem *base = (void *)drive->hwif->dma_base; 212 void __iomem *base = (void *)hwif->dma_base;
213 const u8 pio = drive->pio_mode - XFER_PIO_0;
212 214
213 /* 215 /*
214 * Obtain the drive PIO data for tuning the Palm Chip registers 216 * Obtain the drive PIO data for tuning the Palm Chip registers
diff --git a/drivers/ide/pdc202xx_new.c b/drivers/ide/pdc202xx_new.c
index 65ba8239e7b5..9546fe2a93f7 100644
--- a/drivers/ide/pdc202xx_new.c
+++ b/drivers/ide/pdc202xx_new.c
@@ -129,11 +129,11 @@ static struct udma_timing {
129 { 0x1a, 0x01, 0xcb }, /* UDMA mode 6 */ 129 { 0x1a, 0x01, 0xcb }, /* UDMA mode 6 */
130}; 130};
131 131
132static void pdcnew_set_dma_mode(ide_drive_t *drive, const u8 speed) 132static void pdcnew_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
133{ 133{
134 ide_hwif_t *hwif = drive->hwif;
135 struct pci_dev *dev = to_pci_dev(hwif->dev); 134 struct pci_dev *dev = to_pci_dev(hwif->dev);
136 u8 adj = (drive->dn & 1) ? 0x08 : 0x00; 135 u8 adj = (drive->dn & 1) ? 0x08 : 0x00;
136 const u8 speed = drive->dma_mode;
137 137
138 /* 138 /*
139 * IDE core issues SETFEATURES_XFER to the drive first (thanks to 139 * IDE core issues SETFEATURES_XFER to the drive first (thanks to
@@ -167,11 +167,11 @@ static void pdcnew_set_dma_mode(ide_drive_t *drive, const u8 speed)
167 } 167 }
168} 168}
169 169
170static void pdcnew_set_pio_mode(ide_drive_t *drive, const u8 pio) 170static void pdcnew_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
171{ 171{
172 ide_hwif_t *hwif = drive->hwif;
173 struct pci_dev *dev = to_pci_dev(hwif->dev); 172 struct pci_dev *dev = to_pci_dev(hwif->dev);
174 u8 adj = (drive->dn & 1) ? 0x08 : 0x00; 173 u8 adj = (drive->dn & 1) ? 0x08 : 0x00;
174 const u8 pio = drive->pio_mode - XFER_PIO_0;
175 175
176 if (max_dma_rate(dev) == 4) { 176 if (max_dma_rate(dev) == 4) {
177 set_indexed_reg(hwif, 0x0c + adj, pio_timings[pio].reg0c); 177 set_indexed_reg(hwif, 0x0c + adj, pio_timings[pio].reg0c);
diff --git a/drivers/ide/pdc202xx_old.c b/drivers/ide/pdc202xx_old.c
index 35161dd840a0..c5f3841af360 100644
--- a/drivers/ide/pdc202xx_old.c
+++ b/drivers/ide/pdc202xx_old.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * Copyright (C) 1998-2002 Andre Hedrick <andre@linux-ide.org> 2 * Copyright (C) 1998-2002 Andre Hedrick <andre@linux-ide.org>
3 * Copyright (C) 2006-2007, 2009 MontaVista Software, Inc. 3 * Copyright (C) 2006-2007, 2009 MontaVista Software, Inc.
4 * Copyright (C) 2007 Bartlomiej Zolnierkiewicz 4 * Copyright (C) 2007-2010 Bartlomiej Zolnierkiewicz
5 * 5 *
6 * Portions Copyright (C) 1999 Promise Technology, Inc. 6 * Portions Copyright (C) 1999 Promise Technology, Inc.
7 * Author: Frank Tiernan (frankt@promise.com) 7 * Author: Frank Tiernan (frankt@promise.com)
@@ -21,23 +21,15 @@
21 21
22#define DRV_NAME "pdc202xx_old" 22#define DRV_NAME "pdc202xx_old"
23 23
24static void pdc_old_disable_66MHz_clock(ide_hwif_t *); 24static void pdc202xx_set_mode(ide_hwif_t *hwif, ide_drive_t *drive)
25
26static void pdc202xx_set_mode(ide_drive_t *drive, const u8 speed)
27{ 25{
28 ide_hwif_t *hwif = drive->hwif;
29 struct pci_dev *dev = to_pci_dev(hwif->dev); 26 struct pci_dev *dev = to_pci_dev(hwif->dev);
30 u8 drive_pci = 0x60 + (drive->dn << 2); 27 u8 drive_pci = 0x60 + (drive->dn << 2);
28 const u8 speed = drive->dma_mode;
31 29
32 u8 AP = 0, BP = 0, CP = 0; 30 u8 AP = 0, BP = 0, CP = 0;
33 u8 TA = 0, TB = 0, TC = 0; 31 u8 TA = 0, TB = 0, TC = 0;
34 32
35 /*
36 * TODO: do this once per channel
37 */
38 if (dev->device != PCI_DEVICE_ID_PROMISE_20246)
39 pdc_old_disable_66MHz_clock(hwif);
40
41 pci_read_config_byte(dev, drive_pci, &AP); 33 pci_read_config_byte(dev, drive_pci, &AP);
42 pci_read_config_byte(dev, drive_pci + 1, &BP); 34 pci_read_config_byte(dev, drive_pci + 1, &BP);
43 pci_read_config_byte(dev, drive_pci + 2, &CP); 35 pci_read_config_byte(dev, drive_pci + 2, &CP);
@@ -84,9 +76,10 @@ static void pdc202xx_set_mode(ide_drive_t *drive, const u8 speed)
84 } 76 }
85} 77}
86 78
87static void pdc202xx_set_pio_mode(ide_drive_t *drive, const u8 pio) 79static void pdc202xx_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
88{ 80{
89 pdc202xx_set_mode(drive, XFER_PIO_0 + pio); 81 drive->dma_mode = drive->pio_mode;
82 pdc202xx_set_mode(hwif, drive);
90} 83}
91 84
92static int pdc202xx_test_irq(ide_hwif_t *hwif) 85static int pdc202xx_test_irq(ide_hwif_t *hwif)
@@ -100,13 +93,13 @@ static int pdc202xx_test_irq(ide_hwif_t *hwif)
100 * bit 7: error, bit 6: interrupting, 93 * bit 7: error, bit 6: interrupting,
101 * bit 5: FIFO full, bit 4: FIFO empty 94 * bit 5: FIFO full, bit 4: FIFO empty
102 */ 95 */
103 return ((sc1d & 0x50) == 0x40) ? 1 : 0; 96 return ((sc1d & 0x50) == 0x50) ? 1 : 0;
104 } else { 97 } else {
105 /* 98 /*
106 * bit 3: error, bit 2: interrupting, 99 * bit 3: error, bit 2: interrupting,
107 * bit 1: FIFO full, bit 0: FIFO empty 100 * bit 1: FIFO full, bit 0: FIFO empty
108 */ 101 */
109 return ((sc1d & 0x05) == 0x04) ? 1 : 0; 102 return ((sc1d & 0x05) == 0x05) ? 1 : 0;
110 } 103 }
111} 104}
112 105
@@ -145,6 +138,11 @@ static void pdc_old_disable_66MHz_clock(ide_hwif_t *hwif)
145 outb(clock & ~(hwif->channel ? 0x08 : 0x02), clock_reg); 138 outb(clock & ~(hwif->channel ? 0x08 : 0x02), clock_reg);
146} 139}
147 140
141static void pdc2026x_init_hwif(ide_hwif_t *hwif)
142{
143 pdc_old_disable_66MHz_clock(hwif);
144}
145
148static void pdc202xx_dma_start(ide_drive_t *drive) 146static void pdc202xx_dma_start(ide_drive_t *drive)
149{ 147{
150 if (drive->current_speed > XFER_UDMA_2) 148 if (drive->current_speed > XFER_UDMA_2)
@@ -261,6 +259,7 @@ static const struct ide_dma_ops pdc2026x_dma_ops = {
261 { \ 259 { \
262 .name = DRV_NAME, \ 260 .name = DRV_NAME, \
263 .init_chipset = init_chipset_pdc202xx, \ 261 .init_chipset = init_chipset_pdc202xx, \
262 .init_hwif = pdc2026x_init_hwif, \
264 .port_ops = &pdc2026x_port_ops, \ 263 .port_ops = &pdc2026x_port_ops, \
265 .dma_ops = &pdc2026x_dma_ops, \ 264 .dma_ops = &pdc2026x_dma_ops, \
266 .host_flags = IDE_HFLAGS_PDC202XX, \ 265 .host_flags = IDE_HFLAGS_PDC202XX, \
@@ -356,6 +355,6 @@ static void __exit pdc202xx_ide_exit(void)
356module_init(pdc202xx_ide_init); 355module_init(pdc202xx_ide_init);
357module_exit(pdc202xx_ide_exit); 356module_exit(pdc202xx_ide_exit);
358 357
359MODULE_AUTHOR("Andre Hedrick, Frank Tiernan"); 358MODULE_AUTHOR("Andre Hedrick, Frank Tiernan, Bartlomiej Zolnierkiewicz");
360MODULE_DESCRIPTION("PCI driver module for older Promise IDE"); 359MODULE_DESCRIPTION("PCI driver module for older Promise IDE");
361MODULE_LICENSE("GPL"); 360MODULE_LICENSE("GPL");
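Two further things happen in pdc202xx_old: the PIO path now reuses the shared set-mode routine by stashing the PIO transfer mode in drive->dma_mode before delegating (tc86c001, triflex and via82cxxx later in this diff adopt the same idiom), and the once-per-channel 66MHz clock disable moves out of the per-drive path into a new init_hwif hook so it is no longer redone on every mode change. The delegation idiom, sketched with the same hypothetical "foo" naming as above:

static void foo_set_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
	const u8 speed = drive->dma_mode;	/* may also hold an XFER_PIO_* value */

	/* shared timing programming for PIO, MWDMA and UDMA goes here */
}

static void foo_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
	drive->dma_mode = drive->pio_mode;	/* route PIO through the common path */
	foo_set_mode(hwif, drive);
}
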
diff --git a/drivers/ide/piix.c b/drivers/ide/piix.c
index bf14f39bd3a7..1bdca49e5a03 100644
--- a/drivers/ide/piix.c
+++ b/drivers/ide/piix.c
@@ -59,15 +59,14 @@ static int no_piix_dma;
59 59
60/** 60/**
61 * piix_set_pio_mode - set host controller for PIO mode 61 * piix_set_pio_mode - set host controller for PIO mode
62 * @port: port
62 * @drive: drive 63 * @drive: drive
63 * @pio: PIO mode number
64 * 64 *
65 * Set the interface PIO mode based upon the settings done by AMI BIOS. 65 * Set the interface PIO mode based upon the settings done by AMI BIOS.
66 */ 66 */
67 67
68static void piix_set_pio_mode(ide_drive_t *drive, const u8 pio) 68static void piix_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
69{ 69{
70 ide_hwif_t *hwif = drive->hwif;
71 struct pci_dev *dev = to_pci_dev(hwif->dev); 70 struct pci_dev *dev = to_pci_dev(hwif->dev);
72 int is_slave = drive->dn & 1; 71 int is_slave = drive->dn & 1;
73 int master_port = hwif->channel ? 0x42 : 0x40; 72 int master_port = hwif->channel ? 0x42 : 0x40;
@@ -77,6 +76,7 @@ static void piix_set_pio_mode(ide_drive_t *drive, const u8 pio)
77 u8 slave_data; 76 u8 slave_data;
78 static DEFINE_SPINLOCK(tune_lock); 77 static DEFINE_SPINLOCK(tune_lock);
79 int control = 0; 78 int control = 0;
79 const u8 pio = drive->pio_mode - XFER_PIO_0;
80 80
81 /* ISP RTC */ 81 /* ISP RTC */
82 static const u8 timings[][2]= { 82 static const u8 timings[][2]= {
@@ -127,16 +127,15 @@ static void piix_set_pio_mode(ide_drive_t *drive, const u8 pio)
127 127
128/** 128/**
129 * piix_set_dma_mode - set host controller for DMA mode 129 * piix_set_dma_mode - set host controller for DMA mode
130 * @hwif: port
130 * @drive: drive 131 * @drive: drive
131 * @speed: DMA mode
132 * 132 *
133 * Set a PIIX host controller to the desired DMA mode. This involves 133 * Set a PIIX host controller to the desired DMA mode. This involves
134 * programming the right timing data into the PCI configuration space. 134 * programming the right timing data into the PCI configuration space.
135 */ 135 */
136 136
137static void piix_set_dma_mode(ide_drive_t *drive, const u8 speed) 137static void piix_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
138{ 138{
139 ide_hwif_t *hwif = drive->hwif;
140 struct pci_dev *dev = to_pci_dev(hwif->dev); 139 struct pci_dev *dev = to_pci_dev(hwif->dev);
141 u8 maslave = hwif->channel ? 0x42 : 0x40; 140 u8 maslave = hwif->channel ? 0x42 : 0x40;
142 int a_speed = 3 << (drive->dn * 4); 141 int a_speed = 3 << (drive->dn * 4);
@@ -147,6 +146,7 @@ static void piix_set_dma_mode(ide_drive_t *drive, const u8 speed)
147 int sitre; 146 int sitre;
148 u16 reg4042, reg4a; 147 u16 reg4042, reg4a;
149 u8 reg48, reg54, reg55; 148 u8 reg48, reg54, reg55;
149 const u8 speed = drive->dma_mode;
150 150
151 pci_read_config_word(dev, maslave, &reg4042); 151 pci_read_config_word(dev, maslave, &reg4042);
152 sitre = (reg4042 & 0x4000) ? 1 : 0; 152 sitre = (reg4042 & 0x4000) ? 1 : 0;
@@ -176,7 +176,6 @@ static void piix_set_dma_mode(ide_drive_t *drive, const u8 speed)
176 pci_write_config_byte(dev, 0x54, reg54 & ~v_flag); 176 pci_write_config_byte(dev, 0x54, reg54 & ~v_flag);
177 } else { 177 } else {
178 const u8 mwdma_to_pio[] = { 0, 3, 4 }; 178 const u8 mwdma_to_pio[] = { 0, 3, 4 };
179 u8 pio;
180 179
181 if (reg48 & u_flag) 180 if (reg48 & u_flag)
182 pci_write_config_byte(dev, 0x48, reg48 & ~u_flag); 181 pci_write_config_byte(dev, 0x48, reg48 & ~u_flag);
@@ -188,11 +187,12 @@ static void piix_set_dma_mode(ide_drive_t *drive, const u8 speed)
188 pci_write_config_byte(dev, 0x55, (u8) reg55 & ~w_flag); 187 pci_write_config_byte(dev, 0x55, (u8) reg55 & ~w_flag);
189 188
190 if (speed >= XFER_MW_DMA_0) 189 if (speed >= XFER_MW_DMA_0)
191 pio = mwdma_to_pio[speed - XFER_MW_DMA_0]; 190 drive->pio_mode =
191 mwdma_to_pio[speed - XFER_MW_DMA_0] + XFER_PIO_0;
192 else 192 else
193 pio = 2; /* only SWDMA2 is allowed */ 193 drive->pio_mode = XFER_PIO_2; /* for SWDMA2 */
194 194
195 piix_set_pio_mode(drive, pio); 195 piix_set_pio_mode(hwif, drive);
196 } 196 }
197} 197}
198 198
diff --git a/drivers/ide/pmac.c b/drivers/ide/pmac.c
index 7a4e788cab2f..850ee452e9bb 100644
--- a/drivers/ide/pmac.c
+++ b/drivers/ide/pmac.c
@@ -496,12 +496,11 @@ static void pmac_write_devctl(ide_hwif_t *hwif, u8 ctl)
496/* 496/*
497 * Old tuning functions (called on hdparm -p), sets up drive PIO timings 497 * Old tuning functions (called on hdparm -p), sets up drive PIO timings
498 */ 498 */
499static void 499static void pmac_ide_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
500pmac_ide_set_pio_mode(ide_drive_t *drive, const u8 pio)
501{ 500{
502 ide_hwif_t *hwif = drive->hwif;
503 pmac_ide_hwif_t *pmif = 501 pmac_ide_hwif_t *pmif =
504 (pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent); 502 (pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent);
503 const u8 pio = drive->pio_mode - XFER_PIO_0;
505 struct ide_timing *tim = ide_timing_find_mode(XFER_PIO_0 + pio); 504 struct ide_timing *tim = ide_timing_find_mode(XFER_PIO_0 + pio);
506 u32 *timings, t; 505 u32 *timings, t;
507 unsigned accessTicks, recTicks; 506 unsigned accessTicks, recTicks;
@@ -778,14 +777,14 @@ set_timings_mdma(ide_drive_t *drive, int intf_type, u32 *timings, u32 *timings2,
778#endif 777#endif
779} 778}
780 779
781static void pmac_ide_set_dma_mode(ide_drive_t *drive, const u8 speed) 780static void pmac_ide_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
782{ 781{
783 ide_hwif_t *hwif = drive->hwif;
784 pmac_ide_hwif_t *pmif = 782 pmac_ide_hwif_t *pmif =
785 (pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent); 783 (pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent);
786 int ret = 0; 784 int ret = 0;
787 u32 *timings, *timings2, tl[2]; 785 u32 *timings, *timings2, tl[2];
788 u8 unit = drive->dn & 1; 786 u8 unit = drive->dn & 1;
787 const u8 speed = drive->dma_mode;
789 788
790 timings = &pmif->timings[unit]; 789 timings = &pmif->timings[unit];
791 timings2 = &pmif->timings[unit+2]; 790 timings2 = &pmif->timings[unit+2];
@@ -1651,8 +1650,8 @@ pmac_ide_dma_test_irq (ide_drive_t *drive)
1651 if ((status & FLUSH) == 0) 1650 if ((status & FLUSH) == 0)
1652 break; 1651 break;
1653 if (++timeout > 100) { 1652 if (++timeout > 100) {
1654 printk(KERN_WARNING "ide%d, ide_dma_test_irq \ 1653 printk(KERN_WARNING "ide%d, ide_dma_test_irq timeout flushing channel\n",
1655 timeout flushing channel\n", hwif->index); 1654 hwif->index);
1656 break; 1655 break;
1657 } 1656 }
1658 } 1657 }
diff --git a/drivers/ide/qd65xx.c b/drivers/ide/qd65xx.c
index 74696edc8d1d..3f0244fd8e62 100644
--- a/drivers/ide/qd65xx.c
+++ b/drivers/ide/qd65xx.c
@@ -189,15 +189,13 @@ static void qd_set_timing (ide_drive_t *drive, u8 timing)
189 printk(KERN_DEBUG "%s: %#x\n", drive->name, timing); 189 printk(KERN_DEBUG "%s: %#x\n", drive->name, timing);
190} 190}
191 191
192static void qd6500_set_pio_mode(ide_drive_t *drive, const u8 pio) 192static void qd6500_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
193{ 193{
194 u16 *id = drive->id; 194 u16 *id = drive->id;
195 int active_time = 175; 195 int active_time = 175;
196 int recovery_time = 415; /* worst case values from the dos driver */ 196 int recovery_time = 415; /* worst case values from the dos driver */
197 197
198 /* 198 /* FIXME: use drive->pio_mode value */
199 * FIXME: use "pio" value
200 */
201 if (!qd_find_disk_type(drive, &active_time, &recovery_time) && 199 if (!qd_find_disk_type(drive, &active_time, &recovery_time) &&
202 (id[ATA_ID_OLD_PIO_MODES] & 0xff) && (id[ATA_ID_FIELD_VALID] & 2) && 200 (id[ATA_ID_OLD_PIO_MODES] & 0xff) && (id[ATA_ID_FIELD_VALID] & 2) &&
203 id[ATA_ID_EIDE_PIO] >= 240) { 201 id[ATA_ID_EIDE_PIO] >= 240) {
@@ -211,9 +209,9 @@ static void qd6500_set_pio_mode(ide_drive_t *drive, const u8 pio)
211 active_time, recovery_time)); 209 active_time, recovery_time));
212} 210}
213 211
214static void qd6580_set_pio_mode(ide_drive_t *drive, const u8 pio) 212static void qd6580_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
215{ 213{
216 ide_hwif_t *hwif = drive->hwif; 214 const u8 pio = drive->pio_mode - XFER_PIO_0;
217 struct ide_timing *t = ide_timing_find_mode(XFER_PIO_0 + pio); 215 struct ide_timing *t = ide_timing_find_mode(XFER_PIO_0 + pio);
218 unsigned int cycle_time; 216 unsigned int cycle_time;
219 int active_time = 175; 217 int active_time = 175;
diff --git a/drivers/ide/sc1200.c b/drivers/ide/sc1200.c
index d467478d68da..134f1fd13866 100644
--- a/drivers/ide/sc1200.c
+++ b/drivers/ide/sc1200.c
@@ -122,13 +122,13 @@ out:
122 return mask; 122 return mask;
123} 123}
124 124
125static void sc1200_set_dma_mode(ide_drive_t *drive, const u8 mode) 125static void sc1200_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
126{ 126{
127 ide_hwif_t *hwif = drive->hwif;
128 struct pci_dev *dev = to_pci_dev(hwif->dev); 127 struct pci_dev *dev = to_pci_dev(hwif->dev);
129 unsigned int reg, timings; 128 unsigned int reg, timings;
130 unsigned short pci_clock; 129 unsigned short pci_clock;
131 unsigned int basereg = hwif->channel ? 0x50 : 0x40; 130 unsigned int basereg = hwif->channel ? 0x50 : 0x40;
131 const u8 mode = drive->dma_mode;
132 132
133 static const u32 udma_timing[3][3] = { 133 static const u32 udma_timing[3][3] = {
134 { 0x00921250, 0x00911140, 0x00911030 }, 134 { 0x00921250, 0x00911140, 0x00911030 },
@@ -193,10 +193,10 @@ static int sc1200_dma_end(ide_drive_t *drive)
193 * will have valid default PIO timings set up before we get here. 193 * will have valid default PIO timings set up before we get here.
194 */ 194 */
195 195
196static void sc1200_set_pio_mode(ide_drive_t *drive, const u8 pio) 196static void sc1200_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
197{ 197{
198 ide_hwif_t *hwif = drive->hwif;
199 int mode = -1; 198 int mode = -1;
199 const u8 pio = drive->pio_mode - XFER_PIO_0;
200 200
201 /* 201 /*
202 * bad abuse of ->set_pio_mode interface 202 * bad abuse of ->set_pio_mode interface
diff --git a/drivers/ide/scc_pata.c b/drivers/ide/scc_pata.c
index 1104bb301eb9..b7f5b0c4310c 100644
--- a/drivers/ide/scc_pata.c
+++ b/drivers/ide/scc_pata.c
@@ -199,16 +199,15 @@ scc_ide_outsl(unsigned long port, void *addr, u32 count)
199 199
200/** 200/**
201 * scc_set_pio_mode - set host controller for PIO mode 201 * scc_set_pio_mode - set host controller for PIO mode
202 * @hwif: port
202 * @drive: drive 203 * @drive: drive
203 * @pio: PIO mode number
204 * 204 *
205 * Load the timing settings for this device mode into the 205 * Load the timing settings for this device mode into the
206 * controller. 206 * controller.
207 */ 207 */
208 208
209static void scc_set_pio_mode(ide_drive_t *drive, const u8 pio) 209static void scc_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
210{ 210{
211 ide_hwif_t *hwif = drive->hwif;
212 struct scc_ports *ports = ide_get_hwifdata(hwif); 211 struct scc_ports *ports = ide_get_hwifdata(hwif);
213 unsigned long ctl_base = ports->ctl; 212 unsigned long ctl_base = ports->ctl;
214 unsigned long cckctrl_port = ctl_base + 0xff0; 213 unsigned long cckctrl_port = ctl_base + 0xff0;
@@ -216,6 +215,7 @@ static void scc_set_pio_mode(ide_drive_t *drive, const u8 pio)
216 unsigned long pioct_port = ctl_base + 0x004; 215 unsigned long pioct_port = ctl_base + 0x004;
217 unsigned long reg; 216 unsigned long reg;
218 int offset; 217 int offset;
218 const u8 pio = drive->pio_mode - XFER_PIO_0;
219 219
220 reg = in_be32((void __iomem *)cckctrl_port); 220 reg = in_be32((void __iomem *)cckctrl_port);
221 if (reg & CCKCTRL_ATACLKOEN) { 221 if (reg & CCKCTRL_ATACLKOEN) {
@@ -231,16 +231,15 @@ static void scc_set_pio_mode(ide_drive_t *drive, const u8 pio)
231 231
232/** 232/**
233 * scc_set_dma_mode - set host controller for DMA mode 233 * scc_set_dma_mode - set host controller for DMA mode
234 * @hwif: port
234 * @drive: drive 235 * @drive: drive
235 * @speed: DMA mode
236 * 236 *
237 * Load the timing settings for this device mode into the 237 * Load the timing settings for this device mode into the
238 * controller. 238 * controller.
239 */ 239 */
240 240
241static void scc_set_dma_mode(ide_drive_t *drive, const u8 speed) 241static void scc_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
242{ 242{
243 ide_hwif_t *hwif = drive->hwif;
244 struct scc_ports *ports = ide_get_hwifdata(hwif); 243 struct scc_ports *ports = ide_get_hwifdata(hwif);
245 unsigned long ctl_base = ports->ctl; 244 unsigned long ctl_base = ports->ctl;
246 unsigned long cckctrl_port = ctl_base + 0xff0; 245 unsigned long cckctrl_port = ctl_base + 0xff0;
@@ -254,6 +253,7 @@ static void scc_set_dma_mode(ide_drive_t *drive, const u8 speed)
254 int offset, idx; 253 int offset, idx;
255 unsigned long reg; 254 unsigned long reg;
256 unsigned long jcactsel; 255 unsigned long jcactsel;
256 const u8 speed = drive->dma_mode;
257 257
258 reg = in_be32((void __iomem *)cckctrl_port); 258 reg = in_be32((void __iomem *)cckctrl_port);
259 if (reg & CCKCTRL_ATACLKOEN) { 259 if (reg & CCKCTRL_ATACLKOEN) {
@@ -872,20 +872,18 @@ static struct pci_driver scc_pci_driver = {
872 .remove = __devexit_p(scc_remove), 872 .remove = __devexit_p(scc_remove),
873}; 873};
874 874
875static int scc_ide_init(void) 875static int __init scc_ide_init(void)
876{ 876{
877 return ide_pci_register_driver(&scc_pci_driver); 877 return ide_pci_register_driver(&scc_pci_driver);
878} 878}
879 879
880module_init(scc_ide_init); 880static void __exit scc_ide_exit(void)
881/* -- No exit code?
882static void scc_ide_exit(void)
883{ 881{
884 ide_pci_unregister_driver(&scc_pci_driver); 882 pci_unregister_driver(&scc_pci_driver);
885} 883}
886module_exit(scc_ide_exit);
887 */
888 884
885module_init(scc_ide_init);
886module_exit(scc_ide_exit);
889 887
890MODULE_DESCRIPTION("PCI driver module for Toshiba SCC IDE"); 888MODULE_DESCRIPTION("PCI driver module for Toshiba SCC IDE");
891MODULE_LICENSE("GPL"); 889MODULE_LICENSE("GPL");
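Besides the callback conversion, scc_pata gains a working unload path: the exit routine that had been left commented out ("No exit code?") is resurrected, both entry points are annotated __init/__exit, and the driver is unregistered on module removal. The resulting shape, for a hypothetical driver (the asymmetric ide_pci_register_driver()/pci_unregister_driver() pairing is taken from the hunk as-is):

static int __init foo_ide_init(void)
{
	return ide_pci_register_driver(&foo_pci_driver);
}

static void __exit foo_ide_exit(void)
{
	pci_unregister_driver(&foo_pci_driver);	/* makes rmmod safe */
}

module_init(foo_ide_init);
module_exit(foo_ide_exit);
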
diff --git a/drivers/ide/serverworks.c b/drivers/ide/serverworks.c
index b6554ef92716..35fb8dabb55d 100644
--- a/drivers/ide/serverworks.c
+++ b/drivers/ide/serverworks.c
@@ -2,7 +2,7 @@
2 * Copyright (C) 1998-2000 Michel Aubry 2 * Copyright (C) 1998-2000 Michel Aubry
3 * Copyright (C) 1998-2000 Andrzej Krzysztofowicz 3 * Copyright (C) 1998-2000 Andrzej Krzysztofowicz
4 * Copyright (C) 1998-2000 Andre Hedrick <andre@linux-ide.org> 4 * Copyright (C) 1998-2000 Andre Hedrick <andre@linux-ide.org>
5 * Copyright (C) 2007 Bartlomiej Zolnierkiewicz 5 * Copyright (C) 2007-2010 Bartlomiej Zolnierkiewicz
6 * Portions copyright (c) 2001 Sun Microsystems 6 * Portions copyright (c) 2001 Sun Microsystems
7 * 7 *
8 * 8 *
@@ -52,8 +52,6 @@ static const char *svwks_bad_ata100[] = {
52 NULL 52 NULL
53}; 53};
54 54
55static struct pci_dev *isa_dev;
56
57static int check_in_drive_lists (ide_drive_t *drive, const char **list) 55static int check_in_drive_lists (ide_drive_t *drive, const char **list)
58{ 56{
59 char *m = (char *)&drive->id[ATA_ID_PROD]; 57 char *m = (char *)&drive->id[ATA_ID_PROD];
@@ -67,26 +65,14 @@ static int check_in_drive_lists (ide_drive_t *drive, const char **list)
67static u8 svwks_udma_filter(ide_drive_t *drive) 65static u8 svwks_udma_filter(ide_drive_t *drive)
68{ 66{
69 struct pci_dev *dev = to_pci_dev(drive->hwif->dev); 67 struct pci_dev *dev = to_pci_dev(drive->hwif->dev);
70 u8 mask = 0;
71 68
72 if (dev->device == PCI_DEVICE_ID_SERVERWORKS_HT1000IDE) 69 if (dev->device == PCI_DEVICE_ID_SERVERWORKS_HT1000IDE) {
73 return 0x1f; 70 return 0x1f;
74 if (dev->device == PCI_DEVICE_ID_SERVERWORKS_OSB4IDE) {
75 u32 reg = 0;
76 if (isa_dev)
77 pci_read_config_dword(isa_dev, 0x64, &reg);
78
79 /*
80 * Don't enable UDMA on disk devices for the moment
81 */
82 if(drive->media == ide_disk)
83 return 0;
84 /* Check the OSB4 DMA33 enable bit */
85 return ((reg & 0x00004000) == 0x00004000) ? 0x07 : 0;
86 } else if (dev->revision < SVWKS_CSB5_REVISION_NEW) { 71 } else if (dev->revision < SVWKS_CSB5_REVISION_NEW) {
87 return 0x07; 72 return 0x07;
88 } else if (dev->revision >= SVWKS_CSB5_REVISION_NEW) { 73 } else {
89 u8 btr = 0, mode; 74 u8 btr = 0, mode, mask;
75
90 pci_read_config_byte(dev, 0x5A, &btr); 76 pci_read_config_byte(dev, 0x5A, &btr);
91 mode = btr & 0x3; 77 mode = btr & 0x3;
92 78
@@ -101,13 +87,9 @@ static u8 svwks_udma_filter(ide_drive_t *drive)
101 case 1: mask = 0x07; break; 87 case 1: mask = 0x07; break;
102 default: mask = 0x00; break; 88 default: mask = 0x00; break;
103 } 89 }
104 }
105 if (((dev->device == PCI_DEVICE_ID_SERVERWORKS_CSB6IDE) ||
106 (dev->device == PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2)) &&
107 (!(PCI_FUNC(dev->devfn) & 1)))
108 mask = 0x1f;
109 90
110 return mask; 91 return mask;
92 }
111} 93}
112 94
113static u8 svwks_csb_check (struct pci_dev *dev) 95static u8 svwks_csb_check (struct pci_dev *dev)
@@ -124,12 +106,13 @@ static u8 svwks_csb_check (struct pci_dev *dev)
124 return 0; 106 return 0;
125} 107}
126 108
127static void svwks_set_pio_mode(ide_drive_t *drive, const u8 pio) 109static void svwks_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
128{ 110{
129 static const u8 pio_modes[] = { 0x5d, 0x47, 0x34, 0x22, 0x20 }; 111 static const u8 pio_modes[] = { 0x5d, 0x47, 0x34, 0x22, 0x20 };
130 static const u8 drive_pci[] = { 0x41, 0x40, 0x43, 0x42 }; 112 static const u8 drive_pci[] = { 0x41, 0x40, 0x43, 0x42 };
131 113
132 struct pci_dev *dev = to_pci_dev(drive->hwif->dev); 114 struct pci_dev *dev = to_pci_dev(hwif->dev);
115 const u8 pio = drive->pio_mode - XFER_PIO_0;
133 116
134 pci_write_config_byte(dev, drive_pci[drive->dn], pio_modes[pio]); 117 pci_write_config_byte(dev, drive_pci[drive->dn], pio_modes[pio]);
135 118
@@ -145,14 +128,14 @@ static void svwks_set_pio_mode(ide_drive_t *drive, const u8 pio)
145 } 128 }
146} 129}
147 130
148static void svwks_set_dma_mode(ide_drive_t *drive, const u8 speed) 131static void svwks_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
149{ 132{
150 static const u8 udma_modes[] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05 }; 133 static const u8 udma_modes[] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05 };
151 static const u8 dma_modes[] = { 0x77, 0x21, 0x20 }; 134 static const u8 dma_modes[] = { 0x77, 0x21, 0x20 };
152 static const u8 drive_pci2[] = { 0x45, 0x44, 0x47, 0x46 }; 135 static const u8 drive_pci2[] = { 0x45, 0x44, 0x47, 0x46 };
153 136
154 ide_hwif_t *hwif = drive->hwif;
155 struct pci_dev *dev = to_pci_dev(hwif->dev); 137 struct pci_dev *dev = to_pci_dev(hwif->dev);
138 const u8 speed = drive->dma_mode;
156 u8 unit = drive->dn & 1; 139 u8 unit = drive->dn & 1;
157 140
158 u8 ultra_enable = 0, ultra_timing = 0, dma_timing = 0; 141 u8 ultra_enable = 0, ultra_timing = 0, dma_timing = 0;
@@ -185,8 +168,9 @@ static int init_chipset_svwks(struct pci_dev *dev)
185 168
186 /* OSB4 : South Bridge and IDE */ 169 /* OSB4 : South Bridge and IDE */
187 if (dev->device == PCI_DEVICE_ID_SERVERWORKS_OSB4IDE) { 170 if (dev->device == PCI_DEVICE_ID_SERVERWORKS_OSB4IDE) {
188 isa_dev = pci_get_device(PCI_VENDOR_ID_SERVERWORKS, 171 struct pci_dev *isa_dev =
189 PCI_DEVICE_ID_SERVERWORKS_OSB4, NULL); 172 pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
173 PCI_DEVICE_ID_SERVERWORKS_OSB4, NULL);
190 if (isa_dev) { 174 if (isa_dev) {
191 pci_read_config_dword(isa_dev, 0x64, &reg); 175 pci_read_config_dword(isa_dev, 0x64, &reg);
192 reg &= ~0x00002000; /* disable 600ns interrupt mask */ 176 reg &= ~0x00002000; /* disable 600ns interrupt mask */
@@ -195,6 +179,7 @@ static int init_chipset_svwks(struct pci_dev *dev)
195 "enabled.\n", pci_name(dev)); 179 "enabled.\n", pci_name(dev));
196 reg |= 0x00004000; /* enable UDMA/33 support */ 180 reg |= 0x00004000; /* enable UDMA/33 support */
197 pci_write_config_dword(isa_dev, 0x64, reg); 181 pci_write_config_dword(isa_dev, 0x64, reg);
182 pci_dev_put(isa_dev);
198 } 183 }
199 } 184 }
200 185
@@ -343,7 +328,6 @@ static u8 svwks_cable_detect(ide_hwif_t *hwif)
343static const struct ide_port_ops osb4_port_ops = { 328static const struct ide_port_ops osb4_port_ops = {
344 .set_pio_mode = svwks_set_pio_mode, 329 .set_pio_mode = svwks_set_pio_mode,
345 .set_dma_mode = svwks_set_dma_mode, 330 .set_dma_mode = svwks_set_dma_mode,
346 .udma_filter = svwks_udma_filter,
347}; 331};
348 332
349static const struct ide_port_ops svwks_port_ops = { 333static const struct ide_port_ops svwks_port_ops = {
@@ -460,6 +444,6 @@ static void __exit svwks_ide_exit(void)
460module_init(svwks_ide_init); 444module_init(svwks_ide_init);
461module_exit(svwks_ide_exit); 445module_exit(svwks_ide_exit);
462 446
463MODULE_AUTHOR("Michael Aubry. Andrzej Krzysztofowicz, Andre Hedrick"); 447MODULE_AUTHOR("Michael Aubry. Andrzej Krzysztofowicz, Andre Hedrick, Bartlomiej Zolnierkiewicz");
464MODULE_DESCRIPTION("PCI driver module for Serverworks OSB4/CSB5/CSB6 IDE"); 448MODULE_DESCRIPTION("PCI driver module for Serverworks OSB4/CSB5/CSB6 IDE");
465MODULE_LICENSE("GPL"); 449MODULE_LICENSE("GPL");
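The serverworks changes also fix a PCI device reference leak: the file-scope isa_dev, obtained with pci_get_device() and never released, becomes a local in init_chipset_svwks() and the reference is dropped with pci_dev_put() once the OSB4 config-space fixup is done. The pattern in isolation, as a sketch:

static void foo_osb4_fixup(void)
{
	struct pci_dev *isa_dev =
		pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
			       PCI_DEVICE_ID_SERVERWORKS_OSB4, NULL);
	u32 reg;

	if (isa_dev) {
		pci_read_config_dword(isa_dev, 0x64, &reg);
		reg &= ~0x00002000;	/* disable 600ns interrupt mask */
		reg |= 0x00004000;	/* enable UDMA/33 support */
		pci_write_config_dword(isa_dev, 0x64, reg);
		pci_dev_put(isa_dev);	/* balance the pci_get_device() above */
	}
}
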
diff --git a/drivers/ide/sgiioc4.c b/drivers/ide/sgiioc4.c
index b7d61dc64096..e3ea591f66d3 100644
--- a/drivers/ide/sgiioc4.c
+++ b/drivers/ide/sgiioc4.c
@@ -255,7 +255,7 @@ static int sgiioc4_dma_end(ide_drive_t *drive)
255 return dma_stat; 255 return dma_stat;
256} 256}
257 257
258static void sgiioc4_set_dma_mode(ide_drive_t *drive, const u8 speed) 258static void sgiioc4_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
259{ 259{
260} 260}
261 261
diff --git a/drivers/ide/siimage.c b/drivers/ide/siimage.c
index d95df528562f..ddeda444a27a 100644
--- a/drivers/ide/siimage.c
+++ b/drivers/ide/siimage.c
@@ -229,19 +229,18 @@ static u8 sil_sata_udma_filter(ide_drive_t *drive)
229 229
230/** 230/**
231 * sil_set_pio_mode - set host controller for PIO mode 231 * sil_set_pio_mode - set host controller for PIO mode
232 * @hwif: port
232 * @drive: drive 233 * @drive: drive
233 * @pio: PIO mode number
234 * 234 *
235 * Load the timing settings for this device mode into the 235 * Load the timing settings for this device mode into the
236 * controller. 236 * controller.
237 */ 237 */
238 238
239static void sil_set_pio_mode(ide_drive_t *drive, u8 pio) 239static void sil_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
240{ 240{
241 static const u16 tf_speed[] = { 0x328a, 0x2283, 0x1281, 0x10c3, 0x10c1 }; 241 static const u16 tf_speed[] = { 0x328a, 0x2283, 0x1281, 0x10c3, 0x10c1 };
242 static const u16 data_speed[] = { 0x328a, 0x2283, 0x1104, 0x10c3, 0x10c1 }; 242 static const u16 data_speed[] = { 0x328a, 0x2283, 0x1104, 0x10c3, 0x10c1 };
243 243
244 ide_hwif_t *hwif = drive->hwif;
245 struct pci_dev *dev = to_pci_dev(hwif->dev); 244 struct pci_dev *dev = to_pci_dev(hwif->dev);
246 ide_drive_t *pair = ide_get_pair_dev(drive); 245 ide_drive_t *pair = ide_get_pair_dev(drive);
247 u32 speedt = 0; 246 u32 speedt = 0;
@@ -249,6 +248,7 @@ static void sil_set_pio_mode(ide_drive_t *drive, u8 pio)
249 unsigned long addr = siimage_seldev(drive, 0x04); 248 unsigned long addr = siimage_seldev(drive, 0x04);
250 unsigned long tfaddr = siimage_selreg(hwif, 0x02); 249 unsigned long tfaddr = siimage_selreg(hwif, 0x02);
251 unsigned long base = (unsigned long)hwif->hwif_data; 250 unsigned long base = (unsigned long)hwif->hwif_data;
251 const u8 pio = drive->pio_mode - XFER_PIO_0;
252 u8 tf_pio = pio; 252 u8 tf_pio = pio;
253 u8 mmio = (hwif->host_flags & IDE_HFLAG_MMIO) ? 1 : 0; 253 u8 mmio = (hwif->host_flags & IDE_HFLAG_MMIO) ? 1 : 0;
254 u8 addr_mask = hwif->channel ? (mmio ? 0xF4 : 0x84) 254 u8 addr_mask = hwif->channel ? (mmio ? 0xF4 : 0x84)
@@ -258,7 +258,7 @@ static void sil_set_pio_mode(ide_drive_t *drive, u8 pio)
258 258
259 /* trim *taskfile* PIO to the slowest of the master/slave */ 259 /* trim *taskfile* PIO to the slowest of the master/slave */
260 if (pair) { 260 if (pair) {
261 u8 pair_pio = ide_get_best_pio_mode(pair, 255, 4); 261 u8 pair_pio = pair->pio_mode - XFER_PIO_0;
262 262
263 if (pair_pio < tf_pio) 263 if (pair_pio < tf_pio)
264 tf_pio = pair_pio; 264 tf_pio = pair_pio;
@@ -289,19 +289,18 @@ static void sil_set_pio_mode(ide_drive_t *drive, u8 pio)
289 289
290/** 290/**
291 * sil_set_dma_mode - set host controller for DMA mode 291 * sil_set_dma_mode - set host controller for DMA mode
292 * @hwif: port
292 * @drive: drive 293 * @drive: drive
293 * @speed: DMA mode
294 * 294 *
295 * Tune the SiI chipset for the desired DMA mode. 295 * Tune the SiI chipset for the desired DMA mode.
296 */ 296 */
297 297
298static void sil_set_dma_mode(ide_drive_t *drive, const u8 speed) 298static void sil_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
299{ 299{
300 static const u8 ultra6[] = { 0x0F, 0x0B, 0x07, 0x05, 0x03, 0x02, 0x01 }; 300 static const u8 ultra6[] = { 0x0F, 0x0B, 0x07, 0x05, 0x03, 0x02, 0x01 };
301 static const u8 ultra5[] = { 0x0C, 0x07, 0x05, 0x04, 0x02, 0x01 }; 301 static const u8 ultra5[] = { 0x0C, 0x07, 0x05, 0x04, 0x02, 0x01 };
302 static const u16 dma[] = { 0x2208, 0x10C2, 0x10C1 }; 302 static const u16 dma[] = { 0x2208, 0x10C2, 0x10C1 };
303 303
304 ide_hwif_t *hwif = drive->hwif;
305 struct pci_dev *dev = to_pci_dev(hwif->dev); 304 struct pci_dev *dev = to_pci_dev(hwif->dev);
306 unsigned long base = (unsigned long)hwif->hwif_data; 305 unsigned long base = (unsigned long)hwif->hwif_data;
307 u16 ultra = 0, multi = 0; 306 u16 ultra = 0, multi = 0;
@@ -311,6 +310,7 @@ static void sil_set_dma_mode(ide_drive_t *drive, const u8 speed)
311 : (mmio ? 0xB4 : 0x80); 310 : (mmio ? 0xB4 : 0x80);
312 unsigned long ma = siimage_seldev(drive, 0x08); 311 unsigned long ma = siimage_seldev(drive, 0x08);
313 unsigned long ua = siimage_seldev(drive, 0x0C); 312 unsigned long ua = siimage_seldev(drive, 0x0C);
313 const u8 speed = drive->dma_mode;
314 314
315 scsc = sil_ioread8 (dev, base + (mmio ? 0x4A : 0x8A)); 315 scsc = sil_ioread8 (dev, base + (mmio ? 0x4A : 0x8A));
316 mode = sil_ioread8 (dev, base + addr_mask); 316 mode = sil_ioread8 (dev, base + addr_mask);
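siimage, like tx4938ide and tx4939ide further down, used ide_get_best_pio_mode() to re-derive the mate's PIO mode when trimming taskfile timings to the slower device on the cable; since the core now caches the programmed mode in pio_mode, the pair's value can simply be read back. A sketch of the trimming step (min_t() is used here for an explicit type; the drivers themselves open-code the comparison):

static u8 foo_trim_pio(ide_drive_t *drive)
{
	ide_drive_t *pair = ide_get_pair_dev(drive);
	u8 safe = drive->pio_mode - XFER_PIO_0;

	if (pair)	/* both devices share one cable; honour the slower one */
		safe = min_t(u8, safe, pair->pio_mode - XFER_PIO_0);

	return safe;
}
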
diff --git a/drivers/ide/sis5513.c b/drivers/ide/sis5513.c
index 468706082fb5..db7f4e761dbc 100644
--- a/drivers/ide/sis5513.c
+++ b/drivers/ide/sis5513.c
@@ -290,10 +290,10 @@ static void config_drive_art_rwp(ide_drive_t *drive)
290 pci_write_config_byte(dev, 0x4b, rw_prefetch); 290 pci_write_config_byte(dev, 0x4b, rw_prefetch);
291} 291}
292 292
293static void sis_set_pio_mode(ide_drive_t *drive, const u8 pio) 293static void sis_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
294{ 294{
295 config_drive_art_rwp(drive); 295 config_drive_art_rwp(drive);
296 sis_program_timings(drive, XFER_PIO_0 + pio); 296 sis_program_timings(drive, drive->pio_mode);
297} 297}
298 298
299static void sis_ata133_program_udma_timings(ide_drive_t *drive, const u8 mode) 299static void sis_ata133_program_udma_timings(ide_drive_t *drive, const u8 mode)
@@ -340,8 +340,10 @@ static void sis_program_udma_timings(ide_drive_t *drive, const u8 mode)
340 sis_ata33_program_udma_timings(drive, mode); 340 sis_ata33_program_udma_timings(drive, mode);
341} 341}
342 342
343static void sis_set_dma_mode(ide_drive_t *drive, const u8 speed) 343static void sis_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
344{ 344{
345 const u8 speed = drive->dma_mode;
346
345 if (speed >= XFER_UDMA_0) 347 if (speed >= XFER_UDMA_0)
346 sis_program_udma_timings(drive, speed); 348 sis_program_udma_timings(drive, speed);
347 else 349 else
diff --git a/drivers/ide/sl82c105.c b/drivers/ide/sl82c105.c
index 3c2bbf0057ea..f21dc2ad7682 100644
--- a/drivers/ide/sl82c105.c
+++ b/drivers/ide/sl82c105.c
@@ -63,12 +63,13 @@ static unsigned int get_pio_timings(ide_drive_t *drive, u8 pio)
63/* 63/*
64 * Configure the chipset for PIO mode. 64 * Configure the chipset for PIO mode.
65 */ 65 */
66static void sl82c105_set_pio_mode(ide_drive_t *drive, const u8 pio) 66static void sl82c105_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
67{ 67{
68 struct pci_dev *dev = to_pci_dev(drive->hwif->dev); 68 struct pci_dev *dev = to_pci_dev(hwif->dev);
69 unsigned long timings = (unsigned long)ide_get_drivedata(drive); 69 unsigned long timings = (unsigned long)ide_get_drivedata(drive);
70 int reg = 0x44 + drive->dn * 4; 70 int reg = 0x44 + drive->dn * 4;
71 u16 drv_ctrl; 71 u16 drv_ctrl;
72 const u8 pio = drive->pio_mode - XFER_PIO_0;
72 73
73 drv_ctrl = get_pio_timings(drive, pio); 74 drv_ctrl = get_pio_timings(drive, pio);
74 75
@@ -91,11 +92,12 @@ static void sl82c105_set_pio_mode(ide_drive_t *drive, const u8 pio)
91/* 92/*
92 * Configure the chipset for DMA mode. 93 * Configure the chipset for DMA mode.
93 */ 94 */
94static void sl82c105_set_dma_mode(ide_drive_t *drive, const u8 speed) 95static void sl82c105_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
95{ 96{
96 static u16 mwdma_timings[] = {0x0707, 0x0201, 0x0200}; 97 static u16 mwdma_timings[] = {0x0707, 0x0201, 0x0200};
97 unsigned long timings = (unsigned long)ide_get_drivedata(drive); 98 unsigned long timings = (unsigned long)ide_get_drivedata(drive);
98 u16 drv_ctrl; 99 u16 drv_ctrl;
100 const u8 speed = drive->dma_mode;
99 101
100 drv_ctrl = mwdma_timings[speed - XFER_MW_DMA_0]; 102 drv_ctrl = mwdma_timings[speed - XFER_MW_DMA_0];
101 103
diff --git a/drivers/ide/slc90e66.c b/drivers/ide/slc90e66.c
index 1ccfb40e7215..864ffe0e26d9 100644
--- a/drivers/ide/slc90e66.c
+++ b/drivers/ide/slc90e66.c
@@ -18,9 +18,8 @@
18 18
19static DEFINE_SPINLOCK(slc90e66_lock); 19static DEFINE_SPINLOCK(slc90e66_lock);
20 20
21static void slc90e66_set_pio_mode(ide_drive_t *drive, const u8 pio) 21static void slc90e66_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
22{ 22{
23 ide_hwif_t *hwif = drive->hwif;
24 struct pci_dev *dev = to_pci_dev(hwif->dev); 23 struct pci_dev *dev = to_pci_dev(hwif->dev);
25 int is_slave = drive->dn & 1; 24 int is_slave = drive->dn & 1;
26 int master_port = hwif->channel ? 0x42 : 0x40; 25 int master_port = hwif->channel ? 0x42 : 0x40;
@@ -29,6 +28,8 @@ static void slc90e66_set_pio_mode(ide_drive_t *drive, const u8 pio)
29 u16 master_data; 28 u16 master_data;
30 u8 slave_data; 29 u8 slave_data;
31 int control = 0; 30 int control = 0;
31 const u8 pio = drive->pio_mode - XFER_PIO_0;
32
32 /* ISP RTC */ 33 /* ISP RTC */
33 static const u8 timings[][2] = { 34 static const u8 timings[][2] = {
34 { 0, 0 }, 35 { 0, 0 },
@@ -71,14 +72,14 @@ static void slc90e66_set_pio_mode(ide_drive_t *drive, const u8 pio)
71 spin_unlock_irqrestore(&slc90e66_lock, flags); 72 spin_unlock_irqrestore(&slc90e66_lock, flags);
72} 73}
73 74
74static void slc90e66_set_dma_mode(ide_drive_t *drive, const u8 speed) 75static void slc90e66_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
75{ 76{
76 ide_hwif_t *hwif = drive->hwif;
77 struct pci_dev *dev = to_pci_dev(hwif->dev); 77 struct pci_dev *dev = to_pci_dev(hwif->dev);
78 u8 maslave = hwif->channel ? 0x42 : 0x40; 78 u8 maslave = hwif->channel ? 0x42 : 0x40;
79 int sitre = 0, a_speed = 7 << (drive->dn * 4); 79 int sitre = 0, a_speed = 7 << (drive->dn * 4);
80 int u_speed = 0, u_flag = 1 << drive->dn; 80 int u_speed = 0, u_flag = 1 << drive->dn;
81 u16 reg4042, reg44, reg48, reg4a; 81 u16 reg4042, reg44, reg48, reg4a;
82 const u8 speed = drive->dma_mode;
82 83
83 pci_read_config_word(dev, maslave, &reg4042); 84 pci_read_config_word(dev, maslave, &reg4042);
84 sitre = (reg4042 & 0x4000) ? 1 : 0; 85 sitre = (reg4042 & 0x4000) ? 1 : 0;
@@ -98,7 +99,6 @@ static void slc90e66_set_dma_mode(ide_drive_t *drive, const u8 speed)
98 } 99 }
99 } else { 100 } else {
100 const u8 mwdma_to_pio[] = { 0, 3, 4 }; 101 const u8 mwdma_to_pio[] = { 0, 3, 4 };
101 u8 pio;
102 102
103 if (reg48 & u_flag) 103 if (reg48 & u_flag)
104 pci_write_config_word(dev, 0x48, reg48 & ~u_flag); 104 pci_write_config_word(dev, 0x48, reg48 & ~u_flag);
@@ -106,11 +106,12 @@ static void slc90e66_set_dma_mode(ide_drive_t *drive, const u8 speed)
106 pci_write_config_word(dev, 0x4a, reg4a & ~a_speed); 106 pci_write_config_word(dev, 0x4a, reg4a & ~a_speed);
107 107
108 if (speed >= XFER_MW_DMA_0) 108 if (speed >= XFER_MW_DMA_0)
109 pio = mwdma_to_pio[speed - XFER_MW_DMA_0]; 109 drive->pio_mode =
110 mwdma_to_pio[speed - XFER_MW_DMA_0] + XFER_PIO_0;
110 else 111 else
111 pio = 2; /* only SWDMA2 is allowed */ 112 drive->pio_mode = XFER_PIO_2; /* for SWDMA2 */
112 113
113 slc90e66_set_pio_mode(drive, pio); 114 slc90e66_set_pio_mode(hwif, drive);
114 } 115 }
115} 116}
116 117
diff --git a/drivers/ide/tc86c001.c b/drivers/ide/tc86c001.c
index 05a93d6baecc..e444d24934b3 100644
--- a/drivers/ide/tc86c001.c
+++ b/drivers/ide/tc86c001.c
@@ -13,11 +13,11 @@
13 13
14#define DRV_NAME "tc86c001" 14#define DRV_NAME "tc86c001"
15 15
16static void tc86c001_set_mode(ide_drive_t *drive, const u8 speed) 16static void tc86c001_set_mode(ide_hwif_t *hwif, ide_drive_t *drive)
17{ 17{
18 ide_hwif_t *hwif = drive->hwif;
19 unsigned long scr_port = hwif->config_data + (drive->dn ? 0x02 : 0x00); 18 unsigned long scr_port = hwif->config_data + (drive->dn ? 0x02 : 0x00);
20 u16 mode, scr = inw(scr_port); 19 u16 mode, scr = inw(scr_port);
20 const u8 speed = drive->dma_mode;
21 21
22 switch (speed) { 22 switch (speed) {
23 case XFER_UDMA_4: mode = 0x00c0; break; 23 case XFER_UDMA_4: mode = 0x00c0; break;
@@ -41,9 +41,10 @@ static void tc86c001_set_mode(ide_drive_t *drive, const u8 speed)
41 outw(scr, scr_port); 41 outw(scr, scr_port);
42} 42}
43 43
44static void tc86c001_set_pio_mode(ide_drive_t *drive, const u8 pio) 44static void tc86c001_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
45{ 45{
46 tc86c001_set_mode(drive, XFER_PIO_0 + pio); 46 drive->dma_mode = drive->pio_mode;
47 tc86c001_set_mode(hwif, drive);
47} 48}
48 49
49/* 50/*
diff --git a/drivers/ide/triflex.c b/drivers/ide/triflex.c
index 8773c3ba7462..7953447eae0f 100644
--- a/drivers/ide/triflex.c
+++ b/drivers/ide/triflex.c
@@ -34,9 +34,8 @@
34 34
35#define DRV_NAME "triflex" 35#define DRV_NAME "triflex"
36 36
37static void triflex_set_mode(ide_drive_t *drive, const u8 speed) 37static void triflex_set_mode(ide_hwif_t *hwif, ide_drive_t *drive)
38{ 38{
39 ide_hwif_t *hwif = drive->hwif;
40 struct pci_dev *dev = to_pci_dev(hwif->dev); 39 struct pci_dev *dev = to_pci_dev(hwif->dev);
41 u32 triflex_timings = 0; 40 u32 triflex_timings = 0;
42 u16 timing = 0; 41 u16 timing = 0;
@@ -44,7 +43,7 @@ static void triflex_set_mode(ide_drive_t *drive, const u8 speed)
44 43
45 pci_read_config_dword(dev, channel_offset, &triflex_timings); 44 pci_read_config_dword(dev, channel_offset, &triflex_timings);
46 45
47 switch(speed) { 46 switch (drive->dma_mode) {
48 case XFER_MW_DMA_2: 47 case XFER_MW_DMA_2:
49 timing = 0x0103; 48 timing = 0x0103;
50 break; 49 break;
@@ -82,9 +81,10 @@ static void triflex_set_mode(ide_drive_t *drive, const u8 speed)
82 pci_write_config_dword(dev, channel_offset, triflex_timings); 81 pci_write_config_dword(dev, channel_offset, triflex_timings);
83} 82}
84 83
85static void triflex_set_pio_mode(ide_drive_t *drive, const u8 pio) 84static void triflex_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
86{ 85{
87 triflex_set_mode(drive, XFER_PIO_0 + pio); 86 drive->dma_mode = drive->pio_mode;
87 triflex_set_mode(hwif, drive);
88} 88}
89 89
90static const struct ide_port_ops triflex_port_ops = { 90static const struct ide_port_ops triflex_port_ops = {
diff --git a/drivers/ide/tx4938ide.c b/drivers/ide/tx4938ide.c
index fd59c0d235b5..1d80f1fdbc97 100644
--- a/drivers/ide/tx4938ide.c
+++ b/drivers/ide/tx4938ide.c
@@ -56,16 +56,15 @@ static void tx4938ide_tune_ebusc(unsigned int ebus_ch,
56 &tx4938_ebuscptr->cr[ebus_ch]); 56 &tx4938_ebuscptr->cr[ebus_ch]);
57} 57}
58 58
59static void tx4938ide_set_pio_mode(ide_drive_t *drive, const u8 pio) 59static void tx4938ide_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
60{ 60{
61 ide_hwif_t *hwif = drive->hwif;
62 struct tx4938ide_platform_info *pdata = hwif->dev->platform_data; 61 struct tx4938ide_platform_info *pdata = hwif->dev->platform_data;
63 u8 safe = pio; 62 u8 safe = drive->pio_mode - XFER_PIO_0;
64 ide_drive_t *pair; 63 ide_drive_t *pair;
65 64
66 pair = ide_get_pair_dev(drive); 65 pair = ide_get_pair_dev(drive);
67 if (pair) 66 if (pair)
68 safe = min(safe, ide_get_best_pio_mode(pair, 255, 5)); 67 safe = min(safe, pair->pio_mode - XFER_PIO_0);
69 tx4938ide_tune_ebusc(pdata->ebus_ch, pdata->gbus_clock, safe); 68 tx4938ide_tune_ebusc(pdata->ebus_ch, pdata->gbus_clock, safe);
70} 69}
71 70
diff --git a/drivers/ide/tx4939ide.c b/drivers/ide/tx4939ide.c
index 64b58ecc3f0e..3c7367751873 100644
--- a/drivers/ide/tx4939ide.c
+++ b/drivers/ide/tx4939ide.c
@@ -104,17 +104,17 @@ static void tx4939ide_writeb(u8 val, void __iomem *base, u32 reg)
104 104
105#define TX4939IDE_BASE(hwif) ((void __iomem *)(hwif)->extra_base) 105#define TX4939IDE_BASE(hwif) ((void __iomem *)(hwif)->extra_base)
106 106
107static void tx4939ide_set_pio_mode(ide_drive_t *drive, const u8 pio) 107static void tx4939ide_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
108{ 108{
109 ide_hwif_t *hwif = drive->hwif;
110 int is_slave = drive->dn; 109 int is_slave = drive->dn;
111 u32 mask, val; 110 u32 mask, val;
111 const u8 pio = drive->pio_mode - XFER_PIO_0;
112 u8 safe = pio; 112 u8 safe = pio;
113 ide_drive_t *pair; 113 ide_drive_t *pair;
114 114
115 pair = ide_get_pair_dev(drive); 115 pair = ide_get_pair_dev(drive);
116 if (pair) 116 if (pair)
117 safe = min(safe, ide_get_best_pio_mode(pair, 255, 4)); 117 safe = min(safe, pair->pio_mode - XFER_PIO_0);
118 /* 118 /*
119 * Update Command Transfer Mode for master/slave and Data 119 * Update Command Transfer Mode for master/slave and Data
120 * Transfer Mode for this drive. 120 * Transfer Mode for this drive.
@@ -125,10 +125,10 @@ static void tx4939ide_set_pio_mode(ide_drive_t *drive, const u8 pio)
125 /* tx4939ide_tf_load_fixup() will set the Sys_Ctl register */ 125 /* tx4939ide_tf_load_fixup() will set the Sys_Ctl register */
126} 126}
127 127
128static void tx4939ide_set_dma_mode(ide_drive_t *drive, const u8 mode) 128static void tx4939ide_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
129{ 129{
130 ide_hwif_t *hwif = drive->hwif;
131 u32 mask, val; 130 u32 mask, val;
131 const u8 mode = drive->dma_mode;
132 132
133 /* Update Data Transfer Mode for this drive. */ 133 /* Update Data Transfer Mode for this drive. */
134 if (mode >= XFER_UDMA_0) 134 if (mode >= XFER_UDMA_0)
diff --git a/drivers/ide/umc8672.c b/drivers/ide/umc8672.c
index 60f936e2319c..47adcd09cb26 100644
--- a/drivers/ide/umc8672.c
+++ b/drivers/ide/umc8672.c
@@ -104,10 +104,11 @@ static void umc_set_speeds(u8 speeds[])
104 speeds[0], speeds[1], speeds[2], speeds[3]); 104 speeds[0], speeds[1], speeds[2], speeds[3]);
105} 105}
106 106
107static void umc_set_pio_mode(ide_drive_t *drive, const u8 pio) 107static void umc_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
108{ 108{
109 ide_hwif_t *hwif = drive->hwif, *mate = hwif->mate; 109 ide_hwif_t *mate = hwif->mate;
110 unsigned long uninitialized_var(flags); 110 unsigned long uninitialized_var(flags);
111 const u8 pio = drive->pio_mode - XFER_PIO_0;
111 112
112 printk("%s: setting umc8672 to PIO mode%d (speed %d)\n", 113 printk("%s: setting umc8672 to PIO mode%d (speed %d)\n",
113 drive->name, pio, pio_to_umc[pio]); 114 drive->name, pio, pio_to_umc[pio]);
diff --git a/drivers/ide/via82cxxx.c b/drivers/ide/via82cxxx.c
index 028de26a25fe..e65d010b708d 100644
--- a/drivers/ide/via82cxxx.c
+++ b/drivers/ide/via82cxxx.c
@@ -6,7 +6,7 @@
6 * vt8235, vt8237, vt8237a 6 * vt8235, vt8237, vt8237a
7 * 7 *
8 * Copyright (c) 2000-2002 Vojtech Pavlik 8 * Copyright (c) 2000-2002 Vojtech Pavlik
9 * Copyright (c) 2007 Bartlomiej Zolnierkiewicz 9 * Copyright (c) 2007-2010 Bartlomiej Zolnierkiewicz
10 * 10 *
11 * Based on the work of: 11 * Based on the work of:
12 * Michel Aubry 12 * Michel Aubry
@@ -54,6 +54,11 @@
54#define VIA_NO_UNMASK 0x08 /* Doesn't work with IRQ unmasking on */ 54#define VIA_NO_UNMASK 0x08 /* Doesn't work with IRQ unmasking on */
55#define VIA_BAD_ID 0x10 /* Has wrong vendor ID (0x1107) */ 55#define VIA_BAD_ID 0x10 /* Has wrong vendor ID (0x1107) */
56#define VIA_BAD_AST 0x20 /* Don't touch Address Setup Timing */ 56#define VIA_BAD_AST 0x20 /* Don't touch Address Setup Timing */
57#define VIA_SATA_PATA 0x80 /* SATA/PATA combined configuration */
58
59enum {
60 VIA_IDFLAG_SINGLE = (1 << 1), /* single channel controller */
61};
57 62
58/* 63/*
59 * VIA SouthBridge chips. 64 * VIA SouthBridge chips.
@@ -67,11 +72,13 @@ static struct via_isa_bridge {
67 u8 udma_mask; 72 u8 udma_mask;
68 u8 flags; 73 u8 flags;
69} via_isa_bridges[] = { 74} via_isa_bridges[] = {
70 { "vx855", PCI_DEVICE_ID_VIA_VX855, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST }, 75 { "vx855", PCI_DEVICE_ID_VIA_VX855, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST | VIA_SATA_PATA },
71 { "vx800", PCI_DEVICE_ID_VIA_VX800, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST }, 76 { "vx800", PCI_DEVICE_ID_VIA_VX800, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST | VIA_SATA_PATA },
72 { "cx700", PCI_DEVICE_ID_VIA_CX700, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST }, 77 { "cx700", PCI_DEVICE_ID_VIA_CX700, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST | VIA_SATA_PATA },
78 { "vt8261", PCI_DEVICE_ID_VIA_8261, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST },
73 { "vt8237s", PCI_DEVICE_ID_VIA_8237S, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST }, 79 { "vt8237s", PCI_DEVICE_ID_VIA_8237S, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST },
74 { "vt6410", PCI_DEVICE_ID_VIA_6410, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST }, 80 { "vt6410", PCI_DEVICE_ID_VIA_6410, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST },
81 { "vt6415", PCI_DEVICE_ID_VIA_6410, 0x00, 0xff, ATA_UDMA6, VIA_BAD_AST },
75 { "vt8251", PCI_DEVICE_ID_VIA_8251, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST }, 82 { "vt8251", PCI_DEVICE_ID_VIA_8251, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST },
76 { "vt8237", PCI_DEVICE_ID_VIA_8237, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST }, 83 { "vt8237", PCI_DEVICE_ID_VIA_8237, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST },
77 { "vt8237a", PCI_DEVICE_ID_VIA_8237A, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST }, 84 { "vt8237a", PCI_DEVICE_ID_VIA_8237A, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST },
@@ -92,6 +99,7 @@ static struct via_isa_bridge {
92 { "vt82c586", PCI_DEVICE_ID_VIA_82C586_0, 0x00, 0x0f, 0x00, VIA_SET_FIFO }, 99 { "vt82c586", PCI_DEVICE_ID_VIA_82C586_0, 0x00, 0x0f, 0x00, VIA_SET_FIFO },
93 { "vt82c576", PCI_DEVICE_ID_VIA_82C576, 0x00, 0x2f, 0x00, VIA_SET_FIFO | VIA_NO_UNMASK }, 100 { "vt82c576", PCI_DEVICE_ID_VIA_82C576, 0x00, 0x2f, 0x00, VIA_SET_FIFO | VIA_NO_UNMASK },
94 { "vt82c576", PCI_DEVICE_ID_VIA_82C576, 0x00, 0x2f, 0x00, VIA_SET_FIFO | VIA_NO_UNMASK | VIA_BAD_ID }, 101 { "vt82c576", PCI_DEVICE_ID_VIA_82C576, 0x00, 0x2f, 0x00, VIA_SET_FIFO | VIA_NO_UNMASK | VIA_BAD_ID },
102 { "vtxxxx", PCI_DEVICE_ID_VIA_ANON, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST },
95 { NULL } 103 { NULL }
96}; 104};
97 105
@@ -102,6 +110,7 @@ struct via82cxxx_dev
102{ 110{
103 struct via_isa_bridge *via_config; 111 struct via_isa_bridge *via_config;
104 unsigned int via_80w; 112 unsigned int via_80w;
113 u8 cached_device[2];
105}; 114};
106 115
107/** 116/**
@@ -137,30 +146,45 @@ static void via_set_speed(ide_hwif_t *hwif, u8 dn, struct ide_timing *timing)
137 case ATA_UDMA4: t = timing->udma ? (0xe8 | (clamp_val(timing->udma, 2, 9) - 2)) : 0x0f; break; 146 case ATA_UDMA4: t = timing->udma ? (0xe8 | (clamp_val(timing->udma, 2, 9) - 2)) : 0x0f; break;
138 case ATA_UDMA5: t = timing->udma ? (0xe0 | (clamp_val(timing->udma, 2, 9) - 2)) : 0x07; break; 147 case ATA_UDMA5: t = timing->udma ? (0xe0 | (clamp_val(timing->udma, 2, 9) - 2)) : 0x07; break;
139 case ATA_UDMA6: t = timing->udma ? (0xe0 | (clamp_val(timing->udma, 2, 9) - 2)) : 0x07; break; 148 case ATA_UDMA6: t = timing->udma ? (0xe0 | (clamp_val(timing->udma, 2, 9) - 2)) : 0x07; break;
140 default: return;
141 } 149 }
142 150
143 pci_write_config_byte(dev, VIA_UDMA_TIMING + (3 - dn), t); 151 /* Set UDMA unless device is not UDMA capable */
152 if (vdev->via_config->udma_mask) {
153 u8 udma_etc;
154
155 pci_read_config_byte(dev, VIA_UDMA_TIMING + 3 - dn, &udma_etc);
156
157 /* clear transfer mode bit */
158 udma_etc &= ~0x20;
159
160 if (timing->udma) {
161 /* preserve 80-wire cable detection bit */
162 udma_etc &= 0x10;
163 udma_etc |= t;
164 }
165
166 pci_write_config_byte(dev, VIA_UDMA_TIMING + 3 - dn, udma_etc);
167 }
144} 168}
145 169
146/** 170/**
147 * via_set_drive - configure transfer mode 171 * via_set_drive - configure transfer mode
172 * @hwif: port
148 * @drive: Drive to set up 173 * @drive: Drive to set up
149 * @speed: desired speed
150 * 174 *
151 * via_set_drive() computes timing values and configures the chipset to 175 * via_set_drive() computes timing values and configures the chipset to
152 * a desired transfer mode. It can also be called by upper layers. 176 * a desired transfer mode. It can also be called by upper layers.
153 */ 177 */
154 178
155static void via_set_drive(ide_drive_t *drive, const u8 speed) 179static void via_set_drive(ide_hwif_t *hwif, ide_drive_t *drive)
156{ 180{
157 ide_hwif_t *hwif = drive->hwif;
158 ide_drive_t *peer = ide_get_pair_dev(drive); 181 ide_drive_t *peer = ide_get_pair_dev(drive);
159 struct pci_dev *dev = to_pci_dev(hwif->dev); 182 struct pci_dev *dev = to_pci_dev(hwif->dev);
160 struct ide_host *host = pci_get_drvdata(dev); 183 struct ide_host *host = pci_get_drvdata(dev);
161 struct via82cxxx_dev *vdev = host->host_priv; 184 struct via82cxxx_dev *vdev = host->host_priv;
162 struct ide_timing t, p; 185 struct ide_timing t, p;
163 unsigned int T, UT; 186 unsigned int T, UT;
187 const u8 speed = drive->dma_mode;
164 188
165 T = 1000000000 / via_clock; 189 T = 1000000000 / via_clock;
166 190
@@ -175,7 +199,7 @@ static void via_set_drive(ide_drive_t *drive, const u8 speed)
175 ide_timing_compute(drive, speed, &t, T, UT); 199 ide_timing_compute(drive, speed, &t, T, UT);
176 200
177 if (peer) { 201 if (peer) {
178 ide_timing_compute(peer, peer->current_speed, &p, T, UT); 202 ide_timing_compute(peer, peer->pio_mode, &p, T, UT);
179 ide_timing_merge(&p, &t, &t, IDE_TIMING_8BIT); 203 ide_timing_merge(&p, &t, &t, IDE_TIMING_8BIT);
180 } 204 }
181 205
@@ -184,22 +208,24 @@ static void via_set_drive(ide_drive_t *drive, const u8 speed)
184 208
185/** 209/**
186 * via_set_pio_mode - set host controller for PIO mode 210 * via_set_pio_mode - set host controller for PIO mode
211 * @hwif: port
187 * @drive: drive 212 * @drive: drive
188 * @pio: PIO mode number
189 * 213 *
190 * A callback from the upper layers for PIO-only tuning. 214 * A callback from the upper layers for PIO-only tuning.
191 */ 215 */
192 216
193static void via_set_pio_mode(ide_drive_t *drive, const u8 pio) 217static void via_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
194{ 218{
195 via_set_drive(drive, XFER_PIO_0 + pio); 219 drive->dma_mode = drive->pio_mode;
220 via_set_drive(hwif, drive);
196} 221}
197 222
198static struct via_isa_bridge *via_config_find(struct pci_dev **isa) 223static struct via_isa_bridge *via_config_find(struct pci_dev **isa)
199{ 224{
200 struct via_isa_bridge *via_config; 225 struct via_isa_bridge *via_config;
201 226
202 for (via_config = via_isa_bridges; via_config->id; via_config++) 227 for (via_config = via_isa_bridges;
228 via_config->id != PCI_DEVICE_ID_VIA_ANON; via_config++)
203 if ((*isa = pci_get_device(PCI_VENDOR_ID_VIA + 229 if ((*isa = pci_get_device(PCI_VENDOR_ID_VIA +
204 !!(via_config->flags & VIA_BAD_ID), 230 !!(via_config->flags & VIA_BAD_ID),
205 via_config->id, NULL))) { 231 via_config->id, NULL))) {
@@ -362,6 +388,9 @@ static u8 via82cxxx_cable_detect(ide_hwif_t *hwif)
362 if (via_cable_override(pdev)) 388 if (via_cable_override(pdev))
363 return ATA_CBL_PATA40_SHORT; 389 return ATA_CBL_PATA40_SHORT;
364 390
391 if ((vdev->via_config->flags & VIA_SATA_PATA) && hwif->channel == 0)
392 return ATA_CBL_SATA;
393
365 if ((vdev->via_80w >> hwif->channel) & 1) 394 if ((vdev->via_80w >> hwif->channel) & 1)
366 return ATA_CBL_PATA80; 395 return ATA_CBL_PATA80;
367 else 396 else
@@ -374,10 +403,66 @@ static const struct ide_port_ops via_port_ops = {
374 .cable_detect = via82cxxx_cable_detect, 403 .cable_detect = via82cxxx_cable_detect,
375}; 404};
376 405
406static void via_write_devctl(ide_hwif_t *hwif, u8 ctl)
407{
408 struct via82cxxx_dev *vdev = hwif->host->host_priv;
409
410 outb(ctl, hwif->io_ports.ctl_addr);
411 outb(vdev->cached_device[hwif->channel], hwif->io_ports.device_addr);
412}
413
414static void __via_dev_select(ide_drive_t *drive, u8 select)
415{
416 ide_hwif_t *hwif = drive->hwif;
417 struct via82cxxx_dev *vdev = hwif->host->host_priv;
418
419 outb(select, hwif->io_ports.device_addr);
420 vdev->cached_device[hwif->channel] = select;
421}
422
423static void via_dev_select(ide_drive_t *drive)
424{
425 __via_dev_select(drive, drive->select | ATA_DEVICE_OBS);
426}
427
428static void via_tf_load(ide_drive_t *drive, struct ide_taskfile *tf, u8 valid)
429{
430 ide_hwif_t *hwif = drive->hwif;
431 struct ide_io_ports *io_ports = &hwif->io_ports;
432
433 if (valid & IDE_VALID_FEATURE)
434 outb(tf->feature, io_ports->feature_addr);
435 if (valid & IDE_VALID_NSECT)
436 outb(tf->nsect, io_ports->nsect_addr);
437 if (valid & IDE_VALID_LBAL)
438 outb(tf->lbal, io_ports->lbal_addr);
439 if (valid & IDE_VALID_LBAM)
440 outb(tf->lbam, io_ports->lbam_addr);
441 if (valid & IDE_VALID_LBAH)
442 outb(tf->lbah, io_ports->lbah_addr);
443 if (valid & IDE_VALID_DEVICE)
444 __via_dev_select(drive, tf->device);
445}
446
447const struct ide_tp_ops via_tp_ops = {
448 .exec_command = ide_exec_command,
449 .read_status = ide_read_status,
450 .read_altstatus = ide_read_altstatus,
451 .write_devctl = via_write_devctl,
452
453 .dev_select = via_dev_select,
454 .tf_load = via_tf_load,
455 .tf_read = ide_tf_read,
456
457 .input_data = ide_input_data,
458 .output_data = ide_output_data,
459};
460
377static const struct ide_port_info via82cxxx_chipset __devinitdata = { 461static const struct ide_port_info via82cxxx_chipset __devinitdata = {
378 .name = DRV_NAME, 462 .name = DRV_NAME,
379 .init_chipset = init_chipset_via82cxxx, 463 .init_chipset = init_chipset_via82cxxx,
380 .enablebits = { { 0x40, 0x02, 0x02 }, { 0x40, 0x01, 0x01 } }, 464 .enablebits = { { 0x40, 0x02, 0x02 }, { 0x40, 0x01, 0x01 } },
465 .tp_ops = &via_tp_ops,
381 .port_ops = &via_port_ops, 466 .port_ops = &via_port_ops,
382 .host_flags = IDE_HFLAG_PIO_NO_BLACKLIST | 467 .host_flags = IDE_HFLAG_PIO_NO_BLACKLIST |
383 IDE_HFLAG_POST_SET_MODE | 468 IDE_HFLAG_POST_SET_MODE |
@@ -402,11 +487,6 @@ static int __devinit via_init_one(struct pci_dev *dev, const struct pci_device_i
402 * Find the ISA bridge and check we know what it is. 487 * Find the ISA bridge and check we know what it is.
403 */ 488 */
404 via_config = via_config_find(&isa); 489 via_config = via_config_find(&isa);
405 if (!via_config->id) {
406 printk(KERN_WARNING DRV_NAME " %s: unknown chipset, skipping\n",
407 pci_name(dev));
408 return -ENODEV;
409 }
410 490
411 /* 491 /*
412 * Print the boot message. 492 * Print the boot message.
@@ -436,10 +516,13 @@ static int __devinit via_init_one(struct pci_dev *dev, const struct pci_device_i
436 via_clock = 33333; 516 via_clock = 33333;
437 } 517 }
438 518
439 if (idx == 0) 519 if (idx == 1)
440 d.host_flags |= IDE_HFLAG_NO_AUTODMA;
441 else
442 d.enablebits[1].reg = d.enablebits[0].reg = 0; 520 d.enablebits[1].reg = d.enablebits[0].reg = 0;
521 else
522 d.host_flags |= IDE_HFLAG_NO_AUTODMA;
523
524 if (idx == VIA_IDFLAG_SINGLE)
525 d.host_flags |= IDE_HFLAG_SINGLE;
443 526
444 if ((via_config->flags & VIA_NO_UNMASK) == 0) 527 if ((via_config->flags & VIA_NO_UNMASK) == 0)
445 d.host_flags |= IDE_HFLAG_UNMASK_IRQS; 528 d.host_flags |= IDE_HFLAG_UNMASK_IRQS;
@@ -475,8 +558,9 @@ static const struct pci_device_id via_pci_tbl[] = {
475 { PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_82C576_1), 0 }, 558 { PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_82C576_1), 0 },
476 { PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_82C586_1), 0 }, 559 { PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_82C586_1), 0 },
477 { PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_CX700_IDE), 0 }, 560 { PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_CX700_IDE), 0 },
478 { PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_VX855_IDE), 0 }, 561 { PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_VX855_IDE), VIA_IDFLAG_SINGLE },
479 { PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_6410), 1 }, 562 { PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_6410), 1 },
563 { PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_6415), 1 },
480 { PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_SATA_EIDE), 1 }, 564 { PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_SATA_EIDE), 1 },
481 { 0, }, 565 { 0, },
482}; 566};
@@ -504,6 +588,6 @@ static void __exit via_ide_exit(void)
504module_init(via_ide_init); 588module_init(via_ide_init);
505module_exit(via_ide_exit); 589module_exit(via_ide_exit);
506 590
507MODULE_AUTHOR("Vojtech Pavlik, Michel Aubry, Jeff Garzik, Andre Hedrick"); 591MODULE_AUTHOR("Vojtech Pavlik, Bartlomiej Zolnierkiewicz, Michel Aubry, Jeff Garzik, Andre Hedrick");
508MODULE_DESCRIPTION("PCI driver module for VIA IDE"); 592MODULE_DESCRIPTION("PCI driver module for VIA IDE");
509MODULE_LICENSE("GPL"); 593MODULE_LICENSE("GPL");
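
Two quieter changes in this file are worth noting: via_config_find() now terminates its table walk on a PCI_DEVICE_ID_VIA_ANON sentinel instead of a zero id, and the "unknown chipset, skipping" bail-out in via_init_one() is deleted, so an unrecognized VIA bridge evidently falls through to the table's catch-all entry instead of failing the probe with -ENODEV. A generic sketch of that sentinel-as-default table walk (the struct, ids and names below are made up for illustration):

/* Sentinel-terminated table walk: the terminator doubles as the
 * catch-all entry for unknown hardware, so lookup never fails.
 */
#include <stdio.h>

#define ID_ANON 0xffff   /* hypothetical sentinel, like PCI_DEVICE_ID_VIA_ANON */

struct bridge_cfg {
	unsigned short id;
	const char *name;
};

static const struct bridge_cfg bridges[] = {
	{ 0x3149, "vt6420" },
	{ 0x0571, "vt82c586" },
	{ ID_ANON, "unknown (generic settings)" },  /* catch-all terminator */
};

static const struct bridge_cfg *find_cfg(unsigned short id)
{
	const struct bridge_cfg *c;

	for (c = bridges; c->id != ID_ANON; c++)
		if (c->id == id)
			break;
	return c;	/* never NULL: falls back to the sentinel entry */
}

int main(void)
{
	printf("%s\n", find_cfg(0x1234)->name);  /* prints the catch-all */
	return 0;
}
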
diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig
index dd0db67bf8d7..975adce5f40c 100644
--- a/drivers/infiniband/Kconfig
+++ b/drivers/infiniband/Kconfig
@@ -20,6 +20,7 @@ config INFINIBAND_USER_MAD
20 20
21config INFINIBAND_USER_ACCESS 21config INFINIBAND_USER_ACCESS
22 tristate "InfiniBand userspace access (verbs and CM)" 22 tristate "InfiniBand userspace access (verbs and CM)"
23 select ANON_INODES
23 ---help--- 24 ---help---
24 Userspace InfiniBand access support. This enables the 25 Userspace InfiniBand access support. This enables the
25 kernel side of userspace verbs and the userspace 26 kernel side of userspace verbs and the userspace
diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c
index f504c9b00c1b..1b09b735c5a8 100644
--- a/drivers/infiniband/core/ucm.c
+++ b/drivers/infiniband/core/ucm.c
@@ -1215,15 +1215,18 @@ static void ib_ucm_release_dev(struct device *dev)
1215 1215
1216 ucm_dev = container_of(dev, struct ib_ucm_device, dev); 1216 ucm_dev = container_of(dev, struct ib_ucm_device, dev);
1217 cdev_del(&ucm_dev->cdev); 1217 cdev_del(&ucm_dev->cdev);
1218 clear_bit(ucm_dev->devnum, dev_map); 1218 if (ucm_dev->devnum < IB_UCM_MAX_DEVICES)
1219 clear_bit(ucm_dev->devnum, dev_map);
1220 else
1221 clear_bit(ucm_dev->devnum - IB_UCM_MAX_DEVICES, dev_map);
1219 kfree(ucm_dev); 1222 kfree(ucm_dev);
1220} 1223}
1221 1224
1222static const struct file_operations ucm_fops = { 1225static const struct file_operations ucm_fops = {
1223 .owner = THIS_MODULE, 1226 .owner = THIS_MODULE,
1224 .open = ib_ucm_open, 1227 .open = ib_ucm_open,
1225 .release = ib_ucm_close, 1228 .release = ib_ucm_close,
1226 .write = ib_ucm_write, 1229 .write = ib_ucm_write,
1227 .poll = ib_ucm_poll, 1230 .poll = ib_ucm_poll,
1228}; 1231};
1229 1232
@@ -1237,8 +1240,32 @@ static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
1237} 1240}
1238static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL); 1241static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);
1239 1242
1243static dev_t overflow_maj;
1244static DECLARE_BITMAP(overflow_map, IB_UCM_MAX_DEVICES);
1245static int find_overflow_devnum(void)
1246{
1247 int ret;
1248
1249 if (!overflow_maj) {
1250 ret = alloc_chrdev_region(&overflow_maj, 0, IB_UCM_MAX_DEVICES,
1251 "infiniband_cm");
1252 if (ret) {
1253 printk(KERN_ERR "ucm: couldn't register dynamic device number\n");
1254 return ret;
1255 }
1256 }
1257
1258 ret = find_first_zero_bit(overflow_map, IB_UCM_MAX_DEVICES);
1259 if (ret >= IB_UCM_MAX_DEVICES)
1260 return -1;
1261
1262 return ret;
1263}
1264
1240static void ib_ucm_add_one(struct ib_device *device) 1265static void ib_ucm_add_one(struct ib_device *device)
1241{ 1266{
1267 int devnum;
1268 dev_t base;
1242 struct ib_ucm_device *ucm_dev; 1269 struct ib_ucm_device *ucm_dev;
1243 1270
1244 if (!device->alloc_ucontext || 1271 if (!device->alloc_ucontext ||
@@ -1251,16 +1278,25 @@ static void ib_ucm_add_one(struct ib_device *device)
1251 1278
1252 ucm_dev->ib_dev = device; 1279 ucm_dev->ib_dev = device;
1253 1280
1254 ucm_dev->devnum = find_first_zero_bit(dev_map, IB_UCM_MAX_DEVICES); 1281 devnum = find_first_zero_bit(dev_map, IB_UCM_MAX_DEVICES);
1255 if (ucm_dev->devnum >= IB_UCM_MAX_DEVICES) 1282 if (devnum >= IB_UCM_MAX_DEVICES) {
1256 goto err; 1283 devnum = find_overflow_devnum();
1257 1284 if (devnum < 0)
1258 set_bit(ucm_dev->devnum, dev_map); 1285 goto err;
1286
1287 ucm_dev->devnum = devnum + IB_UCM_MAX_DEVICES;
1288 base = devnum + overflow_maj;
1289 set_bit(devnum, overflow_map);
1290 } else {
1291 ucm_dev->devnum = devnum;
1292 base = devnum + IB_UCM_BASE_DEV;
1293 set_bit(devnum, dev_map);
1294 }
1259 1295
1260 cdev_init(&ucm_dev->cdev, &ucm_fops); 1296 cdev_init(&ucm_dev->cdev, &ucm_fops);
1261 ucm_dev->cdev.owner = THIS_MODULE; 1297 ucm_dev->cdev.owner = THIS_MODULE;
1262 kobject_set_name(&ucm_dev->cdev.kobj, "ucm%d", ucm_dev->devnum); 1298 kobject_set_name(&ucm_dev->cdev.kobj, "ucm%d", ucm_dev->devnum);
1263 if (cdev_add(&ucm_dev->cdev, IB_UCM_BASE_DEV + ucm_dev->devnum, 1)) 1299 if (cdev_add(&ucm_dev->cdev, base, 1))
1264 goto err; 1300 goto err;
1265 1301
1266 ucm_dev->dev.class = &cm_class; 1302 ucm_dev->dev.class = &cm_class;
@@ -1281,7 +1317,10 @@ err_dev:
1281 device_unregister(&ucm_dev->dev); 1317 device_unregister(&ucm_dev->dev);
1282err_cdev: 1318err_cdev:
1283 cdev_del(&ucm_dev->cdev); 1319 cdev_del(&ucm_dev->cdev);
1284 clear_bit(ucm_dev->devnum, dev_map); 1320 if (ucm_dev->devnum < IB_UCM_MAX_DEVICES)
1321 clear_bit(devnum, dev_map);
1322 else
1323 clear_bit(devnum, overflow_map);
1285err: 1324err:
1286 kfree(ucm_dev); 1325 kfree(ucm_dev);
1287 return; 1326 return;
@@ -1340,6 +1379,8 @@ static void __exit ib_ucm_cleanup(void)
1340 ib_unregister_client(&ucm_client); 1379 ib_unregister_client(&ucm_client);
1341 class_remove_file(&cm_class, &class_attr_abi_version); 1380 class_remove_file(&cm_class, &class_attr_abi_version);
1342 unregister_chrdev_region(IB_UCM_BASE_DEV, IB_UCM_MAX_DEVICES); 1381 unregister_chrdev_region(IB_UCM_BASE_DEV, IB_UCM_MAX_DEVICES);
1382 if (overflow_maj)
1383 unregister_chrdev_region(overflow_maj, IB_UCM_MAX_DEVICES);
1343 idr_destroy(&ctx_id_table); 1384 idr_destroy(&ctx_id_table);
1344} 1385}
1345 1386
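
The ucm changes above, and the matching user_mad and uverbs hunks later in this series, all add the same mechanism: minors come from the statically registered region first, and once dev_map fills up a second region is registered on demand via alloc_chrdev_region(); devnum values at or above IB_UCM_MAX_DEVICES mark the overflow region, which is why release and the error path pick a bitmap by comparing against that bound. A userspace sketch of the two-region allocator (plain arrays stand in for the kernel bitmaps; MAX_DEVICES is illustrative):

/* Two-region id allocator: fixed base region first, overflow region
 * once the base is exhausted.  Ids >= MAX_DEVICES live in overflow.
 */
#include <stdio.h>

#define MAX_DEVICES 4

static int dev_map[MAX_DEVICES];       /* base region bitmap */
static int overflow_map[MAX_DEVICES];  /* overflow region bitmap */

static int find_zero(const int *map)
{
	for (int i = 0; i < MAX_DEVICES; i++)
		if (!map[i])
			return i;
	return -1;
}

static int alloc_devnum(void)
{
	int n = find_zero(dev_map);

	if (n >= 0) {
		dev_map[n] = 1;
		return n;                    /* base region: 0..MAX-1 */
	}
	n = find_zero(overflow_map);
	if (n < 0)
		return -1;                   /* both regions exhausted */
	overflow_map[n] = 1;
	return n + MAX_DEVICES;              /* overflow: MAX..2*MAX-1 */
}

static void free_devnum(int devnum)
{
	if (devnum < MAX_DEVICES)            /* same bound test as above */
		dev_map[devnum] = 0;
	else
		overflow_map[devnum - MAX_DEVICES] = 0;
}

int main(void)
{
	for (int i = 0; i < 6; i++)
		printf("got devnum %d\n", alloc_devnum());
	free_devnum(5);
	printf("after free, got %d\n", alloc_devnum());
	return 0;
}
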
diff --git a/drivers/infiniband/core/ud_header.c b/drivers/infiniband/core/ud_header.c
index 8ec7876bedcf..650b501eb142 100644
--- a/drivers/infiniband/core/ud_header.c
+++ b/drivers/infiniband/core/ud_header.c
@@ -181,6 +181,7 @@ static const struct ib_field deth_table[] = {
181 * ib_ud_header_init - Initialize UD header structure 181 * ib_ud_header_init - Initialize UD header structure
182 * @payload_bytes:Length of packet payload 182 * @payload_bytes:Length of packet payload
183 * @grh_present:GRH flag (if non-zero, GRH will be included) 183 * @grh_present:GRH flag (if non-zero, GRH will be included)
184 * @immediate_present: specify if immediate data should be used
184 * @header:Structure to initialize 185 * @header:Structure to initialize
185 * 186 *
186 * ib_ud_header_init() initializes the lrh.link_version, lrh.link_next_header, 187 * ib_ud_header_init() initializes the lrh.link_version, lrh.link_next_header,
@@ -191,21 +192,13 @@ static const struct ib_field deth_table[] = {
191 */ 192 */
192void ib_ud_header_init(int payload_bytes, 193void ib_ud_header_init(int payload_bytes,
193 int grh_present, 194 int grh_present,
195 int immediate_present,
194 struct ib_ud_header *header) 196 struct ib_ud_header *header)
195{ 197{
196 int header_len;
197 u16 packet_length; 198 u16 packet_length;
198 199
199 memset(header, 0, sizeof *header); 200 memset(header, 0, sizeof *header);
200 201
201 header_len =
202 IB_LRH_BYTES +
203 IB_BTH_BYTES +
204 IB_DETH_BYTES;
205 if (grh_present) {
206 header_len += IB_GRH_BYTES;
207 }
208
209 header->lrh.link_version = 0; 202 header->lrh.link_version = 0;
210 header->lrh.link_next_header = 203 header->lrh.link_next_header =
211 grh_present ? IB_LNH_IBA_GLOBAL : IB_LNH_IBA_LOCAL; 204 grh_present ? IB_LNH_IBA_GLOBAL : IB_LNH_IBA_LOCAL;
@@ -231,7 +224,8 @@ void ib_ud_header_init(int payload_bytes,
231 224
232 header->lrh.packet_length = cpu_to_be16(packet_length); 225 header->lrh.packet_length = cpu_to_be16(packet_length);
233 226
234 if (header->immediate_present) 227 header->immediate_present = immediate_present;
228 if (immediate_present)
235 header->bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE; 229 header->bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
236 else 230 else
237 header->bth.opcode = IB_OPCODE_UD_SEND_ONLY; 231 header->bth.opcode = IB_OPCODE_UD_SEND_ONLY;
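
The immediate_present parameter fixes a use-before-set bug visible in the removed lines: the old body memset() the whole header and then tested header->immediate_present before anything had stored to it, so the WITH_IMMEDIATE opcode branch could never be taken; the caller now passes the flag and the function records it before use (the never-read header_len computation goes away as well). A tiny demo of that bug class (the struct and values are illustrative, not the real ib_ud_header):

/* Use-after-memset demo: testing a field that was zeroed and never
 * assigned always takes the same branch.
 */
#include <stdio.h>
#include <string.h>

struct header {
	int immediate_present;
	int opcode;
};

static void init_broken(struct header *h)
{
	memset(h, 0, sizeof *h);
	/* bug: h->immediate_present is always 0 here */
	h->opcode = h->immediate_present ? 1 : 0;
}

static void init_fixed(struct header *h, int immediate_present)
{
	memset(h, 0, sizeof *h);
	h->immediate_present = immediate_present;  /* set before use */
	h->opcode = immediate_present ? 1 : 0;
}

int main(void)
{
	struct header a, b;

	init_broken(&a);            /* opcode is 0 no matter what */
	init_fixed(&b, 1);          /* opcode follows the flag */
	printf("broken=%d fixed=%d\n", a.opcode, b.opcode);
	return 0;
}
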
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index 6f7c096abf13..4f906f0614f0 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -136,7 +136,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
136 down_write(&current->mm->mmap_sem); 136 down_write(&current->mm->mmap_sem);
137 137
138 locked = npages + current->mm->locked_vm; 138 locked = npages + current->mm->locked_vm;
139 lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur >> PAGE_SHIFT; 139 lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
140 140
141 if ((locked > lock_limit) && !capable(CAP_IPC_LOCK)) { 141 if ((locked > lock_limit) && !capable(CAP_IPC_LOCK)) {
142 ret = -ENOMEM; 142 ret = -ENOMEM;
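
The umem one-liner swaps an open-coded current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur dereference for the rlimit() accessor; either way, lock_limit is the soft RLIMIT_MEMLOCK cap converted from bytes to pages. The same conversion seen from the userspace side, using plain POSIX getrlimit():

/* Print the limit the kernel checks above: soft RLIMIT_MEMLOCK,
 * converted from bytes to pages.
 */
#include <stdio.h>
#include <sys/resource.h>
#include <unistd.h>

int main(void)
{
	struct rlimit rl;
	long page_size = sysconf(_SC_PAGESIZE);

	if (getrlimit(RLIMIT_MEMLOCK, &rl) != 0) {
		perror("getrlimit");
		return 1;
	}
	if (rl.rlim_cur == RLIM_INFINITY)
		printf("memlock: unlimited\n");
	else
		printf("memlock: %llu bytes = %llu pages\n",
		       (unsigned long long)rl.rlim_cur,
		       (unsigned long long)rl.rlim_cur / page_size);
	return 0;
}
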
diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
index 7de02969ed7d..02d360cfc2f7 100644
--- a/drivers/infiniband/core/user_mad.c
+++ b/drivers/infiniband/core/user_mad.c
@@ -65,12 +65,9 @@ enum {
65}; 65};
66 66
67/* 67/*
68 * Our lifetime rules for these structs are the following: each time a 68 * Our lifetime rules for these structs are the following: each time a
69 * device special file is opened, we look up the corresponding struct 69 * device special file is opened, we take a reference on the
70 * ib_umad_port by minor in the umad_port[] table while holding the 70 * ib_umad_port's struct ib_umad_device. We drop these
71 * port_lock. If this lookup succeeds, we take a reference on the
72 * ib_umad_port's struct ib_umad_device while still holding the
73 * port_lock; if the lookup fails, we fail the open(). We drop these
74 * references in the corresponding close(). 71 * references in the corresponding close().
75 * 72 *
76 * In addition to references coming from open character devices, there 73 * In addition to references coming from open character devices, there
@@ -78,19 +75,14 @@ enum {
78 * module's reference taken when allocating the ib_umad_device in 75 * module's reference taken when allocating the ib_umad_device in
79 * ib_umad_add_one(). 76 * ib_umad_add_one().
80 * 77 *
81 * When destroying an ib_umad_device, we clear all of its 78 * When destroying an ib_umad_device, we drop the module's reference.
82 * ib_umad_ports from umad_port[] while holding port_lock before
83 * dropping the module's reference to the ib_umad_device. This is
84 * always safe because any open() calls will either succeed and obtain
85 * a reference before we clear the umad_port[] entries, or fail after
86 * we clear the umad_port[] entries.
87 */ 79 */
88 80
89struct ib_umad_port { 81struct ib_umad_port {
90 struct cdev *cdev; 82 struct cdev cdev;
91 struct device *dev; 83 struct device *dev;
92 84
93 struct cdev *sm_cdev; 85 struct cdev sm_cdev;
94 struct device *sm_dev; 86 struct device *sm_dev;
95 struct semaphore sm_sem; 87 struct semaphore sm_sem;
96 88
@@ -136,7 +128,6 @@ static struct class *umad_class;
136static const dev_t base_dev = MKDEV(IB_UMAD_MAJOR, IB_UMAD_MINOR_BASE); 128static const dev_t base_dev = MKDEV(IB_UMAD_MAJOR, IB_UMAD_MINOR_BASE);
137 129
138static DEFINE_SPINLOCK(port_lock); 130static DEFINE_SPINLOCK(port_lock);
139static struct ib_umad_port *umad_port[IB_UMAD_MAX_PORTS];
140static DECLARE_BITMAP(dev_map, IB_UMAD_MAX_PORTS); 131static DECLARE_BITMAP(dev_map, IB_UMAD_MAX_PORTS);
141 132
142static void ib_umad_add_one(struct ib_device *device); 133static void ib_umad_add_one(struct ib_device *device);
@@ -496,8 +487,8 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
496 ah_attr.ah_flags = IB_AH_GRH; 487 ah_attr.ah_flags = IB_AH_GRH;
497 memcpy(ah_attr.grh.dgid.raw, packet->mad.hdr.gid, 16); 488 memcpy(ah_attr.grh.dgid.raw, packet->mad.hdr.gid, 16);
498 ah_attr.grh.sgid_index = packet->mad.hdr.gid_index; 489 ah_attr.grh.sgid_index = packet->mad.hdr.gid_index;
499 ah_attr.grh.flow_label = be32_to_cpu(packet->mad.hdr.flow_label); 490 ah_attr.grh.flow_label = be32_to_cpu(packet->mad.hdr.flow_label);
500 ah_attr.grh.hop_limit = packet->mad.hdr.hop_limit; 491 ah_attr.grh.hop_limit = packet->mad.hdr.hop_limit;
501 ah_attr.grh.traffic_class = packet->mad.hdr.traffic_class; 492 ah_attr.grh.traffic_class = packet->mad.hdr.traffic_class;
502 } 493 }
503 494
@@ -528,9 +519,9 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
528 goto err_ah; 519 goto err_ah;
529 } 520 }
530 521
531 packet->msg->ah = ah; 522 packet->msg->ah = ah;
532 packet->msg->timeout_ms = packet->mad.hdr.timeout_ms; 523 packet->msg->timeout_ms = packet->mad.hdr.timeout_ms;
533 packet->msg->retries = packet->mad.hdr.retries; 524 packet->msg->retries = packet->mad.hdr.retries;
534 packet->msg->context[0] = packet; 525 packet->msg->context[0] = packet;
535 526
536 /* Copy MAD header. Any RMPP header is already in place. */ 527 /* Copy MAD header. Any RMPP header is already in place. */
@@ -779,15 +770,11 @@ static long ib_umad_compat_ioctl(struct file *filp, unsigned int cmd,
779/* 770/*
780 * ib_umad_open() does not need the BKL: 771 * ib_umad_open() does not need the BKL:
781 * 772 *
782 * - umad_port[] accesses are protected by port_lock, the 773 * - the ib_umad_port structures are properly reference counted, and
783 * ib_umad_port structures are properly reference counted, and
784 * everything else is purely local to the file being created, so 774 * everything else is purely local to the file being created, so
785 * races against other open calls are not a problem; 775 * races against other open calls are not a problem;
786 * - the ioctl method does not affect any global state outside of the 776 * - the ioctl method does not affect any global state outside of the
787 * file structure being operated on; 777 * file structure being operated on;
788 * - the port is added to umad_port[] as the last part of module
789 * initialization so the open method will either immediately run
790 * -ENXIO, or all required initialization will be done.
791 */ 778 */
792static int ib_umad_open(struct inode *inode, struct file *filp) 779static int ib_umad_open(struct inode *inode, struct file *filp)
793{ 780{
@@ -795,13 +782,10 @@ static int ib_umad_open(struct inode *inode, struct file *filp)
795 struct ib_umad_file *file; 782 struct ib_umad_file *file;
796 int ret = 0; 783 int ret = 0;
797 784
798 spin_lock(&port_lock); 785 port = container_of(inode->i_cdev, struct ib_umad_port, cdev);
799 port = umad_port[iminor(inode) - IB_UMAD_MINOR_BASE];
800 if (port) 786 if (port)
801 kref_get(&port->umad_dev->ref); 787 kref_get(&port->umad_dev->ref);
802 spin_unlock(&port_lock); 788 else
803
804 if (!port)
805 return -ENXIO; 789 return -ENXIO;
806 790
807 mutex_lock(&port->file_mutex); 791 mutex_lock(&port->file_mutex);
@@ -872,16 +856,16 @@ static int ib_umad_close(struct inode *inode, struct file *filp)
872} 856}
873 857
874static const struct file_operations umad_fops = { 858static const struct file_operations umad_fops = {
875 .owner = THIS_MODULE, 859 .owner = THIS_MODULE,
876 .read = ib_umad_read, 860 .read = ib_umad_read,
877 .write = ib_umad_write, 861 .write = ib_umad_write,
878 .poll = ib_umad_poll, 862 .poll = ib_umad_poll,
879 .unlocked_ioctl = ib_umad_ioctl, 863 .unlocked_ioctl = ib_umad_ioctl,
880#ifdef CONFIG_COMPAT 864#ifdef CONFIG_COMPAT
881 .compat_ioctl = ib_umad_compat_ioctl, 865 .compat_ioctl = ib_umad_compat_ioctl,
882#endif 866#endif
883 .open = ib_umad_open, 867 .open = ib_umad_open,
884 .release = ib_umad_close 868 .release = ib_umad_close
885}; 869};
886 870
887static int ib_umad_sm_open(struct inode *inode, struct file *filp) 871static int ib_umad_sm_open(struct inode *inode, struct file *filp)
@@ -892,13 +876,10 @@ static int ib_umad_sm_open(struct inode *inode, struct file *filp)
892 }; 876 };
893 int ret; 877 int ret;
894 878
895 spin_lock(&port_lock); 879 port = container_of(inode->i_cdev, struct ib_umad_port, sm_cdev);
896 port = umad_port[iminor(inode) - IB_UMAD_MINOR_BASE - IB_UMAD_MAX_PORTS];
897 if (port) 880 if (port)
898 kref_get(&port->umad_dev->ref); 881 kref_get(&port->umad_dev->ref);
899 spin_unlock(&port_lock); 882 else
900
901 if (!port)
902 return -ENXIO; 883 return -ENXIO;
903 884
904 if (filp->f_flags & O_NONBLOCK) { 885 if (filp->f_flags & O_NONBLOCK) {
@@ -949,8 +930,8 @@ static int ib_umad_sm_close(struct inode *inode, struct file *filp)
949} 930}
950 931
951static const struct file_operations umad_sm_fops = { 932static const struct file_operations umad_sm_fops = {
952 .owner = THIS_MODULE, 933 .owner = THIS_MODULE,
953 .open = ib_umad_sm_open, 934 .open = ib_umad_sm_open,
954 .release = ib_umad_sm_close 935 .release = ib_umad_sm_close
955}; 936};
956 937
@@ -990,16 +971,51 @@ static ssize_t show_abi_version(struct class *class, char *buf)
990} 971}
991static CLASS_ATTR(abi_version, S_IRUGO, show_abi_version, NULL); 972static CLASS_ATTR(abi_version, S_IRUGO, show_abi_version, NULL);
992 973
974static dev_t overflow_maj;
975static DECLARE_BITMAP(overflow_map, IB_UMAD_MAX_PORTS);
976static int find_overflow_devnum(void)
977{
978 int ret;
979
980 if (!overflow_maj) {
981 ret = alloc_chrdev_region(&overflow_maj, 0, IB_UMAD_MAX_PORTS * 2,
982 "infiniband_mad");
983 if (ret) {
984 printk(KERN_ERR "user_mad: couldn't register dynamic device number\n");
985 return ret;
986 }
987 }
988
989 ret = find_first_zero_bit(overflow_map, IB_UMAD_MAX_PORTS);
990 if (ret >= IB_UMAD_MAX_PORTS)
991 return -1;
992
993 return ret;
994}
995
993static int ib_umad_init_port(struct ib_device *device, int port_num, 996static int ib_umad_init_port(struct ib_device *device, int port_num,
994 struct ib_umad_port *port) 997 struct ib_umad_port *port)
995{ 998{
999 int devnum;
1000 dev_t base;
1001
996 spin_lock(&port_lock); 1002 spin_lock(&port_lock);
997 port->dev_num = find_first_zero_bit(dev_map, IB_UMAD_MAX_PORTS); 1003 devnum = find_first_zero_bit(dev_map, IB_UMAD_MAX_PORTS);
998 if (port->dev_num >= IB_UMAD_MAX_PORTS) { 1004 if (devnum >= IB_UMAD_MAX_PORTS) {
999 spin_unlock(&port_lock); 1005 spin_unlock(&port_lock);
1000 return -1; 1006 devnum = find_overflow_devnum();
1007 if (devnum < 0)
1008 return -1;
1009
1010 spin_lock(&port_lock);
1011 port->dev_num = devnum + IB_UMAD_MAX_PORTS;
1012 base = devnum + overflow_maj;
1013 set_bit(devnum, overflow_map);
1014 } else {
1015 port->dev_num = devnum;
1016 base = devnum + base_dev;
1017 set_bit(devnum, dev_map);
1001 } 1018 }
1002 set_bit(port->dev_num, dev_map);
1003 spin_unlock(&port_lock); 1019 spin_unlock(&port_lock);
1004 1020
1005 port->ib_dev = device; 1021 port->ib_dev = device;
@@ -1008,17 +1024,14 @@ static int ib_umad_init_port(struct ib_device *device, int port_num,
1008 mutex_init(&port->file_mutex); 1024 mutex_init(&port->file_mutex);
1009 INIT_LIST_HEAD(&port->file_list); 1025 INIT_LIST_HEAD(&port->file_list);
1010 1026
1011 port->cdev = cdev_alloc(); 1027 cdev_init(&port->cdev, &umad_fops);
1012 if (!port->cdev) 1028 port->cdev.owner = THIS_MODULE;
1013 return -1; 1029 kobject_set_name(&port->cdev.kobj, "umad%d", port->dev_num);
1014 port->cdev->owner = THIS_MODULE; 1030 if (cdev_add(&port->cdev, base, 1))
1015 port->cdev->ops = &umad_fops;
1016 kobject_set_name(&port->cdev->kobj, "umad%d", port->dev_num);
1017 if (cdev_add(port->cdev, base_dev + port->dev_num, 1))
1018 goto err_cdev; 1031 goto err_cdev;
1019 1032
1020 port->dev = device_create(umad_class, device->dma_device, 1033 port->dev = device_create(umad_class, device->dma_device,
1021 port->cdev->dev, port, 1034 port->cdev.dev, port,
1022 "umad%d", port->dev_num); 1035 "umad%d", port->dev_num);
1023 if (IS_ERR(port->dev)) 1036 if (IS_ERR(port->dev))
1024 goto err_cdev; 1037 goto err_cdev;
@@ -1028,17 +1041,15 @@ static int ib_umad_init_port(struct ib_device *device, int port_num,
1028 if (device_create_file(port->dev, &dev_attr_port)) 1041 if (device_create_file(port->dev, &dev_attr_port))
1029 goto err_dev; 1042 goto err_dev;
1030 1043
1031 port->sm_cdev = cdev_alloc(); 1044 base += IB_UMAD_MAX_PORTS;
1032 if (!port->sm_cdev) 1045 cdev_init(&port->sm_cdev, &umad_sm_fops);
1033 goto err_dev; 1046 port->sm_cdev.owner = THIS_MODULE;
1034 port->sm_cdev->owner = THIS_MODULE; 1047 kobject_set_name(&port->sm_cdev.kobj, "issm%d", port->dev_num);
1035 port->sm_cdev->ops = &umad_sm_fops; 1048 if (cdev_add(&port->sm_cdev, base, 1))
1036 kobject_set_name(&port->sm_cdev->kobj, "issm%d", port->dev_num);
1037 if (cdev_add(port->sm_cdev, base_dev + port->dev_num + IB_UMAD_MAX_PORTS, 1))
1038 goto err_sm_cdev; 1049 goto err_sm_cdev;
1039 1050
1040 port->sm_dev = device_create(umad_class, device->dma_device, 1051 port->sm_dev = device_create(umad_class, device->dma_device,
1041 port->sm_cdev->dev, port, 1052 port->sm_cdev.dev, port,
1042 "issm%d", port->dev_num); 1053 "issm%d", port->dev_num);
1043 if (IS_ERR(port->sm_dev)) 1054 if (IS_ERR(port->sm_dev))
1044 goto err_sm_cdev; 1055 goto err_sm_cdev;
@@ -1048,24 +1059,23 @@ static int ib_umad_init_port(struct ib_device *device, int port_num,
1048 if (device_create_file(port->sm_dev, &dev_attr_port)) 1059 if (device_create_file(port->sm_dev, &dev_attr_port))
1049 goto err_sm_dev; 1060 goto err_sm_dev;
1050 1061
1051 spin_lock(&port_lock);
1052 umad_port[port->dev_num] = port;
1053 spin_unlock(&port_lock);
1054
1055 return 0; 1062 return 0;
1056 1063
1057err_sm_dev: 1064err_sm_dev:
1058 device_destroy(umad_class, port->sm_cdev->dev); 1065 device_destroy(umad_class, port->sm_cdev.dev);
1059 1066
1060err_sm_cdev: 1067err_sm_cdev:
1061 cdev_del(port->sm_cdev); 1068 cdev_del(&port->sm_cdev);
1062 1069
1063err_dev: 1070err_dev:
1064 device_destroy(umad_class, port->cdev->dev); 1071 device_destroy(umad_class, port->cdev.dev);
1065 1072
1066err_cdev: 1073err_cdev:
1067 cdev_del(port->cdev); 1074 cdev_del(&port->cdev);
1068 clear_bit(port->dev_num, dev_map); 1075 if (port->dev_num < IB_UMAD_MAX_PORTS)
1076 clear_bit(devnum, dev_map);
1077 else
1078 clear_bit(devnum, overflow_map);
1069 1079
1070 return -1; 1080 return -1;
1071} 1081}
@@ -1079,15 +1089,11 @@ static void ib_umad_kill_port(struct ib_umad_port *port)
1079 dev_set_drvdata(port->dev, NULL); 1089 dev_set_drvdata(port->dev, NULL);
1080 dev_set_drvdata(port->sm_dev, NULL); 1090 dev_set_drvdata(port->sm_dev, NULL);
1081 1091
1082 device_destroy(umad_class, port->cdev->dev); 1092 device_destroy(umad_class, port->cdev.dev);
1083 device_destroy(umad_class, port->sm_cdev->dev); 1093 device_destroy(umad_class, port->sm_cdev.dev);
1084 1094
1085 cdev_del(port->cdev); 1095 cdev_del(&port->cdev);
1086 cdev_del(port->sm_cdev); 1096 cdev_del(&port->sm_cdev);
1087
1088 spin_lock(&port_lock);
1089 umad_port[port->dev_num] = NULL;
1090 spin_unlock(&port_lock);
1091 1097
1092 mutex_lock(&port->file_mutex); 1098 mutex_lock(&port->file_mutex);
1093 1099
@@ -1106,7 +1112,10 @@ static void ib_umad_kill_port(struct ib_umad_port *port)
1106 1112
1107 mutex_unlock(&port->file_mutex); 1113 mutex_unlock(&port->file_mutex);
1108 1114
1109 clear_bit(port->dev_num, dev_map); 1115 if (port->dev_num < IB_UMAD_MAX_PORTS)
1116 clear_bit(port->dev_num, dev_map);
1117 else
1118 clear_bit(port->dev_num - IB_UMAD_MAX_PORTS, overflow_map);
1110} 1119}
1111 1120
1112static void ib_umad_add_one(struct ib_device *device) 1121static void ib_umad_add_one(struct ib_device *device)
@@ -1214,6 +1223,8 @@ static void __exit ib_umad_cleanup(void)
1214 ib_unregister_client(&umad_client); 1223 ib_unregister_client(&umad_client);
1215 class_destroy(umad_class); 1224 class_destroy(umad_class);
1216 unregister_chrdev_region(base_dev, IB_UMAD_MAX_PORTS * 2); 1225 unregister_chrdev_region(base_dev, IB_UMAD_MAX_PORTS * 2);
1226 if (overflow_maj)
1227 unregister_chrdev_region(overflow_maj, IB_UMAD_MAX_PORTS * 2);
1217} 1228}
1218 1229
1219module_init(ib_umad_init); 1230module_init(ib_umad_init);
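
The theme of the user_mad rework above: struct cdev is embedded in struct ib_umad_port instead of heap-allocated with cdev_alloc(), so open() recovers the port directly with container_of(inode->i_cdev, ...) and the umad_port[] lookup table, together with its port_lock-protected registration dance, can be deleted. For reference, the container_of() idiom in isolation (offsetof-based, the same idea as the kernel macro):

/* Given a pointer to an embedded member, recover the enclosing
 * structure, as ib_umad_open() does with inode->i_cdev above.
 */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct cdev { int dummy; };

struct port {
	int dev_num;
	struct cdev cdev;    /* embedded, not a pointer */
};

int main(void)
{
	struct port p = { .dev_num = 3 };
	struct cdev *c = &p.cdev;          /* what i_cdev would hand us */
	struct port *back = container_of(c, struct port, cdev);

	printf("recovered port %d\n", back->dev_num);
	return 0;
}
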
diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h
index b3ea9587dc80..a078e5624d22 100644
--- a/drivers/infiniband/core/uverbs.h
+++ b/drivers/infiniband/core/uverbs.h
@@ -41,6 +41,7 @@
41#include <linux/idr.h> 41#include <linux/idr.h>
42#include <linux/mutex.h> 42#include <linux/mutex.h>
43#include <linux/completion.h> 43#include <linux/completion.h>
44#include <linux/cdev.h>
44 45
45#include <rdma/ib_verbs.h> 46#include <rdma/ib_verbs.h>
46#include <rdma/ib_umem.h> 47#include <rdma/ib_umem.h>
@@ -69,23 +70,23 @@
69 70
70struct ib_uverbs_device { 71struct ib_uverbs_device {
71 struct kref ref; 72 struct kref ref;
73 int num_comp_vectors;
72 struct completion comp; 74 struct completion comp;
73 int devnum;
74 struct cdev *cdev;
75 struct device *dev; 75 struct device *dev;
76 struct ib_device *ib_dev; 76 struct ib_device *ib_dev;
77 int num_comp_vectors; 77 int devnum;
78 struct cdev cdev;
78}; 79};
79 80
80struct ib_uverbs_event_file { 81struct ib_uverbs_event_file {
81 struct kref ref; 82 struct kref ref;
83 int is_async;
82 struct ib_uverbs_file *uverbs_file; 84 struct ib_uverbs_file *uverbs_file;
83 spinlock_t lock; 85 spinlock_t lock;
86 int is_closed;
84 wait_queue_head_t poll_wait; 87 wait_queue_head_t poll_wait;
85 struct fasync_struct *async_queue; 88 struct fasync_struct *async_queue;
86 struct list_head event_list; 89 struct list_head event_list;
87 int is_async;
88 int is_closed;
89}; 90};
90 91
91struct ib_uverbs_file { 92struct ib_uverbs_file {
@@ -145,7 +146,7 @@ extern struct idr ib_uverbs_srq_idr;
145void idr_remove_uobj(struct idr *idp, struct ib_uobject *uobj); 146void idr_remove_uobj(struct idr *idp, struct ib_uobject *uobj);
146 147
147struct file *ib_uverbs_alloc_event_file(struct ib_uverbs_file *uverbs_file, 148struct file *ib_uverbs_alloc_event_file(struct ib_uverbs_file *uverbs_file,
148 int is_async, int *fd); 149 int is_async);
149struct ib_uverbs_event_file *ib_uverbs_lookup_comp_file(int fd); 150struct ib_uverbs_event_file *ib_uverbs_lookup_comp_file(int fd);
150 151
151void ib_uverbs_release_ucq(struct ib_uverbs_file *file, 152void ib_uverbs_release_ucq(struct ib_uverbs_file *file,
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 112d3970222a..f71cf138d674 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -301,10 +301,15 @@ ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
301 301
302 resp.num_comp_vectors = file->device->num_comp_vectors; 302 resp.num_comp_vectors = file->device->num_comp_vectors;
303 303
304 filp = ib_uverbs_alloc_event_file(file, 1, &resp.async_fd); 304 ret = get_unused_fd();
305 if (ret < 0)
306 goto err_free;
307 resp.async_fd = ret;
308
309 filp = ib_uverbs_alloc_event_file(file, 1);
305 if (IS_ERR(filp)) { 310 if (IS_ERR(filp)) {
306 ret = PTR_ERR(filp); 311 ret = PTR_ERR(filp);
307 goto err_free; 312 goto err_fd;
308 } 313 }
309 314
310 if (copy_to_user((void __user *) (unsigned long) cmd.response, 315 if (copy_to_user((void __user *) (unsigned long) cmd.response,
@@ -332,9 +337,11 @@ ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
332 return in_len; 337 return in_len;
333 338
334err_file: 339err_file:
335 put_unused_fd(resp.async_fd);
336 fput(filp); 340 fput(filp);
337 341
342err_fd:
343 put_unused_fd(resp.async_fd);
344
338err_free: 345err_free:
339 ibdev->dealloc_ucontext(ucontext); 346 ibdev->dealloc_ucontext(ucontext);
340 347
@@ -715,6 +722,7 @@ ssize_t ib_uverbs_create_comp_channel(struct ib_uverbs_file *file,
715 struct ib_uverbs_create_comp_channel cmd; 722 struct ib_uverbs_create_comp_channel cmd;
716 struct ib_uverbs_create_comp_channel_resp resp; 723 struct ib_uverbs_create_comp_channel_resp resp;
717 struct file *filp; 724 struct file *filp;
725 int ret;
718 726
719 if (out_len < sizeof resp) 727 if (out_len < sizeof resp)
720 return -ENOSPC; 728 return -ENOSPC;
@@ -722,9 +730,16 @@ ssize_t ib_uverbs_create_comp_channel(struct ib_uverbs_file *file,
722 if (copy_from_user(&cmd, buf, sizeof cmd)) 730 if (copy_from_user(&cmd, buf, sizeof cmd))
723 return -EFAULT; 731 return -EFAULT;
724 732
725 filp = ib_uverbs_alloc_event_file(file, 0, &resp.fd); 733 ret = get_unused_fd();
726 if (IS_ERR(filp)) 734 if (ret < 0)
735 return ret;
736 resp.fd = ret;
737
738 filp = ib_uverbs_alloc_event_file(file, 0);
739 if (IS_ERR(filp)) {
740 put_unused_fd(resp.fd);
727 return PTR_ERR(filp); 741 return PTR_ERR(filp);
742 }
728 743
729 if (copy_to_user((void __user *) (unsigned long) cmd.response, 744 if (copy_to_user((void __user *) (unsigned long) cmd.response,
730 &resp, sizeof resp)) { 745 &resp, sizeof resp)) {
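
Both uverbs_cmd hunks split one call into two steps: reserve the descriptor number with get_unused_fd() first, then create the event file, so each step has exactly one undo (put_unused_fd() versus fput()) and the error labels fall through in reverse order of acquisition. The same acquire/undo shape as a self-contained goto-cleanup sketch (the resources here, an fd and a malloc, are purely illustrative):

/* Goto-based unwind: acquire A, then B; on failure release in
 * reverse order via fallthrough labels, as the hunks above do.
 */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

static int do_setup(void)
{
	int fd;
	char *buf;

	fd = open("/dev/null", O_RDONLY);    /* acquire A */
	if (fd < 0)
		goto err;

	buf = malloc(64);                    /* acquire B */
	if (!buf)
		goto err_fd;

	if (read(fd, buf, 64) < 0)           /* step that may fail */
		goto err_buf;

	free(buf);
	close(fd);
	return 0;

err_buf:
	free(buf);       /* undo B first ... */
err_fd:
	close(fd);       /* ... then A, reverse order of acquisition */
err:
	return -1;
}

int main(void)
{
	return do_setup() ? 1 : 0;
}
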
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index 5f284ffd430e..4fa2e6516441 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -42,8 +42,8 @@
42#include <linux/poll.h> 42#include <linux/poll.h>
43#include <linux/sched.h> 43#include <linux/sched.h>
44#include <linux/file.h> 44#include <linux/file.h>
45#include <linux/mount.h>
46#include <linux/cdev.h> 45#include <linux/cdev.h>
46#include <linux/anon_inodes.h>
47 47
48#include <asm/uaccess.h> 48#include <asm/uaccess.h>
49 49
@@ -53,8 +53,6 @@ MODULE_AUTHOR("Roland Dreier");
53MODULE_DESCRIPTION("InfiniBand userspace verbs access"); 53MODULE_DESCRIPTION("InfiniBand userspace verbs access");
54MODULE_LICENSE("Dual BSD/GPL"); 54MODULE_LICENSE("Dual BSD/GPL");
55 55
56#define INFINIBANDEVENTFS_MAGIC 0x49426576 /* "IBev" */
57
58enum { 56enum {
59 IB_UVERBS_MAJOR = 231, 57 IB_UVERBS_MAJOR = 231,
60 IB_UVERBS_BASE_MINOR = 192, 58 IB_UVERBS_BASE_MINOR = 192,
@@ -75,44 +73,41 @@ DEFINE_IDR(ib_uverbs_qp_idr);
75DEFINE_IDR(ib_uverbs_srq_idr); 73DEFINE_IDR(ib_uverbs_srq_idr);
76 74
77static DEFINE_SPINLOCK(map_lock); 75static DEFINE_SPINLOCK(map_lock);
78static struct ib_uverbs_device *dev_table[IB_UVERBS_MAX_DEVICES];
79static DECLARE_BITMAP(dev_map, IB_UVERBS_MAX_DEVICES); 76static DECLARE_BITMAP(dev_map, IB_UVERBS_MAX_DEVICES);
80 77
81static ssize_t (*uverbs_cmd_table[])(struct ib_uverbs_file *file, 78static ssize_t (*uverbs_cmd_table[])(struct ib_uverbs_file *file,
82 const char __user *buf, int in_len, 79 const char __user *buf, int in_len,
83 int out_len) = { 80 int out_len) = {
84 [IB_USER_VERBS_CMD_GET_CONTEXT] = ib_uverbs_get_context, 81 [IB_USER_VERBS_CMD_GET_CONTEXT] = ib_uverbs_get_context,
85 [IB_USER_VERBS_CMD_QUERY_DEVICE] = ib_uverbs_query_device, 82 [IB_USER_VERBS_CMD_QUERY_DEVICE] = ib_uverbs_query_device,
86 [IB_USER_VERBS_CMD_QUERY_PORT] = ib_uverbs_query_port, 83 [IB_USER_VERBS_CMD_QUERY_PORT] = ib_uverbs_query_port,
87 [IB_USER_VERBS_CMD_ALLOC_PD] = ib_uverbs_alloc_pd, 84 [IB_USER_VERBS_CMD_ALLOC_PD] = ib_uverbs_alloc_pd,
88 [IB_USER_VERBS_CMD_DEALLOC_PD] = ib_uverbs_dealloc_pd, 85 [IB_USER_VERBS_CMD_DEALLOC_PD] = ib_uverbs_dealloc_pd,
89 [IB_USER_VERBS_CMD_REG_MR] = ib_uverbs_reg_mr, 86 [IB_USER_VERBS_CMD_REG_MR] = ib_uverbs_reg_mr,
90 [IB_USER_VERBS_CMD_DEREG_MR] = ib_uverbs_dereg_mr, 87 [IB_USER_VERBS_CMD_DEREG_MR] = ib_uverbs_dereg_mr,
91 [IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL] = ib_uverbs_create_comp_channel, 88 [IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL] = ib_uverbs_create_comp_channel,
92 [IB_USER_VERBS_CMD_CREATE_CQ] = ib_uverbs_create_cq, 89 [IB_USER_VERBS_CMD_CREATE_CQ] = ib_uverbs_create_cq,
93 [IB_USER_VERBS_CMD_RESIZE_CQ] = ib_uverbs_resize_cq, 90 [IB_USER_VERBS_CMD_RESIZE_CQ] = ib_uverbs_resize_cq,
94 [IB_USER_VERBS_CMD_POLL_CQ] = ib_uverbs_poll_cq, 91 [IB_USER_VERBS_CMD_POLL_CQ] = ib_uverbs_poll_cq,
95 [IB_USER_VERBS_CMD_REQ_NOTIFY_CQ] = ib_uverbs_req_notify_cq, 92 [IB_USER_VERBS_CMD_REQ_NOTIFY_CQ] = ib_uverbs_req_notify_cq,
96 [IB_USER_VERBS_CMD_DESTROY_CQ] = ib_uverbs_destroy_cq, 93 [IB_USER_VERBS_CMD_DESTROY_CQ] = ib_uverbs_destroy_cq,
97 [IB_USER_VERBS_CMD_CREATE_QP] = ib_uverbs_create_qp, 94 [IB_USER_VERBS_CMD_CREATE_QP] = ib_uverbs_create_qp,
98 [IB_USER_VERBS_CMD_QUERY_QP] = ib_uverbs_query_qp, 95 [IB_USER_VERBS_CMD_QUERY_QP] = ib_uverbs_query_qp,
99 [IB_USER_VERBS_CMD_MODIFY_QP] = ib_uverbs_modify_qp, 96 [IB_USER_VERBS_CMD_MODIFY_QP] = ib_uverbs_modify_qp,
100 [IB_USER_VERBS_CMD_DESTROY_QP] = ib_uverbs_destroy_qp, 97 [IB_USER_VERBS_CMD_DESTROY_QP] = ib_uverbs_destroy_qp,
101 [IB_USER_VERBS_CMD_POST_SEND] = ib_uverbs_post_send, 98 [IB_USER_VERBS_CMD_POST_SEND] = ib_uverbs_post_send,
102 [IB_USER_VERBS_CMD_POST_RECV] = ib_uverbs_post_recv, 99 [IB_USER_VERBS_CMD_POST_RECV] = ib_uverbs_post_recv,
103 [IB_USER_VERBS_CMD_POST_SRQ_RECV] = ib_uverbs_post_srq_recv, 100 [IB_USER_VERBS_CMD_POST_SRQ_RECV] = ib_uverbs_post_srq_recv,
104 [IB_USER_VERBS_CMD_CREATE_AH] = ib_uverbs_create_ah, 101 [IB_USER_VERBS_CMD_CREATE_AH] = ib_uverbs_create_ah,
105 [IB_USER_VERBS_CMD_DESTROY_AH] = ib_uverbs_destroy_ah, 102 [IB_USER_VERBS_CMD_DESTROY_AH] = ib_uverbs_destroy_ah,
106 [IB_USER_VERBS_CMD_ATTACH_MCAST] = ib_uverbs_attach_mcast, 103 [IB_USER_VERBS_CMD_ATTACH_MCAST] = ib_uverbs_attach_mcast,
107 [IB_USER_VERBS_CMD_DETACH_MCAST] = ib_uverbs_detach_mcast, 104 [IB_USER_VERBS_CMD_DETACH_MCAST] = ib_uverbs_detach_mcast,
108 [IB_USER_VERBS_CMD_CREATE_SRQ] = ib_uverbs_create_srq, 105 [IB_USER_VERBS_CMD_CREATE_SRQ] = ib_uverbs_create_srq,
109 [IB_USER_VERBS_CMD_MODIFY_SRQ] = ib_uverbs_modify_srq, 106 [IB_USER_VERBS_CMD_MODIFY_SRQ] = ib_uverbs_modify_srq,
110 [IB_USER_VERBS_CMD_QUERY_SRQ] = ib_uverbs_query_srq, 107 [IB_USER_VERBS_CMD_QUERY_SRQ] = ib_uverbs_query_srq,
111 [IB_USER_VERBS_CMD_DESTROY_SRQ] = ib_uverbs_destroy_srq, 108 [IB_USER_VERBS_CMD_DESTROY_SRQ] = ib_uverbs_destroy_srq,
112}; 109};
113 110
114static struct vfsmount *uverbs_event_mnt;
115
116static void ib_uverbs_add_one(struct ib_device *device); 111static void ib_uverbs_add_one(struct ib_device *device);
117static void ib_uverbs_remove_one(struct ib_device *device); 112static void ib_uverbs_remove_one(struct ib_device *device);
118 113
@@ -370,7 +365,7 @@ static int ib_uverbs_event_close(struct inode *inode, struct file *filp)
370 365
371static const struct file_operations uverbs_event_fops = { 366static const struct file_operations uverbs_event_fops = {
372 .owner = THIS_MODULE, 367 .owner = THIS_MODULE,
373 .read = ib_uverbs_event_read, 368 .read = ib_uverbs_event_read,
374 .poll = ib_uverbs_event_poll, 369 .poll = ib_uverbs_event_poll,
375 .release = ib_uverbs_event_close, 370 .release = ib_uverbs_event_close,
376 .fasync = ib_uverbs_event_fasync 371 .fasync = ib_uverbs_event_fasync
@@ -489,12 +484,10 @@ void ib_uverbs_event_handler(struct ib_event_handler *handler,
489} 484}
490 485
491struct file *ib_uverbs_alloc_event_file(struct ib_uverbs_file *uverbs_file, 486struct file *ib_uverbs_alloc_event_file(struct ib_uverbs_file *uverbs_file,
492 int is_async, int *fd) 487 int is_async)
493{ 488{
494 struct ib_uverbs_event_file *ev_file; 489 struct ib_uverbs_event_file *ev_file;
495 struct path path;
496 struct file *filp; 490 struct file *filp;
497 int ret;
498 491
499 ev_file = kmalloc(sizeof *ev_file, GFP_KERNEL); 492 ev_file = kmalloc(sizeof *ev_file, GFP_KERNEL);
500 if (!ev_file) 493 if (!ev_file)
@@ -509,38 +502,12 @@ struct file *ib_uverbs_alloc_event_file(struct ib_uverbs_file *uverbs_file,
509 ev_file->is_async = is_async; 502 ev_file->is_async = is_async;
510 ev_file->is_closed = 0; 503 ev_file->is_closed = 0;
511 504
512 *fd = get_unused_fd(); 505 filp = anon_inode_getfile("[infinibandevent]", &uverbs_event_fops,
513 if (*fd < 0) { 506 ev_file, O_RDONLY);
514 ret = *fd; 507 if (IS_ERR(filp))
515 goto err; 508 kfree(ev_file);
516 }
517
518 /*
519 * fops_get() can't fail here, because we're coming from a
520 * system call on a uverbs file, which will already have a
521 * module reference.
522 */
523 path.mnt = uverbs_event_mnt;
524 path.dentry = uverbs_event_mnt->mnt_root;
525 path_get(&path);
526 filp = alloc_file(&path, FMODE_READ, fops_get(&uverbs_event_fops));
527 if (!filp) {
528 ret = -ENFILE;
529 goto err_fd;
530 }
531
532 filp->private_data = ev_file;
533 509
534 return filp; 510 return filp;
535
536err_fd:
537 fops_put(&uverbs_event_fops);
538 path_put(&path);
539 put_unused_fd(*fd);
540
541err:
542 kfree(ev_file);
543 return ERR_PTR(ret);
544} 511}
545 512
546/* 513/*
@@ -617,14 +584,12 @@ static int ib_uverbs_mmap(struct file *filp, struct vm_area_struct *vma)
617/* 584/*
618 * ib_uverbs_open() does not need the BKL: 585 * ib_uverbs_open() does not need the BKL:
619 * 586 *
620 * - dev_table[] accesses are protected by map_lock, the 587 * - the ib_uverbs_device structures are properly reference counted and
621 * ib_uverbs_device structures are properly reference counted, and
622 * everything else is purely local to the file being created, so 588 * everything else is purely local to the file being created, so
623 * races against other open calls are not a problem; 589 * races against other open calls are not a problem;
624 * - there is no ioctl method to race against; 590 * - there is no ioctl method to race against;
625 * - the device is added to dev_table[] as the last part of module 591 * - the open method will either immediately fail with -ENXIO,
626 * initialization, the open method will either immediately run 592 * or all required initialization will be done.
627 * -ENXIO, or all required initialization will be done.
628 */ 593 */
629static int ib_uverbs_open(struct inode *inode, struct file *filp) 594static int ib_uverbs_open(struct inode *inode, struct file *filp)
630{ 595{
@@ -632,13 +597,10 @@ static int ib_uverbs_open(struct inode *inode, struct file *filp)
632 struct ib_uverbs_file *file; 597 struct ib_uverbs_file *file;
633 int ret; 598 int ret;
634 599
635 spin_lock(&map_lock); 600 dev = container_of(inode->i_cdev, struct ib_uverbs_device, cdev);
636 dev = dev_table[iminor(inode) - IB_UVERBS_BASE_MINOR];
637 if (dev) 601 if (dev)
638 kref_get(&dev->ref); 602 kref_get(&dev->ref);
639 spin_unlock(&map_lock); 603 else
640
641 if (!dev)
642 return -ENXIO; 604 return -ENXIO;
643 605
644 if (!try_module_get(dev->ib_dev->owner)) { 606 if (!try_module_get(dev->ib_dev->owner)) {
@@ -685,17 +647,17 @@ static int ib_uverbs_close(struct inode *inode, struct file *filp)
685} 647}
686 648
687static const struct file_operations uverbs_fops = { 649static const struct file_operations uverbs_fops = {
688 .owner = THIS_MODULE, 650 .owner = THIS_MODULE,
689 .write = ib_uverbs_write, 651 .write = ib_uverbs_write,
690 .open = ib_uverbs_open, 652 .open = ib_uverbs_open,
691 .release = ib_uverbs_close 653 .release = ib_uverbs_close
692}; 654};
693 655
694static const struct file_operations uverbs_mmap_fops = { 656static const struct file_operations uverbs_mmap_fops = {
695 .owner = THIS_MODULE, 657 .owner = THIS_MODULE,
696 .write = ib_uverbs_write, 658 .write = ib_uverbs_write,
697 .mmap = ib_uverbs_mmap, 659 .mmap = ib_uverbs_mmap,
698 .open = ib_uverbs_open, 660 .open = ib_uverbs_open,
699 .release = ib_uverbs_close 661 .release = ib_uverbs_close
700}; 662};
701 663
@@ -735,8 +697,38 @@ static ssize_t show_abi_version(struct class *class, char *buf)
735} 697}
736static CLASS_ATTR(abi_version, S_IRUGO, show_abi_version, NULL); 698static CLASS_ATTR(abi_version, S_IRUGO, show_abi_version, NULL);
737 699
700static dev_t overflow_maj;
701static DECLARE_BITMAP(overflow_map, IB_UVERBS_MAX_DEVICES);
702
703/*
704 * If we have more than IB_UVERBS_MAX_DEVICES, dynamically overflow by
705 * requesting a new major number and doubling the number of max devices we
706 * support. It's stupid, but simple.
707 */
708static int find_overflow_devnum(void)
709{
710 int ret;
711
712 if (!overflow_maj) {
713 ret = alloc_chrdev_region(&overflow_maj, 0, IB_UVERBS_MAX_DEVICES,
714 "infiniband_verbs");
715 if (ret) {
716 printk(KERN_ERR "user_verbs: couldn't register dynamic device number\n");
717 return ret;
718 }
719 }
720
721 ret = find_first_zero_bit(overflow_map, IB_UVERBS_MAX_DEVICES);
722 if (ret >= IB_UVERBS_MAX_DEVICES)
723 return -1;
724
725 return ret;
726}
727
738static void ib_uverbs_add_one(struct ib_device *device) 728static void ib_uverbs_add_one(struct ib_device *device)
739{ 729{
730 int devnum;
731 dev_t base;
740 struct ib_uverbs_device *uverbs_dev; 732 struct ib_uverbs_device *uverbs_dev;
741 733
742 if (!device->alloc_ucontext) 734 if (!device->alloc_ucontext)
@@ -750,28 +742,36 @@ static void ib_uverbs_add_one(struct ib_device *device)
750 init_completion(&uverbs_dev->comp); 742 init_completion(&uverbs_dev->comp);
751 743
752 spin_lock(&map_lock); 744 spin_lock(&map_lock);
753 uverbs_dev->devnum = find_first_zero_bit(dev_map, IB_UVERBS_MAX_DEVICES); 745 devnum = find_first_zero_bit(dev_map, IB_UVERBS_MAX_DEVICES);
754 if (uverbs_dev->devnum >= IB_UVERBS_MAX_DEVICES) { 746 if (devnum >= IB_UVERBS_MAX_DEVICES) {
755 spin_unlock(&map_lock); 747 spin_unlock(&map_lock);
756 goto err; 748 devnum = find_overflow_devnum();
749 if (devnum < 0)
750 goto err;
751
752 spin_lock(&map_lock);
753 uverbs_dev->devnum = devnum + IB_UVERBS_MAX_DEVICES;
754 base = devnum + overflow_maj;
755 set_bit(devnum, overflow_map);
756 } else {
757 uverbs_dev->devnum = devnum;
758 base = devnum + IB_UVERBS_BASE_DEV;
759 set_bit(devnum, dev_map);
757 } 760 }
758 set_bit(uverbs_dev->devnum, dev_map);
759 spin_unlock(&map_lock); 761 spin_unlock(&map_lock);
760 762
761 uverbs_dev->ib_dev = device; 763 uverbs_dev->ib_dev = device;
762 uverbs_dev->num_comp_vectors = device->num_comp_vectors; 764 uverbs_dev->num_comp_vectors = device->num_comp_vectors;
763 765
764 uverbs_dev->cdev = cdev_alloc(); 766 cdev_init(&uverbs_dev->cdev, NULL);
765 if (!uverbs_dev->cdev) 767 uverbs_dev->cdev.owner = THIS_MODULE;
766 goto err; 768 uverbs_dev->cdev.ops = device->mmap ? &uverbs_mmap_fops : &uverbs_fops;
767 uverbs_dev->cdev->owner = THIS_MODULE; 769 kobject_set_name(&uverbs_dev->cdev.kobj, "uverbs%d", uverbs_dev->devnum);
768 uverbs_dev->cdev->ops = device->mmap ? &uverbs_mmap_fops : &uverbs_fops; 770 if (cdev_add(&uverbs_dev->cdev, base, 1))
769 kobject_set_name(&uverbs_dev->cdev->kobj, "uverbs%d", uverbs_dev->devnum);
770 if (cdev_add(uverbs_dev->cdev, IB_UVERBS_BASE_DEV + uverbs_dev->devnum, 1))
771 goto err_cdev; 771 goto err_cdev;
772 772
773 uverbs_dev->dev = device_create(uverbs_class, device->dma_device, 773 uverbs_dev->dev = device_create(uverbs_class, device->dma_device,
774 uverbs_dev->cdev->dev, uverbs_dev, 774 uverbs_dev->cdev.dev, uverbs_dev,
775 "uverbs%d", uverbs_dev->devnum); 775 "uverbs%d", uverbs_dev->devnum);
776 if (IS_ERR(uverbs_dev->dev)) 776 if (IS_ERR(uverbs_dev->dev))
777 goto err_cdev; 777 goto err_cdev;
@@ -781,20 +781,19 @@ static void ib_uverbs_add_one(struct ib_device *device)
781 if (device_create_file(uverbs_dev->dev, &dev_attr_abi_version)) 781 if (device_create_file(uverbs_dev->dev, &dev_attr_abi_version))
782 goto err_class; 782 goto err_class;
783 783
784 spin_lock(&map_lock);
785 dev_table[uverbs_dev->devnum] = uverbs_dev;
786 spin_unlock(&map_lock);
787
788 ib_set_client_data(device, &uverbs_client, uverbs_dev); 784 ib_set_client_data(device, &uverbs_client, uverbs_dev);
789 785
790 return; 786 return;
791 787
792err_class: 788err_class:
793 device_destroy(uverbs_class, uverbs_dev->cdev->dev); 789 device_destroy(uverbs_class, uverbs_dev->cdev.dev);
794 790
795err_cdev: 791err_cdev:
796 cdev_del(uverbs_dev->cdev); 792 cdev_del(&uverbs_dev->cdev);
797 clear_bit(uverbs_dev->devnum, dev_map); 793 if (uverbs_dev->devnum < IB_UVERBS_MAX_DEVICES)
794 clear_bit(devnum, dev_map);
795 else
796 clear_bit(devnum, overflow_map);
798 797
799err: 798err:
800 kref_put(&uverbs_dev->ref, ib_uverbs_release_dev); 799 kref_put(&uverbs_dev->ref, ib_uverbs_release_dev);
@@ -811,35 +810,19 @@ static void ib_uverbs_remove_one(struct ib_device *device)
811 return; 810 return;
812 811
813 dev_set_drvdata(uverbs_dev->dev, NULL); 812 dev_set_drvdata(uverbs_dev->dev, NULL);
814 device_destroy(uverbs_class, uverbs_dev->cdev->dev); 813 device_destroy(uverbs_class, uverbs_dev->cdev.dev);
815 cdev_del(uverbs_dev->cdev); 814 cdev_del(&uverbs_dev->cdev);
816 815
817 spin_lock(&map_lock); 816 if (uverbs_dev->devnum < IB_UVERBS_MAX_DEVICES)
818 dev_table[uverbs_dev->devnum] = NULL; 817 clear_bit(uverbs_dev->devnum, dev_map);
819 spin_unlock(&map_lock); 818 else
820 819 clear_bit(uverbs_dev->devnum - IB_UVERBS_MAX_DEVICES, overflow_map);
821 clear_bit(uverbs_dev->devnum, dev_map);
822 820
823 kref_put(&uverbs_dev->ref, ib_uverbs_release_dev); 821 kref_put(&uverbs_dev->ref, ib_uverbs_release_dev);
824 wait_for_completion(&uverbs_dev->comp); 822 wait_for_completion(&uverbs_dev->comp);
825 kfree(uverbs_dev); 823 kfree(uverbs_dev);
826} 824}
827 825
828static int uverbs_event_get_sb(struct file_system_type *fs_type, int flags,
829 const char *dev_name, void *data,
830 struct vfsmount *mnt)
831{
832 return get_sb_pseudo(fs_type, "infinibandevent:", NULL,
833 INFINIBANDEVENTFS_MAGIC, mnt);
834}
835
836static struct file_system_type uverbs_event_fs = {
837 /* No owner field so module can be unloaded */
838 .name = "infinibandeventfs",
839 .get_sb = uverbs_event_get_sb,
840 .kill_sb = kill_litter_super
841};
842
843static int __init ib_uverbs_init(void) 826static int __init ib_uverbs_init(void)
844{ 827{
845 int ret; 828 int ret;
@@ -864,33 +847,14 @@ static int __init ib_uverbs_init(void)
864 goto out_class; 847 goto out_class;
865 } 848 }
866 849
867 ret = register_filesystem(&uverbs_event_fs);
868 if (ret) {
869 printk(KERN_ERR "user_verbs: couldn't register infinibandeventfs\n");
870 goto out_class;
871 }
872
873 uverbs_event_mnt = kern_mount(&uverbs_event_fs);
874 if (IS_ERR(uverbs_event_mnt)) {
875 ret = PTR_ERR(uverbs_event_mnt);
876 printk(KERN_ERR "user_verbs: couldn't mount infinibandeventfs\n");
877 goto out_fs;
878 }
879
880 ret = ib_register_client(&uverbs_client); 850 ret = ib_register_client(&uverbs_client);
881 if (ret) { 851 if (ret) {
882 printk(KERN_ERR "user_verbs: couldn't register client\n"); 852 printk(KERN_ERR "user_verbs: couldn't register client\n");
883 goto out_mnt; 853 goto out_class;
884 } 854 }
885 855
886 return 0; 856 return 0;
887 857
888out_mnt:
889 mntput(uverbs_event_mnt);
890
891out_fs:
892 unregister_filesystem(&uverbs_event_fs);
893
894out_class: 858out_class:
895 class_destroy(uverbs_class); 859 class_destroy(uverbs_class);
896 860
@@ -904,10 +868,10 @@ out:
904static void __exit ib_uverbs_cleanup(void) 868static void __exit ib_uverbs_cleanup(void)
905{ 869{
906 ib_unregister_client(&uverbs_client); 870 ib_unregister_client(&uverbs_client);
907 mntput(uverbs_event_mnt);
908 unregister_filesystem(&uverbs_event_fs);
909 class_destroy(uverbs_class); 871 class_destroy(uverbs_class);
910 unregister_chrdev_region(IB_UVERBS_BASE_DEV, IB_UVERBS_MAX_DEVICES); 872 unregister_chrdev_region(IB_UVERBS_BASE_DEV, IB_UVERBS_MAX_DEVICES);
873 if (overflow_maj)
874 unregister_chrdev_region(overflow_maj, IB_UVERBS_MAX_DEVICES);
911 idr_destroy(&ib_uverbs_pd_idr); 875 idr_destroy(&ib_uverbs_pd_idr);
912 idr_destroy(&ib_uverbs_mr_idr); 876 idr_destroy(&ib_uverbs_mr_idr);
913 idr_destroy(&ib_uverbs_mw_idr); 877 idr_destroy(&ib_uverbs_mw_idr);
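
The payoff in uverbs_main: the private "infinibandeventfs" pseudo-filesystem (superblock, kern_mount(), magic number) existed only to give event files an inode, and anon_inode_getfile() supplies one from the shared anon-inode filesystem instead, which is also why the Kconfig hunk earlier in this series selects ANON_INODES. Anonymous-inode files are observable from userspace too; eventfd(2) is a stock example (Linux-specific):

/* An eventfd descriptor is backed by the shared anon inode; its
 * /proc/self/fd symlink shows the anon_inode:[eventfd] name.
 */
#include <stdio.h>
#include <sys/eventfd.h>
#include <unistd.h>

int main(void)
{
	char path[64], target[128];
	int fd = eventfd(0, 0);
	ssize_t n;

	if (fd < 0) {
		perror("eventfd");
		return 1;
	}
	snprintf(path, sizeof path, "/proc/self/fd/%d", fd);
	n = readlink(path, target, sizeof target - 1);
	if (n > 0) {
		target[n] = '\0';
		printf("%s -> %s\n", path, target);  /* anon_inode:[eventfd] */
	}
	close(fd);
	return 0;
}
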
diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.c b/drivers/infiniband/hw/cxgb3/cxio_hal.c
index 0677fc7dfd51..a28e862f2d68 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_hal.c
+++ b/drivers/infiniband/hw/cxgb3/cxio_hal.c
@@ -109,7 +109,6 @@ int cxio_hal_cq_op(struct cxio_rdev *rdev_p, struct t3_cq *cq,
109 while (!CQ_VLD_ENTRY(rptr, cq->size_log2, cqe)) { 109 while (!CQ_VLD_ENTRY(rptr, cq->size_log2, cqe)) {
110 udelay(1); 110 udelay(1);
111 if (i++ > 1000000) { 111 if (i++ > 1000000) {
112 BUG_ON(1);
113 printk(KERN_ERR "%s: stalled rnic\n", 112 printk(KERN_ERR "%s: stalled rnic\n",
114 rdev_p->dev_name); 113 rdev_p->dev_name);
115 return -EIO; 114 return -EIO;
@@ -155,7 +154,7 @@ static int cxio_hal_clear_qp_ctx(struct cxio_rdev *rdev_p, u32 qpid)
155 return iwch_cxgb3_ofld_send(rdev_p->t3cdev_p, skb); 154 return iwch_cxgb3_ofld_send(rdev_p->t3cdev_p, skb);
156} 155}
157 156
158int cxio_create_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq) 157int cxio_create_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq, int kernel)
159{ 158{
160 struct rdma_cq_setup setup; 159 struct rdma_cq_setup setup;
161 int size = (1UL << (cq->size_log2)) * sizeof(struct t3_cqe); 160 int size = (1UL << (cq->size_log2)) * sizeof(struct t3_cqe);
@@ -163,12 +162,12 @@ int cxio_create_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq)
163 cq->cqid = cxio_hal_get_cqid(rdev_p->rscp); 162 cq->cqid = cxio_hal_get_cqid(rdev_p->rscp);
164 if (!cq->cqid) 163 if (!cq->cqid)
165 return -ENOMEM; 164 return -ENOMEM;
166 cq->sw_queue = kzalloc(size, GFP_KERNEL); 165 if (kernel) {
167 if (!cq->sw_queue) 166 cq->sw_queue = kzalloc(size, GFP_KERNEL);
168 return -ENOMEM; 167 if (!cq->sw_queue)
169 cq->queue = dma_alloc_coherent(&(rdev_p->rnic_info.pdev->dev), 168 return -ENOMEM;
170 (1UL << (cq->size_log2)) * 169 }
171 sizeof(struct t3_cqe), 170 cq->queue = dma_alloc_coherent(&(rdev_p->rnic_info.pdev->dev), size,
172 &(cq->dma_addr), GFP_KERNEL); 171 &(cq->dma_addr), GFP_KERNEL);
173 if (!cq->queue) { 172 if (!cq->queue) {
174 kfree(cq->sw_queue); 173 kfree(cq->sw_queue);
diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.h b/drivers/infiniband/hw/cxgb3/cxio_hal.h
index f3d440cc68f2..073373c2c560 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_hal.h
+++ b/drivers/infiniband/hw/cxgb3/cxio_hal.h
@@ -53,7 +53,7 @@
53#define T3_MAX_PBL_SIZE 256 53#define T3_MAX_PBL_SIZE 256
54#define T3_MAX_RQ_SIZE 1024 54#define T3_MAX_RQ_SIZE 1024
55#define T3_MAX_QP_DEPTH (T3_MAX_RQ_SIZE-1) 55#define T3_MAX_QP_DEPTH (T3_MAX_RQ_SIZE-1)
56#define T3_MAX_CQ_DEPTH 8192 56#define T3_MAX_CQ_DEPTH 262144
57#define T3_MAX_NUM_STAG (1<<15) 57#define T3_MAX_NUM_STAG (1<<15)
58#define T3_MAX_MR_SIZE 0x100000000ULL 58#define T3_MAX_MR_SIZE 0x100000000ULL
59#define T3_PAGESIZE_MASK 0xffff000 /* 4KB-128MB */ 59#define T3_PAGESIZE_MASK 0xffff000 /* 4KB-128MB */
@@ -157,7 +157,7 @@ int cxio_rdev_open(struct cxio_rdev *rdev);
157void cxio_rdev_close(struct cxio_rdev *rdev); 157void cxio_rdev_close(struct cxio_rdev *rdev);
158int cxio_hal_cq_op(struct cxio_rdev *rdev, struct t3_cq *cq, 158int cxio_hal_cq_op(struct cxio_rdev *rdev, struct t3_cq *cq,
159 enum t3_cq_opcode op, u32 credit); 159 enum t3_cq_opcode op, u32 credit);
160int cxio_create_cq(struct cxio_rdev *rdev, struct t3_cq *cq); 160int cxio_create_cq(struct cxio_rdev *rdev, struct t3_cq *cq, int kernel);
161int cxio_destroy_cq(struct cxio_rdev *rdev, struct t3_cq *cq); 161int cxio_destroy_cq(struct cxio_rdev *rdev, struct t3_cq *cq);
162int cxio_resize_cq(struct cxio_rdev *rdev, struct t3_cq *cq); 162int cxio_resize_cq(struct cxio_rdev *rdev, struct t3_cq *cq);
163void cxio_release_ucontext(struct cxio_rdev *rdev, struct cxio_ucontext *uctx); 163void cxio_release_ucontext(struct cxio_rdev *rdev, struct cxio_ucontext *uctx);
diff --git a/drivers/infiniband/hw/cxgb3/cxio_wr.h b/drivers/infiniband/hw/cxgb3/cxio_wr.h
index a197a5b7ac7f..15073b2da1c5 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_wr.h
+++ b/drivers/infiniband/hw/cxgb3/cxio_wr.h
@@ -730,7 +730,22 @@ struct t3_cq {
730 730
731static inline void cxio_set_wq_in_error(struct t3_wq *wq) 731static inline void cxio_set_wq_in_error(struct t3_wq *wq)
732{ 732{
733 wq->queue->wq_in_err.err = 1; 733 wq->queue->wq_in_err.err |= 1;
734}
735
736static inline void cxio_disable_wq_db(struct t3_wq *wq)
737{
738 wq->queue->wq_in_err.err |= 2;
739}
740
741static inline void cxio_enable_wq_db(struct t3_wq *wq)
742{
743 wq->queue->wq_in_err.err &= ~2;
744}
745
746static inline int cxio_wq_db_enabled(struct t3_wq *wq)
747{
748 return !(wq->queue->wq_in_err.err & 2);
734} 749}
735 750
736static inline struct t3_cqe *cxio_next_hw_cqe(struct t3_cq *cq) 751static inline struct t3_cqe *cxio_next_hw_cqe(struct t3_cq *cq)
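
The cxio_wr.h helpers overload the wq_in_err.err word: bit 0 remains the fatal-error flag (note that "=" became "|=" so setting it no longer wipes other bits), and bit 1 is a new doorbells-disabled flag that the post paths test before ringing. The packing in isolation (names are illustrative):

/* Two independent flags share one word, so updates must use |= and
 * &= ~, never plain assignment, exactly the fix in the hunk above.
 */
#include <stdio.h>

#define WQ_IN_ERROR   1u   /* bit 0: queue is in error */
#define WQ_DB_OFF     2u   /* bit 1: doorbells disabled */

static unsigned int err;

static void set_error(void)  { err |= WQ_IN_ERROR; }
static void disable_db(void) { err |= WQ_DB_OFF; }
static void enable_db(void)  { err &= ~WQ_DB_OFF; }
static int  db_enabled(void) { return !(err & WQ_DB_OFF); }

int main(void)
{
	disable_db();
	set_error();                 /* must not clear the DB flag */
	printf("err=%u db_enabled=%d\n", err, db_enabled());
	enable_db();
	printf("err=%u db_enabled=%d\n", err, db_enabled());
	return 0;
}
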
diff --git a/drivers/infiniband/hw/cxgb3/iwch.c b/drivers/infiniband/hw/cxgb3/iwch.c
index b0ea0105ddf6..ee1d8b4d4541 100644
--- a/drivers/infiniband/hw/cxgb3/iwch.c
+++ b/drivers/infiniband/hw/cxgb3/iwch.c
@@ -65,6 +65,46 @@ struct cxgb3_client t3c_client = {
65static LIST_HEAD(dev_list); 65static LIST_HEAD(dev_list);
66static DEFINE_MUTEX(dev_mutex); 66static DEFINE_MUTEX(dev_mutex);
67 67
68static int disable_qp_db(int id, void *p, void *data)
69{
70 struct iwch_qp *qhp = p;
71
72 cxio_disable_wq_db(&qhp->wq);
73 return 0;
74}
75
76static int enable_qp_db(int id, void *p, void *data)
77{
78 struct iwch_qp *qhp = p;
79
80 if (data)
81 ring_doorbell(qhp->rhp->rdev.ctrl_qp.doorbell, qhp->wq.qpid);
82 cxio_enable_wq_db(&qhp->wq);
83 return 0;
84}
85
86static void disable_dbs(struct iwch_dev *rnicp)
87{
88 spin_lock_irq(&rnicp->lock);
89 idr_for_each(&rnicp->qpidr, disable_qp_db, NULL);
90 spin_unlock_irq(&rnicp->lock);
91}
92
93static void enable_dbs(struct iwch_dev *rnicp, int ring_db)
94{
95 spin_lock_irq(&rnicp->lock);
96 idr_for_each(&rnicp->qpidr, enable_qp_db,
97 (void *)(unsigned long)ring_db);
98 spin_unlock_irq(&rnicp->lock);
99}
100
101static void iwch_db_drop_task(struct work_struct *work)
102{
103 struct iwch_dev *rnicp = container_of(work, struct iwch_dev,
104 db_drop_task.work);
105 enable_dbs(rnicp, 1);
106}
107
68static void rnic_init(struct iwch_dev *rnicp) 108static void rnic_init(struct iwch_dev *rnicp)
69{ 109{
70 PDBG("%s iwch_dev %p\n", __func__, rnicp); 110 PDBG("%s iwch_dev %p\n", __func__, rnicp);
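
disable_dbs()/enable_dbs() above visit every QP registered with the device by passing a callback to idr_for_each() under rnicp->lock, threading the ring_db flag through the opaque data pointer as a cast integer. The callback-over-a-registry shape, minus the kernel types (the array registry below stands in for the IDR):

/* Apply a callback to every registered object, threading one opaque
 * argument through, as idr_for_each() does above.
 */
#include <stdio.h>

struct qp { int id; int db_enabled; };

static struct qp *table[8];

static void for_each(int (*fn)(int id, void *p, void *data), void *data)
{
	for (int i = 0; i < 8; i++)
		if (table[i])
			fn(i, table[i], data);
}

static int enable_qp_db(int id, void *p, void *data)
{
	struct qp *qhp = p;

	if (data)                      /* nonzero: also ring the doorbell */
		printf("ring doorbell for qp %d\n", qhp->id);
	qhp->db_enabled = 1;
	return 0;
}

int main(void)
{
	struct qp a = { .id = 1 }, b = { .id = 2 };

	table[0] = &a;
	table[3] = &b;
	for_each(enable_qp_db, (void *)(unsigned long)1);
	printf("a=%d b=%d\n", a.db_enabled, b.db_enabled);
	return 0;
}
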
@@ -72,6 +112,7 @@ static void rnic_init(struct iwch_dev *rnicp)
72 idr_init(&rnicp->qpidr); 112 idr_init(&rnicp->qpidr);
73 idr_init(&rnicp->mmidr); 113 idr_init(&rnicp->mmidr);
74 spin_lock_init(&rnicp->lock); 114 spin_lock_init(&rnicp->lock);
115 INIT_DELAYED_WORK(&rnicp->db_drop_task, iwch_db_drop_task);
75 116
76 rnicp->attr.max_qps = T3_MAX_NUM_QP - 32; 117 rnicp->attr.max_qps = T3_MAX_NUM_QP - 32;
77 rnicp->attr.max_wrs = T3_MAX_QP_DEPTH; 118 rnicp->attr.max_wrs = T3_MAX_QP_DEPTH;
@@ -147,6 +188,8 @@ static void close_rnic_dev(struct t3cdev *tdev)
147 mutex_lock(&dev_mutex); 188 mutex_lock(&dev_mutex);
148 list_for_each_entry_safe(dev, tmp, &dev_list, entry) { 189 list_for_each_entry_safe(dev, tmp, &dev_list, entry) {
149 if (dev->rdev.t3cdev_p == tdev) { 190 if (dev->rdev.t3cdev_p == tdev) {
191 dev->rdev.flags = CXIO_ERROR_FATAL;
192 cancel_delayed_work_sync(&dev->db_drop_task);
150 list_del(&dev->entry); 193 list_del(&dev->entry);
151 iwch_unregister_device(dev); 194 iwch_unregister_device(dev);
152 cxio_rdev_close(&dev->rdev); 195 cxio_rdev_close(&dev->rdev);
@@ -165,7 +208,8 @@ static void iwch_event_handler(struct t3cdev *tdev, u32 evt, u32 port_id)
165 struct cxio_rdev *rdev = tdev->ulp; 208 struct cxio_rdev *rdev = tdev->ulp;
166 struct iwch_dev *rnicp; 209 struct iwch_dev *rnicp;
167 struct ib_event event; 210 struct ib_event event;
168 u32 portnum = port_id + 1; 211 u32 portnum = port_id + 1;
212 int dispatch = 0;
169 213
170 if (!rdev) 214 if (!rdev)
171 return; 215 return;
@@ -174,21 +218,49 @@ static void iwch_event_handler(struct t3cdev *tdev, u32 evt, u32 port_id)
174 case OFFLOAD_STATUS_DOWN: { 218 case OFFLOAD_STATUS_DOWN: {
175 rdev->flags = CXIO_ERROR_FATAL; 219 rdev->flags = CXIO_ERROR_FATAL;
176 event.event = IB_EVENT_DEVICE_FATAL; 220 event.event = IB_EVENT_DEVICE_FATAL;
221 dispatch = 1;
177 break; 222 break;
178 } 223 }
179 case OFFLOAD_PORT_DOWN: { 224 case OFFLOAD_PORT_DOWN: {
180 event.event = IB_EVENT_PORT_ERR; 225 event.event = IB_EVENT_PORT_ERR;
226 dispatch = 1;
181 break; 227 break;
182 } 228 }
183 case OFFLOAD_PORT_UP: { 229 case OFFLOAD_PORT_UP: {
184 event.event = IB_EVENT_PORT_ACTIVE; 230 event.event = IB_EVENT_PORT_ACTIVE;
231 dispatch = 1;
232 break;
233 }
234 case OFFLOAD_DB_FULL: {
235 disable_dbs(rnicp);
236 break;
237 }
238 case OFFLOAD_DB_EMPTY: {
239 enable_dbs(rnicp, 1);
240 break;
241 }
242 case OFFLOAD_DB_DROP: {
243 unsigned long delay = 1000;
244 unsigned short r;
245
246 disable_dbs(rnicp);
247 get_random_bytes(&r, 2);
248 delay += r & 1023;
249
250 /*
251 * delay is between 1000-2023 usecs.
252 */
253 schedule_delayed_work(&rnicp->db_drop_task,
254 usecs_to_jiffies(delay));
185 break; 255 break;
186 } 256 }
187 } 257 }
188 258
189 event.device = &rnicp->ibdev; 259 if (dispatch) {
190 event.element.port_num = portnum; 260 event.device = &rnicp->ibdev;
191 ib_dispatch_event(&event); 261 event.element.port_num = portnum;
262 ib_dispatch_event(&event);
263 }
192 264
193 return; 265 return;
194} 266}
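
The OFFLOAD_DB_DROP case above is the interesting one: doorbells are disabled on every QP, then re-enabled from delayed work after a randomized back-off of 1000 plus 0-1023 usecs, so queues stalled by the same drop event do not all ring again in lock-step. A minimal userspace sketch of that delay computation, with rand() standing in for get_random_bytes() and the function name invented for illustration:

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

static unsigned long db_drop_delay_usecs(void)
{
	unsigned short r = (unsigned short)rand();

	/* 1000 us base plus 0-1023 us of jitter: 1000-2023 us total, so
	 * queues stalled by the same drop event do not ring in lock-step. */
	return 1000UL + (r & 1023);
}

int main(void)
{
	srand((unsigned)time(NULL));
	for (int i = 0; i < 5; i++)
		printf("delay = %lu usecs\n", db_drop_delay_usecs());
	return 0;
}
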
diff --git a/drivers/infiniband/hw/cxgb3/iwch.h b/drivers/infiniband/hw/cxgb3/iwch.h
index 84735506333f..a1c44578e039 100644
--- a/drivers/infiniband/hw/cxgb3/iwch.h
+++ b/drivers/infiniband/hw/cxgb3/iwch.h
@@ -36,6 +36,7 @@
36#include <linux/list.h> 36#include <linux/list.h>
37#include <linux/spinlock.h> 37#include <linux/spinlock.h>
38#include <linux/idr.h> 38#include <linux/idr.h>
39#include <linux/workqueue.h>
39 40
40#include <rdma/ib_verbs.h> 41#include <rdma/ib_verbs.h>
41 42
@@ -110,6 +111,7 @@ struct iwch_dev {
110 struct idr mmidr; 111 struct idr mmidr;
111 spinlock_t lock; 112 spinlock_t lock;
112 struct list_head entry; 113 struct list_head entry;
114 struct delayed_work db_drop_task;
113}; 115};
114 116
115static inline struct iwch_dev *to_iwch_dev(struct ib_device *ibdev) 117static inline struct iwch_dev *to_iwch_dev(struct ib_device *ibdev)
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index ed7175549ebd..47b35c6608d2 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -187,7 +187,7 @@ static struct ib_cq *iwch_create_cq(struct ib_device *ibdev, int entries, int ve
187 entries = roundup_pow_of_two(entries); 187 entries = roundup_pow_of_two(entries);
188 chp->cq.size_log2 = ilog2(entries); 188 chp->cq.size_log2 = ilog2(entries);
189 189
190 if (cxio_create_cq(&rhp->rdev, &chp->cq)) { 190 if (cxio_create_cq(&rhp->rdev, &chp->cq, !ucontext)) {
191 kfree(chp); 191 kfree(chp);
192 return ERR_PTR(-ENOMEM); 192 return ERR_PTR(-ENOMEM);
193 } 193 }
diff --git a/drivers/infiniband/hw/cxgb3/iwch_qp.c b/drivers/infiniband/hw/cxgb3/iwch_qp.c
index 3eb8cecf81d7..b4d893de3650 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_qp.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_qp.c
@@ -452,7 +452,8 @@ int iwch_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
452 ++(qhp->wq.sq_wptr); 452 ++(qhp->wq.sq_wptr);
453 } 453 }
454 spin_unlock_irqrestore(&qhp->lock, flag); 454 spin_unlock_irqrestore(&qhp->lock, flag);
455 ring_doorbell(qhp->wq.doorbell, qhp->wq.qpid); 455 if (cxio_wq_db_enabled(&qhp->wq))
456 ring_doorbell(qhp->wq.doorbell, qhp->wq.qpid);
456 457
457out: 458out:
458 if (err) 459 if (err)
@@ -514,7 +515,8 @@ int iwch_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
514 num_wrs--; 515 num_wrs--;
515 } 516 }
516 spin_unlock_irqrestore(&qhp->lock, flag); 517 spin_unlock_irqrestore(&qhp->lock, flag);
517 ring_doorbell(qhp->wq.doorbell, qhp->wq.qpid); 518 if (cxio_wq_db_enabled(&qhp->wq))
519 ring_doorbell(qhp->wq.doorbell, qhp->wq.qpid);
518 520
519out: 521out:
520 if (err) 522 if (err)
@@ -597,7 +599,8 @@ int iwch_bind_mw(struct ib_qp *qp,
597 ++(qhp->wq.sq_wptr); 599 ++(qhp->wq.sq_wptr);
598 spin_unlock_irqrestore(&qhp->lock, flag); 600 spin_unlock_irqrestore(&qhp->lock, flag);
599 601
600 ring_doorbell(qhp->wq.doorbell, qhp->wq.qpid); 602 if (cxio_wq_db_enabled(&qhp->wq))
603 ring_doorbell(qhp->wq.doorbell, qhp->wq.qpid);
601 604
602 return err; 605 return err;
603} 606}
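
Each post path now checks cxio_wq_db_enabled() before ringing, which (per the cxio_hal.h hunk at the top of this section) just tests bit 1 of the queue's error word. A self-contained sketch of that gate, with stand-in types:

#include <stdio.h>

struct wq_state {
	unsigned int err;	/* stands in for wq->queue->wq_in_err.err */
};

static int wq_db_enabled(const struct wq_state *wq)
{
	return !(wq->err & 2);	/* bit 1 set => doorbells suppressed */
}

int main(void)
{
	struct wq_state wq = { .err = 0 };

	printf("enabled: %d\n", wq_db_enabled(&wq));	/* 1 */
	wq.err |= 2;		/* what disable_dbs() does per queue */
	printf("enabled: %d\n", wq_db_enabled(&wq));	/* 0 */
	return 0;
}
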
diff --git a/drivers/infiniband/hw/ehca/ehca_irq.c b/drivers/infiniband/hw/ehca/ehca_irq.c
index 42be0b15084b..b2b6fea2b141 100644
--- a/drivers/infiniband/hw/ehca/ehca_irq.c
+++ b/drivers/infiniband/hw/ehca/ehca_irq.c
@@ -548,11 +548,10 @@ void ehca_process_eq(struct ehca_shca *shca, int is_irq)
548 struct ehca_eq *eq = &shca->eq; 548 struct ehca_eq *eq = &shca->eq;
549 struct ehca_eqe_cache_entry *eqe_cache = eq->eqe_cache; 549 struct ehca_eqe_cache_entry *eqe_cache = eq->eqe_cache;
550 u64 eqe_value, ret; 550 u64 eqe_value, ret;
551 unsigned long flags;
552 int eqe_cnt, i; 551 int eqe_cnt, i;
553 int eq_empty = 0; 552 int eq_empty = 0;
554 553
555 spin_lock_irqsave(&eq->irq_spinlock, flags); 554 spin_lock(&eq->irq_spinlock);
556 if (is_irq) { 555 if (is_irq) {
557 const int max_query_cnt = 100; 556 const int max_query_cnt = 100;
558 int query_cnt = 0; 557 int query_cnt = 0;
@@ -643,7 +642,7 @@ void ehca_process_eq(struct ehca_shca *shca, int is_irq)
643 } while (1); 642 } while (1);
644 643
645unlock_irq_spinlock: 644unlock_irq_spinlock:
646 spin_unlock_irqrestore(&eq->irq_spinlock, flags); 645 spin_unlock(&eq->irq_spinlock);
647} 646}
648 647
649void ehca_tasklet_eq(unsigned long data) 648void ehca_tasklet_eq(unsigned long data)
diff --git a/drivers/infiniband/hw/ehca/ehca_qp.c b/drivers/infiniband/hw/ehca/ehca_qp.c
index 0338f1fabe8a..b105f664d3ef 100644
--- a/drivers/infiniband/hw/ehca/ehca_qp.c
+++ b/drivers/infiniband/hw/ehca/ehca_qp.c
@@ -55,9 +55,7 @@ static struct kmem_cache *qp_cache;
55/* 55/*
56 * attributes not supported by query qp 56 * attributes not supported by query qp
57 */ 57 */
58#define QP_ATTR_QUERY_NOT_SUPPORTED (IB_QP_MAX_DEST_RD_ATOMIC | \ 58#define QP_ATTR_QUERY_NOT_SUPPORTED (IB_QP_ACCESS_FLAGS | \
59 IB_QP_MAX_QP_RD_ATOMIC | \
60 IB_QP_ACCESS_FLAGS | \
61 IB_QP_EN_SQD_ASYNC_NOTIFY) 59 IB_QP_EN_SQD_ASYNC_NOTIFY)
62 60
63/* 61/*
diff --git a/drivers/infiniband/hw/ehca/ehca_sqp.c b/drivers/infiniband/hw/ehca/ehca_sqp.c
index 8c1213f8916a..dba8f9f8b996 100644
--- a/drivers/infiniband/hw/ehca/ehca_sqp.c
+++ b/drivers/infiniband/hw/ehca/ehca_sqp.c
@@ -222,7 +222,7 @@ int ehca_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
222{ 222{
223 int ret; 223 int ret;
224 224
225 if (!port_num || port_num > ibdev->phys_port_cnt) 225 if (!port_num || port_num > ibdev->phys_port_cnt || !in_wc)
226 return IB_MAD_RESULT_FAILURE; 226 return IB_MAD_RESULT_FAILURE;
227 227
228 /* accept only pma request */ 228 /* accept only pma request */
diff --git a/drivers/infiniband/hw/ipath/ipath_user_pages.c b/drivers/infiniband/hw/ipath/ipath_user_pages.c
index 82878e348627..eb7d59abd12d 100644
--- a/drivers/infiniband/hw/ipath/ipath_user_pages.c
+++ b/drivers/infiniband/hw/ipath/ipath_user_pages.c
@@ -59,8 +59,7 @@ static int __get_user_pages(unsigned long start_page, size_t num_pages,
59 size_t got; 59 size_t got;
60 int ret; 60 int ret;
61 61
62 lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur >> 62 lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
63 PAGE_SHIFT;
64 63
65 if (num_pages > lock_limit) { 64 if (num_pages > lock_limit) {
66 ret = -ENOMEM; 65 ret = -ENOMEM;
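
The ipath hunk swaps the open-coded current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur for the rlimit() accessor before shifting bytes down to pages. A rough userspace analogue using getrlimit(), dividing by the page size instead of shifting by PAGE_SHIFT; a real caller would also special-case RLIM_INFINITY:

#include <stdio.h>
#include <sys/resource.h>
#include <unistd.h>

int main(void)
{
	struct rlimit rl;
	long page_size = sysconf(_SC_PAGESIZE);

	if (getrlimit(RLIMIT_MEMLOCK, &rl) != 0) {
		perror("getrlimit");
		return 1;
	}
	/* rlim_cur may be RLIM_INFINITY; ignored here for brevity */
	printf("memlock soft limit: %llu bytes (%llu pages)\n",
	       (unsigned long long)rl.rlim_cur,
	       (unsigned long long)rl.rlim_cur / (unsigned long long)page_size);
	return 0;
}
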
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 2a97c964b9ef..ae75389937d6 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -1214,7 +1214,7 @@ out:
1214static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr, 1214static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
1215 void *wqe, unsigned *mlx_seg_len) 1215 void *wqe, unsigned *mlx_seg_len)
1216{ 1216{
1217 struct ib_device *ib_dev = &to_mdev(sqp->qp.ibqp.device)->ib_dev; 1217 struct ib_device *ib_dev = sqp->qp.ibqp.device;
1218 struct mlx4_wqe_mlx_seg *mlx = wqe; 1218 struct mlx4_wqe_mlx_seg *mlx = wqe;
1219 struct mlx4_wqe_inline_seg *inl = wqe + sizeof *mlx; 1219 struct mlx4_wqe_inline_seg *inl = wqe + sizeof *mlx;
1220 struct mlx4_ib_ah *ah = to_mah(wr->wr.ud.ah); 1220 struct mlx4_ib_ah *ah = to_mah(wr->wr.ud.ah);
@@ -1228,7 +1228,7 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
1228 for (i = 0; i < wr->num_sge; ++i) 1228 for (i = 0; i < wr->num_sge; ++i)
1229 send_size += wr->sg_list[i].length; 1229 send_size += wr->sg_list[i].length;
1230 1230
1231 ib_ud_header_init(send_size, mlx4_ib_ah_grh_present(ah), &sqp->ud_header); 1231 ib_ud_header_init(send_size, mlx4_ib_ah_grh_present(ah), 0, &sqp->ud_header);
1232 1232
1233 sqp->ud_header.lrh.service_level = 1233 sqp->ud_header.lrh.service_level =
1234 be32_to_cpu(ah->av.sl_tclass_flowlabel) >> 28; 1234 be32_to_cpu(ah->av.sl_tclass_flowlabel) >> 28;
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index c10576fa60c1..d2d172e6289c 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -1494,7 +1494,7 @@ static int build_mlx_header(struct mthca_dev *dev, struct mthca_sqp *sqp,
1494 u16 pkey; 1494 u16 pkey;
1495 1495
1496 ib_ud_header_init(256, /* assume a MAD */ 1496 ib_ud_header_init(256, /* assume a MAD */
1497 mthca_ah_grh_present(to_mah(wr->wr.ud.ah)), 1497 mthca_ah_grh_present(to_mah(wr->wr.ud.ah)), 0,
1498 &sqp->ud_header); 1498 &sqp->ud_header);
1499 1499
1500 err = mthca_read_ah(dev, to_mah(wr->wr.ud.ah), &sqp->ud_header); 1500 err = mthca_read_ah(dev, to_mah(wr->wr.ud.ah), &sqp->ud_header);
diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
index b9d09bafd6c1..4272c52e38a4 100644
--- a/drivers/infiniband/hw/nes/nes.c
+++ b/drivers/infiniband/hw/nes/nes.c
@@ -110,6 +110,7 @@ static unsigned int sysfs_idx_addr;
110 110
111static struct pci_device_id nes_pci_table[] = { 111static struct pci_device_id nes_pci_table[] = {
112 {PCI_VENDOR_ID_NETEFFECT, PCI_DEVICE_ID_NETEFFECT_NE020, PCI_ANY_ID, PCI_ANY_ID}, 112 {PCI_VENDOR_ID_NETEFFECT, PCI_DEVICE_ID_NETEFFECT_NE020, PCI_ANY_ID, PCI_ANY_ID},
113 {PCI_VENDOR_ID_NETEFFECT, PCI_DEVICE_ID_NETEFFECT_NE020_KR, PCI_ANY_ID, PCI_ANY_ID},
113 {0} 114 {0}
114}; 115};
115 116
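
This hunk only teaches the probe table about the new KR board: the 0x0110 device id (PCI_DEVICE_ID_NETEFFECT_NE020_KR, defined in nes.h just below) joins the existing 0x0100 entry. A toy match loop showing how such a zero-terminated id table is consulted; the struct and helper here are invented for illustration:

#include <stdio.h>

struct id { unsigned int vendor, device; };

static const struct id nes_ids[] = {
	{ 0x1678, 0x0100 },	/* NE020 */
	{ 0x1678, 0x0110 },	/* NE020_KR (new) */
	{ 0, 0 }		/* terminator */
};

static int matches(unsigned int v, unsigned int d)
{
	for (const struct id *p = nes_ids; p->vendor; p++)
		if (p->vendor == v && p->device == d)
			return 1;
	return 0;
}

int main(void)
{
	printf("NE020_KR matched: %d\n", matches(0x1678, 0x0110));
	return 0;
}
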
diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
index 98840564bb2f..cc78fee1dd51 100644
--- a/drivers/infiniband/hw/nes/nes.h
+++ b/drivers/infiniband/hw/nes/nes.h
@@ -64,8 +64,9 @@
64 * NetEffect PCI vendor id and NE010 PCI device id. 64 * NetEffect PCI vendor id and NE010 PCI device id.
65 */ 65 */
66#ifndef PCI_VENDOR_ID_NETEFFECT /* not in pci.ids yet */ 66#ifndef PCI_VENDOR_ID_NETEFFECT /* not in pci.ids yet */
67#define PCI_VENDOR_ID_NETEFFECT 0x1678 67#define PCI_VENDOR_ID_NETEFFECT 0x1678
68#define PCI_DEVICE_ID_NETEFFECT_NE020 0x0100 68#define PCI_DEVICE_ID_NETEFFECT_NE020 0x0100
69#define PCI_DEVICE_ID_NETEFFECT_NE020_KR 0x0110
69#endif 70#endif
70 71
71#define NE020_REV 4 72#define NE020_REV 4
@@ -193,8 +194,8 @@ extern u32 cm_packets_created;
193extern u32 cm_packets_received; 194extern u32 cm_packets_received;
194extern u32 cm_packets_dropped; 195extern u32 cm_packets_dropped;
195extern u32 cm_packets_retrans; 196extern u32 cm_packets_retrans;
196extern u32 cm_listens_created; 197extern atomic_t cm_listens_created;
197extern u32 cm_listens_destroyed; 198extern atomic_t cm_listens_destroyed;
198extern u32 cm_backlog_drops; 199extern u32 cm_backlog_drops;
199extern atomic_t cm_loopbacks; 200extern atomic_t cm_loopbacks;
200extern atomic_t cm_nodes_created; 201extern atomic_t cm_nodes_created;
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
index 39468c277036..2a49ee40b520 100644
--- a/drivers/infiniband/hw/nes/nes_cm.c
+++ b/drivers/infiniband/hw/nes/nes_cm.c
@@ -67,8 +67,8 @@ u32 cm_packets_dropped;
67u32 cm_packets_retrans; 67u32 cm_packets_retrans;
68u32 cm_packets_created; 68u32 cm_packets_created;
69u32 cm_packets_received; 69u32 cm_packets_received;
70u32 cm_listens_created; 70atomic_t cm_listens_created;
71u32 cm_listens_destroyed; 71atomic_t cm_listens_destroyed;
72u32 cm_backlog_drops; 72u32 cm_backlog_drops;
73atomic_t cm_loopbacks; 73atomic_t cm_loopbacks;
74atomic_t cm_nodes_created; 74atomic_t cm_nodes_created;
@@ -1011,9 +1011,10 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
1011 event.cm_info.loc_port = 1011 event.cm_info.loc_port =
1012 loopback->loc_port; 1012 loopback->loc_port;
1013 event.cm_info.cm_id = loopback->cm_id; 1013 event.cm_info.cm_id = loopback->cm_id;
1014 add_ref_cm_node(loopback);
1015 loopback->state = NES_CM_STATE_CLOSED;
1014 cm_event_connect_error(&event); 1016 cm_event_connect_error(&event);
1015 cm_node->state = NES_CM_STATE_LISTENER_DESTROYED; 1017 cm_node->state = NES_CM_STATE_LISTENER_DESTROYED;
1016 loopback->state = NES_CM_STATE_CLOSED;
1017 1018
1018 rem_ref_cm_node(cm_node->cm_core, 1019 rem_ref_cm_node(cm_node->cm_core,
1019 cm_node); 1020 cm_node);
@@ -1042,7 +1043,7 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
1042 kfree(listener); 1043 kfree(listener);
1043 listener = NULL; 1044 listener = NULL;
1044 ret = 0; 1045 ret = 0;
1045 cm_listens_destroyed++; 1046 atomic_inc(&cm_listens_destroyed);
1046 } else { 1047 } else {
1047 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags); 1048 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
1048 } 1049 }
@@ -3172,7 +3173,7 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog)
3172 g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node); 3173 g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
3173 return err; 3174 return err;
3174 } 3175 }
3175 cm_listens_created++; 3176 atomic_inc(&cm_listens_created);
3176 } 3177 }
3177 3178
3178 cm_id->add_ref(cm_id); 3179 cm_id->add_ref(cm_id);
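
cm_listens_created and cm_listens_destroyed become atomic_t because they are bumped from paths that do not share a single lock; a plain u32 increment is a read-modify-write that can lose counts. A standalone C11 demonstration of the race and the fix, with atomic_fetch_add() playing the role of the kernel's atomic_inc():

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static unsigned int plain_count;	/* the old u32 counter */
static atomic_uint atomic_count;	/* the new atomic_t counter */

static void *worker(void *arg)
{
	(void)arg;
	for (int i = 0; i < 100000; i++) {
		plain_count++;			/* racy read-modify-write */
		atomic_fetch_add(&atomic_count, 1);	/* safe */
	}
	return NULL;
}

int main(void)
{
	pthread_t t[4];

	for (int i = 0; i < 4; i++)
		pthread_create(&t[i], NULL, worker, NULL);
	for (int i = 0; i < 4; i++)
		pthread_join(t[i], NULL);
	printf("plain:  %u (typically < 400000)\n", plain_count);
	printf("atomic: %u (always 400000)\n", (unsigned int)atomic_count);
	return 0;
}
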
diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c
index b1c2cbb88f09..ce7f53833577 100644
--- a/drivers/infiniband/hw/nes/nes_hw.c
+++ b/drivers/infiniband/hw/nes/nes_hw.c
@@ -748,16 +748,28 @@ static int nes_init_serdes(struct nes_device *nesdev, u8 hw_rev, u8 port_count,
748 748
749 if (hw_rev != NE020_REV) { 749 if (hw_rev != NE020_REV) {
750 /* init serdes 0 */ 750 /* init serdes 0 */
751 if (wide_ppm_offset && (nesadapter->phy_type[0] == NES_PHY_TYPE_CX4)) 751 switch (nesadapter->phy_type[0]) {
752 nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_CDR_CONTROL0, 0x000FFFAA); 752 case NES_PHY_TYPE_CX4:
753 else 753 if (wide_ppm_offset)
754 nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_CDR_CONTROL0, 0x000FFFAA);
755 else
756 nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_CDR_CONTROL0, 0x000000FF);
757 break;
758 case NES_PHY_TYPE_KR:
759 nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_CDR_CONTROL0, 0x000000FF);
760 nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_TX_EMP0, 0x00000000);
761 break;
762 case NES_PHY_TYPE_PUMA_1G:
754 nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_CDR_CONTROL0, 0x000000FF); 763 nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_CDR_CONTROL0, 0x000000FF);
755
756 if (nesadapter->phy_type[0] == NES_PHY_TYPE_PUMA_1G) {
757 sds = nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL0); 764 sds = nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL0);
758 sds |= 0x00000100; 765 sds |= 0x00000100;
759 nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL0, sds); 766 nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL0, sds);
767 break;
768 default:
769 nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_CDR_CONTROL0, 0x000000FF);
770 break;
760 } 771 }
772
761 if (!OneG_Mode) 773 if (!OneG_Mode)
762 nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_TX_HIGHZ_LANE_MODE0, 0x11110000); 774 nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_TX_HIGHZ_LANE_MODE0, 0x11110000);
763 775
@@ -778,6 +790,9 @@ static int nes_init_serdes(struct nes_device *nesdev, u8 hw_rev, u8 port_count,
778 if (wide_ppm_offset) 790 if (wide_ppm_offset)
779 nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_CDR_CONTROL1, 0x000FFFAA); 791 nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_CDR_CONTROL1, 0x000FFFAA);
780 break; 792 break;
793 case NES_PHY_TYPE_KR:
794 nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_TX_EMP1, 0x00000000);
795 break;
781 case NES_PHY_TYPE_PUMA_1G: 796 case NES_PHY_TYPE_PUMA_1G:
782 sds = nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL1); 797 sds = nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL1);
783 sds |= 0x000000100; 798 sds |= 0x000000100;
@@ -1279,115 +1294,115 @@ int nes_destroy_cqp(struct nes_device *nesdev)
1279 1294
1280 1295
1281/** 1296/**
1282 * nes_init_phy 1297 * nes_init_1g_phy
1283 */ 1298 */
1284int nes_init_phy(struct nes_device *nesdev) 1299int nes_init_1g_phy(struct nes_device *nesdev, u8 phy_type, u8 phy_index)
1285{ 1300{
1286 struct nes_adapter *nesadapter = nesdev->nesadapter;
1287 u32 counter = 0; 1301 u32 counter = 0;
1288 u32 sds;
1289 u32 mac_index = nesdev->mac_index;
1290 u32 tx_config = 0;
1291 u16 phy_data; 1302 u16 phy_data;
1292 u32 temp_phy_data = 0; 1303 int ret = 0;
1293 u32 temp_phy_data2 = 0;
1294 u8 phy_type = nesadapter->phy_type[mac_index];
1295 u8 phy_index = nesadapter->phy_index[mac_index];
1296
1297 if ((nesadapter->OneG_Mode) &&
1298 (phy_type != NES_PHY_TYPE_PUMA_1G)) {
1299 nes_debug(NES_DBG_PHY, "1G PHY, mac_index = %d.\n", mac_index);
1300 if (phy_type == NES_PHY_TYPE_1G) {
1301 tx_config = nes_read_indexed(nesdev, NES_IDX_MAC_TX_CONFIG);
1302 tx_config &= 0xFFFFFFE3;
1303 tx_config |= 0x04;
1304 nes_write_indexed(nesdev, NES_IDX_MAC_TX_CONFIG, tx_config);
1305 }
1306 1304
1307 nes_read_1G_phy_reg(nesdev, 1, phy_index, &phy_data); 1305 nes_read_1G_phy_reg(nesdev, 1, phy_index, &phy_data);
1308 nes_write_1G_phy_reg(nesdev, 23, phy_index, 0xb000); 1306 nes_write_1G_phy_reg(nesdev, 23, phy_index, 0xb000);
1309 1307
1310 /* Reset the PHY */ 1308 /* Reset the PHY */
1311 nes_write_1G_phy_reg(nesdev, 0, phy_index, 0x8000); 1309 nes_write_1G_phy_reg(nesdev, 0, phy_index, 0x8000);
1312 udelay(100); 1310 udelay(100);
1313 counter = 0; 1311 counter = 0;
1314 do { 1312 do {
1315 nes_read_1G_phy_reg(nesdev, 0, phy_index, &phy_data);
1316 if (counter++ > 100)
1317 break;
1318 } while (phy_data & 0x8000);
1319
1320 /* Setting no phy loopback */
1321 phy_data &= 0xbfff;
1322 phy_data |= 0x1140;
1323 nes_write_1G_phy_reg(nesdev, 0, phy_index, phy_data);
1324 nes_read_1G_phy_reg(nesdev, 0, phy_index, &phy_data); 1313 nes_read_1G_phy_reg(nesdev, 0, phy_index, &phy_data);
1325 nes_read_1G_phy_reg(nesdev, 0x17, phy_index, &phy_data); 1314 if (counter++ > 100) {
1326 nes_read_1G_phy_reg(nesdev, 0x1e, phy_index, &phy_data); 1315 ret = -1;
1327 1316 break;
1328 /* Setting the interrupt mask */ 1317 }
1329 nes_read_1G_phy_reg(nesdev, 0x19, phy_index, &phy_data); 1318 } while (phy_data & 0x8000);
1330 nes_write_1G_phy_reg(nesdev, 0x19, phy_index, 0xffee); 1319
1331 nes_read_1G_phy_reg(nesdev, 0x19, phy_index, &phy_data); 1320 /* Setting no phy loopback */
1321 phy_data &= 0xbfff;
1322 phy_data |= 0x1140;
1323 nes_write_1G_phy_reg(nesdev, 0, phy_index, phy_data);
1324 nes_read_1G_phy_reg(nesdev, 0, phy_index, &phy_data);
1325 nes_read_1G_phy_reg(nesdev, 0x17, phy_index, &phy_data);
1326 nes_read_1G_phy_reg(nesdev, 0x1e, phy_index, &phy_data);
1327
1328 /* Setting the interrupt mask */
1329 nes_read_1G_phy_reg(nesdev, 0x19, phy_index, &phy_data);
1330 nes_write_1G_phy_reg(nesdev, 0x19, phy_index, 0xffee);
1331 nes_read_1G_phy_reg(nesdev, 0x19, phy_index, &phy_data);
1332
1333 /* turning on flow control */
1334 nes_read_1G_phy_reg(nesdev, 4, phy_index, &phy_data);
1335 nes_write_1G_phy_reg(nesdev, 4, phy_index, (phy_data & ~(0x03E0)) | 0xc00);
1336 nes_read_1G_phy_reg(nesdev, 4, phy_index, &phy_data);
1337
1338 /* Clear Half duplex */
1339 nes_read_1G_phy_reg(nesdev, 9, phy_index, &phy_data);
1340 nes_write_1G_phy_reg(nesdev, 9, phy_index, phy_data & ~(0x0100));
1341 nes_read_1G_phy_reg(nesdev, 9, phy_index, &phy_data);
1342
1343 nes_read_1G_phy_reg(nesdev, 0, phy_index, &phy_data);
1344 nes_write_1G_phy_reg(nesdev, 0, phy_index, phy_data | 0x0300);
1345
1346 return ret;
1347}
1332 1348
1333 /* turning on flow control */
1334 nes_read_1G_phy_reg(nesdev, 4, phy_index, &phy_data);
1335 nes_write_1G_phy_reg(nesdev, 4, phy_index, (phy_data & ~(0x03E0)) | 0xc00);
1336 nes_read_1G_phy_reg(nesdev, 4, phy_index, &phy_data);
1337 1349
1338 /* Clear Half duplex */ 1350/**
1339 nes_read_1G_phy_reg(nesdev, 9, phy_index, &phy_data); 1351 * nes_init_2025_phy
1340 nes_write_1G_phy_reg(nesdev, 9, phy_index, phy_data & ~(0x0100)); 1352 */
1341 nes_read_1G_phy_reg(nesdev, 9, phy_index, &phy_data); 1353int nes_init_2025_phy(struct nes_device *nesdev, u8 phy_type, u8 phy_index)
1354{
1355 u32 temp_phy_data = 0;
1356 u32 temp_phy_data2 = 0;
1357 u32 counter = 0;
1358 u32 sds;
1359 u32 mac_index = nesdev->mac_index;
1360 int ret = 0;
1361 unsigned int first_attempt = 1;
1342 1362
1343 nes_read_1G_phy_reg(nesdev, 0, phy_index, &phy_data); 1363 /* Check firmware heartbeat */
1344 nes_write_1G_phy_reg(nesdev, 0, phy_index, phy_data | 0x0300); 1364 nes_read_10G_phy_reg(nesdev, phy_index, 0x3, 0xd7ee);
1365 temp_phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
1366 udelay(1500);
1367 nes_read_10G_phy_reg(nesdev, phy_index, 0x3, 0xd7ee);
1368 temp_phy_data2 = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
1345 1369
1346 return 0; 1370 if (temp_phy_data != temp_phy_data2) {
1371 nes_read_10G_phy_reg(nesdev, phy_index, 0x3, 0xd7fd);
1372 temp_phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
1373 if ((temp_phy_data & 0xff) > 0x20)
1374 return 0;
1375 printk(PFX "Reinitialize external PHY\n");
1347 } 1376 }
1348 1377
1349 if ((phy_type == NES_PHY_TYPE_IRIS) || 1378 /* no heartbeat, configure the PHY */
1350 (phy_type == NES_PHY_TYPE_ARGUS) || 1379 nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0x0000, 0x8000);
1351 (phy_type == NES_PHY_TYPE_SFP_D)) { 1380 nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc300, 0x0000);
1352 /* setup 10G MDIO operation */ 1381 nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc316, 0x000A);
1353 tx_config = nes_read_indexed(nesdev, NES_IDX_MAC_TX_CONFIG); 1382 nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc318, 0x0052);
1354 tx_config &= 0xFFFFFFE3;
1355 tx_config |= 0x15;
1356 nes_write_indexed(nesdev, NES_IDX_MAC_TX_CONFIG, tx_config);
1357 }
1358 if ((phy_type == NES_PHY_TYPE_ARGUS) ||
1359 (phy_type == NES_PHY_TYPE_SFP_D)) {
1360 u32 first_time = 1;
1361 1383
1362 /* Check firmware heartbeat */ 1384 switch (phy_type) {
1363 nes_read_10G_phy_reg(nesdev, phy_index, 0x3, 0xd7ee); 1385 case NES_PHY_TYPE_ARGUS:
1364 temp_phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL); 1386 nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc316, 0x000A);
1365 udelay(1500); 1387 nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc318, 0x0052);
1366 nes_read_10G_phy_reg(nesdev, phy_index, 0x3, 0xd7ee); 1388 nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc302, 0x000C);
1367 temp_phy_data2 = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL); 1389 nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc319, 0x0008);
1390 nes_write_10G_phy_reg(nesdev, phy_index, 0x3, 0x0027, 0x0001);
1391 nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc31a, 0x0098);
1392 nes_write_10G_phy_reg(nesdev, phy_index, 0x3, 0x0026, 0x0E00);
1368 1393
1369 if (temp_phy_data != temp_phy_data2) { 1394 /* setup LEDs */
1370 nes_read_10G_phy_reg(nesdev, phy_index, 0x3, 0xd7fd); 1395 nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xd006, 0x0007);
1371 temp_phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL); 1396 nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xd007, 0x000A);
1372 if ((temp_phy_data & 0xff) > 0x20) 1397 nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xd008, 0x0009);
1373 return 0; 1398 break;
1374 printk(PFX "Reinitializing PHY\n");
1375 }
1376 1399
1377 /* no heartbeat, configure the PHY */ 1400 case NES_PHY_TYPE_SFP_D:
1378 nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0x0000, 0x8000);
1379 nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc300, 0x0000);
1380 nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc316, 0x000A); 1401 nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc316, 0x000A);
1381 nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc318, 0x0052); 1402 nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc318, 0x0052);
1382 if (phy_type == NES_PHY_TYPE_ARGUS) { 1403 nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc302, 0x0004);
1383 nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc302, 0x000C); 1404 nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc319, 0x0038);
1384 nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc319, 0x0008); 1405 nes_write_10G_phy_reg(nesdev, phy_index, 0x3, 0x0027, 0x0013);
1385 nes_write_10G_phy_reg(nesdev, phy_index, 0x3, 0x0027, 0x0001);
1386 } else {
1387 nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc302, 0x0004);
1388 nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc319, 0x0038);
1389 nes_write_10G_phy_reg(nesdev, phy_index, 0x3, 0x0027, 0x0013);
1390 }
1391 nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc31a, 0x0098); 1406 nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc31a, 0x0098);
1392 nes_write_10G_phy_reg(nesdev, phy_index, 0x3, 0x0026, 0x0E00); 1407 nes_write_10G_phy_reg(nesdev, phy_index, 0x3, 0x0026, 0x0E00);
1393 1408
@@ -1395,71 +1410,136 @@ int nes_init_phy(struct nes_device *nesdev)
1395 nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xd006, 0x0007); 1410 nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xd006, 0x0007);
1396 nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xd007, 0x000A); 1411 nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xd007, 0x000A);
1397 nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xd008, 0x0009); 1412 nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xd008, 0x0009);
1413 break;
1414
1415 case NES_PHY_TYPE_KR:
1416 nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc316, 0x000A);
1417 nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc318, 0x0052);
1418 nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc302, 0x000C);
1419 nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc319, 0x0010);
1420 nes_write_10G_phy_reg(nesdev, phy_index, 0x3, 0x0027, 0x0013);
1421 nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc31a, 0x0080);
1422 nes_write_10G_phy_reg(nesdev, phy_index, 0x3, 0x0026, 0x0E00);
1423
1424 /* setup LEDs */
1425 nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xd006, 0x000B);
1426 nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xd007, 0x0003);
1427 nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xd008, 0x0004);
1398 1428
1399 nes_write_10G_phy_reg(nesdev, phy_index, 0x3, 0x0028, 0xA528); 1429 nes_write_10G_phy_reg(nesdev, phy_index, 0x3, 0x0022, 0x406D);
1430 nes_write_10G_phy_reg(nesdev, phy_index, 0x3, 0x0023, 0x0020);
1431 break;
1432 }
1433
1434 nes_write_10G_phy_reg(nesdev, phy_index, 0x3, 0x0028, 0xA528);
1400 1435
1401 /* Bring PHY out of reset */ 1436 /* Bring PHY out of reset */
1402 nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc300, 0x0002); 1437 nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc300, 0x0002);
1403 1438
1404 /* Check for heartbeat */ 1439 /* Check for heartbeat */
1405 counter = 0; 1440 counter = 0;
1406 mdelay(690); 1441 mdelay(690);
1442 nes_read_10G_phy_reg(nesdev, phy_index, 0x3, 0xd7ee);
1443 temp_phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
1444 do {
1445 if (counter++ > 150) {
1446 printk(PFX "No PHY heartbeat\n");
1447 break;
1448 }
1449 mdelay(1);
1407 nes_read_10G_phy_reg(nesdev, phy_index, 0x3, 0xd7ee); 1450 nes_read_10G_phy_reg(nesdev, phy_index, 0x3, 0xd7ee);
1451 temp_phy_data2 = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
1452 } while ((temp_phy_data2 == temp_phy_data));
1453
1454 /* wait for tracking */
1455 counter = 0;
1456 do {
1457 nes_read_10G_phy_reg(nesdev, phy_index, 0x3, 0xd7fd);
1408 temp_phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL); 1458 temp_phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
1409 do { 1459 if (counter++ > 300) {
1410 if (counter++ > 150) { 1460 if (((temp_phy_data & 0xff) == 0x0) && first_attempt) {
1411 printk(PFX "No PHY heartbeat\n"); 1461 first_attempt = 0;
1462 counter = 0;
1463 /* reset AMCC PHY and try again */
1464 nes_write_10G_phy_reg(nesdev, phy_index, 0x3, 0xe854, 0x00c0);
1465 nes_write_10G_phy_reg(nesdev, phy_index, 0x3, 0xe854, 0x0040);
1466 continue;
1467 } else {
1468 ret = 1;
1412 break; 1469 break;
1413 } 1470 }
1414 mdelay(1); 1471 }
1415 nes_read_10G_phy_reg(nesdev, phy_index, 0x3, 0xd7ee); 1472 mdelay(10);
1416 temp_phy_data2 = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL); 1473 } while ((temp_phy_data & 0xff) < 0x30);
1417 } while ((temp_phy_data2 == temp_phy_data)); 1474
1418 1475 /* setup signal integrity */
1419 /* wait for tracking */ 1476 nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xd003, 0x0000);
1420 counter = 0; 1477 nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xF00D, 0x00FE);
1421 do { 1478 nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xF00E, 0x0032);
1422 nes_read_10G_phy_reg(nesdev, phy_index, 0x3, 0xd7fd); 1479 if (phy_type == NES_PHY_TYPE_KR) {
1423 temp_phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL); 1480 nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xF00F, 0x000C);
1424 if (counter++ > 300) { 1481 } else {
1425 if (((temp_phy_data & 0xff) == 0x0) && first_time) {
1426 first_time = 0;
1427 counter = 0;
1428 /* reset AMCC PHY and try again */
1429 nes_write_10G_phy_reg(nesdev, phy_index, 0x3, 0xe854, 0x00c0);
1430 nes_write_10G_phy_reg(nesdev, phy_index, 0x3, 0xe854, 0x0040);
1431 continue;
1432 } else {
1433 printk(PFX "PHY did not track\n");
1434 break;
1435 }
1436 }
1437 mdelay(10);
1438 } while ((temp_phy_data & 0xff) < 0x30);
1439
1440 /* setup signal integrity */
1441 nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xd003, 0x0000);
1442 nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xF00D, 0x00FE);
1443 nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xF00E, 0x0032);
1444 nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xF00F, 0x0002); 1482 nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xF00F, 0x0002);
1445 nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc314, 0x0063); 1483 nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc314, 0x0063);
1484 }
1485
1486 /* reset serdes */
1487 sds = nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL0 + mac_index * 0x200);
1488 sds |= 0x1;
1489 nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL0 + mac_index * 0x200, sds);
1490 sds &= 0xfffffffe;
1491 nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL0 + mac_index * 0x200, sds);
1492
1493 counter = 0;
1494 while (((nes_read32(nesdev->regs + NES_SOFTWARE_RESET) & 0x00000040) != 0x00000040)
1495 && (counter++ < 5000))
1496 ;
1497
1498 return ret;
1499}
1500
1446 1501
1447 /* reset serdes */ 1502/**
1448 sds = nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL0 + 1503 * nes_init_phy
1449 mac_index * 0x200); 1504 */
1450 sds |= 0x1; 1505int nes_init_phy(struct nes_device *nesdev)
1451 nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL0 + 1506{
1452 mac_index * 0x200, sds); 1507 struct nes_adapter *nesadapter = nesdev->nesadapter;
1453 sds &= 0xfffffffe; 1508 u32 mac_index = nesdev->mac_index;
1454 nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL0 + 1509 u32 tx_config = 0;
1455 mac_index * 0x200, sds); 1510 unsigned long flags;
1456 1511 u8 phy_type = nesadapter->phy_type[mac_index];
1457 counter = 0; 1512 u8 phy_index = nesadapter->phy_index[mac_index];
1458 while (((nes_read32(nesdev->regs + NES_SOFTWARE_RESET) & 0x00000040) != 0x00000040) 1513 int ret = 0;
1459 && (counter++ < 5000)) 1514
1460 ; 1515 tx_config = nes_read_indexed(nesdev, NES_IDX_MAC_TX_CONFIG);
1516 if (phy_type == NES_PHY_TYPE_1G) {
1517 /* setup 1G MDIO operation */
1518 tx_config &= 0xFFFFFFE3;
1519 tx_config |= 0x04;
1520 } else {
1521 /* setup 10G MDIO operation */
1522 tx_config &= 0xFFFFFFE3;
1523 tx_config |= 0x15;
1461 } 1524 }
1462 return 0; 1525 nes_write_indexed(nesdev, NES_IDX_MAC_TX_CONFIG, tx_config);
1526
1527 spin_lock_irqsave(&nesdev->nesadapter->phy_lock, flags);
1528
1529 switch (phy_type) {
1530 case NES_PHY_TYPE_1G:
1531 ret = nes_init_1g_phy(nesdev, phy_type, phy_index);
1532 break;
1533 case NES_PHY_TYPE_ARGUS:
1534 case NES_PHY_TYPE_SFP_D:
1535 case NES_PHY_TYPE_KR:
1536 ret = nes_init_2025_phy(nesdev, phy_type, phy_index);
1537 break;
1538 }
1539
1540 spin_unlock_irqrestore(&nesdev->nesadapter->phy_lock, flags);
1541
1542 return ret;
1463} 1543}
1464 1544
1465 1545
@@ -2460,23 +2540,9 @@ static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number)
2460 } 2540 }
2461 } else { 2541 } else {
2462 switch (nesadapter->phy_type[mac_index]) { 2542 switch (nesadapter->phy_type[mac_index]) {
2463 case NES_PHY_TYPE_IRIS:
2464 nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 1, 1);
2465 temp_phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
2466 u32temp = 20;
2467 do {
2468 nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 1, 1);
2469 phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
2470 if ((phy_data == temp_phy_data) || (!(--u32temp)))
2471 break;
2472 temp_phy_data = phy_data;
2473 } while (1);
2474 nes_debug(NES_DBG_PHY, "%s: Phy data = 0x%04X, link was %s.\n",
2475 __func__, phy_data, nesadapter->mac_link_down[mac_index] ? "DOWN" : "UP");
2476 break;
2477
2478 case NES_PHY_TYPE_ARGUS: 2543 case NES_PHY_TYPE_ARGUS:
2479 case NES_PHY_TYPE_SFP_D: 2544 case NES_PHY_TYPE_SFP_D:
2545 case NES_PHY_TYPE_KR:
2480 /* clear the alarms */ 2546 /* clear the alarms */
2481 nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 4, 0x0008); 2547 nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 4, 0x0008);
2482 nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 4, 0xc001); 2548 nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 4, 0xc001);
@@ -3352,8 +3418,6 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
3352 u16 async_event_id; 3418 u16 async_event_id;
3353 u8 tcp_state; 3419 u8 tcp_state;
3354 u8 iwarp_state; 3420 u8 iwarp_state;
3355 int must_disconn = 1;
3356 int must_terminate = 0;
3357 struct ib_event ibevent; 3421 struct ib_event ibevent;
3358 3422
3359 nes_debug(NES_DBG_AEQ, "\n"); 3423 nes_debug(NES_DBG_AEQ, "\n");
@@ -3367,6 +3431,8 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
3367 BUG_ON(!context); 3431 BUG_ON(!context);
3368 } 3432 }
3369 3433
3434 /* context is nesqp unless async_event_id == CQ ERROR */
3435 nesqp = (struct nes_qp *)(unsigned long)context;
3370 async_event_id = (u16)aeq_info; 3436 async_event_id = (u16)aeq_info;
3371 tcp_state = (aeq_info & NES_AEQE_TCP_STATE_MASK) >> NES_AEQE_TCP_STATE_SHIFT; 3437 tcp_state = (aeq_info & NES_AEQE_TCP_STATE_MASK) >> NES_AEQE_TCP_STATE_SHIFT;
3372 iwarp_state = (aeq_info & NES_AEQE_IWARP_STATE_MASK) >> NES_AEQE_IWARP_STATE_SHIFT; 3438 iwarp_state = (aeq_info & NES_AEQE_IWARP_STATE_MASK) >> NES_AEQE_IWARP_STATE_SHIFT;
@@ -3378,8 +3444,6 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
3378 3444
3379 switch (async_event_id) { 3445 switch (async_event_id) {
3380 case NES_AEQE_AEID_LLP_FIN_RECEIVED: 3446 case NES_AEQE_AEID_LLP_FIN_RECEIVED:
3381 nesqp = (struct nes_qp *)(unsigned long)context;
3382
3383 if (nesqp->term_flags) 3447 if (nesqp->term_flags)
3384 return; /* Ignore it, wait for close complete */ 3448 return; /* Ignore it, wait for close complete */
3385 3449
@@ -3394,79 +3458,48 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
3394 async_event_id, nesqp->last_aeq, tcp_state); 3458 async_event_id, nesqp->last_aeq, tcp_state);
3395 } 3459 }
3396 3460
3397 if ((tcp_state != NES_AEQE_TCP_STATE_CLOSE_WAIT) || 3461 break;
3398 (nesqp->ibqp_state != IB_QPS_RTS)) {
3399 /* FIN Received but tcp state or IB state moved on,
3400 should expect a close complete */
3401 return;
3402 }
3403
3404 case NES_AEQE_AEID_LLP_CLOSE_COMPLETE: 3462 case NES_AEQE_AEID_LLP_CLOSE_COMPLETE:
3405 nesqp = (struct nes_qp *)(unsigned long)context;
3406 if (nesqp->term_flags) { 3463 if (nesqp->term_flags) {
3407 nes_terminate_done(nesqp, 0); 3464 nes_terminate_done(nesqp, 0);
3408 return; 3465 return;
3409 } 3466 }
3467 spin_lock_irqsave(&nesqp->lock, flags);
3468 nesqp->hw_iwarp_state = NES_AEQE_IWARP_STATE_CLOSING;
3469 spin_unlock_irqrestore(&nesqp->lock, flags);
3470 nes_hw_modify_qp(nesdev, nesqp, NES_CQP_QP_IWARP_STATE_CLOSING, 0, 0);
3471 nes_cm_disconn(nesqp);
3472 break;
3410 3473
3411 case NES_AEQE_AEID_LLP_CONNECTION_RESET:
3412 case NES_AEQE_AEID_RESET_SENT: 3474 case NES_AEQE_AEID_RESET_SENT:
3413 nesqp = (struct nes_qp *)(unsigned long)context; 3475 tcp_state = NES_AEQE_TCP_STATE_CLOSED;
3414 if (async_event_id == NES_AEQE_AEID_RESET_SENT) {
3415 tcp_state = NES_AEQE_TCP_STATE_CLOSED;
3416 }
3417 spin_lock_irqsave(&nesqp->lock, flags); 3476 spin_lock_irqsave(&nesqp->lock, flags);
3418 nesqp->hw_iwarp_state = iwarp_state; 3477 nesqp->hw_iwarp_state = iwarp_state;
3419 nesqp->hw_tcp_state = tcp_state; 3478 nesqp->hw_tcp_state = tcp_state;
3420 nesqp->last_aeq = async_event_id; 3479 nesqp->last_aeq = async_event_id;
3421 3480 nesqp->hte_added = 0;
3422 if ((tcp_state == NES_AEQE_TCP_STATE_CLOSED) ||
3423 (tcp_state == NES_AEQE_TCP_STATE_TIME_WAIT)) {
3424 nesqp->hte_added = 0;
3425 next_iwarp_state = NES_CQP_QP_IWARP_STATE_ERROR | NES_CQP_QP_DEL_HTE;
3426 }
3427
3428 if ((nesqp->ibqp_state == IB_QPS_RTS) &&
3429 ((tcp_state == NES_AEQE_TCP_STATE_CLOSE_WAIT) ||
3430 (async_event_id == NES_AEQE_AEID_LLP_CONNECTION_RESET))) {
3431 switch (nesqp->hw_iwarp_state) {
3432 case NES_AEQE_IWARP_STATE_RTS:
3433 next_iwarp_state = NES_CQP_QP_IWARP_STATE_CLOSING;
3434 nesqp->hw_iwarp_state = NES_AEQE_IWARP_STATE_CLOSING;
3435 break;
3436 case NES_AEQE_IWARP_STATE_TERMINATE:
3437 must_disconn = 0; /* terminate path takes care of disconn */
3438 if (nesqp->term_flags == 0)
3439 must_terminate = 1;
3440 break;
3441 }
3442 } else {
3443 if (async_event_id == NES_AEQE_AEID_LLP_FIN_RECEIVED) {
3444 /* FIN Received but ib state not RTS,
3445 close complete will be on its way */
3446 must_disconn = 0;
3447 }
3448 }
3449 spin_unlock_irqrestore(&nesqp->lock, flags); 3481 spin_unlock_irqrestore(&nesqp->lock, flags);
3482 next_iwarp_state = NES_CQP_QP_IWARP_STATE_ERROR | NES_CQP_QP_DEL_HTE;
3483 nes_hw_modify_qp(nesdev, nesqp, next_iwarp_state, 0, 0);
3484 nes_cm_disconn(nesqp);
3485 break;
3450 3486
3451 if (must_terminate) 3487 case NES_AEQE_AEID_LLP_CONNECTION_RESET:
3452 nes_terminate_connection(nesdev, nesqp, aeqe, IB_EVENT_QP_FATAL); 3488 if (atomic_read(&nesqp->close_timer_started))
3453 else if (must_disconn) { 3489 return;
3454 if (next_iwarp_state) { 3490 spin_lock_irqsave(&nesqp->lock, flags);
3455 nes_debug(NES_DBG_AEQ, "issuing hw modifyqp for QP%u. next state = 0x%08X\n", 3491 nesqp->hw_iwarp_state = iwarp_state;
3456 nesqp->hwqp.qp_id, next_iwarp_state); 3492 nesqp->hw_tcp_state = tcp_state;
3457 nes_hw_modify_qp(nesdev, nesqp, next_iwarp_state, 0, 0); 3493 nesqp->last_aeq = async_event_id;
3458 } 3494 spin_unlock_irqrestore(&nesqp->lock, flags);
3459 nes_cm_disconn(nesqp); 3495 nes_cm_disconn(nesqp);
3460 }
3461 break; 3496 break;
3462 3497
3463 case NES_AEQE_AEID_TERMINATE_SENT: 3498 case NES_AEQE_AEID_TERMINATE_SENT:
3464 nesqp = (struct nes_qp *)(unsigned long)context;
3465 nes_terminate_send_fin(nesdev, nesqp, aeqe); 3499 nes_terminate_send_fin(nesdev, nesqp, aeqe);
3466 break; 3500 break;
3467 3501
3468 case NES_AEQE_AEID_LLP_TERMINATE_RECEIVED: 3502 case NES_AEQE_AEID_LLP_TERMINATE_RECEIVED:
3469 nesqp = (struct nes_qp *)(unsigned long)context;
3470 nes_terminate_received(nesdev, nesqp, aeqe); 3503 nes_terminate_received(nesdev, nesqp, aeqe);
3471 break; 3504 break;
3472 3505
@@ -3480,7 +3513,8 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
3480 case NES_AEQE_AEID_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER: 3513 case NES_AEQE_AEID_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER:
3481 case NES_AEQE_AEID_AMP_BOUNDS_VIOLATION: 3514 case NES_AEQE_AEID_AMP_BOUNDS_VIOLATION:
3482 case NES_AEQE_AEID_AMP_TO_WRAP: 3515 case NES_AEQE_AEID_AMP_TO_WRAP:
3483 nesqp = (struct nes_qp *)(unsigned long)context; 3516 printk(KERN_ERR PFX "QP[%u] async_event_id=0x%04X IB_EVENT_QP_ACCESS_ERR\n",
3517 nesqp->hwqp.qp_id, async_event_id);
3484 nes_terminate_connection(nesdev, nesqp, aeqe, IB_EVENT_QP_ACCESS_ERR); 3518 nes_terminate_connection(nesdev, nesqp, aeqe, IB_EVENT_QP_ACCESS_ERR);
3485 break; 3519 break;
3486 3520
@@ -3488,7 +3522,6 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
3488 case NES_AEQE_AEID_LLP_SEGMENT_TOO_SMALL: 3522 case NES_AEQE_AEID_LLP_SEGMENT_TOO_SMALL:
3489 case NES_AEQE_AEID_DDP_UBE_INVALID_MO: 3523 case NES_AEQE_AEID_DDP_UBE_INVALID_MO:
3490 case NES_AEQE_AEID_DDP_UBE_INVALID_QN: 3524 case NES_AEQE_AEID_DDP_UBE_INVALID_QN:
3491 nesqp = (struct nes_qp *)(unsigned long)context;
3492 if (iwarp_opcode(nesqp, aeq_info) > IWARP_OPCODE_TERM) { 3525 if (iwarp_opcode(nesqp, aeq_info) > IWARP_OPCODE_TERM) {
3493 aeq_info &= 0xffff0000; 3526 aeq_info &= 0xffff0000;
3494 aeq_info |= NES_AEQE_AEID_RDMAP_ROE_UNEXPECTED_OPCODE; 3527 aeq_info |= NES_AEQE_AEID_RDMAP_ROE_UNEXPECTED_OPCODE;
@@ -3530,7 +3563,8 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
3530 case NES_AEQE_AEID_STAG_ZERO_INVALID: 3563 case NES_AEQE_AEID_STAG_ZERO_INVALID:
3531 case NES_AEQE_AEID_ROE_INVALID_RDMA_READ_REQUEST: 3564 case NES_AEQE_AEID_ROE_INVALID_RDMA_READ_REQUEST:
3532 case NES_AEQE_AEID_ROE_INVALID_RDMA_WRITE_OR_READ_RESP: 3565 case NES_AEQE_AEID_ROE_INVALID_RDMA_WRITE_OR_READ_RESP:
3533 nesqp = (struct nes_qp *)(unsigned long)context; 3566 printk(KERN_ERR PFX "QP[%u] async_event_id=0x%04X IB_EVENT_QP_FATAL\n",
3567 nesqp->hwqp.qp_id, async_event_id);
3534 nes_terminate_connection(nesdev, nesqp, aeqe, IB_EVENT_QP_FATAL); 3568 nes_terminate_connection(nesdev, nesqp, aeqe, IB_EVENT_QP_FATAL);
3535 break; 3569 break;
3536 3570
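
Among the nes_hw.c changes, the refactored nes_init_2025_phy() keeps the old firmware-liveness test: read the heartbeat register (0xd7ee) twice, 1.5 ms apart, and skip reprogramming if the value moved. The shape of that check as a trivial runnable sketch, with a plain counter standing in for the MDIO register:

#include <stdio.h>

static unsigned int fw_ticks;	/* stands in for the 0xd7ee MDIO register */

static unsigned int read_heartbeat(void)
{
	return fw_ticks;
}

int main(void)
{
	unsigned int first = read_heartbeat();

	fw_ticks += 3;		/* live firmware: the counter advances */
	if (read_heartbeat() != first)
		printf("heartbeat detected, PHY already configured\n");
	else
		printf("no heartbeat, (re)program the PHY\n");
	return 0;
}
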
diff --git a/drivers/infiniband/hw/nes/nes_hw.h b/drivers/infiniband/hw/nes/nes_hw.h
index 084be0ee689b..9b1e7f869d83 100644
--- a/drivers/infiniband/hw/nes/nes_hw.h
+++ b/drivers/infiniband/hw/nes/nes_hw.h
@@ -37,12 +37,12 @@
37 37
38#define NES_PHY_TYPE_CX4 1 38#define NES_PHY_TYPE_CX4 1
39#define NES_PHY_TYPE_1G 2 39#define NES_PHY_TYPE_1G 2
40#define NES_PHY_TYPE_IRIS 3
41#define NES_PHY_TYPE_ARGUS 4 40#define NES_PHY_TYPE_ARGUS 4
42#define NES_PHY_TYPE_PUMA_1G 5 41#define NES_PHY_TYPE_PUMA_1G 5
43#define NES_PHY_TYPE_PUMA_10G 6 42#define NES_PHY_TYPE_PUMA_10G 6
44#define NES_PHY_TYPE_GLADIUS 7 43#define NES_PHY_TYPE_GLADIUS 7
45#define NES_PHY_TYPE_SFP_D 8 44#define NES_PHY_TYPE_SFP_D 8
45#define NES_PHY_TYPE_KR 9
46 46
47#define NES_MULTICAST_PF_MAX 8 47#define NES_MULTICAST_PF_MAX 8
48 48
diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
index 9384f5d3d33b..a1d79b6856ac 100644
--- a/drivers/infiniband/hw/nes/nes_nic.c
+++ b/drivers/infiniband/hw/nes/nes_nic.c
@@ -1243,8 +1243,8 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
1243 target_stat_values[++index] = cm_packets_received; 1243 target_stat_values[++index] = cm_packets_received;
1244 target_stat_values[++index] = cm_packets_dropped; 1244 target_stat_values[++index] = cm_packets_dropped;
1245 target_stat_values[++index] = cm_packets_retrans; 1245 target_stat_values[++index] = cm_packets_retrans;
1246 target_stat_values[++index] = cm_listens_created; 1246 target_stat_values[++index] = atomic_read(&cm_listens_created);
1247 target_stat_values[++index] = cm_listens_destroyed; 1247 target_stat_values[++index] = atomic_read(&cm_listens_destroyed);
1248 target_stat_values[++index] = cm_backlog_drops; 1248 target_stat_values[++index] = cm_backlog_drops;
1249 target_stat_values[++index] = atomic_read(&cm_loopbacks); 1249 target_stat_values[++index] = atomic_read(&cm_loopbacks);
1250 target_stat_values[++index] = atomic_read(&cm_nodes_created); 1250 target_stat_values[++index] = atomic_read(&cm_nodes_created);
@@ -1474,9 +1474,9 @@ static int nes_netdev_get_settings(struct net_device *netdev, struct ethtool_cmd
1474 } 1474 }
1475 return 0; 1475 return 0;
1476 } 1476 }
1477 if ((phy_type == NES_PHY_TYPE_IRIS) || 1477 if ((phy_type == NES_PHY_TYPE_ARGUS) ||
1478 (phy_type == NES_PHY_TYPE_ARGUS) || 1478 (phy_type == NES_PHY_TYPE_SFP_D) ||
1479 (phy_type == NES_PHY_TYPE_SFP_D)) { 1479 (phy_type == NES_PHY_TYPE_KR)) {
1480 et_cmd->transceiver = XCVR_EXTERNAL; 1480 et_cmd->transceiver = XCVR_EXTERNAL;
1481 et_cmd->port = PORT_FIBRE; 1481 et_cmd->port = PORT_FIBRE;
1482 et_cmd->supported = SUPPORTED_FIBRE; 1482 et_cmd->supported = SUPPORTED_FIBRE;
@@ -1596,8 +1596,7 @@ struct net_device *nes_netdev_init(struct nes_device *nesdev,
1596 struct net_device *netdev; 1596 struct net_device *netdev;
1597 struct nic_qp_map *curr_qp_map; 1597 struct nic_qp_map *curr_qp_map;
1598 u32 u32temp; 1598 u32 u32temp;
1599 u16 phy_data; 1599 u8 phy_type = nesdev->nesadapter->phy_type[nesdev->mac_index];
1600 u16 temp_phy_data;
1601 1600
1602 netdev = alloc_etherdev(sizeof(struct nes_vnic)); 1601 netdev = alloc_etherdev(sizeof(struct nes_vnic));
1603 if (!netdev) { 1602 if (!netdev) {
@@ -1705,65 +1704,23 @@ struct net_device *nes_netdev_init(struct nes_device *nesdev,
1705 1704
1706 if ((nesdev->netdev_count == 0) && 1705 if ((nesdev->netdev_count == 0) &&
1707 ((PCI_FUNC(nesdev->pcidev->devfn) == nesdev->mac_index) || 1706 ((PCI_FUNC(nesdev->pcidev->devfn) == nesdev->mac_index) ||
1708 ((nesdev->nesadapter->phy_type[nesdev->mac_index] == NES_PHY_TYPE_PUMA_1G) && 1707 ((phy_type == NES_PHY_TYPE_PUMA_1G) &&
1709 (((PCI_FUNC(nesdev->pcidev->devfn) == 1) && (nesdev->mac_index == 2)) || 1708 (((PCI_FUNC(nesdev->pcidev->devfn) == 1) && (nesdev->mac_index == 2)) ||
1710 ((PCI_FUNC(nesdev->pcidev->devfn) == 2) && (nesdev->mac_index == 1)))))) { 1709 ((PCI_FUNC(nesdev->pcidev->devfn) == 2) && (nesdev->mac_index == 1)))))) {
1711 /*
1712 * nes_debug(NES_DBG_INIT, "Setting up PHY interrupt mask. Using register index 0x%04X\n",
1713 * NES_IDX_PHY_PCS_CONTROL_STATUS0 + (0x200 * (nesvnic->logical_port & 1)));
1714 */
1715 u32temp = nes_read_indexed(nesdev, NES_IDX_PHY_PCS_CONTROL_STATUS0 + 1710 u32temp = nes_read_indexed(nesdev, NES_IDX_PHY_PCS_CONTROL_STATUS0 +
1716 (0x200 * (nesdev->mac_index & 1))); 1711 (0x200 * (nesdev->mac_index & 1)));
1717 if (nesdev->nesadapter->phy_type[nesdev->mac_index] != NES_PHY_TYPE_PUMA_1G) { 1712 if (phy_type != NES_PHY_TYPE_PUMA_1G) {
1718 u32temp |= 0x00200000; 1713 u32temp |= 0x00200000;
1719 nes_write_indexed(nesdev, NES_IDX_PHY_PCS_CONTROL_STATUS0 + 1714 nes_write_indexed(nesdev, NES_IDX_PHY_PCS_CONTROL_STATUS0 +
1720 (0x200 * (nesdev->mac_index & 1)), u32temp); 1715 (0x200 * (nesdev->mac_index & 1)), u32temp);
1721 } 1716 }
1722 1717
1723 u32temp = nes_read_indexed(nesdev, NES_IDX_PHY_PCS_CONTROL_STATUS0 +
1724 (0x200 * (nesdev->mac_index & 1)));
1725
1726 if ((u32temp&0x0f1f0000) == 0x0f0f0000) {
1727 if (nesdev->nesadapter->phy_type[nesdev->mac_index] == NES_PHY_TYPE_IRIS) {
1728 nes_init_phy(nesdev);
1729 nes_read_10G_phy_reg(nesdev, nesdev->nesadapter->phy_index[nesdev->mac_index], 1, 1);
1730 temp_phy_data = (u16)nes_read_indexed(nesdev,
1731 NES_IDX_MAC_MDIO_CONTROL);
1732 u32temp = 20;
1733 do {
1734 nes_read_10G_phy_reg(nesdev, nesdev->nesadapter->phy_index[nesdev->mac_index], 1, 1);
1735 phy_data = (u16)nes_read_indexed(nesdev,
1736 NES_IDX_MAC_MDIO_CONTROL);
1737 if ((phy_data == temp_phy_data) || (!(--u32temp)))
1738 break;
1739 temp_phy_data = phy_data;
1740 } while (1);
1741 if (phy_data & 4) {
1742 nes_debug(NES_DBG_INIT, "The Link is UP!!.\n");
1743 nesvnic->linkup = 1;
1744 } else {
1745 nes_debug(NES_DBG_INIT, "The Link is DOWN!!.\n");
1746 }
1747 } else {
1748 nes_debug(NES_DBG_INIT, "The Link is UP!!.\n");
1749 nesvnic->linkup = 1;
1750 }
1751 } else if (nesdev->nesadapter->phy_type[nesdev->mac_index] == NES_PHY_TYPE_PUMA_1G) {
1752 nes_debug(NES_DBG_INIT, "mac_index=%d, logical_port=%d, u32temp=0x%04X, PCI_FUNC=%d\n",
1753 nesdev->mac_index, nesvnic->logical_port, u32temp, PCI_FUNC(nesdev->pcidev->devfn));
1754 if (((nesdev->mac_index < 2) && ((u32temp&0x01010000) == 0x01010000)) ||
1755 ((nesdev->mac_index > 1) && ((u32temp&0x02020000) == 0x02020000))) {
1756 nes_debug(NES_DBG_INIT, "The Link is UP!!.\n");
1757 nesvnic->linkup = 1;
1758 }
1759 }
1760 /* clear the MAC interrupt status, assumes direct logical to physical mapping */ 1718 /* clear the MAC interrupt status, assumes direct logical to physical mapping */
1761 u32temp = nes_read_indexed(nesdev, NES_IDX_MAC_INT_STATUS + (0x200 * nesdev->mac_index)); 1719 u32temp = nes_read_indexed(nesdev, NES_IDX_MAC_INT_STATUS + (0x200 * nesdev->mac_index));
1762 nes_debug(NES_DBG_INIT, "Phy interrupt status = 0x%X.\n", u32temp); 1720 nes_debug(NES_DBG_INIT, "Phy interrupt status = 0x%X.\n", u32temp);
1763 nes_write_indexed(nesdev, NES_IDX_MAC_INT_STATUS + (0x200 * nesdev->mac_index), u32temp); 1721 nes_write_indexed(nesdev, NES_IDX_MAC_INT_STATUS + (0x200 * nesdev->mac_index), u32temp);
1764 1722
1765 if (nesdev->nesadapter->phy_type[nesdev->mac_index] != NES_PHY_TYPE_IRIS) 1723 nes_init_phy(nesdev);
1766 nes_init_phy(nesdev);
1767 1724
1768 } 1725 }
1769 1726
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
index 64d3136e3747..815725f886c4 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.c
+++ b/drivers/infiniband/hw/nes/nes_verbs.c
@@ -228,7 +228,7 @@ static int nes_bind_mw(struct ib_qp *ibqp, struct ib_mw *ibmw,
228 /* Check for SQ overflow */ 228 /* Check for SQ overflow */
229 if (((head + (2 * qsize) - nesqp->hwqp.sq_tail) % qsize) == (qsize - 1)) { 229 if (((head + (2 * qsize) - nesqp->hwqp.sq_tail) % qsize) == (qsize - 1)) {
230 spin_unlock_irqrestore(&nesqp->lock, flags); 230 spin_unlock_irqrestore(&nesqp->lock, flags);
231 return -EINVAL; 231 return -ENOMEM;
232 } 232 }
233 233
234 wqe = &nesqp->hwqp.sq_vbase[head]; 234 wqe = &nesqp->hwqp.sq_vbase[head];
@@ -3294,7 +3294,7 @@ static int nes_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr,
3294 3294
3295 /* Check for SQ overflow */ 3295 /* Check for SQ overflow */
3296 if (((head + (2 * qsize) - nesqp->hwqp.sq_tail) % qsize) == (qsize - 1)) { 3296 if (((head + (2 * qsize) - nesqp->hwqp.sq_tail) % qsize) == (qsize - 1)) {
3297 err = -EINVAL; 3297 err = -ENOMEM;
3298 break; 3298 break;
3299 } 3299 }
3300 3300
@@ -3577,7 +3577,7 @@ static int nes_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *ib_wr,
3577 } 3577 }
3578 /* Check for RQ overflow */ 3578 /* Check for RQ overflow */
3579 if (((head + (2 * qsize) - nesqp->hwqp.rq_tail) % qsize) == (qsize - 1)) { 3579 if (((head + (2 * qsize) - nesqp->hwqp.rq_tail) % qsize) == (qsize - 1)) {
3580 err = -EINVAL; 3580 err = -ENOMEM;
3581 break; 3581 break;
3582 } 3582 }
3583 3583
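
Returning -ENOMEM instead of -EINVAL better matches what these checks detect: a full send or receive ring, not a malformed request. The fullness test itself declares the ring full when head sits one slot behind tail modulo qsize; the + 2 * qsize merely keeps the unsigned dividend from wrapping below zero. A runnable sketch of the predicate:

#include <stdio.h>

static int sq_full(unsigned int head, unsigned int tail, unsigned int qsize)
{
	/* full when head is one slot behind tail, modulo qsize */
	return ((head + 2 * qsize - tail) % qsize) == (qsize - 1);
}

int main(void)
{
	unsigned int qsize = 8, tail = 5;

	for (unsigned int head = 0; head < qsize; head++)
		printf("head=%u tail=%u full=%d\n",
		       head, tail, sq_full(head, tail, qsize));
	/* reports full only at head == 4, one slot behind tail */
	return 0;
}
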
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
index e9795f60e5d6..d10b4ec68d28 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
@@ -55,9 +55,7 @@ static int ipoib_get_coalesce(struct net_device *dev,
55 struct ipoib_dev_priv *priv = netdev_priv(dev); 55 struct ipoib_dev_priv *priv = netdev_priv(dev);
56 56
57 coal->rx_coalesce_usecs = priv->ethtool.coalesce_usecs; 57 coal->rx_coalesce_usecs = priv->ethtool.coalesce_usecs;
58 coal->tx_coalesce_usecs = priv->ethtool.coalesce_usecs;
59 coal->rx_max_coalesced_frames = priv->ethtool.max_coalesced_frames; 58 coal->rx_max_coalesced_frames = priv->ethtool.max_coalesced_frames;
60 coal->tx_max_coalesced_frames = priv->ethtool.max_coalesced_frames;
61 59
62 return 0; 60 return 0;
63} 61}
@@ -69,10 +67,8 @@ static int ipoib_set_coalesce(struct net_device *dev,
69 int ret; 67 int ret;
70 68
71 /* 69 /*
72 * Since IPoIB uses a single CQ for both rx and tx, we assume 70 * These values are saved in the private data and returned
73 * that rx params dictate the configuration. These values are 71 * when ipoib_get_coalesce() is called
74 * saved in the private data and returned when ipoib_get_coalesce()
75 * is called.
76 */ 72 */
77 if (coal->rx_coalesce_usecs > 0xffff || 73 if (coal->rx_coalesce_usecs > 0xffff ||
78 coal->rx_max_coalesced_frames > 0xffff) 74 coal->rx_max_coalesced_frames > 0xffff)
@@ -85,8 +81,6 @@ static int ipoib_set_coalesce(struct net_device *dev,
85 return ret; 81 return ret;
86 } 82 }
87 83
88 coal->tx_coalesce_usecs = coal->rx_coalesce_usecs;
89 coal->tx_max_coalesced_frames = coal->rx_max_coalesced_frames;
90 priv->ethtool.coalesce_usecs = coal->rx_coalesce_usecs; 84 priv->ethtool.coalesce_usecs = coal->rx_coalesce_usecs;
91 priv->ethtool.max_coalesced_frames = coal->rx_max_coalesced_frames; 85 priv->ethtool.max_coalesced_frames = coal->rx_max_coalesced_frames;
92 86
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index 5f7a6fca0a4d..71237f8f78f7 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -128,6 +128,28 @@ static int iscsi_iser_pdu_alloc(struct iscsi_task *task, uint8_t opcode)
128 return 0; 128 return 0;
129} 129}
130 130
131int iser_initialize_task_headers(struct iscsi_task *task,
132 struct iser_tx_desc *tx_desc)
133{
134 struct iscsi_iser_conn *iser_conn = task->conn->dd_data;
135 struct iser_device *device = iser_conn->ib_conn->device;
136 struct iscsi_iser_task *iser_task = task->dd_data;
137 u64 dma_addr;
138
139 dma_addr = ib_dma_map_single(device->ib_device, (void *)tx_desc,
140 ISER_HEADERS_LEN, DMA_TO_DEVICE);
141 if (ib_dma_mapping_error(device->ib_device, dma_addr))
142 return -ENOMEM;
143
144 tx_desc->dma_addr = dma_addr;
145 tx_desc->tx_sg[0].addr = tx_desc->dma_addr;
146 tx_desc->tx_sg[0].length = ISER_HEADERS_LEN;
147 tx_desc->tx_sg[0].lkey = device->mr->lkey;
148
149 iser_task->headers_initialized = 1;
150 iser_task->iser_conn = iser_conn;
151 return 0;
152}
131/** 153/**
132 * iscsi_iser_task_init - Initialize task 154 * iscsi_iser_task_init - Initialize task
133 * @task: iscsi task 155 * @task: iscsi task
@@ -137,17 +159,17 @@ static int iscsi_iser_pdu_alloc(struct iscsi_task *task, uint8_t opcode)
137static int 159static int
138iscsi_iser_task_init(struct iscsi_task *task) 160iscsi_iser_task_init(struct iscsi_task *task)
139{ 161{
140 struct iscsi_iser_conn *iser_conn = task->conn->dd_data;
141 struct iscsi_iser_task *iser_task = task->dd_data; 162 struct iscsi_iser_task *iser_task = task->dd_data;
142 163
164 if (!iser_task->headers_initialized)
165 if (iser_initialize_task_headers(task, &iser_task->desc))
166 return -ENOMEM;
167
143 /* mgmt task */ 168 /* mgmt task */
144 if (!task->sc) { 169 if (!task->sc)
145 iser_task->desc.data = task->data;
146 return 0; 170 return 0;
147 }
148 171
149 iser_task->command_sent = 0; 172 iser_task->command_sent = 0;
150 iser_task->iser_conn = iser_conn;
151 iser_task_rdma_init(iser_task); 173 iser_task_rdma_init(iser_task);
152 return 0; 174 return 0;
153} 175}
@@ -168,7 +190,7 @@ iscsi_iser_mtask_xmit(struct iscsi_conn *conn, struct iscsi_task *task)
168{ 190{
169 int error = 0; 191 int error = 0;
170 192
171 iser_dbg("task deq [cid %d itt 0x%x]\n", conn->id, task->itt); 193 iser_dbg("mtask xmit [cid %d itt 0x%x]\n", conn->id, task->itt);
172 194
173 error = iser_send_control(conn, task); 195 error = iser_send_control(conn, task);
174 196
@@ -178,9 +200,6 @@ iscsi_iser_mtask_xmit(struct iscsi_conn *conn, struct iscsi_task *task)
178 * - if yes, the task is recycled at iscsi_complete_pdu 200 * - if yes, the task is recycled at iscsi_complete_pdu
179 * - if no, the task is recycled at iser_snd_completion 201 * - if no, the task is recycled at iser_snd_completion
180 */ 202 */
181 if (error && error != -ENOBUFS)
182 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
183
184 return error; 203 return error;
185} 204}
186 205
@@ -232,7 +251,7 @@ iscsi_iser_task_xmit(struct iscsi_task *task)
232 task->imm_count, task->unsol_r2t.data_length); 251 task->imm_count, task->unsol_r2t.data_length);
233 } 252 }
234 253
235 iser_dbg("task deq [cid %d itt 0x%x]\n", 254 iser_dbg("ctask xmit [cid %d itt 0x%x]\n",
236 conn->id, task->itt); 255 conn->id, task->itt);
237 256
238 /* Send the cmd PDU */ 257 /* Send the cmd PDU */
@@ -248,8 +267,6 @@ iscsi_iser_task_xmit(struct iscsi_task *task)
248 error = iscsi_iser_task_xmit_unsol_data(conn, task); 267 error = iscsi_iser_task_xmit_unsol_data(conn, task);
249 268
250 iscsi_iser_task_xmit_exit: 269 iscsi_iser_task_xmit_exit:
251 if (error && error != -ENOBUFS)
252 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
253 return error; 270 return error;
254} 271}
255 272
@@ -283,7 +300,7 @@ iscsi_iser_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
283 * due to issues with the login code re iser semantics 300
284 * this is not set in iscsi_conn_setup - FIXME 301
285 */ 302 */
286 conn->max_recv_dlength = 128; 303 conn->max_recv_dlength = ISER_RECV_DATA_SEG_LEN;
287 304
288 iser_conn = conn->dd_data; 305 iser_conn = conn->dd_data;
289 conn->dd_data = iser_conn; 306 conn->dd_data = iser_conn;
@@ -401,7 +418,7 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep,
401 struct Scsi_Host *shost; 418 struct Scsi_Host *shost;
402 struct iser_conn *ib_conn; 419 struct iser_conn *ib_conn;
403 420
404 shost = iscsi_host_alloc(&iscsi_iser_sht, 0, 1); 421 shost = iscsi_host_alloc(&iscsi_iser_sht, 0, 0);
405 if (!shost) 422 if (!shost)
406 return NULL; 423 return NULL;
407 shost->transportt = iscsi_iser_scsi_transport; 424 shost->transportt = iscsi_iser_scsi_transport;
@@ -675,7 +692,7 @@ static int __init iser_init(void)
675 memset(&ig, 0, sizeof(struct iser_global)); 692 memset(&ig, 0, sizeof(struct iser_global));
676 693
677 ig.desc_cache = kmem_cache_create("iser_descriptors", 694 ig.desc_cache = kmem_cache_create("iser_descriptors",
678 sizeof (struct iser_desc), 695 sizeof(struct iser_tx_desc),
679 0, SLAB_HWCACHE_ALIGN, 696 0, SLAB_HWCACHE_ALIGN,
680 NULL); 697 NULL);
681 if (ig.desc_cache == NULL) 698 if (ig.desc_cache == NULL)
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
index 9d529cae1f0d..036934cdcb92 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -102,9 +102,9 @@
102#define ISER_MAX_TX_MISC_PDUS 6 /* NOOP_OUT(2), TEXT(1), * 102#define ISER_MAX_TX_MISC_PDUS 6 /* NOOP_OUT(2), TEXT(1), *
103 * SCSI_TMFUNC(2), LOGOUT(1) */ 103 * SCSI_TMFUNC(2), LOGOUT(1) */
104 104
105#define ISER_QP_MAX_RECV_DTOS (ISCSI_DEF_XMIT_CMDS_MAX + \ 105#define ISER_QP_MAX_RECV_DTOS (ISCSI_DEF_XMIT_CMDS_MAX)
106 ISER_MAX_RX_MISC_PDUS + \ 106
107 ISER_MAX_TX_MISC_PDUS) 107#define ISER_MIN_POSTED_RX (ISCSI_DEF_XMIT_CMDS_MAX >> 2)
108 108
109/* the max TX (send) WR supported by the iSER QP is defined by * 109/* the max TX (send) WR supported by the iSER QP is defined by *
110 * max_send_wr = T * (1 + D) + C ; D is how many inflight dataouts we expect * 110 * max_send_wr = T * (1 + D) + C ; D is how many inflight dataouts we expect *
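A worked sizing example, assuming ISCSI_DEF_XMIT_CMDS_MAX is 64 (its libiscsi default, commented there as "must be power of 2"): the receive ring holds 64 descriptors and ISER_MIN_POSTED_RX is 64 >> 2 = 16, so the completion path refills receives in batches of at most 16 once at least 16 have been consumed. The ring-index masking in iser_post_recvm (iser_verbs.c, below) also depends on this constant being a power of two.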
@@ -132,6 +132,12 @@ struct iser_hdr {
132 __be64 read_va; 132 __be64 read_va;
133} __attribute__((packed)); 133} __attribute__((packed));
134 134
135/* Constant PDU lengths calculations */
136#define ISER_HEADERS_LEN (sizeof(struct iser_hdr) + sizeof(struct iscsi_hdr))
137
138#define ISER_RECV_DATA_SEG_LEN 128
139#define ISER_RX_PAYLOAD_SIZE (ISER_HEADERS_LEN + ISER_RECV_DATA_SEG_LEN)
140#define ISER_RX_LOGIN_SIZE (ISER_HEADERS_LEN + ISCSI_DEF_MAX_RECV_SEG_LEN)
135 141
136/* Length of an object name string */ 142/* Length of an object name string */
137#define ISER_OBJECT_NAME_SIZE 64 143#define ISER_OBJECT_NAME_SIZE 64
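For concreteness, assuming the usual sizes at the time (28-byte struct iser_hdr, 48-byte iSCSI BHS, 8192-byte ISCSI_DEF_MAX_RECV_SEG_LEN): ISER_HEADERS_LEN is 76 bytes, ISER_RX_PAYLOAD_SIZE is 76 + 128 = 204 bytes for every data-path receive buffer, and ISER_RX_LOGIN_SIZE is 76 + 8192 = 8268 bytes for the single login buffer.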
@@ -187,51 +193,43 @@ struct iser_regd_buf {
187 struct iser_mem_reg reg; /* memory registration info */ 193 struct iser_mem_reg reg; /* memory registration info */
188 void *virt_addr; 194 void *virt_addr;
189 struct iser_device *device; /* device->device for dma_unmap */ 195 struct iser_device *device; /* device->device for dma_unmap */
190 u64 dma_addr; /* if non zero, addr for dma_unmap */
191 enum dma_data_direction direction; /* direction for dma_unmap */ 196 enum dma_data_direction direction; /* direction for dma_unmap */
192 unsigned int data_size; 197 unsigned int data_size;
193 atomic_t ref_count; /* refcount, freed when dec to 0 */
194};
195
196#define MAX_REGD_BUF_VECTOR_LEN 2
197
198struct iser_dto {
199 struct iscsi_iser_task *task;
200 struct iser_conn *ib_conn;
201 int notify_enable;
202
203 /* vector of registered buffers */
204 unsigned int regd_vector_len;
205 struct iser_regd_buf *regd[MAX_REGD_BUF_VECTOR_LEN];
206
207 /* offset into the registered buffer may be specified */
208 unsigned int offset[MAX_REGD_BUF_VECTOR_LEN];
209
210 /* a smaller size may be specified, if 0, then full size is used */
211 unsigned int used_sz[MAX_REGD_BUF_VECTOR_LEN];
212}; 198};
213 199
214enum iser_desc_type { 200enum iser_desc_type {
215 ISCSI_RX,
216 ISCSI_TX_CONTROL , 201 ISCSI_TX_CONTROL ,
217 ISCSI_TX_SCSI_COMMAND, 202 ISCSI_TX_SCSI_COMMAND,
218 ISCSI_TX_DATAOUT 203 ISCSI_TX_DATAOUT
219}; 204};
220 205
221struct iser_desc { 206struct iser_tx_desc {
222 struct iser_hdr iser_header; 207 struct iser_hdr iser_header;
223 struct iscsi_hdr iscsi_header; 208 struct iscsi_hdr iscsi_header;
224 struct iser_regd_buf hdr_regd_buf;
225 void *data; /* used by RX & TX_CONTROL */
226 struct iser_regd_buf data_regd_buf; /* used by RX & TX_CONTROL */
227 enum iser_desc_type type; 209 enum iser_desc_type type;
228 struct iser_dto dto; 210 u64 dma_addr;
 211 /* sg[0] points to the iser/iscsi headers; sg[1] optionally points to
 212 immediate data, unsolicited data-out, or control (login/text) payload */
213 struct ib_sge tx_sg[2];
214 int num_sge;
229}; 215};
230 216
217#define ISER_RX_PAD_SIZE (256 - (ISER_RX_PAYLOAD_SIZE + \
218 sizeof(u64) + sizeof(struct ib_sge)))
219struct iser_rx_desc {
220 struct iser_hdr iser_header;
221 struct iscsi_hdr iscsi_header;
222 char data[ISER_RECV_DATA_SEG_LEN];
223 u64 dma_addr;
224 struct ib_sge rx_sg;
225 char pad[ISER_RX_PAD_SIZE];
226} __attribute__((packed));
227
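With the sizes assumed above and a 16-byte struct ib_sge, ISER_RX_PAD_SIZE works out to 256 - (204 + 8 + 16) = 28, so each receive descriptor is exactly 256 bytes and the rx_descs array stays nicely aligned. A one-line compile-time guard for that invariant, not in the patch itself, could sit in any function:

  BUILD_BUG_ON(sizeof(struct iser_rx_desc) != 256);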
231struct iser_device { 228struct iser_device {
232 struct ib_device *ib_device; 229 struct ib_device *ib_device;
233 struct ib_pd *pd; 230 struct ib_pd *pd;
234 struct ib_cq *cq; 231 struct ib_cq *rx_cq;
232 struct ib_cq *tx_cq;
235 struct ib_mr *mr; 233 struct ib_mr *mr;
236 struct tasklet_struct cq_tasklet; 234 struct tasklet_struct cq_tasklet;
237 struct list_head ig_list; /* entry in ig devices list */ 235 struct list_head ig_list; /* entry in ig devices list */
@@ -250,15 +248,18 @@ struct iser_conn {
250 struct ib_fmr_pool *fmr_pool; /* pool of IB FMRs */ 248 struct ib_fmr_pool *fmr_pool; /* pool of IB FMRs */
251 int disc_evt_flag; /* disconn event delivered */ 249 int disc_evt_flag; /* disconn event delivered */
252 wait_queue_head_t wait; /* waitq for conn/disconn */ 250 wait_queue_head_t wait; /* waitq for conn/disconn */
253 atomic_t post_recv_buf_count; /* posted rx count */ 251 int post_recv_buf_count; /* posted rx count */
254 atomic_t post_send_buf_count; /* posted tx count */ 252 atomic_t post_send_buf_count; /* posted tx count */
255 atomic_t unexpected_pdu_count;/* count of received *
256 * unexpected pdus *
257 * not yet retired */
258 char name[ISER_OBJECT_NAME_SIZE]; 253 char name[ISER_OBJECT_NAME_SIZE];
259 struct iser_page_vec *page_vec; /* represents SG to fmr maps* 254 struct iser_page_vec *page_vec; /* represents SG to fmr maps*
260 * maps serialized as tx is*/ 255 * maps serialized as tx is*/
261 struct list_head conn_list; /* entry in ig conn list */ 256 struct list_head conn_list; /* entry in ig conn list */
257
258 char *login_buf;
259 u64 login_dma;
260 unsigned int rx_desc_head;
261 struct iser_rx_desc *rx_descs;
262 struct ib_recv_wr rx_wr[ISER_MIN_POSTED_RX];
262}; 263};
263 264
264struct iscsi_iser_conn { 265struct iscsi_iser_conn {
@@ -267,7 +268,7 @@ struct iscsi_iser_conn {
267}; 268};
268 269
269struct iscsi_iser_task { 270struct iscsi_iser_task {
270 struct iser_desc desc; 271 struct iser_tx_desc desc;
271 struct iscsi_iser_conn *iser_conn; 272 struct iscsi_iser_conn *iser_conn;
272 enum iser_task_status status; 273 enum iser_task_status status;
273 int command_sent; /* set if command sent */ 274 int command_sent; /* set if command sent */
@@ -275,6 +276,7 @@ struct iscsi_iser_task {
275 struct iser_regd_buf rdma_regd[ISER_DIRS_NUM];/* regd rdma buf */ 276 struct iser_regd_buf rdma_regd[ISER_DIRS_NUM];/* regd rdma buf */
276 struct iser_data_buf data[ISER_DIRS_NUM]; /* orig. data des*/ 277 struct iser_data_buf data[ISER_DIRS_NUM]; /* orig. data des*/
277 struct iser_data_buf data_copy[ISER_DIRS_NUM];/* contig. copy */ 278 struct iser_data_buf data_copy[ISER_DIRS_NUM];/* contig. copy */
279 int headers_initialized;
278}; 280};
279 281
280struct iser_page_vec { 282struct iser_page_vec {
@@ -322,22 +324,17 @@ void iser_conn_put(struct iser_conn *ib_conn);
322 324
323void iser_conn_terminate(struct iser_conn *ib_conn); 325void iser_conn_terminate(struct iser_conn *ib_conn);
324 326
325void iser_rcv_completion(struct iser_desc *desc, 327void iser_rcv_completion(struct iser_rx_desc *desc,
326 unsigned long dto_xfer_len); 328 unsigned long dto_xfer_len,
329 struct iser_conn *ib_conn);
327 330
328void iser_snd_completion(struct iser_desc *desc); 331void iser_snd_completion(struct iser_tx_desc *desc, struct iser_conn *ib_conn);
329 332
330void iser_task_rdma_init(struct iscsi_iser_task *task); 333void iser_task_rdma_init(struct iscsi_iser_task *task);
331 334
332void iser_task_rdma_finalize(struct iscsi_iser_task *task); 335void iser_task_rdma_finalize(struct iscsi_iser_task *task);
333 336
334void iser_dto_buffs_release(struct iser_dto *dto); 337void iser_free_rx_descriptors(struct iser_conn *ib_conn);
335
336int iser_regd_buff_release(struct iser_regd_buf *regd_buf);
337
338void iser_reg_single(struct iser_device *device,
339 struct iser_regd_buf *regd_buf,
340 enum dma_data_direction direction);
341 338
342void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *task, 339void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *task,
343 enum iser_data_dir cmd_dir); 340 enum iser_data_dir cmd_dir);
@@ -356,11 +353,9 @@ int iser_reg_page_vec(struct iser_conn *ib_conn,
356 353
357void iser_unreg_mem(struct iser_mem_reg *mem_reg); 354void iser_unreg_mem(struct iser_mem_reg *mem_reg);
358 355
359int iser_post_recv(struct iser_desc *rx_desc); 356int iser_post_recvl(struct iser_conn *ib_conn);
360int iser_post_send(struct iser_desc *tx_desc); 357int iser_post_recvm(struct iser_conn *ib_conn, int count);
361 358int iser_post_send(struct iser_conn *ib_conn, struct iser_tx_desc *tx_desc);
362int iser_conn_state_comp(struct iser_conn *ib_conn,
363 enum iser_ib_conn_state comp);
364 359
365int iser_dma_map_task_data(struct iscsi_iser_task *iser_task, 360int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
366 struct iser_data_buf *data, 361 struct iser_data_buf *data,
@@ -368,4 +363,6 @@ int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
368 enum dma_data_direction dma_dir); 363 enum dma_data_direction dma_dir);
369 364
370void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task); 365void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task);
366int iser_initialize_task_headers(struct iscsi_task *task,
367 struct iser_tx_desc *tx_desc);
371#endif 368#endif
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
index 9de640200ad3..0b9ef0716588 100644
--- a/drivers/infiniband/ulp/iser/iser_initiator.c
+++ b/drivers/infiniband/ulp/iser/iser_initiator.c
@@ -39,29 +39,6 @@
39 39
40#include "iscsi_iser.h" 40#include "iscsi_iser.h"
41 41
42/* Constant PDU lengths calculations */
43#define ISER_TOTAL_HEADERS_LEN (sizeof (struct iser_hdr) + \
44 sizeof (struct iscsi_hdr))
45
46/* iser_dto_add_regd_buff - increments the reference count for *
47 * the registered buffer & adds it to the DTO object */
48static void iser_dto_add_regd_buff(struct iser_dto *dto,
49 struct iser_regd_buf *regd_buf,
50 unsigned long use_offset,
51 unsigned long use_size)
52{
53 int add_idx;
54
55 atomic_inc(&regd_buf->ref_count);
56
57 add_idx = dto->regd_vector_len;
58 dto->regd[add_idx] = regd_buf;
59 dto->used_sz[add_idx] = use_size;
60 dto->offset[add_idx] = use_offset;
61
62 dto->regd_vector_len++;
63}
64
65/* Register user buffer memory and initialize passive rdma 42/* Register user buffer memory and initialize passive rdma
66 * dto descriptor. Total data size is stored in 43 * dto descriptor. Total data size is stored in
67 * iser_task->data[ISER_DIR_IN].data_len 44 * iser_task->data[ISER_DIR_IN].data_len
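With the DTO layer gone, where the old code called iser_dto_add_regd_buff(send_dto, regd_buf, off, len) and later flattened the vector into an IOV, the rewritten send paths below simply write the second scatter entry in place: tx_sg[1].addr = reg.va + off, tx_sg[1].length = len, tx_sg[1].lkey = reg.lkey, with num_sge = 2.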
@@ -122,9 +99,9 @@ iser_prepare_write_cmd(struct iscsi_task *task,
122 struct iscsi_iser_task *iser_task = task->dd_data; 99 struct iscsi_iser_task *iser_task = task->dd_data;
123 struct iser_regd_buf *regd_buf; 100 struct iser_regd_buf *regd_buf;
124 int err; 101 int err;
125 struct iser_dto *send_dto = &iser_task->desc.dto;
126 struct iser_hdr *hdr = &iser_task->desc.iser_header; 102 struct iser_hdr *hdr = &iser_task->desc.iser_header;
127 struct iser_data_buf *buf_out = &iser_task->data[ISER_DIR_OUT]; 103 struct iser_data_buf *buf_out = &iser_task->data[ISER_DIR_OUT];
104 struct ib_sge *tx_dsg = &iser_task->desc.tx_sg[1];
128 105
129 err = iser_dma_map_task_data(iser_task, 106 err = iser_dma_map_task_data(iser_task,
130 buf_out, 107 buf_out,
@@ -163,135 +140,100 @@ iser_prepare_write_cmd(struct iscsi_task *task,
163 if (imm_sz > 0) { 140 if (imm_sz > 0) {
164 iser_dbg("Cmd itt:%d, WRITE, adding imm.data sz: %d\n", 141 iser_dbg("Cmd itt:%d, WRITE, adding imm.data sz: %d\n",
165 task->itt, imm_sz); 142 task->itt, imm_sz);
166 iser_dto_add_regd_buff(send_dto, 143 tx_dsg->addr = regd_buf->reg.va;
167 regd_buf, 144 tx_dsg->length = imm_sz;
168 0, 145 tx_dsg->lkey = regd_buf->reg.lkey;
169 imm_sz); 146 iser_task->desc.num_sge = 2;
170 } 147 }
171 148
172 return 0; 149 return 0;
173} 150}
174 151
175/** 152/* creates a new tx descriptor and adds header regd buffer */
176 * iser_post_receive_control - allocates, initializes and posts receive DTO. 153static void iser_create_send_desc(struct iser_conn *ib_conn,
177 */ 154 struct iser_tx_desc *tx_desc)
178static int iser_post_receive_control(struct iscsi_conn *conn)
179{ 155{
180 struct iscsi_iser_conn *iser_conn = conn->dd_data; 156 struct iser_device *device = ib_conn->device;
181 struct iser_desc *rx_desc;
182 struct iser_regd_buf *regd_hdr;
183 struct iser_regd_buf *regd_data;
184 struct iser_dto *recv_dto = NULL;
185 struct iser_device *device = iser_conn->ib_conn->device;
186 int rx_data_size, err;
187 int posts, outstanding_unexp_pdus;
188
189 /* for the login sequence we must support rx of upto 8K; login is done
190 * after conn create/bind (connect) and conn stop/bind (reconnect),
191 * what's common for both schemes is that the connection is not started
192 */
193 if (conn->c_stage != ISCSI_CONN_STARTED)
194 rx_data_size = ISCSI_DEF_MAX_RECV_SEG_LEN;
195 else /* FIXME till user space sets conn->max_recv_dlength correctly */
196 rx_data_size = 128;
197
198 outstanding_unexp_pdus =
199 atomic_xchg(&iser_conn->ib_conn->unexpected_pdu_count, 0);
200
201 /*
202 * in addition to the response buffer, replace those consumed by
203 * unexpected pdus.
204 */
205 for (posts = 0; posts < 1 + outstanding_unexp_pdus; posts++) {
206 rx_desc = kmem_cache_alloc(ig.desc_cache, GFP_NOIO);
207 if (rx_desc == NULL) {
208 iser_err("Failed to alloc desc for post recv %d\n",
209 posts);
210 err = -ENOMEM;
211 goto post_rx_cache_alloc_failure;
212 }
213 rx_desc->type = ISCSI_RX;
214 rx_desc->data = kmalloc(rx_data_size, GFP_NOIO);
215 if (rx_desc->data == NULL) {
216 iser_err("Failed to alloc data buf for post recv %d\n",
217 posts);
218 err = -ENOMEM;
219 goto post_rx_kmalloc_failure;
220 }
221
222 recv_dto = &rx_desc->dto;
223 recv_dto->ib_conn = iser_conn->ib_conn;
224 recv_dto->regd_vector_len = 0;
225 157
226 regd_hdr = &rx_desc->hdr_regd_buf; 158 ib_dma_sync_single_for_cpu(device->ib_device,
227 memset(regd_hdr, 0, sizeof(struct iser_regd_buf)); 159 tx_desc->dma_addr, ISER_HEADERS_LEN, DMA_TO_DEVICE);
228 regd_hdr->device = device;
229 regd_hdr->virt_addr = rx_desc; /* == &rx_desc->iser_header */
230 regd_hdr->data_size = ISER_TOTAL_HEADERS_LEN;
231 160
232 iser_reg_single(device, regd_hdr, DMA_FROM_DEVICE); 161 memset(&tx_desc->iser_header, 0, sizeof(struct iser_hdr));
233 162 tx_desc->iser_header.flags = ISER_VER;
234 iser_dto_add_regd_buff(recv_dto, regd_hdr, 0, 0);
235 163
236 regd_data = &rx_desc->data_regd_buf; 164 tx_desc->num_sge = 1;
237 memset(regd_data, 0, sizeof(struct iser_regd_buf));
238 regd_data->device = device;
239 regd_data->virt_addr = rx_desc->data;
240 regd_data->data_size = rx_data_size;
241 165
242 iser_reg_single(device, regd_data, DMA_FROM_DEVICE); 166 if (tx_desc->tx_sg[0].lkey != device->mr->lkey) {
167 tx_desc->tx_sg[0].lkey = device->mr->lkey;
168 iser_dbg("sdesc %p lkey mismatch, fixing\n", tx_desc);
169 }
170}
243 171
244 iser_dto_add_regd_buff(recv_dto, regd_data, 0, 0);
245 172
246 err = iser_post_recv(rx_desc); 173int iser_alloc_rx_descriptors(struct iser_conn *ib_conn)
247 if (err) { 174{
248 iser_err("Failed iser_post_recv for post %d\n", posts); 175 int i, j;
249 goto post_rx_post_recv_failure; 176 u64 dma_addr;
250 } 177 struct iser_rx_desc *rx_desc;
178 struct ib_sge *rx_sg;
179 struct iser_device *device = ib_conn->device;
180
181 ib_conn->rx_descs = kmalloc(ISER_QP_MAX_RECV_DTOS *
182 sizeof(struct iser_rx_desc), GFP_KERNEL);
183 if (!ib_conn->rx_descs)
184 goto rx_desc_alloc_fail;
185
186 rx_desc = ib_conn->rx_descs;
187
188 for (i = 0; i < ISER_QP_MAX_RECV_DTOS; i++, rx_desc++) {
189 dma_addr = ib_dma_map_single(device->ib_device, (void *)rx_desc,
190 ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
191 if (ib_dma_mapping_error(device->ib_device, dma_addr))
192 goto rx_desc_dma_map_failed;
193
194 rx_desc->dma_addr = dma_addr;
195
196 rx_sg = &rx_desc->rx_sg;
197 rx_sg->addr = rx_desc->dma_addr;
198 rx_sg->length = ISER_RX_PAYLOAD_SIZE;
199 rx_sg->lkey = device->mr->lkey;
251 } 200 }
252 /* all posts successful */
253 return 0;
254 201
255post_rx_post_recv_failure: 202 ib_conn->rx_desc_head = 0;
256 iser_dto_buffs_release(recv_dto); 203 return 0;
257 kfree(rx_desc->data);
258post_rx_kmalloc_failure:
259 kmem_cache_free(ig.desc_cache, rx_desc);
260post_rx_cache_alloc_failure:
261 if (posts > 0) {
262 /*
263 * response buffer posted, but did not replace all unexpected
264 * pdu recv bufs. Ignore error, retry occurs next send
265 */
266 outstanding_unexp_pdus -= (posts - 1);
267 err = 0;
268 }
269 atomic_add(outstanding_unexp_pdus,
270 &iser_conn->ib_conn->unexpected_pdu_count);
271 204
272 return err; 205rx_desc_dma_map_failed:
206 rx_desc = ib_conn->rx_descs;
207 for (j = 0; j < i; j++, rx_desc++)
208 ib_dma_unmap_single(device->ib_device, rx_desc->dma_addr,
209 ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
210 kfree(ib_conn->rx_descs);
211 ib_conn->rx_descs = NULL;
212rx_desc_alloc_fail:
213 iser_err("failed allocating rx descriptors / data buffers\n");
214 return -ENOMEM;
273} 215}
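The unwind here is exact: if ib_dma_map_single fails at index i, the loop under rx_desc_dma_map_failed unmaps precisely the i descriptors already mapped (j = 0 .. i-1) before freeing the array, so no mapping is leaked and none is unmapped twice.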
274 216
275/* creates a new tx descriptor and adds header regd buffer */ 217void iser_free_rx_descriptors(struct iser_conn *ib_conn)
276static void iser_create_send_desc(struct iscsi_iser_conn *iser_conn,
277 struct iser_desc *tx_desc)
278{ 218{
279 struct iser_regd_buf *regd_hdr = &tx_desc->hdr_regd_buf; 219 int i;
280 struct iser_dto *send_dto = &tx_desc->dto; 220 struct iser_rx_desc *rx_desc;
221 struct iser_device *device = ib_conn->device;
281 222
282 memset(regd_hdr, 0, sizeof(struct iser_regd_buf)); 223 if (ib_conn->login_buf) {
283 regd_hdr->device = iser_conn->ib_conn->device; 224 ib_dma_unmap_single(device->ib_device, ib_conn->login_dma,
284 regd_hdr->virt_addr = tx_desc; /* == &tx_desc->iser_header */ 225 ISER_RX_LOGIN_SIZE, DMA_FROM_DEVICE);
285 regd_hdr->data_size = ISER_TOTAL_HEADERS_LEN; 226 kfree(ib_conn->login_buf);
227 }
286 228
287 send_dto->ib_conn = iser_conn->ib_conn; 229 if (!ib_conn->rx_descs)
288 send_dto->notify_enable = 1; 230 return;
289 send_dto->regd_vector_len = 0;
290 231
291 memset(&tx_desc->iser_header, 0, sizeof(struct iser_hdr)); 232 rx_desc = ib_conn->rx_descs;
292 tx_desc->iser_header.flags = ISER_VER; 233 for (i = 0; i < ISER_QP_MAX_RECV_DTOS; i++, rx_desc++)
293 234 ib_dma_unmap_single(device->ib_device, rx_desc->dma_addr,
294 iser_dto_add_regd_buff(send_dto, regd_hdr, 0, 0); 235 ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
236 kfree(ib_conn->rx_descs);
295} 237}
296 238
297/** 239/**
@@ -301,46 +243,23 @@ int iser_conn_set_full_featured_mode(struct iscsi_conn *conn)
301{ 243{
302 struct iscsi_iser_conn *iser_conn = conn->dd_data; 244 struct iscsi_iser_conn *iser_conn = conn->dd_data;
303 245
304 int i; 246 iser_dbg("Initially post: %d\n", ISER_MIN_POSTED_RX);
305 /*
306 * FIXME this value should be declared to the target during login with
307 * the MaxOutstandingUnexpectedPDUs key when supported
308 */
309 int initial_post_recv_bufs_num = ISER_MAX_RX_MISC_PDUS;
310
311 iser_dbg("Initially post: %d\n", initial_post_recv_bufs_num);
312 247
313 /* Check that there is no posted recv or send buffers left - */ 248 /* Check that there is no posted recv or send buffers left - */
314 /* they must be consumed during the login phase */ 249 /* they must be consumed during the login phase */
315 BUG_ON(atomic_read(&iser_conn->ib_conn->post_recv_buf_count) != 0); 250 BUG_ON(iser_conn->ib_conn->post_recv_buf_count != 0);
316 BUG_ON(atomic_read(&iser_conn->ib_conn->post_send_buf_count) != 0); 251 BUG_ON(atomic_read(&iser_conn->ib_conn->post_send_buf_count) != 0);
317 252
318 /* Initial post receive buffers */ 253 if (iser_alloc_rx_descriptors(iser_conn->ib_conn))
319 for (i = 0; i < initial_post_recv_bufs_num; i++) { 254 return -ENOMEM;
320 if (iser_post_receive_control(conn) != 0) {
321 iser_err("Failed to post recv bufs at:%d conn:0x%p\n",
322 i, conn);
323 return -ENOMEM;
324 }
325 }
326 iser_dbg("Posted %d post recv bufs, conn:0x%p\n", i, conn);
327 return 0;
328}
329 255
330static int 256 /* Initial post receive buffers */
331iser_check_xmit(struct iscsi_conn *conn, void *task) 257 if (iser_post_recvm(iser_conn->ib_conn, ISER_MIN_POSTED_RX))
332{ 258 return -ENOMEM;
333 struct iscsi_iser_conn *iser_conn = conn->dd_data;
334 259
335 if (atomic_read(&iser_conn->ib_conn->post_send_buf_count) ==
336 ISER_QP_MAX_REQ_DTOS) {
337 iser_dbg("%ld can't xmit task %p\n",jiffies,task);
338 return -ENOBUFS;
339 }
340 return 0; 260 return 0;
341} 261}
342 262
343
344/** 263/**
345 * iser_send_command - send command PDU 264 * iser_send_command - send command PDU
346 */ 265 */
@@ -349,27 +268,18 @@ int iser_send_command(struct iscsi_conn *conn,
349{ 268{
350 struct iscsi_iser_conn *iser_conn = conn->dd_data; 269 struct iscsi_iser_conn *iser_conn = conn->dd_data;
351 struct iscsi_iser_task *iser_task = task->dd_data; 270 struct iscsi_iser_task *iser_task = task->dd_data;
352 struct iser_dto *send_dto = NULL;
353 unsigned long edtl; 271 unsigned long edtl;
354 int err = 0; 272 int err;
355 struct iser_data_buf *data_buf; 273 struct iser_data_buf *data_buf;
356 struct iscsi_cmd *hdr = (struct iscsi_cmd *)task->hdr; 274 struct iscsi_cmd *hdr = (struct iscsi_cmd *)task->hdr;
357 struct scsi_cmnd *sc = task->sc; 275 struct scsi_cmnd *sc = task->sc;
358 276 struct iser_tx_desc *tx_desc = &iser_task->desc;
359 if (!iser_conn_state_comp(iser_conn->ib_conn, ISER_CONN_UP)) {
360 iser_err("Failed to send, conn: 0x%p is not up\n", iser_conn->ib_conn);
361 return -EPERM;
362 }
363 if (iser_check_xmit(conn, task))
364 return -ENOBUFS;
365 277
366 edtl = ntohl(hdr->data_length); 278 edtl = ntohl(hdr->data_length);
367 279
368 /* build the tx desc regd header and add it to the tx desc dto */ 280 /* build the tx desc regd header and add it to the tx desc dto */
369 iser_task->desc.type = ISCSI_TX_SCSI_COMMAND; 281 tx_desc->type = ISCSI_TX_SCSI_COMMAND;
370 send_dto = &iser_task->desc.dto; 282 iser_create_send_desc(iser_conn->ib_conn, tx_desc);
371 send_dto->task = iser_task;
372 iser_create_send_desc(iser_conn, &iser_task->desc);
373 283
374 if (hdr->flags & ISCSI_FLAG_CMD_READ) 284 if (hdr->flags & ISCSI_FLAG_CMD_READ)
375 data_buf = &iser_task->data[ISER_DIR_IN]; 285 data_buf = &iser_task->data[ISER_DIR_IN];
@@ -398,23 +308,13 @@ int iser_send_command(struct iscsi_conn *conn,
398 goto send_command_error; 308 goto send_command_error;
399 } 309 }
400 310
401 iser_reg_single(iser_conn->ib_conn->device,
402 send_dto->regd[0], DMA_TO_DEVICE);
403
404 if (iser_post_receive_control(conn) != 0) {
405 iser_err("post_recv failed!\n");
406 err = -ENOMEM;
407 goto send_command_error;
408 }
409
410 iser_task->status = ISER_TASK_STATUS_STARTED; 311 iser_task->status = ISER_TASK_STATUS_STARTED;
411 312
412 err = iser_post_send(&iser_task->desc); 313 err = iser_post_send(iser_conn->ib_conn, tx_desc);
413 if (!err) 314 if (!err)
414 return 0; 315 return 0;
415 316
416send_command_error: 317send_command_error:
417 iser_dto_buffs_release(send_dto);
418 iser_err("conn %p failed task->itt %d err %d\n",conn, task->itt, err); 318 iser_err("conn %p failed task->itt %d err %d\n",conn, task->itt, err);
419 return err; 319 return err;
420} 320}
@@ -428,20 +328,13 @@ int iser_send_data_out(struct iscsi_conn *conn,
428{ 328{
429 struct iscsi_iser_conn *iser_conn = conn->dd_data; 329 struct iscsi_iser_conn *iser_conn = conn->dd_data;
430 struct iscsi_iser_task *iser_task = task->dd_data; 330 struct iscsi_iser_task *iser_task = task->dd_data;
431 struct iser_desc *tx_desc = NULL; 331 struct iser_tx_desc *tx_desc = NULL;
432 struct iser_dto *send_dto = NULL; 332 struct iser_regd_buf *regd_buf;
433 unsigned long buf_offset; 333 unsigned long buf_offset;
434 unsigned long data_seg_len; 334 unsigned long data_seg_len;
435 uint32_t itt; 335 uint32_t itt;
436 int err = 0; 336 int err = 0;
437 337 struct ib_sge *tx_dsg;
438 if (!iser_conn_state_comp(iser_conn->ib_conn, ISER_CONN_UP)) {
439 iser_err("Failed to send, conn: 0x%p is not up\n", iser_conn->ib_conn);
440 return -EPERM;
441 }
442
443 if (iser_check_xmit(conn, task))
444 return -ENOBUFS;
445 338
446 itt = (__force uint32_t)hdr->itt; 339 itt = (__force uint32_t)hdr->itt;
447 data_seg_len = ntoh24(hdr->dlength); 340 data_seg_len = ntoh24(hdr->dlength);
@@ -450,28 +343,25 @@ int iser_send_data_out(struct iscsi_conn *conn,
450 iser_dbg("%s itt %d dseg_len %d offset %d\n", 343 iser_dbg("%s itt %d dseg_len %d offset %d\n",
451 __func__,(int)itt,(int)data_seg_len,(int)buf_offset); 344 __func__,(int)itt,(int)data_seg_len,(int)buf_offset);
452 345
453 tx_desc = kmem_cache_alloc(ig.desc_cache, GFP_NOIO); 346 tx_desc = kmem_cache_zalloc(ig.desc_cache, GFP_ATOMIC);
454 if (tx_desc == NULL) { 347 if (tx_desc == NULL) {
455 iser_err("Failed to alloc desc for post dataout\n"); 348 iser_err("Failed to alloc desc for post dataout\n");
456 return -ENOMEM; 349 return -ENOMEM;
457 } 350 }
458 351
459 tx_desc->type = ISCSI_TX_DATAOUT; 352 tx_desc->type = ISCSI_TX_DATAOUT;
353 tx_desc->iser_header.flags = ISER_VER;
460 memcpy(&tx_desc->iscsi_header, hdr, sizeof(struct iscsi_hdr)); 354 memcpy(&tx_desc->iscsi_header, hdr, sizeof(struct iscsi_hdr));
461 355
462 /* build the tx desc regd header and add it to the tx desc dto */ 356 /* build the tx desc */
463 send_dto = &tx_desc->dto; 357 iser_initialize_task_headers(task, tx_desc);
464 send_dto->task = iser_task;
465 iser_create_send_desc(iser_conn, tx_desc);
466
467 iser_reg_single(iser_conn->ib_conn->device,
468 send_dto->regd[0], DMA_TO_DEVICE);
469 358
470 /* all data was registered for RDMA, we can use the lkey */ 359 regd_buf = &iser_task->rdma_regd[ISER_DIR_OUT];
471 iser_dto_add_regd_buff(send_dto, 360 tx_dsg = &tx_desc->tx_sg[1];
472 &iser_task->rdma_regd[ISER_DIR_OUT], 361 tx_dsg->addr = regd_buf->reg.va + buf_offset;
473 buf_offset, 362 tx_dsg->length = data_seg_len;
474 data_seg_len); 363 tx_dsg->lkey = regd_buf->reg.lkey;
364 tx_desc->num_sge = 2;
475 365
476 if (buf_offset + data_seg_len > iser_task->data[ISER_DIR_OUT].data_len) { 366 if (buf_offset + data_seg_len > iser_task->data[ISER_DIR_OUT].data_len) {
477 iser_err("Offset:%ld & DSL:%ld in Data-Out " 367 iser_err("Offset:%ld & DSL:%ld in Data-Out "
@@ -485,12 +375,11 @@ int iser_send_data_out(struct iscsi_conn *conn,
485 itt, buf_offset, data_seg_len); 375 itt, buf_offset, data_seg_len);
486 376
487 377
488 err = iser_post_send(tx_desc); 378 err = iser_post_send(iser_conn->ib_conn, tx_desc);
489 if (!err) 379 if (!err)
490 return 0; 380 return 0;
491 381
492send_data_out_error: 382send_data_out_error:
493 iser_dto_buffs_release(send_dto);
494 kmem_cache_free(ig.desc_cache, tx_desc); 383 kmem_cache_free(ig.desc_cache, tx_desc);
495 iser_err("conn %p failed err %d\n",conn, err); 384 iser_err("conn %p failed err %d\n",conn, err);
496 return err; 385 return err;
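Both the command and data-out paths now aim tx_sg[1] directly into the RDMA-registered region. A hypothetical helper showing the shared pattern (the name and signature are illustrative; the driver open-codes this):

  static void iser_sge_from_reg(struct ib_sge *sge, struct iser_mem_reg *reg,
                                unsigned long offset, unsigned long len)
  {
          sge->addr = reg->va + offset;   /* byte offset into the region */
          sge->length = len;
          sge->lkey = reg->lkey;          /* lkey obtained at registration */
  }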
@@ -501,64 +390,44 @@ int iser_send_control(struct iscsi_conn *conn,
501{ 390{
502 struct iscsi_iser_conn *iser_conn = conn->dd_data; 391 struct iscsi_iser_conn *iser_conn = conn->dd_data;
503 struct iscsi_iser_task *iser_task = task->dd_data; 392 struct iscsi_iser_task *iser_task = task->dd_data;
504 struct iser_desc *mdesc = &iser_task->desc; 393 struct iser_tx_desc *mdesc = &iser_task->desc;
505 struct iser_dto *send_dto = NULL;
506 unsigned long data_seg_len; 394 unsigned long data_seg_len;
507 int err = 0; 395 int err = 0;
508 struct iser_regd_buf *regd_buf;
509 struct iser_device *device; 396 struct iser_device *device;
510 unsigned char opcode;
511
512 if (!iser_conn_state_comp(iser_conn->ib_conn, ISER_CONN_UP)) {
513 iser_err("Failed to send, conn: 0x%p is not up\n", iser_conn->ib_conn);
514 return -EPERM;
515 }
516
517 if (iser_check_xmit(conn, task))
518 return -ENOBUFS;
519 397
520 /* build the tx desc regd header and add it to the tx desc dto */ 398 /* build the tx desc regd header and add it to the tx desc dto */
521 mdesc->type = ISCSI_TX_CONTROL; 399 mdesc->type = ISCSI_TX_CONTROL;
522 send_dto = &mdesc->dto; 400 iser_create_send_desc(iser_conn->ib_conn, mdesc);
523 send_dto->task = NULL;
524 iser_create_send_desc(iser_conn, mdesc);
525 401
526 device = iser_conn->ib_conn->device; 402 device = iser_conn->ib_conn->device;
527 403
528 iser_reg_single(device, send_dto->regd[0], DMA_TO_DEVICE);
529
530 data_seg_len = ntoh24(task->hdr->dlength); 404 data_seg_len = ntoh24(task->hdr->dlength);
531 405
532 if (data_seg_len > 0) { 406 if (data_seg_len > 0) {
533 regd_buf = &mdesc->data_regd_buf; 407 struct ib_sge *tx_dsg = &mdesc->tx_sg[1];
534 memset(regd_buf, 0, sizeof(struct iser_regd_buf)); 408 if (task != conn->login_task) {
 535 regd_buf->device = device; 409 iser_err("data present on non-login task!!!\n");
536 regd_buf->virt_addr = task->data; 410 goto send_control_error;
537 regd_buf->data_size = task->data_count; 411 }
538 iser_reg_single(device, regd_buf, 412 memcpy(iser_conn->ib_conn->login_buf, task->data,
539 DMA_TO_DEVICE); 413 task->data_count);
540 iser_dto_add_regd_buff(send_dto, regd_buf, 414 tx_dsg->addr = iser_conn->ib_conn->login_dma;
541 0, 415 tx_dsg->length = data_seg_len;
542 data_seg_len); 416 tx_dsg->lkey = device->mr->lkey;
417 mdesc->num_sge = 2;
543 } 418 }
544 419
545 opcode = task->hdr->opcode & ISCSI_OPCODE_MASK; 420 if (task == conn->login_task) {
546 421 err = iser_post_recvl(iser_conn->ib_conn);
547 /* post recv buffer for response if one is expected */ 422 if (err)
548 if (!(opcode == ISCSI_OP_NOOP_OUT && task->hdr->itt == RESERVED_ITT)) {
549 if (iser_post_receive_control(conn) != 0) {
550 iser_err("post_rcv_buff failed!\n");
551 err = -ENOMEM;
552 goto send_control_error; 423 goto send_control_error;
553 }
554 } 424 }
555 425
556 err = iser_post_send(mdesc); 426 err = iser_post_send(iser_conn->ib_conn, mdesc);
557 if (!err) 427 if (!err)
558 return 0; 428 return 0;
559 429
560send_control_error: 430send_control_error:
561 iser_dto_buffs_release(send_dto);
562 iser_err("conn %p failed err %d\n",conn, err); 431 iser_err("conn %p failed err %d\n",conn, err);
563 return err; 432 return err;
564} 433}
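The control path now special-cases login: the PDU payload is copied into the per-connection login_buf that was DMA-mapped once at connection setup, tx_sg[1] is pointed at login_dma, and iser_post_recvl() posts the one ISER_RX_LOGIN_SIZE receive. A login request carrying N data bytes therefore goes out as a two-SGE send, 76 bytes of headers plus the N payload bytes already sitting in login_buf, with a buffer big enough for an 8 KB login response posted beforehand.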
@@ -566,104 +435,71 @@ send_control_error:
566/** 435/**
567 * iser_rcv_dto_completion - recv DTO completion 436 * iser_rcv_dto_completion - recv DTO completion
568 */ 437 */
569void iser_rcv_completion(struct iser_desc *rx_desc, 438void iser_rcv_completion(struct iser_rx_desc *rx_desc,
570 unsigned long dto_xfer_len) 439 unsigned long rx_xfer_len,
440 struct iser_conn *ib_conn)
571{ 441{
572 struct iser_dto *dto = &rx_desc->dto; 442 struct iscsi_iser_conn *conn = ib_conn->iser_conn;
573 struct iscsi_iser_conn *conn = dto->ib_conn->iser_conn;
574 struct iscsi_task *task;
575 struct iscsi_iser_task *iser_task;
576 struct iscsi_hdr *hdr; 443 struct iscsi_hdr *hdr;
577 char *rx_data = NULL; 444 u64 rx_dma;
578 int rx_data_len = 0; 445 int rx_buflen, outstanding, count, err;
579 unsigned char opcode; 446
580 447 /* differentiate between login to all other PDUs */
581 hdr = &rx_desc->iscsi_header; 448 if ((char *)rx_desc == ib_conn->login_buf) {
449 rx_dma = ib_conn->login_dma;
450 rx_buflen = ISER_RX_LOGIN_SIZE;
451 } else {
452 rx_dma = rx_desc->dma_addr;
453 rx_buflen = ISER_RX_PAYLOAD_SIZE;
454 }
582 455
583 iser_dbg("op 0x%x itt 0x%x\n", hdr->opcode,hdr->itt); 456 ib_dma_sync_single_for_cpu(ib_conn->device->ib_device, rx_dma,
457 rx_buflen, DMA_FROM_DEVICE);
584 458
585 if (dto_xfer_len > ISER_TOTAL_HEADERS_LEN) { /* we have data */ 459 hdr = &rx_desc->iscsi_header;
586 rx_data_len = dto_xfer_len - ISER_TOTAL_HEADERS_LEN;
587 rx_data = dto->regd[1]->virt_addr;
588 rx_data += dto->offset[1];
589 }
590 460
591 opcode = hdr->opcode & ISCSI_OPCODE_MASK; 461 iser_dbg("op 0x%x itt 0x%x dlen %d\n", hdr->opcode,
592 462 hdr->itt, (int)(rx_xfer_len - ISER_HEADERS_LEN));
593 if (opcode == ISCSI_OP_SCSI_CMD_RSP) {
594 spin_lock(&conn->iscsi_conn->session->lock);
595 task = iscsi_itt_to_ctask(conn->iscsi_conn, hdr->itt);
596 if (task)
597 __iscsi_get_task(task);
598 spin_unlock(&conn->iscsi_conn->session->lock);
599
600 if (!task)
601 iser_err("itt can't be matched to task!!! "
602 "conn %p opcode %d itt %d\n",
603 conn->iscsi_conn, opcode, hdr->itt);
604 else {
605 iser_task = task->dd_data;
606 iser_dbg("itt %d task %p\n",hdr->itt, task);
607 iser_task->status = ISER_TASK_STATUS_COMPLETED;
608 iser_task_rdma_finalize(iser_task);
609 iscsi_put_task(task);
610 }
611 }
612 iser_dto_buffs_release(dto);
613 463
614 iscsi_iser_recv(conn->iscsi_conn, hdr, rx_data, rx_data_len); 464 iscsi_iser_recv(conn->iscsi_conn, hdr,
465 rx_desc->data, rx_xfer_len - ISER_HEADERS_LEN);
615 466
616 kfree(rx_desc->data); 467 ib_dma_sync_single_for_device(ib_conn->device->ib_device, rx_dma,
617 kmem_cache_free(ig.desc_cache, rx_desc); 468 rx_buflen, DMA_FROM_DEVICE);
618 469
619 /* decrementing conn->post_recv_buf_count only --after-- freeing the * 470 /* decrementing conn->post_recv_buf_count only --after-- freeing the *
620 * task eliminates the need to worry on tasks which are completed in * 471 * task eliminates the need to worry on tasks which are completed in *
621 * parallel to the execution of iser_conn_term. So the code that waits * 472 * parallel to the execution of iser_conn_term. So the code that waits *
622 * for the posted rx bufs refcount to become zero handles everything */ 473 * for the posted rx bufs refcount to become zero handles everything */
623 atomic_dec(&conn->ib_conn->post_recv_buf_count); 474 conn->ib_conn->post_recv_buf_count--;
624 475
625 /* 476 if (rx_dma == ib_conn->login_dma)
626 * if an unexpected PDU was received then the recv wr consumed must 477 return;
627 * be replaced, this is done in the next send of a control-type PDU 478
628 */ 479 outstanding = ib_conn->post_recv_buf_count;
629 if (opcode == ISCSI_OP_NOOP_IN && hdr->itt == RESERVED_ITT) { 480 if (outstanding + ISER_MIN_POSTED_RX <= ISER_QP_MAX_RECV_DTOS) {
630 /* nop-in with itt = 0xffffffff */ 481 count = min(ISER_QP_MAX_RECV_DTOS - outstanding,
631 atomic_inc(&conn->ib_conn->unexpected_pdu_count); 482 ISER_MIN_POSTED_RX);
632 } 483 err = iser_post_recvm(ib_conn, count);
633 else if (opcode == ISCSI_OP_ASYNC_EVENT) { 484 if (err)
634 /* asyncronous message */ 485 iser_err("posting %d rx bufs err %d\n", count, err);
635 atomic_inc(&conn->ib_conn->unexpected_pdu_count);
636 } 486 }
637 /* a reject PDU consumes the recv buf posted for the response */
638} 487}
639 488
640void iser_snd_completion(struct iser_desc *tx_desc) 489void iser_snd_completion(struct iser_tx_desc *tx_desc,
490 struct iser_conn *ib_conn)
641{ 491{
642 struct iser_dto *dto = &tx_desc->dto;
643 struct iser_conn *ib_conn = dto->ib_conn;
644 struct iscsi_iser_conn *iser_conn = ib_conn->iser_conn;
645 struct iscsi_conn *conn = iser_conn->iscsi_conn;
646 struct iscsi_task *task; 492 struct iscsi_task *task;
647 int resume_tx = 0; 493 struct iser_device *device = ib_conn->device;
648
649 iser_dbg("Initiator, Data sent dto=0x%p\n", dto);
650
651 iser_dto_buffs_release(dto);
652 494
653 if (tx_desc->type == ISCSI_TX_DATAOUT) 495 if (tx_desc->type == ISCSI_TX_DATAOUT) {
496 ib_dma_unmap_single(device->ib_device, tx_desc->dma_addr,
497 ISER_HEADERS_LEN, DMA_TO_DEVICE);
654 kmem_cache_free(ig.desc_cache, tx_desc); 498 kmem_cache_free(ig.desc_cache, tx_desc);
655 499 }
656 if (atomic_read(&iser_conn->ib_conn->post_send_buf_count) ==
657 ISER_QP_MAX_REQ_DTOS)
658 resume_tx = 1;
659 500
660 atomic_dec(&ib_conn->post_send_buf_count); 501 atomic_dec(&ib_conn->post_send_buf_count);
661 502
662 if (resume_tx) {
663 iser_dbg("%ld resuming tx\n",jiffies);
664 iscsi_conn_queue_work(conn);
665 }
666
667 if (tx_desc->type == ISCSI_TX_CONTROL) { 503 if (tx_desc->type == ISCSI_TX_CONTROL) {
668 /* this arithmetic is legal by libiscsi dd_data allocation */ 504 /* this arithmetic is legal by libiscsi dd_data allocation */
669 task = (void *) ((long)(void *)tx_desc - 505 task = (void *) ((long)(void *)tx_desc -
@@ -692,7 +528,6 @@ void iser_task_rdma_init(struct iscsi_iser_task *iser_task)
692 528
693void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task) 529void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task)
694{ 530{
695 int deferred;
696 int is_rdma_aligned = 1; 531 int is_rdma_aligned = 1;
697 struct iser_regd_buf *regd; 532 struct iser_regd_buf *regd;
698 533
@@ -710,32 +545,17 @@ void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task)
710 545
711 if (iser_task->dir[ISER_DIR_IN]) { 546 if (iser_task->dir[ISER_DIR_IN]) {
712 regd = &iser_task->rdma_regd[ISER_DIR_IN]; 547 regd = &iser_task->rdma_regd[ISER_DIR_IN];
713 deferred = iser_regd_buff_release(regd); 548 if (regd->reg.is_fmr)
714 if (deferred) { 549 iser_unreg_mem(&regd->reg);
715 iser_err("%d references remain for BUF-IN rdma reg\n",
716 atomic_read(&regd->ref_count));
717 }
718 } 550 }
719 551
720 if (iser_task->dir[ISER_DIR_OUT]) { 552 if (iser_task->dir[ISER_DIR_OUT]) {
721 regd = &iser_task->rdma_regd[ISER_DIR_OUT]; 553 regd = &iser_task->rdma_regd[ISER_DIR_OUT];
722 deferred = iser_regd_buff_release(regd); 554 if (regd->reg.is_fmr)
723 if (deferred) { 555 iser_unreg_mem(&regd->reg);
724 iser_err("%d references remain for BUF-OUT rdma reg\n",
725 atomic_read(&regd->ref_count));
726 }
727 } 556 }
728 557
729 /* if the data was unaligned, it was already unmapped and then copied */ 558 /* if the data was unaligned, it was already unmapped and then copied */
730 if (is_rdma_aligned) 559 if (is_rdma_aligned)
731 iser_dma_unmap_task_data(iser_task); 560 iser_dma_unmap_task_data(iser_task);
732} 561}
733
734void iser_dto_buffs_release(struct iser_dto *dto)
735{
736 int i;
737
738 for (i = 0; i < dto->regd_vector_len; i++)
739 iser_regd_buff_release(dto->regd[i]);
740}
741
diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
index 274c883ef3ea..fb88d6896b67 100644
--- a/drivers/infiniband/ulp/iser/iser_memory.c
+++ b/drivers/infiniband/ulp/iser/iser_memory.c
@@ -41,62 +41,6 @@
41#define ISER_KMALLOC_THRESHOLD 0x20000 /* 128K - kmalloc limit */ 41#define ISER_KMALLOC_THRESHOLD 0x20000 /* 128K - kmalloc limit */
42 42
43/** 43/**
44 * Decrements the reference count for the
45 * registered buffer & releases it
46 *
47 * returns 0 if released, 1 if deferred
48 */
49int iser_regd_buff_release(struct iser_regd_buf *regd_buf)
50{
51 struct ib_device *dev;
52
53 if ((atomic_read(&regd_buf->ref_count) == 0) ||
54 atomic_dec_and_test(&regd_buf->ref_count)) {
55 /* if we used the dma mr, unreg is just NOP */
56 if (regd_buf->reg.is_fmr)
57 iser_unreg_mem(&regd_buf->reg);
58
59 if (regd_buf->dma_addr) {
60 dev = regd_buf->device->ib_device;
61 ib_dma_unmap_single(dev,
62 regd_buf->dma_addr,
63 regd_buf->data_size,
64 regd_buf->direction);
65 }
66 /* else this regd buf is associated with task which we */
67 /* dma_unmap_single/sg later */
68 return 0;
69 } else {
70 iser_dbg("Release deferred, regd.buff: 0x%p\n", regd_buf);
71 return 1;
72 }
73}
74
75/**
76 * iser_reg_single - fills registered buffer descriptor with
77 * registration information
78 */
79void iser_reg_single(struct iser_device *device,
80 struct iser_regd_buf *regd_buf,
81 enum dma_data_direction direction)
82{
83 u64 dma_addr;
84
85 dma_addr = ib_dma_map_single(device->ib_device,
86 regd_buf->virt_addr,
87 regd_buf->data_size, direction);
88 BUG_ON(ib_dma_mapping_error(device->ib_device, dma_addr));
89
90 regd_buf->reg.lkey = device->mr->lkey;
91 regd_buf->reg.len = regd_buf->data_size;
92 regd_buf->reg.va = dma_addr;
93 regd_buf->reg.is_fmr = 0;
94
95 regd_buf->dma_addr = dma_addr;
96 regd_buf->direction = direction;
97}
98
99/**
100 * iser_start_rdma_unaligned_sg 44 * iser_start_rdma_unaligned_sg
101 */ 45 */
102static int iser_start_rdma_unaligned_sg(struct iscsi_iser_task *iser_task, 46static int iser_start_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
@@ -109,10 +53,10 @@ static int iser_start_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
109 unsigned long cmd_data_len = data->data_len; 53 unsigned long cmd_data_len = data->data_len;
110 54
111 if (cmd_data_len > ISER_KMALLOC_THRESHOLD) 55 if (cmd_data_len > ISER_KMALLOC_THRESHOLD)
112 mem = (void *)__get_free_pages(GFP_NOIO, 56 mem = (void *)__get_free_pages(GFP_ATOMIC,
113 ilog2(roundup_pow_of_two(cmd_data_len)) - PAGE_SHIFT); 57 ilog2(roundup_pow_of_two(cmd_data_len)) - PAGE_SHIFT);
114 else 58 else
115 mem = kmalloc(cmd_data_len, GFP_NOIO); 59 mem = kmalloc(cmd_data_len, GFP_ATOMIC);
116 60
117 if (mem == NULL) { 61 if (mem == NULL) {
118 iser_err("Failed to allocate mem size %d %d for copying sglist\n", 62 iser_err("Failed to allocate mem size %d %d for copying sglist\n",
@@ -474,9 +418,5 @@ int iser_reg_rdma_mem(struct iscsi_iser_task *iser_task,
474 return err; 418 return err;
475 } 419 }
476 } 420 }
477
478 /* take a reference on this regd buf such that it will not be released *
479 * (eg in send dto completion) before we get the scsi response */
480 atomic_inc(&regd_buf->ref_count);
481 return 0; 421 return 0;
482} 422}
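The GFP_NOIO to GFP_ATOMIC switch in the bounce-buffer allocations above is presumably needed because they can now run on the transmit path in a context that must not sleep; GFP_ATOMIC never sleeps, at the price of failing more readily under memory pressure.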
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index 8579f32ce38e..308d17bb5146 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -37,9 +37,8 @@
37#include "iscsi_iser.h" 37#include "iscsi_iser.h"
38 38
39#define ISCSI_ISER_MAX_CONN 8 39#define ISCSI_ISER_MAX_CONN 8
40#define ISER_MAX_CQ_LEN ((ISER_QP_MAX_RECV_DTOS + \ 40#define ISER_MAX_RX_CQ_LEN (ISER_QP_MAX_RECV_DTOS * ISCSI_ISER_MAX_CONN)
41 ISER_QP_MAX_REQ_DTOS) * \ 41#define ISER_MAX_TX_CQ_LEN (ISER_QP_MAX_REQ_DTOS * ISCSI_ISER_MAX_CONN)
42 ISCSI_ISER_MAX_CONN)
43 42
44static void iser_cq_tasklet_fn(unsigned long data); 43static void iser_cq_tasklet_fn(unsigned long data);
45static void iser_cq_callback(struct ib_cq *cq, void *cq_context); 44static void iser_cq_callback(struct ib_cq *cq, void *cq_context);
@@ -67,15 +66,23 @@ static int iser_create_device_ib_res(struct iser_device *device)
67 if (IS_ERR(device->pd)) 66 if (IS_ERR(device->pd))
68 goto pd_err; 67 goto pd_err;
69 68
70 device->cq = ib_create_cq(device->ib_device, 69 device->rx_cq = ib_create_cq(device->ib_device,
71 iser_cq_callback, 70 iser_cq_callback,
72 iser_cq_event_callback, 71 iser_cq_event_callback,
73 (void *)device, 72 (void *)device,
74 ISER_MAX_CQ_LEN, 0); 73 ISER_MAX_RX_CQ_LEN, 0);
75 if (IS_ERR(device->cq)) 74 if (IS_ERR(device->rx_cq))
76 goto cq_err; 75 goto rx_cq_err;
77 76
78 if (ib_req_notify_cq(device->cq, IB_CQ_NEXT_COMP)) 77 device->tx_cq = ib_create_cq(device->ib_device,
78 NULL, iser_cq_event_callback,
79 (void *)device,
80 ISER_MAX_TX_CQ_LEN, 0);
81
82 if (IS_ERR(device->tx_cq))
83 goto tx_cq_err;
84
85 if (ib_req_notify_cq(device->rx_cq, IB_CQ_NEXT_COMP))
79 goto cq_arm_err; 86 goto cq_arm_err;
80 87
81 tasklet_init(&device->cq_tasklet, 88 tasklet_init(&device->cq_tasklet,
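Note the asymmetry: rx_cq keeps iser_cq_callback and is armed below, while tx_cq is created with a NULL completion handler. Send completions are therefore never signalled by interrupt; they are reaped by polling, via iser_drain_tx_cq() in the tasklet at the end of this patch.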
@@ -93,8 +100,10 @@ static int iser_create_device_ib_res(struct iser_device *device)
93dma_mr_err: 100dma_mr_err:
94 tasklet_kill(&device->cq_tasklet); 101 tasklet_kill(&device->cq_tasklet);
95cq_arm_err: 102cq_arm_err:
96 ib_destroy_cq(device->cq); 103 ib_destroy_cq(device->tx_cq);
97cq_err: 104tx_cq_err:
105 ib_destroy_cq(device->rx_cq);
106rx_cq_err:
98 ib_dealloc_pd(device->pd); 107 ib_dealloc_pd(device->pd);
99pd_err: 108pd_err:
100 iser_err("failed to allocate an IB resource\n"); 109 iser_err("failed to allocate an IB resource\n");
@@ -112,11 +121,13 @@ static void iser_free_device_ib_res(struct iser_device *device)
112 tasklet_kill(&device->cq_tasklet); 121 tasklet_kill(&device->cq_tasklet);
113 122
114 (void)ib_dereg_mr(device->mr); 123 (void)ib_dereg_mr(device->mr);
115 (void)ib_destroy_cq(device->cq); 124 (void)ib_destroy_cq(device->tx_cq);
125 (void)ib_destroy_cq(device->rx_cq);
116 (void)ib_dealloc_pd(device->pd); 126 (void)ib_dealloc_pd(device->pd);
117 127
118 device->mr = NULL; 128 device->mr = NULL;
119 device->cq = NULL; 129 device->tx_cq = NULL;
130 device->rx_cq = NULL;
120 device->pd = NULL; 131 device->pd = NULL;
121} 132}
122 133
@@ -129,13 +140,23 @@ static int iser_create_ib_conn_res(struct iser_conn *ib_conn)
129{ 140{
130 struct iser_device *device; 141 struct iser_device *device;
131 struct ib_qp_init_attr init_attr; 142 struct ib_qp_init_attr init_attr;
132 int ret; 143 int ret = -ENOMEM;
133 struct ib_fmr_pool_param params; 144 struct ib_fmr_pool_param params;
134 145
135 BUG_ON(ib_conn->device == NULL); 146 BUG_ON(ib_conn->device == NULL);
136 147
137 device = ib_conn->device; 148 device = ib_conn->device;
138 149
150 ib_conn->login_buf = kmalloc(ISER_RX_LOGIN_SIZE, GFP_KERNEL);
151 if (!ib_conn->login_buf) {
 152 ret = -ENOMEM;
 153 goto alloc_err;
154 }
155
156 ib_conn->login_dma = ib_dma_map_single(ib_conn->device->ib_device,
157 (void *)ib_conn->login_buf, ISER_RX_LOGIN_SIZE,
158 DMA_FROM_DEVICE);
159
139 ib_conn->page_vec = kmalloc(sizeof(struct iser_page_vec) + 160 ib_conn->page_vec = kmalloc(sizeof(struct iser_page_vec) +
140 (sizeof(u64) * (ISCSI_ISER_SG_TABLESIZE +1)), 161 (sizeof(u64) * (ISCSI_ISER_SG_TABLESIZE +1)),
141 GFP_KERNEL); 162 GFP_KERNEL);
@@ -169,12 +190,12 @@ static int iser_create_ib_conn_res(struct iser_conn *ib_conn)
169 190
170 init_attr.event_handler = iser_qp_event_callback; 191 init_attr.event_handler = iser_qp_event_callback;
171 init_attr.qp_context = (void *)ib_conn; 192 init_attr.qp_context = (void *)ib_conn;
172 init_attr.send_cq = device->cq; 193 init_attr.send_cq = device->tx_cq;
173 init_attr.recv_cq = device->cq; 194 init_attr.recv_cq = device->rx_cq;
174 init_attr.cap.max_send_wr = ISER_QP_MAX_REQ_DTOS; 195 init_attr.cap.max_send_wr = ISER_QP_MAX_REQ_DTOS;
175 init_attr.cap.max_recv_wr = ISER_QP_MAX_RECV_DTOS; 196 init_attr.cap.max_recv_wr = ISER_QP_MAX_RECV_DTOS;
176 init_attr.cap.max_send_sge = MAX_REGD_BUF_VECTOR_LEN; 197 init_attr.cap.max_send_sge = 2;
177 init_attr.cap.max_recv_sge = 2; 198 init_attr.cap.max_recv_sge = 1;
178 init_attr.sq_sig_type = IB_SIGNAL_REQ_WR; 199 init_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
179 init_attr.qp_type = IB_QPT_RC; 200 init_attr.qp_type = IB_QPT_RC;
180 201
@@ -192,6 +213,7 @@ qp_err:
192 (void)ib_destroy_fmr_pool(ib_conn->fmr_pool); 213 (void)ib_destroy_fmr_pool(ib_conn->fmr_pool);
193fmr_pool_err: 214fmr_pool_err:
194 kfree(ib_conn->page_vec); 215 kfree(ib_conn->page_vec);
216 kfree(ib_conn->login_buf);
195alloc_err: 217alloc_err:
196 iser_err("unable to alloc mem or create resource, err %d\n", ret); 218 iser_err("unable to alloc mem or create resource, err %d\n", ret);
197 return ret; 219 return ret;
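One gap worth noting: unlike the header mapping in iser_initialize_task_headers, the login_dma mapping above is never checked for failure. A sketch of the same guard, assuming the error can reuse the existing alloc_err unwind (this is not part of the patch):

  ib_conn->login_dma = ib_dma_map_single(device->ib_device,
                          (void *)ib_conn->login_buf,
                          ISER_RX_LOGIN_SIZE, DMA_FROM_DEVICE);
  if (ib_dma_mapping_error(device->ib_device, ib_conn->login_dma))
          goto alloc_err;         /* ret already holds -ENOMEM */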
@@ -278,17 +300,6 @@ static void iser_device_try_release(struct iser_device *device)
278 mutex_unlock(&ig.device_list_mutex); 300 mutex_unlock(&ig.device_list_mutex);
279} 301}
280 302
281int iser_conn_state_comp(struct iser_conn *ib_conn,
282 enum iser_ib_conn_state comp)
283{
284 int ret;
285
286 spin_lock_bh(&ib_conn->lock);
287 ret = (ib_conn->state == comp);
288 spin_unlock_bh(&ib_conn->lock);
289 return ret;
290}
291
292static int iser_conn_state_comp_exch(struct iser_conn *ib_conn, 303static int iser_conn_state_comp_exch(struct iser_conn *ib_conn,
293 enum iser_ib_conn_state comp, 304 enum iser_ib_conn_state comp,
294 enum iser_ib_conn_state exch) 305 enum iser_ib_conn_state exch)
@@ -314,7 +325,7 @@ static void iser_conn_release(struct iser_conn *ib_conn)
314 mutex_lock(&ig.connlist_mutex); 325 mutex_lock(&ig.connlist_mutex);
315 list_del(&ib_conn->conn_list); 326 list_del(&ib_conn->conn_list);
316 mutex_unlock(&ig.connlist_mutex); 327 mutex_unlock(&ig.connlist_mutex);
317 328 iser_free_rx_descriptors(ib_conn);
318 iser_free_ib_conn_res(ib_conn); 329 iser_free_ib_conn_res(ib_conn);
319 ib_conn->device = NULL; 330 ib_conn->device = NULL;
320 /* on EVENT_ADDR_ERROR there's no device yet for this conn */ 331 /* on EVENT_ADDR_ERROR there's no device yet for this conn */
@@ -442,7 +453,7 @@ static void iser_disconnected_handler(struct rdma_cm_id *cma_id)
442 ISCSI_ERR_CONN_FAILED); 453 ISCSI_ERR_CONN_FAILED);
443 454
444 /* Complete the termination process if no posts are pending */ 455 /* Complete the termination process if no posts are pending */
445 if ((atomic_read(&ib_conn->post_recv_buf_count) == 0) && 456 if (ib_conn->post_recv_buf_count == 0 &&
446 (atomic_read(&ib_conn->post_send_buf_count) == 0)) { 457 (atomic_read(&ib_conn->post_send_buf_count) == 0)) {
447 ib_conn->state = ISER_CONN_DOWN; 458 ib_conn->state = ISER_CONN_DOWN;
448 wake_up_interruptible(&ib_conn->wait); 459 wake_up_interruptible(&ib_conn->wait);
@@ -489,9 +500,8 @@ void iser_conn_init(struct iser_conn *ib_conn)
489{ 500{
490 ib_conn->state = ISER_CONN_INIT; 501 ib_conn->state = ISER_CONN_INIT;
491 init_waitqueue_head(&ib_conn->wait); 502 init_waitqueue_head(&ib_conn->wait);
492 atomic_set(&ib_conn->post_recv_buf_count, 0); 503 ib_conn->post_recv_buf_count = 0;
493 atomic_set(&ib_conn->post_send_buf_count, 0); 504 atomic_set(&ib_conn->post_send_buf_count, 0);
494 atomic_set(&ib_conn->unexpected_pdu_count, 0);
495 atomic_set(&ib_conn->refcount, 1); 505 atomic_set(&ib_conn->refcount, 1);
496 INIT_LIST_HEAD(&ib_conn->conn_list); 506 INIT_LIST_HEAD(&ib_conn->conn_list);
497 spin_lock_init(&ib_conn->lock); 507 spin_lock_init(&ib_conn->lock);
@@ -626,136 +636,97 @@ void iser_unreg_mem(struct iser_mem_reg *reg)
626 reg->mem_h = NULL; 636 reg->mem_h = NULL;
627} 637}
628 638
629/** 639int iser_post_recvl(struct iser_conn *ib_conn)
630 * iser_dto_to_iov - builds IOV from a dto descriptor
631 */
632static void iser_dto_to_iov(struct iser_dto *dto, struct ib_sge *iov, int iov_len)
633{ 640{
634 int i; 641 struct ib_recv_wr rx_wr, *rx_wr_failed;
635 struct ib_sge *sge; 642 struct ib_sge sge;
636 struct iser_regd_buf *regd_buf; 643 int ib_ret;
637
638 if (dto->regd_vector_len > iov_len) {
639 iser_err("iov size %d too small for posting dto of len %d\n",
640 iov_len, dto->regd_vector_len);
641 BUG();
642 }
643 644
644 for (i = 0; i < dto->regd_vector_len; i++) { 645 sge.addr = ib_conn->login_dma;
645 sge = &iov[i]; 646 sge.length = ISER_RX_LOGIN_SIZE;
646 regd_buf = dto->regd[i]; 647 sge.lkey = ib_conn->device->mr->lkey;
647
648 sge->addr = regd_buf->reg.va;
649 sge->length = regd_buf->reg.len;
650 sge->lkey = regd_buf->reg.lkey;
651
652 if (dto->used_sz[i] > 0) /* Adjust size */
653 sge->length = dto->used_sz[i];
654
655 /* offset and length should not exceed the regd buf length */
656 if (sge->length + dto->offset[i] > regd_buf->reg.len) {
657 iser_err("Used len:%ld + offset:%d, exceed reg.buf.len:"
658 "%ld in dto:0x%p [%d], va:0x%08lX\n",
659 (unsigned long)sge->length, dto->offset[i],
660 (unsigned long)regd_buf->reg.len, dto, i,
661 (unsigned long)sge->addr);
662 BUG();
663 }
664 648
665 sge->addr += dto->offset[i]; /* Adjust offset */ 649 rx_wr.wr_id = (unsigned long)ib_conn->login_buf;
650 rx_wr.sg_list = &sge;
651 rx_wr.num_sge = 1;
652 rx_wr.next = NULL;
653
654 ib_conn->post_recv_buf_count++;
655 ib_ret = ib_post_recv(ib_conn->qp, &rx_wr, &rx_wr_failed);
656 if (ib_ret) {
657 iser_err("ib_post_recv failed ret=%d\n", ib_ret);
658 ib_conn->post_recv_buf_count--;
666 } 659 }
660 return ib_ret;
667} 661}
668 662
669/** 663int iser_post_recvm(struct iser_conn *ib_conn, int count)
670 * iser_post_recv - Posts a receive buffer.
671 *
672 * returns 0 on success, -1 on failure
673 */
674int iser_post_recv(struct iser_desc *rx_desc)
675{ 664{
676 int ib_ret, ret_val = 0; 665 struct ib_recv_wr *rx_wr, *rx_wr_failed;
677 struct ib_recv_wr recv_wr, *recv_wr_failed; 666 int i, ib_ret;
678 struct ib_sge iov[2]; 667 unsigned int my_rx_head = ib_conn->rx_desc_head;
679 struct iser_conn *ib_conn; 668 struct iser_rx_desc *rx_desc;
680 struct iser_dto *recv_dto = &rx_desc->dto; 669
681 670 for (rx_wr = ib_conn->rx_wr, i = 0; i < count; i++, rx_wr++) {
682 /* Retrieve conn */ 671 rx_desc = &ib_conn->rx_descs[my_rx_head];
683 ib_conn = recv_dto->ib_conn; 672 rx_wr->wr_id = (unsigned long)rx_desc;
684 673 rx_wr->sg_list = &rx_desc->rx_sg;
685 iser_dto_to_iov(recv_dto, iov, 2); 674 rx_wr->num_sge = 1;
675 rx_wr->next = rx_wr + 1;
676 my_rx_head = (my_rx_head + 1) & (ISER_QP_MAX_RECV_DTOS - 1);
677 }
686 678
687 recv_wr.next = NULL; 679 rx_wr--;
688 recv_wr.sg_list = iov; 680 rx_wr->next = NULL; /* mark end of work requests list */
689 recv_wr.num_sge = recv_dto->regd_vector_len;
690 recv_wr.wr_id = (unsigned long)rx_desc;
691 681
692 atomic_inc(&ib_conn->post_recv_buf_count); 682 ib_conn->post_recv_buf_count += count;
693 ib_ret = ib_post_recv(ib_conn->qp, &recv_wr, &recv_wr_failed); 683 ib_ret = ib_post_recv(ib_conn->qp, ib_conn->rx_wr, &rx_wr_failed);
694 if (ib_ret) { 684 if (ib_ret) {
695 iser_err("ib_post_recv failed ret=%d\n", ib_ret); 685 iser_err("ib_post_recv failed ret=%d\n", ib_ret);
696 atomic_dec(&ib_conn->post_recv_buf_count); 686 ib_conn->post_recv_buf_count -= count;
697 ret_val = -1; 687 } else
698 } 688 ib_conn->rx_desc_head = my_rx_head;
699 689 return ib_ret;
700 return ret_val;
701} 690}
702 691
692
703/** 693/**
704 * iser_start_send - Initiate a Send DTO operation 694 * iser_start_send - Initiate a Send DTO operation
705 * 695 *
706 * returns 0 on success, -1 on failure 696 * returns 0 on success, -1 on failure
707 */ 697 */
708int iser_post_send(struct iser_desc *tx_desc) 698int iser_post_send(struct iser_conn *ib_conn, struct iser_tx_desc *tx_desc)
709{ 699{
710 int ib_ret, ret_val = 0; 700 int ib_ret;
711 struct ib_send_wr send_wr, *send_wr_failed; 701 struct ib_send_wr send_wr, *send_wr_failed;
712 struct ib_sge iov[MAX_REGD_BUF_VECTOR_LEN];
713 struct iser_conn *ib_conn;
714 struct iser_dto *dto = &tx_desc->dto;
715 702
716 ib_conn = dto->ib_conn; 703 ib_dma_sync_single_for_device(ib_conn->device->ib_device,
717 704 tx_desc->dma_addr, ISER_HEADERS_LEN, DMA_TO_DEVICE);
718 iser_dto_to_iov(dto, iov, MAX_REGD_BUF_VECTOR_LEN);
719 705
720 send_wr.next = NULL; 706 send_wr.next = NULL;
721 send_wr.wr_id = (unsigned long)tx_desc; 707 send_wr.wr_id = (unsigned long)tx_desc;
722 send_wr.sg_list = iov; 708 send_wr.sg_list = tx_desc->tx_sg;
723 send_wr.num_sge = dto->regd_vector_len; 709 send_wr.num_sge = tx_desc->num_sge;
724 send_wr.opcode = IB_WR_SEND; 710 send_wr.opcode = IB_WR_SEND;
725 send_wr.send_flags = dto->notify_enable ? IB_SEND_SIGNALED : 0; 711 send_wr.send_flags = IB_SEND_SIGNALED;
726 712
727 atomic_inc(&ib_conn->post_send_buf_count); 713 atomic_inc(&ib_conn->post_send_buf_count);
728 714
729 ib_ret = ib_post_send(ib_conn->qp, &send_wr, &send_wr_failed); 715 ib_ret = ib_post_send(ib_conn->qp, &send_wr, &send_wr_failed);
730 if (ib_ret) { 716 if (ib_ret) {
731 iser_err("Failed to start SEND DTO, dto: 0x%p, IOV len: %d\n",
732 dto, dto->regd_vector_len);
733 iser_err("ib_post_send failed, ret:%d\n", ib_ret); 717 iser_err("ib_post_send failed, ret:%d\n", ib_ret);
734 atomic_dec(&ib_conn->post_send_buf_count); 718 atomic_dec(&ib_conn->post_send_buf_count);
735 ret_val = -1;
736 } 719 }
737 720 return ib_ret;
738 return ret_val;
739} 721}
740 722
741static void iser_handle_comp_error(struct iser_desc *desc) 723static void iser_handle_comp_error(struct iser_tx_desc *desc,
724 struct iser_conn *ib_conn)
742{ 725{
743 struct iser_dto *dto = &desc->dto; 726 if (desc && desc->type == ISCSI_TX_DATAOUT)
744 struct iser_conn *ib_conn = dto->ib_conn;
745
746 iser_dto_buffs_release(dto);
747
748 if (desc->type == ISCSI_RX) {
749 kfree(desc->data);
750 kmem_cache_free(ig.desc_cache, desc); 727 kmem_cache_free(ig.desc_cache, desc);
751 atomic_dec(&ib_conn->post_recv_buf_count);
752 } else { /* type is TX control/command/dataout */
753 if (desc->type == ISCSI_TX_DATAOUT)
754 kmem_cache_free(ig.desc_cache, desc);
755 atomic_dec(&ib_conn->post_send_buf_count);
756 }
757 728
758 if (atomic_read(&ib_conn->post_recv_buf_count) == 0 && 729 if (ib_conn->post_recv_buf_count == 0 &&
759 atomic_read(&ib_conn->post_send_buf_count) == 0) { 730 atomic_read(&ib_conn->post_send_buf_count) == 0) {
760 /* getting here when the state is UP means that the conn is * 731 /* getting here when the state is UP means that the conn is *
761 * being terminated asynchronously from the iSCSI layer's * 732 * being terminated asynchronously from the iSCSI layer's *
@@ -774,32 +745,74 @@ static void iser_handle_comp_error(struct iser_desc *desc)
774 } 745 }
775} 746}
776 747
748static int iser_drain_tx_cq(struct iser_device *device)
749{
750 struct ib_cq *cq = device->tx_cq;
751 struct ib_wc wc;
752 struct iser_tx_desc *tx_desc;
753 struct iser_conn *ib_conn;
754 int completed_tx = 0;
755
756 while (ib_poll_cq(cq, 1, &wc) == 1) {
757 tx_desc = (struct iser_tx_desc *) (unsigned long) wc.wr_id;
758 ib_conn = wc.qp->qp_context;
759 if (wc.status == IB_WC_SUCCESS) {
760 if (wc.opcode == IB_WC_SEND)
761 iser_snd_completion(tx_desc, ib_conn);
762 else
763 iser_err("expected opcode %d got %d\n",
764 IB_WC_SEND, wc.opcode);
765 } else {
766 iser_err("tx id %llx status %d vend_err %x\n",
767 wc.wr_id, wc.status, wc.vendor_err);
768 atomic_dec(&ib_conn->post_send_buf_count);
769 iser_handle_comp_error(tx_desc, ib_conn);
770 }
771 completed_tx++;
772 }
773 return completed_tx;
774}
775
776
777static void iser_cq_tasklet_fn(unsigned long data) 777static void iser_cq_tasklet_fn(unsigned long data)
778{ 778{
779 struct iser_device *device = (struct iser_device *)data; 779 struct iser_device *device = (struct iser_device *)data;
780 struct ib_cq *cq = device->cq; 780 struct ib_cq *cq = device->rx_cq;
781 struct ib_wc wc; 781 struct ib_wc wc;
782 struct iser_desc *desc; 782 struct iser_rx_desc *desc;
783 unsigned long xfer_len; 783 unsigned long xfer_len;
784 struct iser_conn *ib_conn;
785 int completed_tx, completed_rx;
786 completed_tx = completed_rx = 0;
784 787
785 while (ib_poll_cq(cq, 1, &wc) == 1) { 788 while (ib_poll_cq(cq, 1, &wc) == 1) {
786 desc = (struct iser_desc *) (unsigned long) wc.wr_id; 789 desc = (struct iser_rx_desc *) (unsigned long) wc.wr_id;
787 BUG_ON(desc == NULL); 790 BUG_ON(desc == NULL);
788 791 ib_conn = wc.qp->qp_context;
789 if (wc.status == IB_WC_SUCCESS) { 792 if (wc.status == IB_WC_SUCCESS) {
790 if (desc->type == ISCSI_RX) { 793 if (wc.opcode == IB_WC_RECV) {
791 xfer_len = (unsigned long)wc.byte_len; 794 xfer_len = (unsigned long)wc.byte_len;
792 iser_rcv_completion(desc, xfer_len); 795 iser_rcv_completion(desc, xfer_len, ib_conn);
793 } else /* type == ISCSI_TX_CONTROL/SCSI_CMD/DOUT */ 796 } else
794 iser_snd_completion(desc); 797 iser_err("expected opcode %d got %d\n",
798 IB_WC_RECV, wc.opcode);
795 } else { 799 } else {
796 iser_err("comp w. error op %d status %d\n",desc->type,wc.status); 800 if (wc.status != IB_WC_WR_FLUSH_ERR)
797 iser_handle_comp_error(desc); 801 iser_err("rx id %llx status %d vend_err %x\n",
802 wc.wr_id, wc.status, wc.vendor_err);
803 ib_conn->post_recv_buf_count--;
804 iser_handle_comp_error(NULL, ib_conn);
798 } 805 }
806 completed_rx++;
807 if (!(completed_rx & 63))
808 completed_tx += iser_drain_tx_cq(device);
799 } 809 }
800 /* #warning "it is assumed here that arming CQ only once it's empty" * 810 /* #warning "it is assumed here that arming CQ only once it's empty" *
801 * " would not cause interrupts to be missed" */ 811 * " would not cause interrupts to be missed" */
802 ib_req_notify_cq(cq, IB_CQ_NEXT_COMP); 812 ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
813
814 completed_tx += iser_drain_tx_cq(device);
815 iser_dbg("got %d rx %d tx completions\n", completed_rx, completed_tx);
803} 816}
804 817
805static void iser_cq_callback(struct ib_cq *cq, void *cq_context) 818static void iser_cq_callback(struct ib_cq *cq, void *cq_context)
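
[Editor's aside] The iSER hunk above replaces per-buffer receive posting with one posting of a chained work-request array (iser_post_recvm). The following is a hedged, minimal sketch of that pattern only; post_recv_batch() and the wr/sge array layout are inventions of this example, not iSER's structures, and error handling is elided.

#include <rdma/ib_verbs.h>

static int post_recv_batch(struct ib_qp *qp, struct ib_recv_wr *wr,
			   struct ib_sge *sge, int count)
{
	struct ib_recv_wr *bad_wr;
	int i;

	for (i = 0; i < count; i++) {
		wr[i].wr_id   = i;		/* identifies the buffer at completion time */
		wr[i].sg_list = &sge[i];
		wr[i].num_sge = 1;
		wr[i].next    = &wr[i + 1];	/* chain to the following WR */
	}
	wr[count - 1].next = NULL;		/* mark end of work requests list */

	return ib_post_recv(qp, wr, &bad_wr);	/* one call posts the whole batch */
}
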
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 54c8fe25c423..ed3f9ebae882 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -80,7 +80,8 @@ MODULE_PARM_DESC(mellanox_workarounds,
80 80
81static void srp_add_one(struct ib_device *device); 81static void srp_add_one(struct ib_device *device);
82static void srp_remove_one(struct ib_device *device); 82static void srp_remove_one(struct ib_device *device);
83static void srp_completion(struct ib_cq *cq, void *target_ptr); 83static void srp_recv_completion(struct ib_cq *cq, void *target_ptr);
84static void srp_send_completion(struct ib_cq *cq, void *target_ptr);
84static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event); 85static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);
85 86
86static struct scsi_transport_template *ib_srp_transport_template; 87static struct scsi_transport_template *ib_srp_transport_template;
@@ -227,14 +228,21 @@ static int srp_create_target_ib(struct srp_target_port *target)
227 if (!init_attr) 228 if (!init_attr)
228 return -ENOMEM; 229 return -ENOMEM;
229 230
230 target->cq = ib_create_cq(target->srp_host->srp_dev->dev, 231 target->recv_cq = ib_create_cq(target->srp_host->srp_dev->dev,
231 srp_completion, NULL, target, SRP_CQ_SIZE, 0); 232 srp_recv_completion, NULL, target, SRP_RQ_SIZE, 0);
232 if (IS_ERR(target->cq)) { 233 if (IS_ERR(target->recv_cq)) {
233 ret = PTR_ERR(target->cq); 234 ret = PTR_ERR(target->recv_cq);
234 goto out; 235 goto err;
235 } 236 }
236 237
237 ib_req_notify_cq(target->cq, IB_CQ_NEXT_COMP); 238 target->send_cq = ib_create_cq(target->srp_host->srp_dev->dev,
239 srp_send_completion, NULL, target, SRP_SQ_SIZE, 0);
240 if (IS_ERR(target->send_cq)) {
241 ret = PTR_ERR(target->send_cq);
242 goto err_recv_cq;
243 }
244
245 ib_req_notify_cq(target->recv_cq, IB_CQ_NEXT_COMP);
238 246
239 init_attr->event_handler = srp_qp_event; 247 init_attr->event_handler = srp_qp_event;
240 init_attr->cap.max_send_wr = SRP_SQ_SIZE; 248 init_attr->cap.max_send_wr = SRP_SQ_SIZE;
@@ -243,24 +251,32 @@ static int srp_create_target_ib(struct srp_target_port *target)
243 init_attr->cap.max_send_sge = 1; 251 init_attr->cap.max_send_sge = 1;
244 init_attr->sq_sig_type = IB_SIGNAL_ALL_WR; 252 init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
245 init_attr->qp_type = IB_QPT_RC; 253 init_attr->qp_type = IB_QPT_RC;
246 init_attr->send_cq = target->cq; 254 init_attr->send_cq = target->send_cq;
247 init_attr->recv_cq = target->cq; 255 init_attr->recv_cq = target->recv_cq;
248 256
249 target->qp = ib_create_qp(target->srp_host->srp_dev->pd, init_attr); 257 target->qp = ib_create_qp(target->srp_host->srp_dev->pd, init_attr);
250 if (IS_ERR(target->qp)) { 258 if (IS_ERR(target->qp)) {
251 ret = PTR_ERR(target->qp); 259 ret = PTR_ERR(target->qp);
252 ib_destroy_cq(target->cq); 260 goto err_send_cq;
253 goto out;
254 } 261 }
255 262
256 ret = srp_init_qp(target, target->qp); 263 ret = srp_init_qp(target, target->qp);
257 if (ret) { 264 if (ret)
258 ib_destroy_qp(target->qp); 265 goto err_qp;
259 ib_destroy_cq(target->cq);
260 goto out;
261 }
262 266
263out: 267 kfree(init_attr);
268 return 0;
269
270err_qp:
271 ib_destroy_qp(target->qp);
272
273err_send_cq:
274 ib_destroy_cq(target->send_cq);
275
276err_recv_cq:
277 ib_destroy_cq(target->recv_cq);
278
279err:
264 kfree(init_attr); 280 kfree(init_attr);
265 return ret; 281 return ret;
266} 282}
@@ -270,7 +286,8 @@ static void srp_free_target_ib(struct srp_target_port *target)
270 int i; 286 int i;
271 287
272 ib_destroy_qp(target->qp); 288 ib_destroy_qp(target->qp);
273 ib_destroy_cq(target->cq); 289 ib_destroy_cq(target->send_cq);
290 ib_destroy_cq(target->recv_cq);
274 291
275 for (i = 0; i < SRP_RQ_SIZE; ++i) 292 for (i = 0; i < SRP_RQ_SIZE; ++i)
276 srp_free_iu(target->srp_host, target->rx_ring[i]); 293 srp_free_iu(target->srp_host, target->rx_ring[i]);
@@ -568,7 +585,9 @@ static int srp_reconnect_target(struct srp_target_port *target)
568 if (ret) 585 if (ret)
569 goto err; 586 goto err;
570 587
571 while (ib_poll_cq(target->cq, 1, &wc) > 0) 588 while (ib_poll_cq(target->recv_cq, 1, &wc) > 0)
589 ; /* nothing */
590 while (ib_poll_cq(target->send_cq, 1, &wc) > 0)
572 ; /* nothing */ 591 ; /* nothing */
573 592
574 spin_lock_irq(target->scsi_host->host_lock); 593 spin_lock_irq(target->scsi_host->host_lock);
@@ -851,7 +870,7 @@ static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
851 struct srp_iu *iu; 870 struct srp_iu *iu;
852 u8 opcode; 871 u8 opcode;
853 872
854 iu = target->rx_ring[wc->wr_id & ~SRP_OP_RECV]; 873 iu = target->rx_ring[wc->wr_id];
855 874
856 dev = target->srp_host->srp_dev->dev; 875 dev = target->srp_host->srp_dev->dev;
857 ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_ti_iu_len, 876 ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_ti_iu_len,
@@ -898,7 +917,7 @@ static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
898 DMA_FROM_DEVICE); 917 DMA_FROM_DEVICE);
899} 918}
900 919
901static void srp_completion(struct ib_cq *cq, void *target_ptr) 920static void srp_recv_completion(struct ib_cq *cq, void *target_ptr)
902{ 921{
903 struct srp_target_port *target = target_ptr; 922 struct srp_target_port *target = target_ptr;
904 struct ib_wc wc; 923 struct ib_wc wc;
@@ -907,17 +926,31 @@ static void srp_completion(struct ib_cq *cq, void *target_ptr)
907 while (ib_poll_cq(cq, 1, &wc) > 0) { 926 while (ib_poll_cq(cq, 1, &wc) > 0) {
908 if (wc.status) { 927 if (wc.status) {
909 shost_printk(KERN_ERR, target->scsi_host, 928 shost_printk(KERN_ERR, target->scsi_host,
910 PFX "failed %s status %d\n", 929 PFX "failed receive status %d\n",
911 wc.wr_id & SRP_OP_RECV ? "receive" : "send",
912 wc.status); 930 wc.status);
913 target->qp_in_error = 1; 931 target->qp_in_error = 1;
914 break; 932 break;
915 } 933 }
916 934
917 if (wc.wr_id & SRP_OP_RECV) 935 srp_handle_recv(target, &wc);
918 srp_handle_recv(target, &wc); 936 }
919 else 937}
920 ++target->tx_tail; 938
939static void srp_send_completion(struct ib_cq *cq, void *target_ptr)
940{
941 struct srp_target_port *target = target_ptr;
942 struct ib_wc wc;
943
944 while (ib_poll_cq(cq, 1, &wc) > 0) {
945 if (wc.status) {
946 shost_printk(KERN_ERR, target->scsi_host,
947 PFX "failed send status %d\n",
948 wc.status);
949 target->qp_in_error = 1;
950 break;
951 }
952
953 ++target->tx_tail;
921 } 954 }
922} 955}
923 956
@@ -930,7 +963,7 @@ static int __srp_post_recv(struct srp_target_port *target)
930 int ret; 963 int ret;
931 964
932 next = target->rx_head & (SRP_RQ_SIZE - 1); 965 next = target->rx_head & (SRP_RQ_SIZE - 1);
933 wr.wr_id = next | SRP_OP_RECV; 966 wr.wr_id = next;
934 iu = target->rx_ring[next]; 967 iu = target->rx_ring[next];
935 968
936 list.addr = iu->dma; 969 list.addr = iu->dma;
@@ -970,6 +1003,8 @@ static struct srp_iu *__srp_get_tx_iu(struct srp_target_port *target,
970{ 1003{
971 s32 min = (req_type == SRP_REQ_TASK_MGMT) ? 1 : 2; 1004 s32 min = (req_type == SRP_REQ_TASK_MGMT) ? 1 : 2;
972 1005
1006 srp_send_completion(target->send_cq, target);
1007
973 if (target->tx_head - target->tx_tail >= SRP_SQ_SIZE) 1008 if (target->tx_head - target->tx_tail >= SRP_SQ_SIZE)
974 return NULL; 1009 return NULL;
975 1010
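
[Editor's aside] The __srp_get_tx_iu() change above reaps send completions before checking ring occupancy. A hedged sketch of the underlying credit accounting, with invented names: posts advance head, completions advance tail, and unsigned subtraction keeps the comparison correct across wrap-around.

struct tx_ring {
	unsigned int head;	/* incremented when a send is posted */
	unsigned int tail;	/* incremented when a completion is reaped */
};

static int tx_ring_full(const struct tx_ring *r, unsigned int depth)
{
	return r->head - r->tail >= depth;	/* safe across unsigned wrap-around */
}
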
diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h
index e185b907fc12..5a80eac6fdaa 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.h
+++ b/drivers/infiniband/ulp/srp/ib_srp.h
@@ -60,7 +60,6 @@ enum {
60 SRP_RQ_SHIFT = 6, 60 SRP_RQ_SHIFT = 6,
61 SRP_RQ_SIZE = 1 << SRP_RQ_SHIFT, 61 SRP_RQ_SIZE = 1 << SRP_RQ_SHIFT,
62 SRP_SQ_SIZE = SRP_RQ_SIZE - 1, 62 SRP_SQ_SIZE = SRP_RQ_SIZE - 1,
63 SRP_CQ_SIZE = SRP_SQ_SIZE + SRP_RQ_SIZE,
64 63
65 SRP_TAG_TSK_MGMT = 1 << (SRP_RQ_SHIFT + 1), 64 SRP_TAG_TSK_MGMT = 1 << (SRP_RQ_SHIFT + 1),
66 65
@@ -69,8 +68,6 @@ enum {
69 SRP_FMR_DIRTY_SIZE = SRP_FMR_POOL_SIZE / 4 68 SRP_FMR_DIRTY_SIZE = SRP_FMR_POOL_SIZE / 4
70}; 69};
71 70
72#define SRP_OP_RECV (1 << 31)
73
74enum srp_target_state { 71enum srp_target_state {
75 SRP_TARGET_LIVE, 72 SRP_TARGET_LIVE,
76 SRP_TARGET_CONNECTING, 73 SRP_TARGET_CONNECTING,
@@ -133,7 +130,8 @@ struct srp_target_port {
133 int path_query_id; 130 int path_query_id;
134 131
135 struct ib_cm_id *cm_id; 132 struct ib_cm_id *cm_id;
136 struct ib_cq *cq; 133 struct ib_cq *recv_cq;
134 struct ib_cq *send_cq;
137 struct ib_qp *qp; 135 struct ib_qp *qp;
138 136
139 int max_ti_iu_len; 137 int max_ti_iu_len;
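
[Editor's aside] The SRP change splits the single CQ into a receive CQ and a send CQ feeding the same RC QP. A hedged sketch of that wiring follows; rx_done/tx_done are assumed completion handlers, the queue sizes are illustrative, and IS_ERR checks on the CQs are elided for brevity.

#include <rdma/ib_verbs.h>

static void rx_done(struct ib_cq *cq, void *ctx);	/* assumed handlers */
static void tx_done(struct ib_cq *cq, void *ctx);

static struct ib_qp *create_split_cq_qp(struct ib_device *dev,
					struct ib_pd *pd, void *ctx,
					struct ib_cq **rcq, struct ib_cq **scq)
{
	struct ib_qp_init_attr attr = {
		.cap = { .max_send_wr = 63, .max_recv_wr = 64,
			 .max_send_sge = 1, .max_recv_sge = 1 },
		.sq_sig_type = IB_SIGNAL_ALL_WR,
		.qp_type     = IB_QPT_RC,
	};

	*rcq = ib_create_cq(dev, rx_done, NULL, ctx, 64, 0);	/* receive CQ */
	*scq = ib_create_cq(dev, tx_done, NULL, ctx, 63, 0);	/* send CQ */
	attr.recv_cq = *rcq;
	attr.send_cq = *scq;
	ib_req_notify_cq(*rcq, IB_CQ_NEXT_COMP);	/* arm receive interrupts */

	return ib_create_qp(pd, &attr);			/* error handling elided */
}
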
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 509c8f3dd9a5..70ffbd071b2e 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -4680,7 +4680,7 @@ static int raid5_alloc_percpu(raid5_conf_t *conf)
4680{ 4680{
4681 unsigned long cpu; 4681 unsigned long cpu;
4682 struct page *spare_page; 4682 struct page *spare_page;
4683 struct raid5_percpu *allcpus; 4683 struct raid5_percpu __percpu *allcpus;
4684 void *scribble; 4684 void *scribble;
4685 int err; 4685 int err;
4686 4686
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
index dd708359b451..0f86f5e36724 100644
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
@@ -405,7 +405,7 @@ struct raid5_private_data {
405 * lists and performing address 405 * lists and performing address
406 * conversions 406 * conversions
407 */ 407 */
408 } *percpu; 408 } __percpu *percpu;
409 size_t scribble_len; /* size of scribble region must be 409 size_t scribble_len; /* size of scribble region must be
410 * associated with conf to handle 410 * associated with conf to handle
411 * cpu hotplug while reshaping 411 * cpu hotplug while reshaping
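
[Editor's aside] The raid5 hunks only add the __percpu sparse annotation; behavior is unchanged. What the annotation buys is that sparse rejects direct dereference of the per-CPU cookie and forces access through the per-CPU accessors. A hedged illustration, with struct scratch invented for the example:

#include <linux/percpu.h>

struct scratch {
	void *scribble;
};

static struct scratch __percpu *scratch;

static int scratch_init(void)
{
	int cpu;

	scratch = alloc_percpu(struct scratch);		/* one copy per possible CPU */
	if (!scratch)
		return -ENOMEM;
	for_each_possible_cpu(cpu)
		per_cpu_ptr(scratch, cpu)->scribble = NULL;	/* legal access */
	return 0;					/* free_percpu(scratch) when done */
}
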
diff --git a/drivers/media/dvb/firewire/firedtv-fw.c b/drivers/media/dvb/firewire/firedtv-fw.c
index 7a3de16fba06..75afe4f81e33 100644
--- a/drivers/media/dvb/firewire/firedtv-fw.c
+++ b/drivers/media/dvb/firewire/firedtv-fw.c
@@ -239,47 +239,18 @@ static const struct fw_address_region fcp_region = {
239}; 239};
240 240
241/* Adjust the template string if models with longer names appear. */ 241/* Adjust the template string if models with longer names appear. */
242#define MAX_MODEL_NAME_LEN ((int)DIV_ROUND_UP(sizeof("FireDTV ????"), 4)) 242#define MAX_MODEL_NAME_LEN sizeof("FireDTV ????")
243
244static size_t model_name(u32 *directory, __be32 *buffer)
245{
246 struct fw_csr_iterator ci;
247 int i, length, key, value, last_key = 0;
248 u32 *block = NULL;
249
250 fw_csr_iterator_init(&ci, directory);
251 while (fw_csr_iterator_next(&ci, &key, &value)) {
252 if (last_key == CSR_MODEL &&
253 key == (CSR_DESCRIPTOR | CSR_LEAF))
254 block = ci.p - 1 + value;
255 last_key = key;
256 }
257
258 if (block == NULL)
259 return 0;
260
261 length = min((int)(block[0] >> 16) - 2, MAX_MODEL_NAME_LEN);
262 if (length <= 0)
263 return 0;
264
265 /* fast-forward to text string */
266 block += 3;
267
268 for (i = 0; i < length; i++)
269 buffer[i] = cpu_to_be32(block[i]);
270
271 return length * 4;
272}
273 243
274static int node_probe(struct device *dev) 244static int node_probe(struct device *dev)
275{ 245{
276 struct firedtv *fdtv; 246 struct firedtv *fdtv;
277 __be32 name[MAX_MODEL_NAME_LEN]; 247 char name[MAX_MODEL_NAME_LEN];
278 int name_len, err; 248 int name_len, err;
279 249
280 name_len = model_name(fw_unit(dev)->directory, name); 250 name_len = fw_csr_string(fw_unit(dev)->directory, CSR_MODEL,
251 name, sizeof(name));
281 252
282 fdtv = fdtv_alloc(dev, &backend, (char *)name, name_len); 253 fdtv = fdtv_alloc(dev, &backend, name, name_len >= 0 ? name_len : 0);
283 if (!fdtv) 254 if (!fdtv)
284 return -ENOMEM; 255 return -ENOMEM;
285 256
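
[Editor's aside] The firedtv change drops its private config-ROM walker in favor of the new core helper, which the hunk shows returning the string length or a negative value when no descriptor leaf exists. A hedged sketch of a caller; print_model() is invented for the example and the exact return-value convention should be checked against the fw_csr_string() definition.

#include <linux/kernel.h>
#include <linux/firewire.h>
#include <linux/firewire-constants.h>

static void print_model(struct fw_unit *unit)
{
	char name[32];
	int len;

	len = fw_csr_string(unit->directory, CSR_MODEL, name, sizeof(name));
	if (len < 0)
		pr_info("no model name in config ROM (%d)\n", len);
	else
		pr_info("model: %s\n", name);	/* buffer is NUL-terminated */
}
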
diff --git a/drivers/media/video/dabusb.c b/drivers/media/video/dabusb.c
index 9b413a35e048..0f505086774c 100644
--- a/drivers/media/video/dabusb.c
+++ b/drivers/media/video/dabusb.c
@@ -616,10 +616,12 @@ static int dabusb_open (struct inode *inode, struct file *file)
616{ 616{
617 int devnum = iminor(inode); 617 int devnum = iminor(inode);
618 pdabusb_t s; 618 pdabusb_t s;
619 int r;
619 620
620 if (devnum < DABUSB_MINOR || devnum >= (DABUSB_MINOR + NRDABUSB)) 621 if (devnum < DABUSB_MINOR || devnum >= (DABUSB_MINOR + NRDABUSB))
621 return -EIO; 622 return -EIO;
622 623
624 lock_kernel();
623 s = &dabusb[devnum - DABUSB_MINOR]; 625 s = &dabusb[devnum - DABUSB_MINOR];
624 626
625 dbg("dabusb_open"); 627 dbg("dabusb_open");
@@ -634,6 +636,7 @@ static int dabusb_open (struct inode *inode, struct file *file)
634 msleep_interruptible(500); 636 msleep_interruptible(500);
635 637
636 if (signal_pending (current)) { 638 if (signal_pending (current)) {
639 unlock_kernel();
637 return -EAGAIN; 640 return -EAGAIN;
638 } 641 }
639 mutex_lock(&s->mutex); 642 mutex_lock(&s->mutex);
@@ -641,6 +644,7 @@ static int dabusb_open (struct inode *inode, struct file *file)
641 if (usb_set_interface (s->usbdev, _DABUSB_IF, 1) < 0) { 644 if (usb_set_interface (s->usbdev, _DABUSB_IF, 1) < 0) {
642 mutex_unlock(&s->mutex); 645 mutex_unlock(&s->mutex);
643 dev_err(&s->usbdev->dev, "set_interface failed\n"); 646 dev_err(&s->usbdev->dev, "set_interface failed\n");
647 unlock_kernel();
644 return -EINVAL; 648 return -EINVAL;
645 } 649 }
646 s->opened = 1; 650 s->opened = 1;
@@ -649,7 +653,9 @@ static int dabusb_open (struct inode *inode, struct file *file)
649 file->f_pos = 0; 653 file->f_pos = 0;
650 file->private_data = s; 654 file->private_data = s;
651 655
652 return nonseekable_open(inode, file); 656 r = nonseekable_open(inode, file);
657 unlock_kernel();
658 return r;
653} 659}
654 660
655static int dabusb_release (struct inode *inode, struct file *file) 661static int dabusb_release (struct inode *inode, struct file *file)
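
[Editor's aside] The dabusb hunks are a BKL push-down: the VFS no longer takes the Big Kernel Lock around open(), so drivers still depending on it take it themselves, and every exit path must drop it. A hedged sketch of the shape; setup() is a hypothetical stand-in for the driver-specific work.

#include <linux/fs.h>
#include <linux/smp_lock.h>

static int setup(struct file *file);	/* hypothetical device setup */

static int demo_open(struct inode *inode, struct file *file)
{
	int ret;

	lock_kernel();			/* serialize as the VFS used to */
	ret = setup(file);
	if (ret) {
		unlock_kernel();	/* each early return must unlock */
		return ret;
	}
	ret = nonseekable_open(inode, file);
	unlock_kernel();
	return ret;
}
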
diff --git a/drivers/mmc/card/sdio_uart.c b/drivers/mmc/card/sdio_uart.c
index f53755533e7e..3fab78ba8952 100644
--- a/drivers/mmc/card/sdio_uart.c
+++ b/drivers/mmc/card/sdio_uart.c
@@ -37,6 +37,7 @@
37#include <linux/gfp.h> 37#include <linux/gfp.h>
38#include <linux/tty.h> 38#include <linux/tty.h>
39#include <linux/tty_flip.h> 39#include <linux/tty_flip.h>
40#include <linux/kfifo.h>
40 41
41#include <linux/mmc/core.h> 42#include <linux/mmc/core.h>
42#include <linux/mmc/card.h> 43#include <linux/mmc/card.h>
@@ -47,19 +48,9 @@
47#define UART_NR 8 /* Number of UARTs this driver can handle */ 48#define UART_NR 8 /* Number of UARTs this driver can handle */
48 49
49 50
50#define UART_XMIT_SIZE PAGE_SIZE 51#define FIFO_SIZE PAGE_SIZE
51#define WAKEUP_CHARS 256 52#define WAKEUP_CHARS 256
52 53
53#define circ_empty(circ) ((circ)->head == (circ)->tail)
54#define circ_clear(circ) ((circ)->head = (circ)->tail = 0)
55
56#define circ_chars_pending(circ) \
57 (CIRC_CNT((circ)->head, (circ)->tail, UART_XMIT_SIZE))
58
59#define circ_chars_free(circ) \
60 (CIRC_SPACE((circ)->head, (circ)->tail, UART_XMIT_SIZE))
61
62
63struct uart_icount { 54struct uart_icount {
64 __u32 cts; 55 __u32 cts;
65 __u32 dsr; 56 __u32 dsr;
@@ -82,7 +73,7 @@ struct sdio_uart_port {
82 struct mutex func_lock; 73 struct mutex func_lock;
83 struct task_struct *in_sdio_uart_irq; 74 struct task_struct *in_sdio_uart_irq;
84 unsigned int regs_offset; 75 unsigned int regs_offset;
85 struct circ_buf xmit; 76 struct kfifo xmit_fifo;
86 spinlock_t write_lock; 77 spinlock_t write_lock;
87 struct uart_icount icount; 78 struct uart_icount icount;
88 unsigned int uartclk; 79 unsigned int uartclk;
@@ -105,6 +96,8 @@ static int sdio_uart_add_port(struct sdio_uart_port *port)
105 kref_init(&port->kref); 96 kref_init(&port->kref);
106 mutex_init(&port->func_lock); 97 mutex_init(&port->func_lock);
107 spin_lock_init(&port->write_lock); 98 spin_lock_init(&port->write_lock);
99 if (kfifo_alloc(&port->xmit_fifo, FIFO_SIZE, GFP_KERNEL))
100 return -ENOMEM;
108 101
109 spin_lock(&sdio_uart_table_lock); 102 spin_lock(&sdio_uart_table_lock);
110 for (index = 0; index < UART_NR; index++) { 103 for (index = 0; index < UART_NR; index++) {
@@ -140,6 +133,7 @@ static void sdio_uart_port_destroy(struct kref *kref)
140{ 133{
141 struct sdio_uart_port *port = 134 struct sdio_uart_port *port =
142 container_of(kref, struct sdio_uart_port, kref); 135 container_of(kref, struct sdio_uart_port, kref);
136 kfifo_free(&port->xmit_fifo);
143 kfree(port); 137 kfree(port);
144} 138}
145 139
@@ -456,9 +450,11 @@ static void sdio_uart_receive_chars(struct sdio_uart_port *port,
456 450
457static void sdio_uart_transmit_chars(struct sdio_uart_port *port) 451static void sdio_uart_transmit_chars(struct sdio_uart_port *port)
458{ 452{
459 struct circ_buf *xmit = &port->xmit; 453 struct kfifo *xmit = &port->xmit_fifo;
460 int count; 454 int count;
461 struct tty_struct *tty; 455 struct tty_struct *tty;
456 u8 iobuf[16];
457 int len;
462 458
463 if (port->x_char) { 459 if (port->x_char) {
464 sdio_out(port, UART_TX, port->x_char); 460 sdio_out(port, UART_TX, port->x_char);
@@ -469,27 +465,25 @@ static void sdio_uart_transmit_chars(struct sdio_uart_port *port)
469 465
470 tty = tty_port_tty_get(&port->port); 466 tty = tty_port_tty_get(&port->port);
471 467
472 if (tty == NULL || circ_empty(xmit) || 468 if (tty == NULL || !kfifo_len(xmit) ||
473 tty->stopped || tty->hw_stopped) { 469 tty->stopped || tty->hw_stopped) {
474 sdio_uart_stop_tx(port); 470 sdio_uart_stop_tx(port);
475 tty_kref_put(tty); 471 tty_kref_put(tty);
476 return; 472 return;
477 } 473 }
478 474
479 count = 16; 475 len = kfifo_out_locked(xmit, iobuf, 16, &port->write_lock);
480 do { 476 for (count = 0; count < len; count++) {
481 sdio_out(port, UART_TX, xmit->buf[xmit->tail]); 477 sdio_out(port, UART_TX, iobuf[count]);
482 xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
483 port->icount.tx++; 478 port->icount.tx++;
484 if (circ_empty(xmit)) 479 }
485 break;
486 } while (--count > 0);
487 480
488 if (circ_chars_pending(xmit) < WAKEUP_CHARS) 481 len = kfifo_len(xmit);
482 if (len < WAKEUP_CHARS) {
489 tty_wakeup(tty); 483 tty_wakeup(tty);
490 484 if (len == 0)
491 if (circ_empty(xmit)) 485 sdio_uart_stop_tx(port);
492 sdio_uart_stop_tx(port); 486 }
493 tty_kref_put(tty); 487 tty_kref_put(tty);
494} 488}
495 489
@@ -632,7 +626,6 @@ static int sdio_uart_activate(struct tty_port *tport, struct tty_struct *tty)
632{ 626{
633 struct sdio_uart_port *port = 627 struct sdio_uart_port *port =
634 container_of(tport, struct sdio_uart_port, port); 628 container_of(tport, struct sdio_uart_port, port);
635 unsigned long page;
636 int ret; 629 int ret;
637 630
638 /* 631 /*
@@ -641,22 +634,17 @@ static int sdio_uart_activate(struct tty_port *tport, struct tty_struct *tty)
641 */ 634 */
642 set_bit(TTY_IO_ERROR, &tty->flags); 635 set_bit(TTY_IO_ERROR, &tty->flags);
643 636
644 /* Initialise and allocate the transmit buffer. */ 637 kfifo_reset(&port->xmit_fifo);
645 page = __get_free_page(GFP_KERNEL);
646 if (!page)
647 return -ENOMEM;
648 port->xmit.buf = (unsigned char *)page;
649 circ_clear(&port->xmit);
650 638
651 ret = sdio_uart_claim_func(port); 639 ret = sdio_uart_claim_func(port);
652 if (ret) 640 if (ret)
653 goto err1; 641 return ret;
654 ret = sdio_enable_func(port->func); 642 ret = sdio_enable_func(port->func);
655 if (ret) 643 if (ret)
656 goto err2; 644 goto err1;
657 ret = sdio_claim_irq(port->func, sdio_uart_irq); 645 ret = sdio_claim_irq(port->func, sdio_uart_irq);
658 if (ret) 646 if (ret)
659 goto err3; 647 goto err2;
660 648
661 /* 649 /*
662 * Clear the FIFO buffers and disable them. 650 * Clear the FIFO buffers and disable them.
@@ -700,12 +688,10 @@ static int sdio_uart_activate(struct tty_port *tport, struct tty_struct *tty)
700 sdio_uart_release_func(port); 688 sdio_uart_release_func(port);
701 return 0; 689 return 0;
702 690
703err3:
704 sdio_disable_func(port->func);
705err2: 691err2:
706 sdio_uart_release_func(port); 692 sdio_disable_func(port->func);
707err1: 693err1:
708 free_page((unsigned long)port->xmit.buf); 694 sdio_uart_release_func(port);
709 return ret; 695 return ret;
710} 696}
711 697
@@ -727,7 +713,7 @@ static void sdio_uart_shutdown(struct tty_port *tport)
727 713
728 ret = sdio_uart_claim_func(port); 714 ret = sdio_uart_claim_func(port);
729 if (ret) 715 if (ret)
730 goto skip; 716 return;
731 717
732 sdio_uart_stop_rx(port); 718 sdio_uart_stop_rx(port);
733 719
@@ -749,10 +735,6 @@ static void sdio_uart_shutdown(struct tty_port *tport)
749 sdio_disable_func(port->func); 735 sdio_disable_func(port->func);
750 736
751 sdio_uart_release_func(port); 737 sdio_uart_release_func(port);
752
753skip:
754 /* Free the transmit buffer page. */
755 free_page((unsigned long)port->xmit.buf);
756} 738}
757 739
758/** 740/**
@@ -822,27 +804,12 @@ static int sdio_uart_write(struct tty_struct *tty, const unsigned char *buf,
822 int count) 804 int count)
823{ 805{
824 struct sdio_uart_port *port = tty->driver_data; 806 struct sdio_uart_port *port = tty->driver_data;
825 struct circ_buf *circ = &port->xmit; 807 int ret;
826 int c, ret = 0;
827 808
828 if (!port->func) 809 if (!port->func)
829 return -ENODEV; 810 return -ENODEV;
830 811
831 spin_lock(&port->write_lock); 812 ret = kfifo_in_locked(&port->xmit_fifo, buf, count, &port->write_lock);
832 while (1) {
833 c = CIRC_SPACE_TO_END(circ->head, circ->tail, UART_XMIT_SIZE);
834 if (count < c)
835 c = count;
836 if (c <= 0)
837 break;
838 memcpy(circ->buf + circ->head, buf, c);
839 circ->head = (circ->head + c) & (UART_XMIT_SIZE - 1);
840 buf += c;
841 count -= c;
842 ret += c;
843 }
844 spin_unlock(&port->write_lock);
845
846 if (!(port->ier & UART_IER_THRI)) { 813 if (!(port->ier & UART_IER_THRI)) {
847 int err = sdio_uart_claim_func(port); 814 int err = sdio_uart_claim_func(port);
848 if (!err) { 815 if (!err) {
@@ -859,13 +826,13 @@ static int sdio_uart_write(struct tty_struct *tty, const unsigned char *buf,
859static int sdio_uart_write_room(struct tty_struct *tty) 826static int sdio_uart_write_room(struct tty_struct *tty)
860{ 827{
861 struct sdio_uart_port *port = tty->driver_data; 828 struct sdio_uart_port *port = tty->driver_data;
862 return port ? circ_chars_free(&port->xmit) : 0; 829 return FIFO_SIZE - kfifo_len(&port->xmit_fifo);
863} 830}
864 831
865static int sdio_uart_chars_in_buffer(struct tty_struct *tty) 832static int sdio_uart_chars_in_buffer(struct tty_struct *tty)
866{ 833{
867 struct sdio_uart_port *port = tty->driver_data; 834 struct sdio_uart_port *port = tty->driver_data;
868 return port ? circ_chars_pending(&port->xmit) : 0; 835 return kfifo_len(&port->xmit_fifo);
869} 836}
870 837
871static void sdio_uart_send_xchar(struct tty_struct *tty, char ch) 838static void sdio_uart_send_xchar(struct tty_struct *tty, char ch)
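
[Editor's aside] The sdio_uart conversion above replaces open-coded circ_buf index arithmetic with the kfifo API; the _locked variants take the caller's existing spinlock. A hedged, self-contained sketch of the calls it relies on:

#include <linux/kfifo.h>
#include <linux/spinlock.h>
#include <linux/gfp.h>
#include <linux/kernel.h>

static struct kfifo fifo;
static DEFINE_SPINLOCK(fifo_lock);

static int fifo_demo(void)
{
	u8 in[4] = "abc", out[4];
	unsigned int n;

	if (kfifo_alloc(&fifo, PAGE_SIZE, GFP_KERNEL))	/* replaces __get_free_page() */
		return -ENOMEM;

	n = kfifo_in_locked(&fifo, in, sizeof(in), &fifo_lock);	/* producer side */
	n = kfifo_out_locked(&fifo, out, n, &fifo_lock);	/* consumer side */
	pr_info("copied %u bytes, %u still queued\n", n, kfifo_len(&fifo));

	kfifo_free(&fifo);
	return 0;
}
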
diff --git a/drivers/net/cxgb3/adapter.h b/drivers/net/cxgb3/adapter.h
index 3e8618b4efbc..4cd7f420766a 100644
--- a/drivers/net/cxgb3/adapter.h
+++ b/drivers/net/cxgb3/adapter.h
@@ -264,6 +264,10 @@ struct adapter {
264 struct work_struct fatal_error_handler_task; 264 struct work_struct fatal_error_handler_task;
265 struct work_struct link_fault_handler_task; 265 struct work_struct link_fault_handler_task;
266 266
267 struct work_struct db_full_task;
268 struct work_struct db_empty_task;
269 struct work_struct db_drop_task;
270
267 struct dentry *debugfs_root; 271 struct dentry *debugfs_root;
268 272
269 struct mutex mdio_lock; 273 struct mutex mdio_lock;
@@ -335,6 +339,7 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
335int t3_get_desc(const struct sge_qset *qs, unsigned int qnum, unsigned int idx, 339int t3_get_desc(const struct sge_qset *qs, unsigned int qnum, unsigned int idx,
336 unsigned char *data); 340 unsigned char *data);
337irqreturn_t t3_sge_intr_msix(int irq, void *cookie); 341irqreturn_t t3_sge_intr_msix(int irq, void *cookie);
342extern struct workqueue_struct *cxgb3_wq;
338 343
339int t3_get_edc_fw(struct cphy *phy, int edc_idx, int size); 344int t3_get_edc_fw(struct cphy *phy, int edc_idx, int size);
340 345
diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
index 6fd968abb073..3e453e1d97e7 100644
--- a/drivers/net/cxgb3/cxgb3_main.c
+++ b/drivers/net/cxgb3/cxgb3_main.c
@@ -45,6 +45,7 @@
45#include <linux/firmware.h> 45#include <linux/firmware.h>
46#include <linux/log2.h> 46#include <linux/log2.h>
47#include <linux/stringify.h> 47#include <linux/stringify.h>
48#include <linux/sched.h>
48#include <asm/uaccess.h> 49#include <asm/uaccess.h>
49 50
50#include "common.h" 51#include "common.h"
@@ -140,7 +141,7 @@ MODULE_PARM_DESC(ofld_disable, "whether to enable offload at init time or not");
140 * will block keventd as it needs the rtnl lock, and we'll deadlock waiting 141 * will block keventd as it needs the rtnl lock, and we'll deadlock waiting
141 * for our work to complete. Get our own work queue to solve this. 142 * for our work to complete. Get our own work queue to solve this.
142 */ 143 */
143static struct workqueue_struct *cxgb3_wq; 144struct workqueue_struct *cxgb3_wq;
144 145
145/** 146/**
146 * link_report - show link status and link speed/duplex 147 * link_report - show link status and link speed/duplex
@@ -586,6 +587,19 @@ static void setup_rss(struct adapter *adap)
586 V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ, cpus, rspq_map); 587 V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ, cpus, rspq_map);
587} 588}
588 589
590static void ring_dbs(struct adapter *adap)
591{
592 int i, j;
593
594 for (i = 0; i < SGE_QSETS; i++) {
595 struct sge_qset *qs = &adap->sge.qs[i];
596
597 if (qs->adap)
598 for (j = 0; j < SGE_TXQ_PER_SET; j++)
599 t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX | V_EGRCNTX(qs->txq[j].cntxt_id));
600 }
601}
602
589static void init_napi(struct adapter *adap) 603static void init_napi(struct adapter *adap)
590{ 604{
591 int i; 605 int i;
@@ -2750,6 +2764,42 @@ static void t3_adap_check_task(struct work_struct *work)
2750 spin_unlock_irq(&adapter->work_lock); 2764 spin_unlock_irq(&adapter->work_lock);
2751} 2765}
2752 2766
2767static void db_full_task(struct work_struct *work)
2768{
2769 struct adapter *adapter = container_of(work, struct adapter,
2770 db_full_task);
2771
2772 cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_FULL, 0);
2773}
2774
2775static void db_empty_task(struct work_struct *work)
2776{
2777 struct adapter *adapter = container_of(work, struct adapter,
2778 db_empty_task);
2779
2780 cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_EMPTY, 0);
2781}
2782
2783static void db_drop_task(struct work_struct *work)
2784{
2785 struct adapter *adapter = container_of(work, struct adapter,
2786 db_drop_task);
2787 unsigned long delay = 1000;
2788 unsigned short r;
2789
2790 cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_DROP, 0);
2791
2792 /*
2793 * Sleep a while before ringing the driver qset dbs.
2794 * The delay is between 1000-2023 usecs.
2795 */
2796 get_random_bytes(&r, 2);
2797 delay += r & 1023;
2798 set_current_state(TASK_UNINTERRUPTIBLE);
2799 schedule_timeout(usecs_to_jiffies(delay));
2800 ring_dbs(adapter);
2801}
2802
2753/* 2803/*
2754 * Processes external (PHY) interrupts in process context. 2804 * Processes external (PHY) interrupts in process context.
2755 */ 2805 */
@@ -3218,6 +3268,11 @@ static int __devinit init_one(struct pci_dev *pdev,
3218 INIT_LIST_HEAD(&adapter->adapter_list); 3268 INIT_LIST_HEAD(&adapter->adapter_list);
3219 INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task); 3269 INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task);
3220 INIT_WORK(&adapter->fatal_error_handler_task, fatal_error_task); 3270 INIT_WORK(&adapter->fatal_error_handler_task, fatal_error_task);
3271
3272 INIT_WORK(&adapter->db_full_task, db_full_task);
3273 INIT_WORK(&adapter->db_empty_task, db_empty_task);
3274 INIT_WORK(&adapter->db_drop_task, db_drop_task);
3275
3221 INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task); 3276 INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task);
3222 3277
3223 for (i = 0; i < ai->nports0 + ai->nports1; ++i) { 3278 for (i = 0; i < ai->nports0 + ai->nports1; ++i) {
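
[Editor's aside] The db_full/db_empty/db_drop handlers above follow the standard hard-IRQ-to-process-context deferral pattern on the driver's private workqueue (cxgb3_wq, now exported). A hedged sketch with invented names; the work function runs where it may sleep, as db_drop_task() does with its randomized delay.

#include <linux/workqueue.h>
#include <linux/interrupt.h>

static struct workqueue_struct *demo_wq;	/* private queue, like cxgb3_wq */
static struct work_struct demo_task;

static void demo_task_fn(struct work_struct *work)
{
	/* process context: may sleep, take mutexes, ring doorbells */
}

static irqreturn_t demo_isr(int irq, void *dev_id)
{
	queue_work(demo_wq, &demo_task);	/* cheap and safe in hard-IRQ context */
	return IRQ_HANDLED;
}

static int demo_init(void)
{
	demo_wq = create_singlethread_workqueue("demo");
	if (!demo_wq)
		return -ENOMEM;
	INIT_WORK(&demo_task, demo_task_fn);
	return 0;
}
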
diff --git a/drivers/net/cxgb3/cxgb3_offload.h b/drivers/net/cxgb3/cxgb3_offload.h
index 670aa62042da..929c298115ca 100644
--- a/drivers/net/cxgb3/cxgb3_offload.h
+++ b/drivers/net/cxgb3/cxgb3_offload.h
@@ -73,7 +73,10 @@ enum {
73 OFFLOAD_STATUS_UP, 73 OFFLOAD_STATUS_UP,
74 OFFLOAD_STATUS_DOWN, 74 OFFLOAD_STATUS_DOWN,
75 OFFLOAD_PORT_DOWN, 75 OFFLOAD_PORT_DOWN,
76 OFFLOAD_PORT_UP 76 OFFLOAD_PORT_UP,
77 OFFLOAD_DB_FULL,
78 OFFLOAD_DB_EMPTY,
79 OFFLOAD_DB_DROP
77}; 80};
78 81
79struct cxgb3_client { 82struct cxgb3_client {
diff --git a/drivers/net/cxgb3/regs.h b/drivers/net/cxgb3/regs.h
index 1b5327b5a965..cb42353c9fdd 100644
--- a/drivers/net/cxgb3/regs.h
+++ b/drivers/net/cxgb3/regs.h
@@ -254,6 +254,22 @@
254#define V_LOPIODRBDROPERR(x) ((x) << S_LOPIODRBDROPERR) 254#define V_LOPIODRBDROPERR(x) ((x) << S_LOPIODRBDROPERR)
255#define F_LOPIODRBDROPERR V_LOPIODRBDROPERR(1U) 255#define F_LOPIODRBDROPERR V_LOPIODRBDROPERR(1U)
256 256
257#define S_HIPRIORITYDBFULL 7
258#define V_HIPRIORITYDBFULL(x) ((x) << S_HIPRIORITYDBFULL)
259#define F_HIPRIORITYDBFULL V_HIPRIORITYDBFULL(1U)
260
261#define S_HIPRIORITYDBEMPTY 6
262#define V_HIPRIORITYDBEMPTY(x) ((x) << S_HIPRIORITYDBEMPTY)
263#define F_HIPRIORITYDBEMPTY V_HIPRIORITYDBEMPTY(1U)
264
265#define S_LOPRIORITYDBFULL 5
266#define V_LOPRIORITYDBFULL(x) ((x) << S_LOPRIORITYDBFULL)
267#define F_LOPRIORITYDBFULL V_LOPRIORITYDBFULL(1U)
268
269#define S_LOPRIORITYDBEMPTY 4
270#define V_LOPRIORITYDBEMPTY(x) ((x) << S_LOPRIORITYDBEMPTY)
271#define F_LOPRIORITYDBEMPTY V_LOPRIORITYDBEMPTY(1U)
272
257#define S_RSPQDISABLED 3 273#define S_RSPQDISABLED 3
258#define V_RSPQDISABLED(x) ((x) << S_RSPQDISABLED) 274#define V_RSPQDISABLED(x) ((x) << S_RSPQDISABLED)
259#define F_RSPQDISABLED V_RSPQDISABLED(1U) 275#define F_RSPQDISABLED V_RSPQDISABLED(1U)
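
[Editor's aside] The constants added to regs.h follow cxgb3's S_/V_/F_ convention, visible throughout the hunk: S_x is the field's bit offset, V_x(v) positions a value in the field, and F_x is the mask of a one-bit field. A hedged illustration with an invented field:

#include <linux/types.h>

#define S_DEMOFIELD	4
#define V_DEMOFIELD(x)	((x) << S_DEMOFIELD)
#define F_DEMOFIELD	V_DEMOFIELD(1U)

static inline int demofield_set(u32 status)
{
	return (status & F_DEMOFIELD) != 0;	/* test the bit via the mask */
}
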
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
index 048205903741..78e265b484b6 100644
--- a/drivers/net/cxgb3/sge.c
+++ b/drivers/net/cxgb3/sge.c
@@ -42,6 +42,7 @@
42#include "sge_defs.h" 42#include "sge_defs.h"
43#include "t3_cpl.h" 43#include "t3_cpl.h"
44#include "firmware_exports.h" 44#include "firmware_exports.h"
45#include "cxgb3_offload.h"
45 46
46#define USE_GTS 0 47#define USE_GTS 0
47 48
@@ -2841,8 +2842,13 @@ void t3_sge_err_intr_handler(struct adapter *adapter)
2841 } 2842 }
2842 2843
2843 if (status & (F_HIPIODRBDROPERR | F_LOPIODRBDROPERR)) 2844 if (status & (F_HIPIODRBDROPERR | F_LOPIODRBDROPERR))
2844 CH_ALERT(adapter, "SGE dropped %s priority doorbell\n", 2845 queue_work(cxgb3_wq, &adapter->db_drop_task);
2845 status & F_HIPIODRBDROPERR ? "high" : "lo"); 2846
2847 if (status & (F_HIPRIORITYDBFULL | F_LOPRIORITYDBFULL))
2848 queue_work(cxgb3_wq, &adapter->db_full_task);
2849
2850 if (status & (F_HIPRIORITYDBEMPTY | F_LOPRIORITYDBEMPTY))
2851 queue_work(cxgb3_wq, &adapter->db_empty_task);
2846 2852
2847 t3_write_reg(adapter, A_SG_INT_CAUSE, status); 2853 t3_write_reg(adapter, A_SG_INT_CAUSE, status);
2848 if (status & SGE_FATALERR) 2854 if (status & SGE_FATALERR)
diff --git a/drivers/net/cxgb3/t3_hw.c b/drivers/net/cxgb3/t3_hw.c
index 3ab9f51918aa..95a8ba0759f1 100644
--- a/drivers/net/cxgb3/t3_hw.c
+++ b/drivers/net/cxgb3/t3_hw.c
@@ -1433,7 +1433,10 @@ static int t3_handle_intr_status(struct adapter *adapter, unsigned int reg,
1433 F_IRPARITYERROR | V_ITPARITYERROR(M_ITPARITYERROR) | \ 1433 F_IRPARITYERROR | V_ITPARITYERROR(M_ITPARITYERROR) | \
1434 V_FLPARITYERROR(M_FLPARITYERROR) | F_LODRBPARITYERROR | \ 1434 V_FLPARITYERROR(M_FLPARITYERROR) | F_LODRBPARITYERROR | \
1435 F_HIDRBPARITYERROR | F_LORCQPARITYERROR | \ 1435 F_HIDRBPARITYERROR | F_LORCQPARITYERROR | \
1436 F_HIRCQPARITYERROR) 1436 F_HIRCQPARITYERROR | F_LOPRIORITYDBFULL | \
1437 F_HIPRIORITYDBFULL | F_LOPRIORITYDBEMPTY | \
1438 F_HIPRIORITYDBEMPTY | F_HIPIODRBDROPERR | \
1439 F_LOPIODRBDROPERR)
1437#define MC5_INTR_MASK (F_PARITYERR | F_ACTRGNFULL | F_UNKNOWNCMD | \ 1440#define MC5_INTR_MASK (F_PARITYERR | F_ACTRGNFULL | F_UNKNOWNCMD | \
1438 F_REQQPARERR | F_DISPQPARERR | F_DELACTEMPTY | \ 1441 F_REQQPARERR | F_DISPQPARERR | F_DELACTEMPTY | \
1439 F_NFASRCHFAIL) 1442 F_NFASRCHFAIL)
diff --git a/drivers/parport/parport_pc.c b/drivers/parport/parport_pc.c
index ad113b0f62db..0950fa40684f 100644
--- a/drivers/parport/parport_pc.c
+++ b/drivers/parport/parport_pc.c
@@ -2908,6 +2908,7 @@ enum parport_pc_pci_cards {
2908 netmos_9805, 2908 netmos_9805,
2909 netmos_9815, 2909 netmos_9815,
2910 netmos_9901, 2910 netmos_9901,
2911 netmos_9865,
2911 quatech_sppxp100, 2912 quatech_sppxp100,
2912}; 2913};
2913 2914
@@ -2989,6 +2990,7 @@ static struct parport_pc_pci {
2989 /* netmos_9805 */ { 1, { { 0, -1 }, } }, 2990 /* netmos_9805 */ { 1, { { 0, -1 }, } },
2990 /* netmos_9815 */ { 2, { { 0, -1 }, { 2, -1 }, } }, 2991 /* netmos_9815 */ { 2, { { 0, -1 }, { 2, -1 }, } },
2991 /* netmos_9901 */ { 1, { { 0, -1 }, } }, 2992 /* netmos_9901 */ { 1, { { 0, -1 }, } },
2993 /* netmos_9865 */ { 1, { { 0, -1 }, } },
2992 /* quatech_sppxp100 */ { 1, { { 0, 1 }, } }, 2994 /* quatech_sppxp100 */ { 1, { { 0, 1 }, } },
2993}; 2995};
2994 2996
@@ -3092,6 +3094,10 @@ static const struct pci_device_id parport_pc_pci_tbl[] = {
3092 PCI_ANY_ID, PCI_ANY_ID, 0, 0, netmos_9815 }, 3094 PCI_ANY_ID, PCI_ANY_ID, 0, 0, netmos_9815 },
3093 { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9901, 3095 { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9901,
3094 0xA000, 0x2000, 0, 0, netmos_9901 }, 3096 0xA000, 0x2000, 0, 0, netmos_9901 },
3097 { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9865,
3098 0xA000, 0x1000, 0, 0, netmos_9865 },
3099 { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9865,
3100 0xA000, 0x2000, 0, 0, netmos_9865 },
3095 /* Quatech SPPXP-100 Parallel port PCI ExpressCard */ 3101 /* Quatech SPPXP-100 Parallel port PCI ExpressCard */
3096 { PCI_VENDOR_ID_QUATECH, PCI_DEVICE_ID_QUATECH_SPPXP_100, 3102 { PCI_VENDOR_ID_QUATECH, PCI_DEVICE_ID_QUATECH_SPPXP_100,
3097 PCI_ANY_ID, PCI_ANY_ID, 0, 0, quatech_sppxp100 }, 3103 PCI_ANY_ID, PCI_ANY_ID, 0, 0, quatech_sppxp100 },
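
[Editor's aside] The 9865 gets two table rows because pci_device_id entries can match on subsystem IDs: one silicon ID is bound only for the subdevice values that actually expose a parallel port. A hedged sketch of such a table; the numeric IDs here mirror the hunk for illustration only.

#include <linux/module.h>
#include <linux/pci.h>

static const struct pci_device_id demo_pci_tbl[] = {
	/* vendor, device, subvendor, subdevice, class, class_mask, driver_data */
	{ 0x9710, 0x9865, 0xA000, 0x1000, 0, 0, 0 },
	{ 0x9710, 0x9865, 0xA000, 0x2000, 0, 0, 0 },
	{ 0, }						/* terminator */
};
MODULE_DEVICE_TABLE(pci, demo_pci_tbl);
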
diff --git a/drivers/pci/hotplug/ibmphp_res.c b/drivers/pci/hotplug/ibmphp_res.c
index ec73294d1fa6..e2dc289f767c 100644
--- a/drivers/pci/hotplug/ibmphp_res.c
+++ b/drivers/pci/hotplug/ibmphp_res.c
@@ -40,7 +40,7 @@ static void update_resources (struct bus_node *bus_cur, int type, int rangeno);
40static int once_over (void); 40static int once_over (void);
41static int remove_ranges (struct bus_node *, struct bus_node *); 41static int remove_ranges (struct bus_node *, struct bus_node *);
42static int update_bridge_ranges (struct bus_node **); 42static int update_bridge_ranges (struct bus_node **);
43static int add_range (int type, struct range_node *, struct bus_node *); 43static int add_bus_range (int type, struct range_node *, struct bus_node *);
44static void fix_resources (struct bus_node *); 44static void fix_resources (struct bus_node *);
45static struct bus_node *find_bus_wprev (u8, struct bus_node **, u8); 45static struct bus_node *find_bus_wprev (u8, struct bus_node **, u8);
46 46
@@ -133,7 +133,7 @@ static int __init alloc_bus_range (struct bus_node **new_bus, struct range_node
133 newrange->rangeno = 1; 133 newrange->rangeno = 1;
134 else { 134 else {
135 /* need to insert our range */ 135 /* need to insert our range */
136 add_range (flag, newrange, newbus); 136 add_bus_range (flag, newrange, newbus);
137 debug ("%d resource Primary Bus inserted on bus %x [%x - %x]\n", flag, newbus->busno, newrange->start, newrange->end); 137 debug ("%d resource Primary Bus inserted on bus %x [%x - %x]\n", flag, newbus->busno, newrange->start, newrange->end);
138 } 138 }
139 139
@@ -384,7 +384,7 @@ int __init ibmphp_rsrc_init (void)
384 * Input: type of the resource, range to add, current bus 384 * Input: type of the resource, range to add, current bus
385 * Output: 0 or -1, bus and range ptrs 385 * Output: 0 or -1, bus and range ptrs
386 ********************************************************************************/ 386 ********************************************************************************/
387static int add_range (int type, struct range_node *range, struct bus_node *bus_cur) 387static int add_bus_range (int type, struct range_node *range, struct bus_node *bus_cur)
388{ 388{
389 struct range_node *range_cur = NULL; 389 struct range_node *range_cur = NULL;
390 struct range_node *range_prev; 390 struct range_node *range_prev;
@@ -455,7 +455,7 @@ static int add_range (int type, struct range_node *range, struct bus_node *bus_c
455 455
456/******************************************************************************* 456/*******************************************************************************
457 * This routine goes through the list of resources of type 'type' and updates 457 * This routine goes through the list of resources of type 'type' and updates
458 * the range numbers that they correspond to. It was called from add_range fnc 458 * the range numbers that they correspond to. It was called from add_bus_range fnc
459 * 459 *
460 * Input: bus, type of the resource, the rangeno starting from which to update 460 * Input: bus, type of the resource, the rangeno starting from which to update
461 ******************************************************************************/ 461 ******************************************************************************/
@@ -1999,7 +1999,7 @@ static int __init update_bridge_ranges (struct bus_node **bus)
1999 1999
2000 if (bus_sec->noIORanges > 0) { 2000 if (bus_sec->noIORanges > 0) {
2001 if (!range_exists_already (range, bus_sec, IO)) { 2001 if (!range_exists_already (range, bus_sec, IO)) {
2002 add_range (IO, range, bus_sec); 2002 add_bus_range (IO, range, bus_sec);
2003 ++bus_sec->noIORanges; 2003 ++bus_sec->noIORanges;
2004 } else { 2004 } else {
2005 kfree (range); 2005 kfree (range);
@@ -2048,7 +2048,7 @@ static int __init update_bridge_ranges (struct bus_node **bus)
2048 2048
2049 if (bus_sec->noMemRanges > 0) { 2049 if (bus_sec->noMemRanges > 0) {
2050 if (!range_exists_already (range, bus_sec, MEM)) { 2050 if (!range_exists_already (range, bus_sec, MEM)) {
2051 add_range (MEM, range, bus_sec); 2051 add_bus_range (MEM, range, bus_sec);
2052 ++bus_sec->noMemRanges; 2052 ++bus_sec->noMemRanges;
2053 } else { 2053 } else {
2054 kfree (range); 2054 kfree (range);
@@ -2102,7 +2102,7 @@ static int __init update_bridge_ranges (struct bus_node **bus)
2102 2102
2103 if (bus_sec->noPFMemRanges > 0) { 2103 if (bus_sec->noPFMemRanges > 0) {
2104 if (!range_exists_already (range, bus_sec, PFMEM)) { 2104 if (!range_exists_already (range, bus_sec, PFMEM)) {
2105 add_range (PFMEM, range, bus_sec); 2105 add_bus_range (PFMEM, range, bus_sec);
2106 ++bus_sec->noPFMemRanges; 2106 ++bus_sec->noPFMemRanges;
2107 } else { 2107 } else {
2108 kfree (range); 2108 kfree (range);
diff --git a/drivers/pcmcia/Kconfig b/drivers/pcmcia/Kconfig
index 0a6601c76809..d189e4743e69 100644
--- a/drivers/pcmcia/Kconfig
+++ b/drivers/pcmcia/Kconfig
@@ -51,17 +51,23 @@ config PCMCIA_LOAD_CIS
51 51
52config PCMCIA_IOCTL 52config PCMCIA_IOCTL
53 bool "PCMCIA control ioctl (obsolete)" 53 bool "PCMCIA control ioctl (obsolete)"
54 depends on PCMCIA 54 depends on PCMCIA && ARM && !SMP && !PREEMPT
55 default y 55 default y
56 help 56 help
57 If you say Y here, the deprecated ioctl interface to the PCMCIA 57 If you say Y here, the deprecated ioctl interface to the PCMCIA
58 subsystem will be built. It is needed by cardmgr and cardctl 58 subsystem will be built. It is needed by the deprecated pcmcia-cs
59 (pcmcia-cs) to function properly. 59 tools (cardmgr, cardctl) to function properly.
60 60
61 You should use the new pcmciautils package instead (see 61 You should use the new pcmciautils package instead (see
62 <file:Documentation/Changes> for location and details). 62 <file:Documentation/Changes> for location and details).
63 63
 64 If unsure, say Y. 64 This config option will most likely be removed in kernel 2.6.35,
 65 and the associated code in kernel 2.6.36.
66
67 As the PCMCIA ioctl is not locking safe, it depends on !SMP and
68 !PREEMPT.
69
70 If unsure, say N.
65 71
66config CARDBUS 72config CARDBUS
67 bool "32-bit CardBus support" 73 bool "32-bit CardBus support"
diff --git a/drivers/pcmcia/cardbus.c b/drivers/pcmcia/cardbus.c
index ac0686efbf75..e6ab2a47d8cb 100644
--- a/drivers/pcmcia/cardbus.c
+++ b/drivers/pcmcia/cardbus.c
@@ -71,7 +71,7 @@ int __ref cb_alloc(struct pcmcia_socket *s)
71 unsigned int max, pass; 71 unsigned int max, pass;
72 72
73 s->functions = pci_scan_slot(bus, PCI_DEVFN(0, 0)); 73 s->functions = pci_scan_slot(bus, PCI_DEVFN(0, 0));
74 pci_fixup_cardbus(bus); 74 pci_fixup_cardbus(bus);
75 75
76 max = bus->secondary; 76 max = bus->secondary;
77 for (pass = 0; pass < 2; pass++) 77 for (pass = 0; pass < 2; pass++)
diff --git a/drivers/pcmcia/cistpl.c b/drivers/pcmcia/cistpl.c
index 2f3622dd4b69..f230f6543bff 100644
--- a/drivers/pcmcia/cistpl.c
+++ b/drivers/pcmcia/cistpl.c
@@ -54,46 +54,44 @@ static const u_int exponent[] = {
54/* Upper limit on reasonable # of tuples */ 54/* Upper limit on reasonable # of tuples */
55#define MAX_TUPLES 200 55#define MAX_TUPLES 200
56 56
57/*====================================================================*/
58
59/* Parameters that can be set with 'insmod' */
60
61/* 16-bit CIS? */ 57/* 16-bit CIS? */
62static int cis_width; 58static int cis_width;
63module_param(cis_width, int, 0444); 59module_param(cis_width, int, 0444);
64 60
65void release_cis_mem(struct pcmcia_socket *s) 61void release_cis_mem(struct pcmcia_socket *s)
66{ 62{
67 mutex_lock(&s->ops_mutex); 63 mutex_lock(&s->ops_mutex);
68 if (s->cis_mem.flags & MAP_ACTIVE) { 64 if (s->cis_mem.flags & MAP_ACTIVE) {
69 s->cis_mem.flags &= ~MAP_ACTIVE; 65 s->cis_mem.flags &= ~MAP_ACTIVE;
70 s->ops->set_mem_map(s, &s->cis_mem); 66 s->ops->set_mem_map(s, &s->cis_mem);
71 if (s->cis_mem.res) { 67 if (s->cis_mem.res) {
72 release_resource(s->cis_mem.res); 68 release_resource(s->cis_mem.res);
73 kfree(s->cis_mem.res); 69 kfree(s->cis_mem.res);
74 s->cis_mem.res = NULL; 70 s->cis_mem.res = NULL;
71 }
72 iounmap(s->cis_virt);
73 s->cis_virt = NULL;
75 } 74 }
76 iounmap(s->cis_virt); 75 mutex_unlock(&s->ops_mutex);
77 s->cis_virt = NULL;
78 }
79 mutex_unlock(&s->ops_mutex);
80} 76}
81 77
82/* 78/**
83 * Map the card memory at "card_offset" into virtual space. 79 * set_cis_map() - map the card memory at "card_offset" into virtual space.
80 *
84 * If flags & MAP_ATTRIB, map the attribute space, otherwise 81 * If flags & MAP_ATTRIB, map the attribute space, otherwise
85 * map the memory space. 82 * map the memory space.
86 * 83 *
87 * Must be called with ops_mutex held. 84 * Must be called with ops_mutex held.
88 */ 85 */
89static void __iomem * 86static void __iomem *set_cis_map(struct pcmcia_socket *s,
90set_cis_map(struct pcmcia_socket *s, unsigned int card_offset, unsigned int flags) 87 unsigned int card_offset, unsigned int flags)
91{ 88{
92 pccard_mem_map *mem = &s->cis_mem; 89 pccard_mem_map *mem = &s->cis_mem;
93 int ret; 90 int ret;
94 91
95 if (!(s->features & SS_CAP_STATIC_MAP) && (mem->res == NULL)) { 92 if (!(s->features & SS_CAP_STATIC_MAP) && (mem->res == NULL)) {
96 mem->res = pcmcia_find_mem_region(0, s->map_size, s->map_size, 0, s); 93 mem->res = pcmcia_find_mem_region(0, s->map_size,
94 s->map_size, 0, s);
97 if (mem->res == NULL) { 95 if (mem->res == NULL) {
98 dev_printk(KERN_NOTICE, &s->dev, 96 dev_printk(KERN_NOTICE, &s->dev,
99 "cs: unable to map card memory!\n"); 97 "cs: unable to map card memory!\n");
@@ -124,165 +122,170 @@ set_cis_map(struct pcmcia_socket *s, unsigned int card_offset, unsigned int flag
124 return s->cis_virt; 122 return s->cis_virt;
125} 123}
126 124
127/*======================================================================
128
129 Low-level functions to read and write CIS memory. I think the
130 write routine is only useful for writing one-byte registers.
131
132======================================================================*/
133 125
134/* Bits in attr field */ 126/* Bits in attr field */
135#define IS_ATTR 1 127#define IS_ATTR 1
136#define IS_INDIRECT 8 128#define IS_INDIRECT 8
137 129
130/**
131 * pcmcia_read_cis_mem() - low-level function to read CIS memory
132 */
138int pcmcia_read_cis_mem(struct pcmcia_socket *s, int attr, u_int addr, 133int pcmcia_read_cis_mem(struct pcmcia_socket *s, int attr, u_int addr,
139 u_int len, void *ptr) 134 u_int len, void *ptr)
140{ 135{
141 void __iomem *sys, *end; 136 void __iomem *sys, *end;
142 unsigned char *buf = ptr; 137 unsigned char *buf = ptr;
143
144 dev_dbg(&s->dev, "pcmcia_read_cis_mem(%d, %#x, %u)\n", attr, addr, len);
145
146 mutex_lock(&s->ops_mutex);
147 if (attr & IS_INDIRECT) {
148 /* Indirect accesses use a bunch of special registers at fixed
149 locations in common memory */
150 u_char flags = ICTRL0_COMMON|ICTRL0_AUTOINC|ICTRL0_BYTEGRAN;
151 if (attr & IS_ATTR) {
152 addr *= 2;
153 flags = ICTRL0_AUTOINC;
154 }
155 138
156 sys = set_cis_map(s, 0, MAP_ACTIVE | ((cis_width) ? MAP_16BIT : 0)); 139 dev_dbg(&s->dev, "pcmcia_read_cis_mem(%d, %#x, %u)\n", attr, addr, len);
157 if (!sys) {
158 dev_dbg(&s->dev, "could not map memory\n");
159 memset(ptr, 0xff, len);
160 mutex_unlock(&s->ops_mutex);
161 return -1;
162 }
163 140
164 writeb(flags, sys+CISREG_ICTRL0); 141 mutex_lock(&s->ops_mutex);
165 writeb(addr & 0xff, sys+CISREG_IADDR0); 142 if (attr & IS_INDIRECT) {
166 writeb((addr>>8) & 0xff, sys+CISREG_IADDR1); 143 /* Indirect accesses use a bunch of special registers at fixed
167 writeb((addr>>16) & 0xff, sys+CISREG_IADDR2); 144 locations in common memory */
168 writeb((addr>>24) & 0xff, sys+CISREG_IADDR3); 145 u_char flags = ICTRL0_COMMON|ICTRL0_AUTOINC|ICTRL0_BYTEGRAN;
169 for ( ; len > 0; len--, buf++) 146 if (attr & IS_ATTR) {
170 *buf = readb(sys+CISREG_IDATA0); 147 addr *= 2;
171 } else { 148 flags = ICTRL0_AUTOINC;
172 u_int inc = 1, card_offset, flags; 149 }
173
174 if (addr > CISTPL_MAX_CIS_SIZE)
175 dev_dbg(&s->dev, "attempt to read CIS mem at addr %#x", addr);
176
177 flags = MAP_ACTIVE | ((cis_width) ? MAP_16BIT : 0);
178 if (attr) {
179 flags |= MAP_ATTRIB;
180 inc++;
181 addr *= 2;
182 }
183 150
184 card_offset = addr & ~(s->map_size-1); 151 sys = set_cis_map(s, 0, MAP_ACTIVE |
185 while (len) { 152 ((cis_width) ? MAP_16BIT : 0));
186 sys = set_cis_map(s, card_offset, flags); 153 if (!sys) {
187 if (!sys) { 154 dev_dbg(&s->dev, "could not map memory\n");
188 dev_dbg(&s->dev, "could not map memory\n"); 155 memset(ptr, 0xff, len);
189 memset(ptr, 0xff, len); 156 mutex_unlock(&s->ops_mutex);
190 mutex_unlock(&s->ops_mutex); 157 return -1;
191 return -1; 158 }
192 } 159
193 end = sys + s->map_size; 160 writeb(flags, sys+CISREG_ICTRL0);
194 sys = sys + (addr & (s->map_size-1)); 161 writeb(addr & 0xff, sys+CISREG_IADDR0);
195 for ( ; len > 0; len--, buf++, sys += inc) { 162 writeb((addr>>8) & 0xff, sys+CISREG_IADDR1);
196 if (sys == end) 163 writeb((addr>>16) & 0xff, sys+CISREG_IADDR2);
197 break; 164 writeb((addr>>24) & 0xff, sys+CISREG_IADDR3);
198 *buf = readb(sys); 165 for ( ; len > 0; len--, buf++)
199 } 166 *buf = readb(sys+CISREG_IDATA0);
200 card_offset += s->map_size; 167 } else {
201 addr = 0; 168 u_int inc = 1, card_offset, flags;
169
170 if (addr > CISTPL_MAX_CIS_SIZE)
171 dev_dbg(&s->dev,
172 "attempt to read CIS mem at addr %#x", addr);
173
174 flags = MAP_ACTIVE | ((cis_width) ? MAP_16BIT : 0);
175 if (attr) {
176 flags |= MAP_ATTRIB;
177 inc++;
178 addr *= 2;
179 }
180
181 card_offset = addr & ~(s->map_size-1);
182 while (len) {
183 sys = set_cis_map(s, card_offset, flags);
184 if (!sys) {
185 dev_dbg(&s->dev, "could not map memory\n");
186 memset(ptr, 0xff, len);
187 mutex_unlock(&s->ops_mutex);
188 return -1;
189 }
190 end = sys + s->map_size;
191 sys = sys + (addr & (s->map_size-1));
192 for ( ; len > 0; len--, buf++, sys += inc) {
193 if (sys == end)
194 break;
195 *buf = readb(sys);
196 }
197 card_offset += s->map_size;
198 addr = 0;
199 }
202 } 200 }
203 } 201 mutex_unlock(&s->ops_mutex);
204 mutex_unlock(&s->ops_mutex); 202 dev_dbg(&s->dev, " %#2.2x %#2.2x %#2.2x %#2.2x ...\n",
205 dev_dbg(&s->dev, " %#2.2x %#2.2x %#2.2x %#2.2x ...\n", 203 *(u_char *)(ptr+0), *(u_char *)(ptr+1),
206 *(u_char *)(ptr+0), *(u_char *)(ptr+1), 204 *(u_char *)(ptr+2), *(u_char *)(ptr+3));
207 *(u_char *)(ptr+2), *(u_char *)(ptr+3)); 205 return 0;
208 return 0;
209} 206}
210 207
211 208
209/**
210 * pcmcia_write_cis_mem() - low-level function to write CIS memory
211 *
212 * Probably only useful for writing one-byte registers.
213 */
212void pcmcia_write_cis_mem(struct pcmcia_socket *s, int attr, u_int addr, 214void pcmcia_write_cis_mem(struct pcmcia_socket *s, int attr, u_int addr,
213 u_int len, void *ptr) 215 u_int len, void *ptr)
214{ 216{
215 void __iomem *sys, *end; 217 void __iomem *sys, *end;
216 unsigned char *buf = ptr; 218 unsigned char *buf = ptr;
217
218 dev_dbg(&s->dev, "pcmcia_write_cis_mem(%d, %#x, %u)\n", attr, addr, len);
219
220 mutex_lock(&s->ops_mutex);
221 if (attr & IS_INDIRECT) {
222 /* Indirect accesses use a bunch of special registers at fixed
223 locations in common memory */
224 u_char flags = ICTRL0_COMMON|ICTRL0_AUTOINC|ICTRL0_BYTEGRAN;
225 if (attr & IS_ATTR) {
226 addr *= 2;
227 flags = ICTRL0_AUTOINC;
228 }
229 219
230 sys = set_cis_map(s, 0, MAP_ACTIVE | ((cis_width) ? MAP_16BIT : 0)); 220 dev_dbg(&s->dev,
231 if (!sys) { 221 "pcmcia_write_cis_mem(%d, %#x, %u)\n", attr, addr, len);
232 dev_dbg(&s->dev, "could not map memory\n");
233 mutex_unlock(&s->ops_mutex);
234 return; /* FIXME: Error */
235 }
236 222
237 writeb(flags, sys+CISREG_ICTRL0); 223 mutex_lock(&s->ops_mutex);
238 writeb(addr & 0xff, sys+CISREG_IADDR0); 224 if (attr & IS_INDIRECT) {
239 writeb((addr>>8) & 0xff, sys+CISREG_IADDR1); 225 /* Indirect accesses use a bunch of special registers at fixed
240 writeb((addr>>16) & 0xff, sys+CISREG_IADDR2); 226 locations in common memory */
241 writeb((addr>>24) & 0xff, sys+CISREG_IADDR3); 227 u_char flags = ICTRL0_COMMON|ICTRL0_AUTOINC|ICTRL0_BYTEGRAN;
242 for ( ; len > 0; len--, buf++) 228 if (attr & IS_ATTR) {
243 writeb(*buf, sys+CISREG_IDATA0); 229 addr *= 2;
244 } else { 230 flags = ICTRL0_AUTOINC;
245 u_int inc = 1, card_offset, flags; 231 }
246
247 flags = MAP_ACTIVE | ((cis_width) ? MAP_16BIT : 0);
248 if (attr & IS_ATTR) {
249 flags |= MAP_ATTRIB;
250 inc++;
251 addr *= 2;
252 }
253 232
254 card_offset = addr & ~(s->map_size-1); 233 sys = set_cis_map(s, 0, MAP_ACTIVE |
255 while (len) { 234 ((cis_width) ? MAP_16BIT : 0));
256 sys = set_cis_map(s, card_offset, flags); 235 if (!sys) {
257 if (!sys) { 236 dev_dbg(&s->dev, "could not map memory\n");
258 dev_dbg(&s->dev, "could not map memory\n"); 237 mutex_unlock(&s->ops_mutex);
259 mutex_unlock(&s->ops_mutex); 238 return; /* FIXME: Error */
260 return; /* FIXME: error */ 239 }
261 } 240
262 241 writeb(flags, sys+CISREG_ICTRL0);
263 end = sys + s->map_size; 242 writeb(addr & 0xff, sys+CISREG_IADDR0);
264 sys = sys + (addr & (s->map_size-1)); 243 writeb((addr>>8) & 0xff, sys+CISREG_IADDR1);
265 for ( ; len > 0; len--, buf++, sys += inc) { 244 writeb((addr>>16) & 0xff, sys+CISREG_IADDR2);
266 if (sys == end) 245 writeb((addr>>24) & 0xff, sys+CISREG_IADDR3);
267 break; 246 for ( ; len > 0; len--, buf++)
268 writeb(*buf, sys); 247 writeb(*buf, sys+CISREG_IDATA0);
269 } 248 } else {
270 card_offset += s->map_size; 249 u_int inc = 1, card_offset, flags;
271 addr = 0;
272 }
273 }
274 mutex_unlock(&s->ops_mutex);
275}
276 250
251 flags = MAP_ACTIVE | ((cis_width) ? MAP_16BIT : 0);
252 if (attr & IS_ATTR) {
253 flags |= MAP_ATTRIB;
254 inc++;
255 addr *= 2;
256 }
277 257
278/*====================================================================== 258 card_offset = addr & ~(s->map_size-1);
259 while (len) {
260 sys = set_cis_map(s, card_offset, flags);
261 if (!sys) {
262 dev_dbg(&s->dev, "could not map memory\n");
263 mutex_unlock(&s->ops_mutex);
264 return; /* FIXME: error */
265 }
279 266
280 This is a wrapper around read_cis_mem, with the same interface, 267 end = sys + s->map_size;
281 but which caches information, for cards whose CIS may not be 268 sys = sys + (addr & (s->map_size-1));
282 readable all the time. 269 for ( ; len > 0; len--, buf++, sys += inc) {
270 if (sys == end)
271 break;
272 writeb(*buf, sys);
273 }
274 card_offset += s->map_size;
275 addr = 0;
276 }
277 }
278 mutex_unlock(&s->ops_mutex);
279}
283 280
284======================================================================*/
285 281
282/**
283 * read_cis_cache() - read CIS memory or its associated cache
284 *
285 * This is a wrapper around read_cis_mem, with the same interface,
286 * but which caches information, for cards whose CIS may not be
287 * readable all the time.
288 */
286static int read_cis_cache(struct pcmcia_socket *s, int attr, u_int addr, 289static int read_cis_cache(struct pcmcia_socket *s, int attr, u_int addr,
287 size_t len, void *ptr) 290 size_t len, void *ptr)
288{ 291{
@@ -353,7 +356,6 @@ remove_cis_cache(struct pcmcia_socket *s, int attr, u_int addr, u_int len)
 * This destroys the CIS cache but keeps any fake CIS alive. Must be
 * called with ops_mutex held.
 */
void destroy_cis_cache(struct pcmcia_socket *s)
{
	struct list_head *l, *n;
@@ -366,13 +368,9 @@ void destroy_cis_cache(struct pcmcia_socket *s)
	}
}

/**
 * verify_cis_cache() - does the CIS match what is in the CIS cache?
 */
int verify_cis_cache(struct pcmcia_socket *s)
{
	struct cis_cache_entry *cis;
@@ -404,13 +402,12 @@ int verify_cis_cache(struct pcmcia_socket *s)
	return 0;
}

/**
 * pcmcia_replace_cis() - use a replacement CIS instead of the card's CIS
 *
 * For really bad cards, we provide a facility for uploading a
 * replacement CIS.
 */
int pcmcia_replace_cis(struct pcmcia_socket *s,
			const u8 *data, const size_t len)
{
@@ -433,17 +430,13 @@ int pcmcia_replace_cis(struct pcmcia_socket *s,
	return 0;
}

/* The high-level CIS tuple services */

typedef struct tuple_flags {
	u_int link_space:4;
	u_int has_link:1;
	u_int mfc_fn:3;
	u_int space:4;
} tuple_flags;

#define LINK_SPACE(f) (((tuple_flags *)(&(f)))->link_space)
@@ -451,982 +444,961 @@ typedef struct tuple_flags {
#define MFC_FN(f) (((tuple_flags *)(&(f)))->mfc_fn)
#define SPACE(f) (((tuple_flags *)(&(f)))->space)
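The accessor macros above reinterpret a plain u_int flags word as the tuple_flags bitfield by casting its address. A standalone illustration of the idiom; note that bitfield layout is implementation-defined, which is tolerable here only because the same compiler builds every user of the word:

#include <stdio.h>

typedef struct tuple_flags {
	unsigned int link_space:4;
	unsigned int has_link:1;
	unsigned int mfc_fn:3;
	unsigned int space:4;
} tuple_flags;

#define SPACE(f) (((tuple_flags *)(&(f)))->space)

int main(void)
{
	unsigned int flags = 0;

	SPACE(flags) = 5;	/* writes through the casted bitfield */
	printf("flags=%#x space=%u\n", flags, SPACE(flags));
	return 0;
}
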

int pccard_get_first_tuple(struct pcmcia_socket *s, unsigned int function,
				tuple_t *tuple)
{
	if (!s)
		return -EINVAL;

	if (!(s->state & SOCKET_PRESENT) || (s->state & SOCKET_CARDBUS))
		return -ENODEV;
	tuple->TupleLink = tuple->Flags = 0;

	/* Assume presence of a LONGLINK_C to address 0 */
	tuple->CISOffset = tuple->LinkOffset = 0;
	SPACE(tuple->Flags) = HAS_LINK(tuple->Flags) = 1;

	if ((s->functions > 1) && !(tuple->Attributes & TUPLE_RETURN_COMMON)) {
		cisdata_t req = tuple->DesiredTuple;
		tuple->DesiredTuple = CISTPL_LONGLINK_MFC;
		if (pccard_get_next_tuple(s, function, tuple) == 0) {
			tuple->DesiredTuple = CISTPL_LINKTARGET;
			if (pccard_get_next_tuple(s, function, tuple) != 0)
				return -ENOSPC;
		} else
			tuple->CISOffset = tuple->TupleLink = 0;
		tuple->DesiredTuple = req;
	}
	return pccard_get_next_tuple(s, function, tuple);
}

static int follow_link(struct pcmcia_socket *s, tuple_t *tuple)
{
	u_char link[5];
	u_int ofs;
	int ret;

	if (MFC_FN(tuple->Flags)) {
		/* Get indirect link from the MFC tuple */
		ret = read_cis_cache(s, LINK_SPACE(tuple->Flags),
				tuple->LinkOffset, 5, link);
		if (ret)
			return -1;
		ofs = get_unaligned_le32(link + 1);
		SPACE(tuple->Flags) = (link[0] == CISTPL_MFC_ATTR);
		/* Move to the next indirect link */
		tuple->LinkOffset += 5;
		MFC_FN(tuple->Flags)--;
	} else if (HAS_LINK(tuple->Flags)) {
		ofs = tuple->LinkOffset;
		SPACE(tuple->Flags) = LINK_SPACE(tuple->Flags);
		HAS_LINK(tuple->Flags) = 0;
	} else
		return -1;

	if (SPACE(tuple->Flags)) {
		/* This is ugly, but a common CIS error is to code the long
		   link offset incorrectly, so we check the right spot... */
		ret = read_cis_cache(s, SPACE(tuple->Flags), ofs, 5, link);
		if (ret)
			return -1;
		if ((link[0] == CISTPL_LINKTARGET) && (link[1] >= 3) &&
			(strncmp(link+2, "CIS", 3) == 0))
			return ofs;
		remove_cis_cache(s, SPACE(tuple->Flags), ofs, 5);
		/* Then, we try the wrong spot... */
		ofs = ofs >> 1;
	}
	ret = read_cis_cache(s, SPACE(tuple->Flags), ofs, 5, link);
	if (ret)
		return -1;
	if ((link[0] == CISTPL_LINKTARGET) && (link[1] >= 3) &&
		(strncmp(link+2, "CIS", 3) == 0))
		return ofs;
	remove_cis_cache(s, SPACE(tuple->Flags), ofs, 5);
	return -1;
}

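Both probes in follow_link() apply the same validity test to a candidate link target: a CISTPL_LINKTARGET tuple must carry at least three data bytes spelling the ASCII tag "CIS". The check, factored into a hypothetical helper for clarity (not part of the driver):

#include <stdbool.h>
#include <string.h>

static bool cis_linktarget_ok(const unsigned char link[5])
{
	return link[0] == CISTPL_LINKTARGET && link[1] >= 3 &&
	       strncmp((const char *)link + 2, "CIS", 3) == 0;
}
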
int pccard_get_next_tuple(struct pcmcia_socket *s, unsigned int function,
			tuple_t *tuple)
{
	u_char link[2], tmp;
	int ofs, i, attr;
	int ret;

	if (!s)
		return -EINVAL;
	if (!(s->state & SOCKET_PRESENT) || (s->state & SOCKET_CARDBUS))
		return -ENODEV;

	link[1] = tuple->TupleLink;
	ofs = tuple->CISOffset + tuple->TupleLink;
	attr = SPACE(tuple->Flags);

	for (i = 0; i < MAX_TUPLES; i++) {
		if (link[1] == 0xff)
			link[0] = CISTPL_END;
		else {
			ret = read_cis_cache(s, attr, ofs, 2, link);
			if (ret)
				return -1;
			if (link[0] == CISTPL_NULL) {
				ofs++;
				continue;
			}
		}

		/* End of chain?  Follow long link if possible */
		if (link[0] == CISTPL_END) {
			ofs = follow_link(s, tuple);
			if (ofs < 0)
				return -ENOSPC;
			attr = SPACE(tuple->Flags);
			ret = read_cis_cache(s, attr, ofs, 2, link);
			if (ret)
				return -1;
		}

		/* Is this a link tuple?  Make a note of it */
		if ((link[0] == CISTPL_LONGLINK_A) ||
			(link[0] == CISTPL_LONGLINK_C) ||
			(link[0] == CISTPL_LONGLINK_MFC) ||
			(link[0] == CISTPL_LINKTARGET) ||
			(link[0] == CISTPL_INDIRECT) ||
			(link[0] == CISTPL_NO_LINK)) {
			switch (link[0]) {
			case CISTPL_LONGLINK_A:
				HAS_LINK(tuple->Flags) = 1;
				LINK_SPACE(tuple->Flags) = attr | IS_ATTR;
				ret = read_cis_cache(s, attr, ofs+2, 4,
						&tuple->LinkOffset);
				if (ret)
					return -1;
				break;
			case CISTPL_LONGLINK_C:
				HAS_LINK(tuple->Flags) = 1;
				LINK_SPACE(tuple->Flags) = attr & ~IS_ATTR;
				ret = read_cis_cache(s, attr, ofs+2, 4,
						&tuple->LinkOffset);
				if (ret)
					return -1;
				break;
			case CISTPL_INDIRECT:
				HAS_LINK(tuple->Flags) = 1;
				LINK_SPACE(tuple->Flags) = IS_ATTR |
					IS_INDIRECT;
				tuple->LinkOffset = 0;
				break;
			case CISTPL_LONGLINK_MFC:
				tuple->LinkOffset = ofs + 3;
				LINK_SPACE(tuple->Flags) = attr;
				if (function == BIND_FN_ALL) {
					/* Follow all the MFC links */
					ret = read_cis_cache(s, attr, ofs+2,
							1, &tmp);
					if (ret)
						return -1;
					MFC_FN(tuple->Flags) = tmp;
				} else {
					/* Follow exactly one of the links */
					MFC_FN(tuple->Flags) = 1;
					tuple->LinkOffset += function * 5;
				}
				break;
			case CISTPL_NO_LINK:
				HAS_LINK(tuple->Flags) = 0;
				break;
			}
			if ((tuple->Attributes & TUPLE_RETURN_LINK) &&
				(tuple->DesiredTuple == RETURN_FIRST_TUPLE))
				break;
		} else
			if (tuple->DesiredTuple == RETURN_FIRST_TUPLE)
				break;

		if (link[0] == tuple->DesiredTuple)
			break;
		ofs += link[1] + 2;
	}
	if (i == MAX_TUPLES) {
		dev_dbg(&s->dev, "cs: overrun in pcmcia_get_next_tuple\n");
		return -ENOSPC;
	}

	tuple->TupleCode = link[0];
	tuple->TupleLink = link[1];
	tuple->CISOffset = ofs + 2;
	return 0;
}

int pccard_get_tuple_data(struct pcmcia_socket *s, tuple_t *tuple)
{
	u_int len;
	int ret;

	if (!s)
		return -EINVAL;

	if (tuple->TupleLink < tuple->TupleOffset)
		return -ENOSPC;
	len = tuple->TupleLink - tuple->TupleOffset;
	tuple->TupleDataLen = tuple->TupleLink;
	if (len == 0)
		return 0;
	ret = read_cis_cache(s, SPACE(tuple->Flags),
			tuple->CISOffset + tuple->TupleOffset,
			min(len, (u_int) tuple->TupleDataMax),
			tuple->TupleData);
	if (ret)
		return -1;
	return 0;
}

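Taken together, pccard_get_first_tuple(), pccard_get_next_tuple() and pccard_get_tuple_data() form the usual iteration pattern over a card's tuple chain. A sketch of such a walk (illustrative, error handling trimmed; the 255-byte ceiling matches the TupleDataMax used elsewhere in this file):

/* Sketch only: enumerate every tuple of one card function. */
static void dump_tuples(struct pcmcia_socket *s, unsigned int function)
{
	tuple_t tuple;
	cisdata_t buf[255];
	int ret;

	tuple.DesiredTuple = RETURN_FIRST_TUPLE;
	tuple.Attributes = 0;
	tuple.TupleData = buf;
	tuple.TupleOffset = 0;
	tuple.TupleDataMax = sizeof(buf);

	for (ret = pccard_get_first_tuple(s, function, &tuple);
	     ret == 0;
	     ret = pccard_get_next_tuple(s, function, &tuple)) {
		if (pccard_get_tuple_data(s, &tuple))
			break;
		pr_info("tuple 0x%02x, %u bytes of data\n",
			(unsigned) tuple.TupleCode,
			(unsigned) tuple.TupleDataLen);
	}
}
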
/* Parsing routines for individual tuples */

static int parse_device(tuple_t *tuple, cistpl_device_t *device)
{
	int i;
	u_char scale;
	u_char *p, *q;

	p = (u_char *)tuple->TupleData;
	q = p + tuple->TupleDataLen;

	device->ndev = 0;
	for (i = 0; i < CISTPL_MAX_DEVICES; i++) {

		if (*p == 0xff)
			break;
		device->dev[i].type = (*p >> 4);
		device->dev[i].wp = (*p & 0x08) ? 1 : 0;
		switch (*p & 0x07) {
		case 0:
			device->dev[i].speed = 0;
			break;
		case 1:
			device->dev[i].speed = 250;
			break;
		case 2:
			device->dev[i].speed = 200;
			break;
		case 3:
			device->dev[i].speed = 150;
			break;
		case 4:
			device->dev[i].speed = 100;
			break;
		case 7:
			if (++p == q)
				return -EINVAL;
			device->dev[i].speed = SPEED_CVT(*p);
			while (*p & 0x80)
				if (++p == q)
					return -EINVAL;
			break;
		default:
			return -EINVAL;
		}

		if (++p == q)
			return -EINVAL;
		if (*p == 0xff)
			break;
		scale = *p & 7;
		if (scale == 7)
			return -EINVAL;
		device->dev[i].size = ((*p >> 3) + 1) * (512 << (scale*2));
		device->ndev++;
		if (++p == q)
			break;
	}

	return 0;
}

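The size decoding at the bottom of parse_device() packs a scale into the low three bits of the byte and a count into the remaining five. A standalone check of the arithmetic (illustrative):

#include <stdio.h>

static unsigned int cis_device_size(unsigned char b)
{
	unsigned int scale = b & 7;	/* unit is 512 << (scale*2) bytes */

	return ((b >> 3) + 1) * (512u << (scale * 2));
}

int main(void)
{
	/* 0x1a: count 3+1, scale 2 -> 4 * 8192 = 32768 bytes */
	printf("%u\n", cis_device_size(0x1a));
	return 0;
}
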
static int parse_checksum(tuple_t *tuple, cistpl_checksum_t *csum)
{
	u_char *p;
	if (tuple->TupleDataLen < 5)
		return -EINVAL;
	p = (u_char *) tuple->TupleData;
	csum->addr = tuple->CISOffset + get_unaligned_le16(p) - 2;
	csum->len = get_unaligned_le16(p + 2);
	csum->sum = *(p + 4);
	return 0;
}

static int parse_longlink(tuple_t *tuple, cistpl_longlink_t *link)
{
	if (tuple->TupleDataLen < 4)
		return -EINVAL;
	link->addr = get_unaligned_le32(tuple->TupleData);
	return 0;
}

static int parse_longlink_mfc(tuple_t *tuple, cistpl_longlink_mfc_t *link)
{
	u_char *p;
	int i;

	p = (u_char *)tuple->TupleData;

	link->nfn = *p; p++;
	if (tuple->TupleDataLen <= link->nfn*5)
		return -EINVAL;
	for (i = 0; i < link->nfn; i++) {
		link->fn[i].space = *p; p++;
		link->fn[i].addr = get_unaligned_le32(p);
		p += 4;
	}
	return 0;
}

static int parse_strings(u_char *p, u_char *q, int max,
			char *s, u_char *ofs, u_char *found)
{
	int i, j, ns;

	if (p == q)
		return -EINVAL;
	ns = 0; j = 0;
	for (i = 0; i < max; i++) {
		if (*p == 0xff)
			break;
		ofs[i] = j;
		ns++;
		for (;;) {
			s[j++] = (*p == 0xff) ? '\0' : *p;
			if ((*p == '\0') || (*p == 0xff))
				break;
			if (++p == q)
				return -EINVAL;
		}
		if ((*p == 0xff) || (++p == q))
			break;
	}
	if (found) {
		*found = ns;
		return 0;
	}

	return (ns == max) ? 0 : -EINVAL;
}

static int parse_vers_1(tuple_t *tuple, cistpl_vers_1_t *vers_1)
{
	u_char *p, *q;

	p = (u_char *)tuple->TupleData;
	q = p + tuple->TupleDataLen;

	vers_1->major = *p; p++;
	vers_1->minor = *p; p++;
	if (p >= q)
		return -EINVAL;

	return parse_strings(p, q, CISTPL_VERS_1_MAX_PROD_STRINGS,
			vers_1->str, vers_1->ofs, &vers_1->ns);
}

static int parse_altstr(tuple_t *tuple, cistpl_altstr_t *altstr)
{
	u_char *p, *q;

	p = (u_char *)tuple->TupleData;
	q = p + tuple->TupleDataLen;

	return parse_strings(p, q, CISTPL_MAX_ALTSTR_STRINGS,
			altstr->str, altstr->ofs, &altstr->ns);
}

static int parse_jedec(tuple_t *tuple, cistpl_jedec_t *jedec)
{
	u_char *p, *q;
	int nid;

	p = (u_char *)tuple->TupleData;
	q = p + tuple->TupleDataLen;

	for (nid = 0; nid < CISTPL_MAX_DEVICES; nid++) {
		if (p > q-2)
			break;
		jedec->id[nid].mfr = p[0];
		jedec->id[nid].info = p[1];
		p += 2;
	}
	jedec->nid = nid;
	return 0;
}

static int parse_manfid(tuple_t *tuple, cistpl_manfid_t *m)
{
	if (tuple->TupleDataLen < 4)
		return -EINVAL;
	m->manf = get_unaligned_le16(tuple->TupleData);
	m->card = get_unaligned_le16(tuple->TupleData + 2);
	return 0;
}

static int parse_funcid(tuple_t *tuple, cistpl_funcid_t *f)
{
	u_char *p;
	if (tuple->TupleDataLen < 2)
		return -EINVAL;
	p = (u_char *)tuple->TupleData;
	f->func = p[0];
	f->sysinit = p[1];
	return 0;
}

static int parse_funce(tuple_t *tuple, cistpl_funce_t *f)
{
	u_char *p;
	int i;
	if (tuple->TupleDataLen < 1)
		return -EINVAL;
	p = (u_char *)tuple->TupleData;
	f->type = p[0];
	for (i = 1; i < tuple->TupleDataLen; i++)
		f->data[i-1] = p[i];
	return 0;
}

static int parse_config(tuple_t *tuple, cistpl_config_t *config)
{
	int rasz, rmsz, i;
	u_char *p;

	p = (u_char *)tuple->TupleData;
	rasz = *p & 0x03;
	rmsz = (*p & 0x3c) >> 2;
	if (tuple->TupleDataLen < rasz+rmsz+4)
		return -EINVAL;
	config->last_idx = *(++p);
	p++;
	config->base = 0;
	for (i = 0; i <= rasz; i++)
		config->base += p[i] << (8*i);
	p += rasz+1;
	for (i = 0; i < 4; i++)
		config->rmask[i] = 0;
	for (i = 0; i <= rmsz; i++)
		config->rmask[i>>2] += p[i] << (8*(i%4));
	config->subtuples = tuple->TupleDataLen - (rasz+rmsz+4);
	return 0;
}

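In parse_config() the first tuple byte only announces how wide the following fields are: rasz and rmsz are each one less than the number of bytes actually used. A quick standalone decode (illustrative):

#include <stdio.h>

int main(void)
{
	unsigned char b = 0x46;		/* example first byte of CISTPL_CONFIG */
	int rasz = b & 0x03;		/* base-address bytes, minus one */
	int rmsz = (b & 0x3c) >> 2;	/* rmask bytes, minus one */

	printf("base bytes: %d, rmask bytes: %d\n", rasz + 1, rmsz + 1);
	return 0;
}
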
/* The following routines are all used to parse the nightmarish
 * config table entries.
 */

static u_char *parse_power(u_char *p, u_char *q, cistpl_power_t *pwr)
{
	int i;
	u_int scale;

	if (p == q)
		return NULL;
	pwr->present = *p;
	pwr->flags = 0;
	p++;
	for (i = 0; i < 7; i++)
		if (pwr->present & (1<<i)) {
			if (p == q)
				return NULL;
			pwr->param[i] = POWER_CVT(*p);
			scale = POWER_SCALE(*p);
			while (*p & 0x80) {
				if (++p == q)
					return NULL;
				if ((*p & 0x7f) < 100)
					pwr->param[i] +=
						(*p & 0x7f) * scale / 100;
				else if (*p == 0x7d)
					pwr->flags |= CISTPL_POWER_HIGHZ_OK;
				else if (*p == 0x7e)
					pwr->param[i] = 0;
				else if (*p == 0x7f)
					pwr->flags |= CISTPL_POWER_HIGHZ_REQ;
				else
					return NULL;
			}
			p++;
		}
	return p;
}

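The first byte of each power descriptor parsed above is a bitmask announcing which of up to seven parameters follow; every set bit pulls in one mantissa/exponent byte plus optional extension bytes flagged by bit 7. A minimal illustration of the mask handling (hypothetical helper, not in the driver):

static int power_params_present(unsigned char present)
{
	int i, n = 0;

	for (i = 0; i < 7; i++)		/* one flag bit per parameter */
		if (present & (1 << i))
			n++;
	return n;
}
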
static u_char *parse_timing(u_char *p, u_char *q, cistpl_timing_t *timing)
{
	u_char scale;

	if (p == q)
		return NULL;
	scale = *p;
	if ((scale & 3) != 3) {
		if (++p == q)
			return NULL;
		timing->wait = SPEED_CVT(*p);
		timing->waitscale = exponent[scale & 3];
	} else
		timing->wait = 0;
	scale >>= 2;
	if ((scale & 7) != 7) {
		if (++p == q)
			return NULL;
		timing->ready = SPEED_CVT(*p);
		timing->rdyscale = exponent[scale & 7];
	} else
		timing->ready = 0;
	scale >>= 3;
	if (scale != 7) {
		if (++p == q)
			return NULL;
		timing->reserved = SPEED_CVT(*p);
		timing->rsvscale = exponent[scale];
	} else
		timing->reserved = 0;
	p++;
	return p;
}

static u_char *parse_io(u_char *p, u_char *q, cistpl_io_t *io)
{
	int i, j, bsz, lsz;

	if (p == q)
		return NULL;
	io->flags = *p;

	if (!(*p & 0x80)) {
		io->nwin = 1;
		io->win[0].base = 0;
		io->win[0].len = (1 << (io->flags & CISTPL_IO_LINES_MASK));
		return p+1;
	}

	if (++p == q)
		return NULL;
	io->nwin = (*p & 0x0f) + 1;
	bsz = (*p & 0x30) >> 4;
	if (bsz == 3)
		bsz++;
	lsz = (*p & 0xc0) >> 6;
	if (lsz == 3)
		lsz++;
	p++;

	for (i = 0; i < io->nwin; i++) {
		io->win[i].base = 0;
		io->win[i].len = 1;
		for (j = 0; j < bsz; j++, p++) {
			if (p == q)
				return NULL;
			io->win[i].base += *p << (j*8);
		}
		for (j = 0; j < lsz; j++, p++) {
			if (p == q)
				return NULL;
			io->win[i].len += *p << (j*8);
		}
	}
	return p;
}

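The short form handled first in parse_io() describes the whole decoded range by its number of address lines alone. A standalone check of that window sizing, assuming the usual CISTPL_IO_LINES_MASK value of 0x1f (illustrative):

#include <stdio.h>

static unsigned int io_window_len(unsigned char flags_byte)
{
	return 1u << (flags_byte & 0x1f);	/* CISTPL_IO_LINES_MASK */
}

int main(void)
{
	printf("%u\n", io_window_len(10));	/* 10 lines -> 1024 ports */
	return 0;
}
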
static u_char *parse_mem(u_char *p, u_char *q, cistpl_mem_t *mem)
{
	int i, j, asz, lsz, has_ha;
	u_int len, ca, ha;

	if (p == q)
		return NULL;

	mem->nwin = (*p & 0x07) + 1;
	lsz = (*p & 0x18) >> 3;
	asz = (*p & 0x60) >> 5;
	has_ha = (*p & 0x80);
	if (++p == q)
		return NULL;

	for (i = 0; i < mem->nwin; i++) {
		len = ca = ha = 0;
		for (j = 0; j < lsz; j++, p++) {
			if (p == q)
				return NULL;
			len += *p << (j*8);
		}
		for (j = 0; j < asz; j++, p++) {
			if (p == q)
				return NULL;
			ca += *p << (j*8);
		}
		if (has_ha)
			for (j = 0; j < asz; j++, p++) {
				if (p == q)
					return NULL;
				ha += *p << (j*8);
			}
		mem->win[i].len = len << 8;
		mem->win[i].card_addr = ca << 8;
		mem->win[i].host_addr = ha << 8;
	}
	return p;
}

static u_char *parse_irq(u_char *p, u_char *q, cistpl_irq_t *irq)
{
	if (p == q)
		return NULL;
	irq->IRQInfo1 = *p; p++;
	if (irq->IRQInfo1 & IRQ_INFO2_VALID) {
		if (p+2 > q)
			return NULL;
		irq->IRQInfo2 = (p[1]<<8) + p[0];
		p += 2;
	}
	return p;
}

static int parse_cftable_entry(tuple_t *tuple,
				cistpl_cftable_entry_t *entry)
{
	u_char *p, *q, features;

	p = tuple->TupleData;
	q = p + tuple->TupleDataLen;
	entry->index = *p & 0x3f;
	entry->flags = 0;
	if (*p & 0x40)
		entry->flags |= CISTPL_CFTABLE_DEFAULT;
	if (*p & 0x80) {
		if (++p == q)
			return -EINVAL;
		if (*p & 0x10)
			entry->flags |= CISTPL_CFTABLE_BVDS;
		if (*p & 0x20)
			entry->flags |= CISTPL_CFTABLE_WP;
		if (*p & 0x40)
			entry->flags |= CISTPL_CFTABLE_RDYBSY;
		if (*p & 0x80)
			entry->flags |= CISTPL_CFTABLE_MWAIT;
		entry->interface = *p & 0x0f;
	} else
		entry->interface = 0;

	/* Process optional features */
	if (++p == q)
		return -EINVAL;
	features = *p; p++;

	/* Power options */
	if ((features & 3) > 0) {
		p = parse_power(p, q, &entry->vcc);
		if (p == NULL)
			return -EINVAL;
	} else
		entry->vcc.present = 0;
	if ((features & 3) > 1) {
		p = parse_power(p, q, &entry->vpp1);
		if (p == NULL)
			return -EINVAL;
	} else
		entry->vpp1.present = 0;
	if ((features & 3) > 2) {
		p = parse_power(p, q, &entry->vpp2);
		if (p == NULL)
			return -EINVAL;
	} else
		entry->vpp2.present = 0;

	/* Timing options */
	if (features & 0x04) {
		p = parse_timing(p, q, &entry->timing);
		if (p == NULL)
			return -EINVAL;
	} else {
		entry->timing.wait = 0;
		entry->timing.ready = 0;
		entry->timing.reserved = 0;
	}

	/* I/O window options */
	if (features & 0x08) {
		p = parse_io(p, q, &entry->io);
		if (p == NULL)
			return -EINVAL;
	} else
		entry->io.nwin = 0;

	/* Interrupt options */
	if (features & 0x10) {
		p = parse_irq(p, q, &entry->irq);
		if (p == NULL)
			return -EINVAL;
	} else
		entry->irq.IRQInfo1 = 0;

	switch (features & 0x60) {
	case 0x00:
		entry->mem.nwin = 0;
		break;
	case 0x20:
		entry->mem.nwin = 1;
		entry->mem.win[0].len = get_unaligned_le16(p) << 8;
		entry->mem.win[0].card_addr = 0;
		entry->mem.win[0].host_addr = 0;
		p += 2;
		if (p > q)
			return -EINVAL;
		break;
	case 0x40:
		entry->mem.nwin = 1;
		entry->mem.win[0].len = get_unaligned_le16(p) << 8;
		entry->mem.win[0].card_addr = get_unaligned_le16(p + 2) << 8;
		entry->mem.win[0].host_addr = 0;
		p += 4;
		if (p > q)
			return -EINVAL;
		break;
	case 0x60:
		p = parse_mem(p, q, &entry->mem);
		if (p == NULL)
			return -EINVAL;
		break;
	}

	/* Misc features */
	if (features & 0x80) {
		if (p == q)
			return -EINVAL;
		entry->flags |= (*p << 8);
		while (*p & 0x80)
			if (++p == q)
				return -EINVAL;
		p++;
	}

	entry->subtuples = q-p;

	return 0;
}

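The features byte read near the top of parse_cftable_entry() steers every optional section that follows. A compact summary of the bit assignments as the function consumes them (the enum names are illustrative, not kernel definitions):

enum cftable_features {
	CFT_POWER_MASK	= 0x03,	/* 0-3 power descriptors: Vcc, Vpp1, Vpp2 */
	CFT_TIMING	= 0x04,	/* timing descriptor present */
	CFT_IO		= 0x08,	/* I/O window descriptor present */
	CFT_IRQ		= 0x10,	/* interrupt descriptor present */
	CFT_MEM_MASK	= 0x60,	/* 0 none, 1 len only, 2 len+addr, 3 full */
	CFT_MISC	= 0x80,	/* misc features byte(s) follow */
};
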
static int parse_device_geo(tuple_t *tuple, cistpl_device_geo_t *geo)
{
	u_char *p, *q;
	int n;

	p = (u_char *)tuple->TupleData;
	q = p + tuple->TupleDataLen;

	for (n = 0; n < CISTPL_MAX_DEVICES; n++) {
		if (p > q-6)
			break;
		geo->geo[n].buswidth = p[0];
		geo->geo[n].erase_block = 1 << (p[1]-1);
		geo->geo[n].read_block = 1 << (p[2]-1);
		geo->geo[n].write_block = 1 << (p[3]-1);
		geo->geo[n].partition = 1 << (p[4]-1);
		geo->geo[n].interleave = 1 << (p[5]-1);
		p += 6;
	}
	geo->ngeo = n;
	return 0;
}

static int parse_vers_2(tuple_t *tuple, cistpl_vers_2_t *v2)
{
	u_char *p, *q;

	if (tuple->TupleDataLen < 10)
		return -EINVAL;

	p = tuple->TupleData;
	q = p + tuple->TupleDataLen;

	v2->vers = p[0];
	v2->comply = p[1];
	v2->dindex = get_unaligned_le16(p + 2);
	v2->vspec8 = p[6];
	v2->vspec9 = p[7];
	v2->nhdr = p[8];
	p += 9;
	return parse_strings(p, q, 2, v2->str, &v2->vendor, NULL);
}

static int parse_org(tuple_t *tuple, cistpl_org_t *org)
{
	u_char *p, *q;
	int i;

	p = tuple->TupleData;
	q = p + tuple->TupleDataLen;
	if (p == q)
		return -EINVAL;
	org->data_org = *p;
	if (++p == q)
		return -EINVAL;
	for (i = 0; i < 30; i++) {
		org->desc[i] = *p;
		if (*p == '\0')
			break;
		if (++p == q)
			return -EINVAL;
	}
	return 0;
}

static int parse_format(tuple_t *tuple, cistpl_format_t *fmt)
{
	u_char *p;

	if (tuple->TupleDataLen < 10)
		return -EINVAL;

	p = tuple->TupleData;

	fmt->type = p[0];
	fmt->edc = p[1];
	fmt->offset = get_unaligned_le32(p + 2);
	fmt->length = get_unaligned_le32(p + 6);

	return 0;
}

int pcmcia_parse_tuple(tuple_t *tuple, cisparse_t *parse)
{
	int ret = 0;

	if (tuple->TupleDataLen > tuple->TupleDataMax)
		return -EINVAL;
	switch (tuple->TupleCode) {
	case CISTPL_DEVICE:
	case CISTPL_DEVICE_A:
		ret = parse_device(tuple, &parse->device);
		break;
	case CISTPL_CHECKSUM:
		ret = parse_checksum(tuple, &parse->checksum);
		break;
	case CISTPL_LONGLINK_A:
	case CISTPL_LONGLINK_C:
		ret = parse_longlink(tuple, &parse->longlink);
		break;
	case CISTPL_LONGLINK_MFC:
		ret = parse_longlink_mfc(tuple, &parse->longlink_mfc);
		break;
	case CISTPL_VERS_1:
		ret = parse_vers_1(tuple, &parse->version_1);
		break;
	case CISTPL_ALTSTR:
		ret = parse_altstr(tuple, &parse->altstr);
		break;
	case CISTPL_JEDEC_A:
	case CISTPL_JEDEC_C:
		ret = parse_jedec(tuple, &parse->jedec);
		break;
	case CISTPL_MANFID:
		ret = parse_manfid(tuple, &parse->manfid);
		break;
	case CISTPL_FUNCID:
		ret = parse_funcid(tuple, &parse->funcid);
		break;
	case CISTPL_FUNCE:
		ret = parse_funce(tuple, &parse->funce);
		break;
	case CISTPL_CONFIG:
		ret = parse_config(tuple, &parse->config);
		break;
	case CISTPL_CFTABLE_ENTRY:
		ret = parse_cftable_entry(tuple, &parse->cftable_entry);
		break;
	case CISTPL_DEVICE_GEO:
	case CISTPL_DEVICE_GEO_A:
		ret = parse_device_geo(tuple, &parse->device_geo);
		break;
	case CISTPL_VERS_2:
		ret = parse_vers_2(tuple, &parse->vers_2);
		break;
	case CISTPL_ORG:
		ret = parse_org(tuple, &parse->org);
		break;
	case CISTPL_FORMAT:
	case CISTPL_FORMAT_A:
		ret = parse_format(tuple, &parse->format);
		break;
	case CISTPL_NO_LINK:
	case CISTPL_LINKTARGET:
		ret = 0;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	if (ret)
		pr_debug("parse_tuple failed %d\n", ret);
	return ret;
}
EXPORT_SYMBOL(pcmcia_parse_tuple);

/**
 * pccard_read_tuple() - internal CIS tuple access
 * @s: the struct pcmcia_socket where the card is inserted
 * @function: the device function we look for
 * @code: which CIS code shall we look for?
 * @parse: buffer where the tuple shall be parsed (or NULL, if no parse)
 *
 * pccard_read_tuple() reads out one tuple and attempts to parse it
 */
int pccard_read_tuple(struct pcmcia_socket *s, unsigned int function,
			cisdata_t code, void *parse)
{
	tuple_t tuple;
	cisdata_t *buf;
	int ret;

	buf = kmalloc(256, GFP_KERNEL);
	if (buf == NULL) {
		dev_printk(KERN_WARNING, &s->dev, "no memory to read tuple\n");
		return -ENOMEM;
	}
	tuple.DesiredTuple = code;
	tuple.Attributes = 0;
	if (function == BIND_FN_ALL)
		tuple.Attributes = TUPLE_RETURN_COMMON;
	ret = pccard_get_first_tuple(s, function, &tuple);
	if (ret != 0)
		goto done;
	tuple.TupleData = buf;
	tuple.TupleOffset = 0;
	tuple.TupleDataMax = 255;
	ret = pccard_get_tuple_data(s, &tuple);
	if (ret != 0)
		goto done;
	ret = pcmcia_parse_tuple(&tuple, parse);
done:
	kfree(buf);
	return ret;
}

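A typical use of the helper above fetches and parses one tuple in a single call; the manfid fields below are the ones filled in by parse_manfid() earlier in this file (sketch only):

static void show_manfid(struct pcmcia_socket *s)
{
	cisparse_t parse;

	if (pccard_read_tuple(s, BIND_FN_ALL, CISTPL_MANFID, &parse) == 0)
		dev_info(&s->dev, "manf 0x%04x, card 0x%04x\n",
			parse.manfid.manf, parse.manfid.card);
}
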
diff --git a/drivers/pcmcia/db1xxx_ss.c b/drivers/pcmcia/db1xxx_ss.c
index 3889cf07d6ce..9254ab0b29b1 100644
--- a/drivers/pcmcia/db1xxx_ss.c
+++ b/drivers/pcmcia/db1xxx_ss.c
@@ -42,7 +42,6 @@ struct db1x_pcmcia_sock {
 	int nr;			/* socket number */
 	void *virt_io;
 
-	/* the "pseudo" addresses of the PCMCIA space. */
 	phys_addr_t phys_io;
 	phys_addr_t phys_attr;
 	phys_addr_t phys_mem;
@@ -437,7 +436,7 @@ static int __devinit db1x_pcmcia_socket_probe(struct platform_device *pdev)
 	 * This includes IRQs for Carddetection/ejection, the card
 	 * itself and optional status change detection.
 	 * Also, the memory areas covered by a socket. For these
-	 * we require the 32bit "pseudo" addresses (see the au1000.h
+	 * we require the real 36bit addresses (see the au1000.h
 	 * header for more information).
 	 */
 
@@ -459,11 +458,7 @@ static int __devinit db1x_pcmcia_socket_probe(struct platform_device *pdev)
 
 	ret = -ENODEV;
 
-	/*
-	 * pseudo-attr: The 32bit address of the PCMCIA attribute space
-	 * for this socket (usually the 36bit address shifted 4 to the
-	 * right).
-	 */
+	/* 36bit PCMCIA Attribute area address */
 	r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pcmcia-attr");
 	if (!r) {
 		printk(KERN_ERR "pcmcia%d has no 'pseudo-attr' resource!\n",
@@ -472,10 +467,7 @@ static int __devinit db1x_pcmcia_socket_probe(struct platform_device *pdev)
 	}
 	sock->phys_attr = r->start;
 
-	/*
-	 * pseudo-mem: The 32bit address of the PCMCIA memory space for
-	 * this socket (usually the 36bit address shifted 4 to the right)
-	 */
+	/* 36bit PCMCIA Memory area address */
 	r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pcmcia-mem");
 	if (!r) {
 		printk(KERN_ERR "pcmcia%d has no 'pseudo-mem' resource!\n",
@@ -484,10 +476,7 @@ static int __devinit db1x_pcmcia_socket_probe(struct platform_device *pdev)
 	}
 	sock->phys_mem = r->start;
 
-	/*
-	 * pseudo-io: The 32bit address of the PCMCIA IO space for this
-	 * socket (usually the 36bit address shifted 4 to the right).
-	 */
+	/* 36bit PCMCIA IO area address */
 	r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pcmcia-io");
 	if (!r) {
 		printk(KERN_ERR "pcmcia%d has no 'pseudo-io' resource!\n",
diff --git a/drivers/pcmcia/pd6729.c b/drivers/pcmcia/pd6729.c
index e1741cd875aa..7c204910a777 100644
--- a/drivers/pcmcia/pd6729.c
+++ b/drivers/pcmcia/pd6729.c
@@ -48,23 +48,13 @@ MODULE_AUTHOR("Jun Komuro <komurojun-mbn@nifty.com>");
  * Specifies the interrupt delivery mode. The default (1) is to use PCI
  * interrupts; a value of 0 selects ISA interrupts. This must be set for
  * correct operation of PCI card readers.
- *
- * irq_list=i,j,...
- *	This list limits the set of interrupts that can be used by PCMCIA
- *	cards.
- *	The default list is 3,4,5,7,9,10,11.
- *	(irq_list parameter is not used, if irq_mode = 1)
  */
 
 static int irq_mode = 1; /* 0 = ISA interrupt, 1 = PCI interrupt */
-static int irq_list[16];
-static unsigned int irq_list_count = 0;
 
 module_param(irq_mode, int, 0444);
-module_param_array(irq_list, int, &irq_list_count, 0444);
 MODULE_PARM_DESC(irq_mode,
 	"interrupt delivery mode. 0 = ISA, 1 = PCI. default is 1");
-MODULE_PARM_DESC(irq_list, "interrupts that can be used by PCMCIA cards");
 
 static DEFINE_SPINLOCK(port_lock);
 
@@ -605,13 +595,7 @@ static u_int __devinit pd6729_isa_scan(void)
 		return 0;
 	}
 
-	if (irq_list_count == 0)
-		mask0 = 0xffff;
-	else
-		for (i = mask0 = 0; i < irq_list_count; i++)
-			mask0 |= (1<<irq_list[i]);
-
-	mask0 &= PD67_MASK;
+	mask0 = PD67_MASK;
 
 	/* just find interrupts that aren't in use */
 	for (i = 0; i < 16; i++)
diff --git a/drivers/pcmcia/rsrc_mgr.c b/drivers/pcmcia/rsrc_mgr.c
index e6f7d410aed6..452c83b512c4 100644
--- a/drivers/pcmcia/rsrc_mgr.c
+++ b/drivers/pcmcia/rsrc_mgr.c
@@ -79,9 +79,8 @@ static resource_size_t pcmcia_align(void *align_data,
79 79
80#ifdef CONFIG_X86 80#ifdef CONFIG_X86
81 if (res->flags & IORESOURCE_IO) { 81 if (res->flags & IORESOURCE_IO) {
82 if (start & 0x300) { 82 if (start & 0x300)
83 start = (start + 0x3ff) & ~0x3ff; 83 start = (start + 0x3ff) & ~0x3ff;
84 }
85 } 84 }
86#endif 85#endif
87 86
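The alignment kept above avoids handing PCMCIA cards I/O ports whose bits 8-9 are set, i.e. anything overlapping the legacy 0x100-0x3ff system range within each 1 KiB block; such a start is rounded up to the next 1 KiB boundary. A standalone check (illustrative):

#include <stdio.h>

static unsigned long pcmcia_align_x86_io(unsigned long start)
{
	if (start & 0x300)
		start = (start + 0x3ff) & ~0x3ffUL;
	return start;
}

int main(void)
{
	printf("%#lx\n", pcmcia_align_x86_io(0x340));	/* prints 0x400 */
	return 0;
}
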
diff --git a/drivers/pcmcia/xxs1500_ss.c b/drivers/pcmcia/xxs1500_ss.c
index 61560cd6e287..f9009d34254b 100644
--- a/drivers/pcmcia/xxs1500_ss.c
+++ b/drivers/pcmcia/xxs1500_ss.c
@@ -218,11 +218,7 @@ static int __devinit xxs1500_pcmcia_probe(struct platform_device *pdev)
 
 	ret = -ENODEV;
 
-	/*
-	 * pseudo-attr: The 32bit address of the PCMCIA attribute space
-	 * for this socket (usually the 36bit address shifted 4 to the
-	 * right).
-	 */
+	/* 36bit PCMCIA Attribute area address */
 	r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pcmcia-attr");
 	if (!r) {
 		dev_err(&pdev->dev, "missing 'pcmcia-attr' resource!\n");
@@ -230,10 +226,7 @@ static int __devinit xxs1500_pcmcia_probe(struct platform_device *pdev)
 	}
 	sock->phys_attr = r->start;
 
-	/*
-	 * pseudo-mem: The 32bit address of the PCMCIA memory space for
-	 * this socket (usually the 36bit address shifted 4 to the right)
-	 */
+	/* 36bit PCMCIA Memory area address */
 	r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pcmcia-mem");
 	if (!r) {
 		dev_err(&pdev->dev, "missing 'pcmcia-mem' resource!\n");
@@ -241,10 +234,7 @@ static int __devinit xxs1500_pcmcia_probe(struct platform_device *pdev)
241 } 234 }
242 sock->phys_mem = r->start; 235 sock->phys_mem = r->start;
243 236
244 /* 237 /* 36bit PCMCIA IO area address */
245 * pseudo-io: The 32bit address of the PCMCIA IO space for this
246 * socket (usually the 36bit address shifted 4 to the right).
247 */
248 r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pcmcia-io"); 238 r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pcmcia-io");
249 if (!r) { 239 if (!r) {
250 dev_err(&pdev->dev, "missing 'pcmcia-io' resource!\n"); 240 dev_err(&pdev->dev, "missing 'pcmcia-io' resource!\n");
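The simplified comments still describe the same contract: the probe
expects three named IORESOURCE_MEM entries from the board code. A
hypothetical platform-device definition that would satisfy these
lookups (the names must match what the probe asks for; the addresses
here are placeholders, not the real XXS1500 memory map):

	static struct resource xxs1500_pcmcia_res[] = {
		{
			.name	= "pcmcia-attr",
			.flags	= IORESOURCE_MEM,
			.start	= 0x40000000,	/* placeholder */
			.end	= 0x403fffff,
		}, {
			.name	= "pcmcia-mem",
			.flags	= IORESOURCE_MEM,
			.start	= 0x40400000,	/* placeholder */
			.end	= 0x407fffff,
		}, {
			.name	= "pcmcia-io",
			.flags	= IORESOURCE_MEM,
			.start	= 0x40800000,	/* placeholder */
			.end	= 0x40bfffff,
		},
	};

	/* then, in the probe, exactly as in the hunk above: */
	r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pcmcia-attr");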
diff --git a/drivers/pcmcia/yenta_socket.c b/drivers/pcmcia/yenta_socket.c
index b85375f87622..967c766f53ba 100644
--- a/drivers/pcmcia/yenta_socket.c
+++ b/drivers/pcmcia/yenta_socket.c
@@ -1408,10 +1408,10 @@ static struct pci_device_id yenta_table[] = {
1408 CB_ID(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_7510, TI12XX), 1408 CB_ID(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_7510, TI12XX),
1409 CB_ID(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_7610, TI12XX), 1409 CB_ID(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_7610, TI12XX),
1410 1410
1411 CB_ID(PCI_VENDOR_ID_ENE, PCI_DEVICE_ID_ENE_710, TI12XX), 1411 CB_ID(PCI_VENDOR_ID_ENE, PCI_DEVICE_ID_ENE_710, ENE),
1412 CB_ID(PCI_VENDOR_ID_ENE, PCI_DEVICE_ID_ENE_712, TI12XX), 1412 CB_ID(PCI_VENDOR_ID_ENE, PCI_DEVICE_ID_ENE_712, ENE),
1413 CB_ID(PCI_VENDOR_ID_ENE, PCI_DEVICE_ID_ENE_720, TI12XX), 1413 CB_ID(PCI_VENDOR_ID_ENE, PCI_DEVICE_ID_ENE_720, ENE),
1414 CB_ID(PCI_VENDOR_ID_ENE, PCI_DEVICE_ID_ENE_722, TI12XX), 1414 CB_ID(PCI_VENDOR_ID_ENE, PCI_DEVICE_ID_ENE_722, ENE),
1415 CB_ID(PCI_VENDOR_ID_ENE, PCI_DEVICE_ID_ENE_1211, ENE), 1415 CB_ID(PCI_VENDOR_ID_ENE, PCI_DEVICE_ID_ENE_1211, ENE),
1416 CB_ID(PCI_VENDOR_ID_ENE, PCI_DEVICE_ID_ENE_1225, ENE), 1416 CB_ID(PCI_VENDOR_ID_ENE, PCI_DEVICE_ID_ENE_1225, ENE),
1417 CB_ID(PCI_VENDOR_ID_ENE, PCI_DEVICE_ID_ENE_1410, ENE), 1417 CB_ID(PCI_VENDOR_ID_ENE, PCI_DEVICE_ID_ENE_1410, ENE),
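The four CB_ID() changes above stop treating the ENE CB-710/712/720/722
bridges as generic TI12XX parts and route them through the ENE-specific
socket setup already used for the 1211/1225/1410 entries, so all ENE
CardBus bridges now share one code path.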
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index 6848f213eb53..cd2ee6fce1b4 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -59,6 +59,8 @@ config ASUS_LAPTOP
59 select NEW_LEDS 59 select NEW_LEDS
60 select BACKLIGHT_CLASS_DEVICE 60 select BACKLIGHT_CLASS_DEVICE
61 depends on INPUT 61 depends on INPUT
62 depends on RFKILL || RFKILL = n
63 select INPUT_SPARSEKMAP
62 ---help--- 64 ---help---
63 This is the new Linux driver for Asus laptops. It may also support some 65 This is the new Linux driver for Asus laptops. It may also support some
64 MEDION, JVC or VICTOR laptops. It makes all the extra buttons generate 66 MEDION, JVC or VICTOR laptops. It makes all the extra buttons generate
@@ -177,6 +179,7 @@ config COMPAL_LAPTOP
177 tristate "Compal Laptop Extras" 179 tristate "Compal Laptop Extras"
178 depends on ACPI 180 depends on ACPI
179 depends on BACKLIGHT_CLASS_DEVICE 181 depends on BACKLIGHT_CLASS_DEVICE
182 depends on RFKILL
180 ---help--- 183 ---help---
181 This is a driver for laptops built by Compal: 184 This is a driver for laptops built by Compal:
182 185
@@ -320,9 +323,15 @@ config THINKPAD_ACPI_VIDEO
320 server running, phase of the moon, and the current mood of 323 server running, phase of the moon, and the current mood of
321 Schroedinger's cat. If you can use X.org's RandR to control 324 Schroedinger's cat. If you can use X.org's RandR to control
322 your ThinkPad's video output ports instead of this feature, 325 your ThinkPad's video output ports instead of this feature,
323 don't think twice: do it and say N here to save some memory. 326 don't think twice: do it and say N here to save memory and avoid
327 bad interactions with X.org.
328
329 NOTE: access to this feature is limited to processes with the
330 CAP_SYS_ADMIN capability, to avoid local DoS issues in platforms
331 where it interacts badly with X.org.
324 332
325 If you are not sure, say Y here. 333 If you are not sure, say Y here but do try to check if you could
334 be using X.org RandR instead.
326 335
327config THINKPAD_ACPI_HOTKEY_POLL 336config THINKPAD_ACPI_HOTKEY_POLL
328 bool "Support NVRAM polling for hot keys" 337 bool "Support NVRAM polling for hot keys"
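The "depends on RFKILL || RFKILL = n" line added above is the standard
Kconfig idiom for an optional subsystem: it allows the driver when
rfkill is built in (y) or disabled entirely (n), and limits the driver
to m when RFKILL=m, blocking the one broken combination -- a built-in
driver referencing a modular rfkill, which would leave unresolved
symbols at link time. The COMPAL_LAPTOP hunk uses the stricter
"depends on RFKILL" because that driver requires rfkill unconditionally.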
diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c
index 61a1c7503658..791fcf321506 100644
--- a/drivers/platform/x86/asus-laptop.c
+++ b/drivers/platform/x86/asus-laptop.c
@@ -45,58 +45,23 @@
45#include <linux/fb.h> 45#include <linux/fb.h>
46#include <linux/leds.h> 46#include <linux/leds.h>
47#include <linux/platform_device.h> 47#include <linux/platform_device.h>
48#include <linux/uaccess.h>
49#include <linux/input.h>
50#include <linux/input/sparse-keymap.h>
51#include <linux/rfkill.h>
48#include <acpi/acpi_drivers.h> 52#include <acpi/acpi_drivers.h>
49#include <acpi/acpi_bus.h> 53#include <acpi/acpi_bus.h>
50#include <asm/uaccess.h>
51#include <linux/input.h>
52
53#define ASUS_LAPTOP_VERSION "0.42"
54
55#define ASUS_HOTK_NAME "Asus Laptop Support"
56#define ASUS_HOTK_CLASS "hotkey"
57#define ASUS_HOTK_DEVICE_NAME "Hotkey"
58#define ASUS_HOTK_FILE KBUILD_MODNAME
59#define ASUS_HOTK_PREFIX "\\_SB.ATKD."
60 54
55#define ASUS_LAPTOP_VERSION "0.42"
61 56
62/* 57#define ASUS_LAPTOP_NAME "Asus Laptop Support"
63 * Some events we use, same for all Asus 58#define ASUS_LAPTOP_CLASS "hotkey"
64 */ 59#define ASUS_LAPTOP_DEVICE_NAME "Hotkey"
65#define ATKD_BR_UP 0x10 60#define ASUS_LAPTOP_FILE KBUILD_MODNAME
66#define ATKD_BR_DOWN 0x20 61#define ASUS_LAPTOP_PREFIX "\\_SB.ATKD."
67#define ATKD_LCD_ON 0x33
68#define ATKD_LCD_OFF 0x34
69
70/*
71 * Known bits returned by \_SB.ATKD.HWRS
72 */
73#define WL_HWRS 0x80
74#define BT_HWRS 0x100
75
76/*
77 * Flags for hotk status
78 * WL_ON and BT_ON are also used for wireless_status()
79 */
80#define WL_ON 0x01 /* internal Wifi */
81#define BT_ON 0x02 /* internal Bluetooth */
82#define MLED_ON 0x04 /* mail LED */
83#define TLED_ON 0x08 /* touchpad LED */
84#define RLED_ON 0x10 /* Record LED */
85#define PLED_ON 0x20 /* Phone LED */
86#define GLED_ON 0x40 /* Gaming LED */
87#define LCD_ON 0x80 /* LCD backlight */
88#define GPS_ON 0x100 /* GPS */
89#define KEY_ON 0x200 /* Keyboard backlight */
90
91#define ASUS_LOG ASUS_HOTK_FILE ": "
92#define ASUS_ERR KERN_ERR ASUS_LOG
93#define ASUS_WARNING KERN_WARNING ASUS_LOG
94#define ASUS_NOTICE KERN_NOTICE ASUS_LOG
95#define ASUS_INFO KERN_INFO ASUS_LOG
96#define ASUS_DEBUG KERN_DEBUG ASUS_LOG
97 62
98MODULE_AUTHOR("Julien Lerouge, Karol Kozimor, Corentin Chary"); 63MODULE_AUTHOR("Julien Lerouge, Karol Kozimor, Corentin Chary");
99MODULE_DESCRIPTION(ASUS_HOTK_NAME); 64MODULE_DESCRIPTION(ASUS_LAPTOP_NAME);
100MODULE_LICENSE("GPL"); 65MODULE_LICENSE("GPL");
101 66
102/* 67/*
@@ -113,225 +78,209 @@ static uint wapf = 1;
113module_param(wapf, uint, 0644); 78module_param(wapf, uint, 0644);
114MODULE_PARM_DESC(wapf, "WAPF value"); 79MODULE_PARM_DESC(wapf, "WAPF value");
115 80
116#define ASUS_HANDLE(object, paths...) \ 81static uint wlan_status = 1;
117 static acpi_handle object##_handle = NULL; \ 82static uint bluetooth_status = 1;
118 static char *object##_paths[] = { paths } 83
84module_param(wlan_status, uint, 0644);
85MODULE_PARM_DESC(wlan_status, "Set the wireless status on boot "
86 "(0 = disabled, 1 = enabled, -1 = don't do anything). "
87 "default is 1");
88
89module_param(bluetooth_status, uint, 0644);
 90MODULE_PARM_DESC(bluetooth_status, "Set the bluetooth status on boot "
91 "(0 = disabled, 1 = enabled, -1 = don't do anything). "
92 "default is 1");
93
94/*
95 * Some events we use, same for all Asus
96 */
97#define ATKD_BR_UP 0x10 /* (event & ~ATKD_BR_UP) = brightness level */
 98#define ATKD_BR_DOWN 0x20 /* (event & ~ATKD_BR_DOWN) = brightness level */
99#define ATKD_BR_MIN ATKD_BR_UP
100#define ATKD_BR_MAX (ATKD_BR_DOWN | 0xF) /* 0x2f */
101#define ATKD_LCD_ON 0x33
102#define ATKD_LCD_OFF 0x34
103
104/*
105 * Known bits returned by \_SB.ATKD.HWRS
106 */
107#define WL_HWRS 0x80
108#define BT_HWRS 0x100
109
110/*
111 * Flags for hotk status
112 * WL_ON and BT_ON are also used for wireless_status()
113 */
114#define WL_RSTS 0x01 /* internal Wifi */
115#define BT_RSTS 0x02 /* internal Bluetooth */
119 116
120/* LED */ 117/* LED */
121ASUS_HANDLE(mled_set, ASUS_HOTK_PREFIX "MLED"); 118#define METHOD_MLED "MLED"
122ASUS_HANDLE(tled_set, ASUS_HOTK_PREFIX "TLED"); 119#define METHOD_TLED "TLED"
123ASUS_HANDLE(rled_set, ASUS_HOTK_PREFIX "RLED"); /* W1JC */ 120#define METHOD_RLED "RLED" /* W1JC */
124ASUS_HANDLE(pled_set, ASUS_HOTK_PREFIX "PLED"); /* A7J */ 121#define METHOD_PLED "PLED" /* A7J */
125ASUS_HANDLE(gled_set, ASUS_HOTK_PREFIX "GLED"); /* G1, G2 (probably) */ 122#define METHOD_GLED "GLED" /* G1, G2 (probably) */
126 123
127/* LEDD */ 124/* LEDD */
128ASUS_HANDLE(ledd_set, ASUS_HOTK_PREFIX "SLCM"); 125#define METHOD_LEDD "SLCM"
129 126
130/* 127/*
131 * Bluetooth and WLAN 128 * Bluetooth and WLAN
132 * WLED and BLED are not handled like other XLED, because in some dsdt 129 * WLED and BLED are not handled like other XLED, because in some dsdt
133 * they also control the WLAN/Bluetooth device. 130 * they also control the WLAN/Bluetooth device.
134 */ 131 */
135ASUS_HANDLE(wl_switch, ASUS_HOTK_PREFIX "WLED"); 132#define METHOD_WLAN "WLED"
136ASUS_HANDLE(bt_switch, ASUS_HOTK_PREFIX "BLED"); 133#define METHOD_BLUETOOTH "BLED"
137ASUS_HANDLE(wireless_status, ASUS_HOTK_PREFIX "RSTS"); /* All new models */ 134#define METHOD_WL_STATUS "RSTS"
138 135
139/* Brightness */ 136/* Brightness */
140ASUS_HANDLE(brightness_set, ASUS_HOTK_PREFIX "SPLV"); 137#define METHOD_BRIGHTNESS_SET "SPLV"
141ASUS_HANDLE(brightness_get, ASUS_HOTK_PREFIX "GPLV"); 138#define METHOD_BRIGHTNESS_GET "GPLV"
142 139
143/* Backlight */ 140/* Backlight */
144ASUS_HANDLE(lcd_switch, "\\_SB.PCI0.SBRG.EC0._Q10", /* All new models */ 141static acpi_handle lcd_switch_handle;
145 "\\_SB.PCI0.ISA.EC0._Q10", /* A1x */ 142static const char *lcd_switch_paths[] = {
146 "\\_SB.PCI0.PX40.ECD0._Q10", /* L3C */ 143 "\\_SB.PCI0.SBRG.EC0._Q10", /* All new models */
147 "\\_SB.PCI0.PX40.EC0.Q10", /* M1A */ 144 "\\_SB.PCI0.ISA.EC0._Q10", /* A1x */
148 "\\_SB.PCI0.LPCB.EC0._Q10", /* P30 */ 145 "\\_SB.PCI0.PX40.ECD0._Q10", /* L3C */
149 "\\_SB.PCI0.LPCB.EC0._Q0E", /* P30/P35 */ 146 "\\_SB.PCI0.PX40.EC0.Q10", /* M1A */
150 "\\_SB.PCI0.PX40.Q10", /* S1x */ 147 "\\_SB.PCI0.LPCB.EC0._Q10", /* P30 */
151 "\\Q10"); /* A2x, L2D, L3D, M2E */ 148 "\\_SB.PCI0.LPCB.EC0._Q0E", /* P30/P35 */
149 "\\_SB.PCI0.PX40.Q10", /* S1x */
150 "\\Q10"}; /* A2x, L2D, L3D, M2E */
152 151
153/* Display */ 152/* Display */
154ASUS_HANDLE(display_set, ASUS_HOTK_PREFIX "SDSP"); 153#define METHOD_SWITCH_DISPLAY "SDSP"
155ASUS_HANDLE(display_get, 154
156 /* A6B, A6K A6R A7D F3JM L4R M6R A3G M6A M6V VX-1 V6J V6V W3Z */ 155static acpi_handle display_get_handle;
157 "\\_SB.PCI0.P0P1.VGA.GETD", 156static const char *display_get_paths[] = {
158 /* A3E A4K, A4D A4L A6J A7J A8J Z71V M9V S5A M5A z33A W1Jc W2V G1 */ 157 /* A6B, A6K A6R A7D F3JM L4R M6R A3G M6A M6V VX-1 V6J V6V W3Z */
159 "\\_SB.PCI0.P0P2.VGA.GETD", 158 "\\_SB.PCI0.P0P1.VGA.GETD",
160 /* A6V A6Q */ 159 /* A3E A4K, A4D A4L A6J A7J A8J Z71V M9V S5A M5A z33A W1Jc W2V G1 */
161 "\\_SB.PCI0.P0P3.VGA.GETD", 160 "\\_SB.PCI0.P0P2.VGA.GETD",
162 /* A6T, A6M */ 161 /* A6V A6Q */
163 "\\_SB.PCI0.P0PA.VGA.GETD", 162 "\\_SB.PCI0.P0P3.VGA.GETD",
164 /* L3C */ 163 /* A6T, A6M */
165 "\\_SB.PCI0.PCI1.VGAC.NMAP", 164 "\\_SB.PCI0.P0PA.VGA.GETD",
166 /* Z96F */ 165 /* L3C */
167 "\\_SB.PCI0.VGA.GETD", 166 "\\_SB.PCI0.PCI1.VGAC.NMAP",
168 /* A2D */ 167 /* Z96F */
169 "\\ACTD", 168 "\\_SB.PCI0.VGA.GETD",
170 /* A4G Z71A W1N W5A W5F M2N M3N M5N M6N S1N S5N */ 169 /* A2D */
171 "\\ADVG", 170 "\\ACTD",
172 /* P30 */ 171 /* A4G Z71A W1N W5A W5F M2N M3N M5N M6N S1N S5N */
173 "\\DNXT", 172 "\\ADVG",
174 /* A2H D1 L2D L3D L3H L2E L5D L5C M1A M2E L4L W3V */ 173 /* P30 */
175 "\\INFB", 174 "\\DNXT",
176 /* A3F A6F A3N A3L M6N W3N W6A */ 175 /* A2H D1 L2D L3D L3H L2E L5D L5C M1A M2E L4L W3V */
177 "\\SSTE"); 176 "\\INFB",
178 177 /* A3F A6F A3N A3L M6N W3N W6A */
179ASUS_HANDLE(ls_switch, ASUS_HOTK_PREFIX "ALSC"); /* Z71A Z71V */ 178 "\\SSTE"};
180ASUS_HANDLE(ls_level, ASUS_HOTK_PREFIX "ALSL"); /* Z71A Z71V */ 179
180#define METHOD_ALS_CONTROL "ALSC" /* Z71A Z71V */
181#define METHOD_ALS_LEVEL "ALSL" /* Z71A Z71V */
181 182
182/* GPS */ 183/* GPS */
183/* R2H use different handle for GPS on/off */ 184/* R2H use different handle for GPS on/off */
184ASUS_HANDLE(gps_on, ASUS_HOTK_PREFIX "SDON"); /* R2H */ 185#define METHOD_GPS_ON "SDON"
185ASUS_HANDLE(gps_off, ASUS_HOTK_PREFIX "SDOF"); /* R2H */ 186#define METHOD_GPS_OFF "SDOF"
186ASUS_HANDLE(gps_status, ASUS_HOTK_PREFIX "GPST"); 187#define METHOD_GPS_STATUS "GPST"
187 188
188/* Keyboard light */ 189/* Keyboard light */
189ASUS_HANDLE(kled_set, ASUS_HOTK_PREFIX "SLKB"); 190#define METHOD_KBD_LIGHT_SET "SLKB"
190ASUS_HANDLE(kled_get, ASUS_HOTK_PREFIX "GLKB"); 191#define METHOD_KBD_LIGHT_GET "GLKB"
191 192
192/* 193/*
193 * This is the main structure, we can use it to store anything interesting 194 * Define a specific led structure to keep the main structure clean
194 * about the hotk device
195 */ 195 */
196struct asus_hotk { 196struct asus_led {
197 char *name; /* laptop name */ 197 int wk;
198 struct acpi_device *device; /* the device we are in */ 198 struct work_struct work;
199 acpi_handle handle; /* the handle of the hotk device */ 199 struct led_classdev led;
200 char status; /* status of the hotk, for LEDs, ... */ 200 struct asus_laptop *asus;
201 u32 ledd_status; /* status of the LED display */ 201 const char *method;
202 u8 light_level; /* light sensor level */
203 u8 light_switch; /* light sensor switch value */
204 u16 event_count[128]; /* count for each event TODO make this better */
205 struct input_dev *inputdev;
206 u16 *keycode_map;
207}; 202};
208 203
209/* 204/*
210 * This header is made available to allow proper configuration given model, 205 * This is the main structure, we can use it to store anything interesting
211 * revision number , ... this info cannot go in struct asus_hotk because it is 206 * about the hotk device
212 * available before the hotk
213 */
214static struct acpi_table_header *asus_info;
215
216/* The actual device the driver binds to */
217static struct asus_hotk *hotk;
218
219/*
220 * The hotkey driver declaration
221 */ 207 */
222static const struct acpi_device_id asus_device_ids[] = { 208struct asus_laptop {
223 {"ATK0100", 0}, 209 char *name; /* laptop name */
224 {"ATK0101", 0},
225 {"", 0},
226};
227MODULE_DEVICE_TABLE(acpi, asus_device_ids);
228 210
229static int asus_hotk_add(struct acpi_device *device); 211 struct acpi_table_header *dsdt_info;
230static int asus_hotk_remove(struct acpi_device *device, int type); 212 struct platform_device *platform_device;
231static void asus_hotk_notify(struct acpi_device *device, u32 event); 213 struct acpi_device *device; /* the device we are in */
214 struct backlight_device *backlight_device;
232 215
233static struct acpi_driver asus_hotk_driver = { 216 struct input_dev *inputdev;
234 .name = ASUS_HOTK_NAME, 217 struct key_entry *keymap;
235 .class = ASUS_HOTK_CLASS,
236 .owner = THIS_MODULE,
237 .ids = asus_device_ids,
238 .flags = ACPI_DRIVER_ALL_NOTIFY_EVENTS,
239 .ops = {
240 .add = asus_hotk_add,
241 .remove = asus_hotk_remove,
242 .notify = asus_hotk_notify,
243 },
244};
245 218
246/* The backlight device /sys/class/backlight */ 219 struct asus_led mled;
247static struct backlight_device *asus_backlight_device; 220 struct asus_led tled;
221 struct asus_led rled;
222 struct asus_led pled;
223 struct asus_led gled;
224 struct asus_led kled;
225 struct workqueue_struct *led_workqueue;
248 226
249/* 227 int wireless_status;
250 * The backlight class declaration 228 bool have_rsts;
251 */ 229 int lcd_state;
252static int read_brightness(struct backlight_device *bd);
253static int update_bl_status(struct backlight_device *bd);
254static struct backlight_ops asusbl_ops = {
255 .get_brightness = read_brightness,
256 .update_status = update_bl_status,
257};
258 230
259/* 231 struct rfkill *gps_rfkill;
260 * These functions actually update the LED's, and are called from a
261 * workqueue. By doing this as separate work rather than when the LED
262 * subsystem asks, we avoid messing with the Asus ACPI stuff during a
263 * potentially bad time, such as a timer interrupt.
264 */
265static struct workqueue_struct *led_workqueue;
266
267#define ASUS_LED(object, ledname, max) \
268 static void object##_led_set(struct led_classdev *led_cdev, \
269 enum led_brightness value); \
270 static enum led_brightness object##_led_get( \
271 struct led_classdev *led_cdev); \
272 static void object##_led_update(struct work_struct *ignored); \
273 static int object##_led_wk; \
274 static DECLARE_WORK(object##_led_work, object##_led_update); \
275 static struct led_classdev object##_led = { \
276 .name = "asus::" ledname, \
277 .brightness_set = object##_led_set, \
278 .brightness_get = object##_led_get, \
279 .max_brightness = max \
280 }
281 232
282ASUS_LED(mled, "mail", 1); 233 acpi_handle handle; /* the handle of the hotk device */
283ASUS_LED(tled, "touchpad", 1); 234 u32 ledd_status; /* status of the LED display */
284ASUS_LED(rled, "record", 1); 235 u8 light_level; /* light sensor level */
285ASUS_LED(pled, "phone", 1); 236 u8 light_switch; /* light sensor switch value */
286ASUS_LED(gled, "gaming", 1); 237 u16 event_count[128]; /* count for each event TODO make this better */
287ASUS_LED(kled, "kbd_backlight", 3); 238 u16 *keycode_map;
288
289struct key_entry {
290 char type;
291 u8 code;
292 u16 keycode;
293}; 239};
294 240
295enum { KE_KEY, KE_END }; 241static const struct key_entry asus_keymap[] = {
296 242 /* Lenovo SL Specific keycodes */
297static struct key_entry asus_keymap[] = { 243 {KE_KEY, 0x02, { KEY_SCREENLOCK } },
298 {KE_KEY, 0x02, KEY_SCREENLOCK}, 244 {KE_KEY, 0x05, { KEY_WLAN } },
299 {KE_KEY, 0x05, KEY_WLAN}, 245 {KE_KEY, 0x08, { KEY_F13 } },
300 {KE_KEY, 0x08, KEY_F13}, 246 {KE_KEY, 0x17, { KEY_ZOOM } },
301 {KE_KEY, 0x17, KEY_ZOOM}, 247 {KE_KEY, 0x1f, { KEY_BATTERY } },
302 {KE_KEY, 0x1f, KEY_BATTERY}, 248 /* End of Lenovo SL Specific keycodes */
303 {KE_KEY, 0x30, KEY_VOLUMEUP}, 249 {KE_KEY, 0x30, { KEY_VOLUMEUP } },
304 {KE_KEY, 0x31, KEY_VOLUMEDOWN}, 250 {KE_KEY, 0x31, { KEY_VOLUMEDOWN } },
305 {KE_KEY, 0x32, KEY_MUTE}, 251 {KE_KEY, 0x32, { KEY_MUTE } },
306 {KE_KEY, 0x33, KEY_SWITCHVIDEOMODE}, 252 {KE_KEY, 0x33, { KEY_SWITCHVIDEOMODE } },
307 {KE_KEY, 0x34, KEY_SWITCHVIDEOMODE}, 253 {KE_KEY, 0x34, { KEY_SWITCHVIDEOMODE } },
308 {KE_KEY, 0x40, KEY_PREVIOUSSONG}, 254 {KE_KEY, 0x40, { KEY_PREVIOUSSONG } },
309 {KE_KEY, 0x41, KEY_NEXTSONG}, 255 {KE_KEY, 0x41, { KEY_NEXTSONG } },
310 {KE_KEY, 0x43, KEY_STOPCD}, 256 {KE_KEY, 0x43, { KEY_STOPCD } },
311 {KE_KEY, 0x45, KEY_PLAYPAUSE}, 257 {KE_KEY, 0x45, { KEY_PLAYPAUSE } },
312 {KE_KEY, 0x4c, KEY_MEDIA}, 258 {KE_KEY, 0x4c, { KEY_MEDIA } },
313 {KE_KEY, 0x50, KEY_EMAIL}, 259 {KE_KEY, 0x50, { KEY_EMAIL } },
314 {KE_KEY, 0x51, KEY_WWW}, 260 {KE_KEY, 0x51, { KEY_WWW } },
315 {KE_KEY, 0x55, KEY_CALC}, 261 {KE_KEY, 0x55, { KEY_CALC } },
316 {KE_KEY, 0x5C, KEY_SCREENLOCK}, /* Screenlock */ 262 {KE_KEY, 0x5C, { KEY_SCREENLOCK } }, /* Screenlock */
317 {KE_KEY, 0x5D, KEY_WLAN}, 263 {KE_KEY, 0x5D, { KEY_WLAN } },
318 {KE_KEY, 0x5E, KEY_WLAN}, 264 {KE_KEY, 0x5E, { KEY_WLAN } },
319 {KE_KEY, 0x5F, KEY_WLAN}, 265 {KE_KEY, 0x5F, { KEY_WLAN } },
320 {KE_KEY, 0x60, KEY_SWITCHVIDEOMODE}, 266 {KE_KEY, 0x60, { KEY_SWITCHVIDEOMODE } },
321 {KE_KEY, 0x61, KEY_SWITCHVIDEOMODE}, 267 {KE_KEY, 0x61, { KEY_SWITCHVIDEOMODE } },
322 {KE_KEY, 0x62, KEY_SWITCHVIDEOMODE}, 268 {KE_KEY, 0x62, { KEY_SWITCHVIDEOMODE } },
323 {KE_KEY, 0x63, KEY_SWITCHVIDEOMODE}, 269 {KE_KEY, 0x63, { KEY_SWITCHVIDEOMODE } },
324 {KE_KEY, 0x6B, KEY_F13}, /* Lock Touchpad */ 270 {KE_KEY, 0x6B, { KEY_F13 } }, /* Lock Touchpad */
325 {KE_KEY, 0x82, KEY_CAMERA}, 271 {KE_KEY, 0x7E, { KEY_BLUETOOTH } },
326 {KE_KEY, 0x88, KEY_WLAN }, 272 {KE_KEY, 0x7D, { KEY_BLUETOOTH } },
327 {KE_KEY, 0x8A, KEY_PROG1}, 273 {KE_KEY, 0x82, { KEY_CAMERA } },
328 {KE_KEY, 0x95, KEY_MEDIA}, 274 {KE_KEY, 0x88, { KEY_WLAN } },
329 {KE_KEY, 0x99, KEY_PHONE}, 275 {KE_KEY, 0x8A, { KEY_PROG1 } },
330 {KE_KEY, 0xc4, KEY_KBDILLUMUP}, 276 {KE_KEY, 0x95, { KEY_MEDIA } },
331 {KE_KEY, 0xc5, KEY_KBDILLUMDOWN}, 277 {KE_KEY, 0x99, { KEY_PHONE } },
278 {KE_KEY, 0xc4, { KEY_KBDILLUMUP } },
279 {KE_KEY, 0xc5, { KEY_KBDILLUMDOWN } },
332 {KE_END, 0}, 280 {KE_END, 0},
333}; 281};
334 282
283
335/* 284/*
336 * This function evaluates an ACPI method, given an int as parameter, the 285 * This function evaluates an ACPI method, given an int as parameter, the
337 * method is searched within the scope of the handle, can be NULL. The output 286 * method is searched within the scope of the handle, can be NULL. The output
@@ -339,8 +288,8 @@ static struct key_entry asus_keymap[] = {
339 * 288 *
340 * returns 0 if write is successful, -1 else. 289 * returns 0 if write is successful, -1 else.
341 */ 290 */
342static int write_acpi_int(acpi_handle handle, const char *method, int val, 291static int write_acpi_int_ret(acpi_handle handle, const char *method, int val,
343 struct acpi_buffer *output) 292 struct acpi_buffer *output)
344{ 293{
345 struct acpi_object_list params; /* list of input parameters (an int) */ 294 struct acpi_object_list params; /* list of input parameters (an int) */
346 union acpi_object in_obj; /* the only param we use */ 295 union acpi_object in_obj; /* the only param we use */
@@ -361,102 +310,82 @@ static int write_acpi_int(acpi_handle handle, const char *method, int val,
361 return -1; 310 return -1;
362} 311}
363 312
364static int read_wireless_status(int mask) 313static int write_acpi_int(acpi_handle handle, const char *method, int val)
365{ 314{
366 unsigned long long status; 315 return write_acpi_int_ret(handle, method, val, NULL);
367 acpi_status rv = AE_OK; 316}
317
318static int acpi_check_handle(acpi_handle handle, const char *method,
319 acpi_handle *ret)
320{
321 acpi_status status;
368 322
369 if (!wireless_status_handle) 323 if (method == NULL)
370 return (hotk->status & mask) ? 1 : 0; 324 return -ENODEV;
371 325
372 rv = acpi_evaluate_integer(wireless_status_handle, NULL, NULL, &status); 326 if (ret)
373 if (ACPI_FAILURE(rv)) 327 status = acpi_get_handle(handle, (char *)method,
374 pr_warning("Error reading Wireless status\n"); 328 ret);
375 else 329 else {
376 return (status & mask) ? 1 : 0; 330 acpi_handle dummy;
377 331
378 return (hotk->status & mask) ? 1 : 0; 332 status = acpi_get_handle(handle, (char *)method,
333 &dummy);
334 }
335
336 if (status != AE_OK) {
337 if (ret)
338 pr_warning("Error finding %s\n", method);
339 return -ENODEV;
340 }
341 return 0;
379} 342}
380 343
381static int read_gps_status(void) 344/* Generic LED function */
345static int asus_led_set(struct asus_laptop *asus, const char *method,
346 int value)
382{ 347{
383 unsigned long long status; 348 if (!strcmp(method, METHOD_MLED))
384 acpi_status rv = AE_OK; 349 value = !value;
385 350 else if (!strcmp(method, METHOD_GLED))
386 rv = acpi_evaluate_integer(gps_status_handle, NULL, NULL, &status); 351 value = !value + 1;
387 if (ACPI_FAILURE(rv))
388 pr_warning("Error reading GPS status\n");
389 else 352 else
390 return status ? 1 : 0; 353 value = !!value;
391 354
392 return (hotk->status & GPS_ON) ? 1 : 0; 355 return write_acpi_int(asus->handle, method, value);
393} 356}
394 357
395/* Generic LED functions */ 358/*
396static int read_status(int mask) 359 * LEDs
360 */
361/* /sys/class/led handlers */
362static void asus_led_cdev_set(struct led_classdev *led_cdev,
363 enum led_brightness value)
397{ 364{
398 /* There is a special method for both wireless devices */ 365 struct asus_led *led = container_of(led_cdev, struct asus_led, led);
399 if (mask == BT_ON || mask == WL_ON) 366 struct asus_laptop *asus = led->asus;
400 return read_wireless_status(mask);
401 else if (mask == GPS_ON)
402 return read_gps_status();
403 367
404 return (hotk->status & mask) ? 1 : 0; 368 led->wk = !!value;
369 queue_work(asus->led_workqueue, &led->work);
405} 370}
406 371
407static void write_status(acpi_handle handle, int out, int mask) 372static void asus_led_cdev_update(struct work_struct *work)
408{ 373{
409 hotk->status = (out) ? (hotk->status | mask) : (hotk->status & ~mask); 374 struct asus_led *led = container_of(work, struct asus_led, work);
410 375 struct asus_laptop *asus = led->asus;
411 switch (mask) {
412 case MLED_ON:
413 out = !(out & 0x1);
414 break;
415 case GLED_ON:
416 out = (out & 0x1) + 1;
417 break;
418 case GPS_ON:
419 handle = (out) ? gps_on_handle : gps_off_handle;
420 out = 0x02;
421 break;
422 default:
423 out &= 0x1;
424 break;
425 }
426 376
427 if (write_acpi_int(handle, NULL, out, NULL)) 377 asus_led_set(asus, led->method, led->wk);
428 pr_warning(" write failed %x\n", mask);
429} 378}
430 379
431/* /sys/class/led handlers */ 380static enum led_brightness asus_led_cdev_get(struct led_classdev *led_cdev)
432#define ASUS_LED_HANDLER(object, mask) \ 381{
433 static void object##_led_set(struct led_classdev *led_cdev, \ 382 return led_cdev->brightness;
434 enum led_brightness value) \ 383}
435 { \
436 object##_led_wk = (value > 0) ? 1 : 0; \
437 queue_work(led_workqueue, &object##_led_work); \
438 } \
439 static void object##_led_update(struct work_struct *ignored) \
440 { \
441 int value = object##_led_wk; \
442 write_status(object##_set_handle, value, (mask)); \
443 } \
444 static enum led_brightness object##_led_get( \
445 struct led_classdev *led_cdev) \
446 { \
447 return led_cdev->brightness; \
448 }
449
450ASUS_LED_HANDLER(mled, MLED_ON);
451ASUS_LED_HANDLER(pled, PLED_ON);
452ASUS_LED_HANDLER(rled, RLED_ON);
453ASUS_LED_HANDLER(tled, TLED_ON);
454ASUS_LED_HANDLER(gled, GLED_ON);
455 384
456/* 385/*
457 * Keyboard backlight 386 * Keyboard backlight (also a LED)
458 */ 387 */
459static int get_kled_lvl(void) 388static int asus_kled_lvl(struct asus_laptop *asus)
460{ 389{
461 unsigned long long kblv; 390 unsigned long long kblv;
462 struct acpi_object_list params; 391 struct acpi_object_list params;
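All of the new callbacks recover their state with container_of()
instead of the old file-scope globals: the struct led_classdev handed
back by the LED core is embedded inside struct asus_led, so the
enclosing object can be computed from the member pointer. The macro is
essentially:

	/* container_of(ptr, type, member): subtract the member's offset
	 * to get back to the start of the enclosing structure */
	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct asus_led *led = container_of(led_cdev, struct asus_led, led);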
@@ -468,75 +397,183 @@ static int get_kled_lvl(void)
468 in_obj.type = ACPI_TYPE_INTEGER; 397 in_obj.type = ACPI_TYPE_INTEGER;
469 in_obj.integer.value = 2; 398 in_obj.integer.value = 2;
470 399
471 rv = acpi_evaluate_integer(kled_get_handle, NULL, &params, &kblv); 400 rv = acpi_evaluate_integer(asus->handle, METHOD_KBD_LIGHT_GET,
401 &params, &kblv);
472 if (ACPI_FAILURE(rv)) { 402 if (ACPI_FAILURE(rv)) {
473 pr_warning("Error reading kled level\n"); 403 pr_warning("Error reading kled level\n");
474 return 0; 404 return -ENODEV;
475 } 405 }
476 return kblv; 406 return kblv;
477} 407}
478 408
479static int set_kled_lvl(int kblv) 409static int asus_kled_set(struct asus_laptop *asus, int kblv)
480{ 410{
481 if (kblv > 0) 411 if (kblv > 0)
482 kblv = (1 << 7) | (kblv & 0x7F); 412 kblv = (1 << 7) | (kblv & 0x7F);
483 else 413 else
484 kblv = 0; 414 kblv = 0;
485 415
486 if (write_acpi_int(kled_set_handle, NULL, kblv, NULL)) { 416 if (write_acpi_int(asus->handle, METHOD_KBD_LIGHT_SET, kblv)) {
487 pr_warning("Keyboard LED display write failed\n"); 417 pr_warning("Keyboard LED display write failed\n");
488 return -EINVAL; 418 return -EINVAL;
489 } 419 }
490 return 0; 420 return 0;
491} 421}
492 422
493static void kled_led_set(struct led_classdev *led_cdev, 423static void asus_kled_cdev_set(struct led_classdev *led_cdev,
494 enum led_brightness value) 424 enum led_brightness value)
495{ 425{
496 kled_led_wk = value; 426 struct asus_led *led = container_of(led_cdev, struct asus_led, led);
497 queue_work(led_workqueue, &kled_led_work); 427 struct asus_laptop *asus = led->asus;
428
429 led->wk = value;
430 queue_work(asus->led_workqueue, &led->work);
498} 431}
499 432
500static void kled_led_update(struct work_struct *ignored) 433static void asus_kled_cdev_update(struct work_struct *work)
501{ 434{
502 set_kled_lvl(kled_led_wk); 435 struct asus_led *led = container_of(work, struct asus_led, work);
436 struct asus_laptop *asus = led->asus;
437
438 asus_kled_set(asus, led->wk);
503} 439}
504 440
505static enum led_brightness kled_led_get(struct led_classdev *led_cdev) 441static enum led_brightness asus_kled_cdev_get(struct led_classdev *led_cdev)
506{ 442{
507 return get_kled_lvl(); 443 struct asus_led *led = container_of(led_cdev, struct asus_led, led);
444 struct asus_laptop *asus = led->asus;
445
446 return asus_kled_lvl(asus);
508} 447}
509 448
510static int get_lcd_state(void) 449static void asus_led_exit(struct asus_laptop *asus)
511{ 450{
512 return read_status(LCD_ON); 451 if (asus->mled.led.dev)
452 led_classdev_unregister(&asus->mled.led);
453 if (asus->tled.led.dev)
454 led_classdev_unregister(&asus->tled.led);
455 if (asus->pled.led.dev)
456 led_classdev_unregister(&asus->pled.led);
457 if (asus->rled.led.dev)
458 led_classdev_unregister(&asus->rled.led);
459 if (asus->gled.led.dev)
460 led_classdev_unregister(&asus->gled.led);
461 if (asus->kled.led.dev)
462 led_classdev_unregister(&asus->kled.led);
463 if (asus->led_workqueue) {
464 destroy_workqueue(asus->led_workqueue);
465 asus->led_workqueue = NULL;
466 }
513} 467}
514 468
515static int set_lcd_state(int value) 469/* Ugly macro, need to fix that later */
470static int asus_led_register(struct asus_laptop *asus,
471 struct asus_led *led,
472 const char *name, const char *method)
473{
474 struct led_classdev *led_cdev = &led->led;
475
476 if (!method || acpi_check_handle(asus->handle, method, NULL))
477 return 0; /* Led not present */
478
479 led->asus = asus;
480 led->method = method;
481
482 INIT_WORK(&led->work, asus_led_cdev_update);
483 led_cdev->name = name;
484 led_cdev->brightness_set = asus_led_cdev_set;
485 led_cdev->brightness_get = asus_led_cdev_get;
486 led_cdev->max_brightness = 1;
487 return led_classdev_register(&asus->platform_device->dev, led_cdev);
488}
489
490static int asus_led_init(struct asus_laptop *asus)
491{
492 int r;
493
494 /*
495 * Functions that actually update the LED's are called from a
496 * workqueue. By doing this as separate work rather than when the LED
497 * subsystem asks, we avoid messing with the Asus ACPI stuff during a
498 * potentially bad time, such as a timer interrupt.
499 */
500 asus->led_workqueue = create_singlethread_workqueue("led_workqueue");
501 if (!asus->led_workqueue)
502 return -ENOMEM;
503
504 r = asus_led_register(asus, &asus->mled, "asus::mail", METHOD_MLED);
505 if (r)
506 goto error;
507 r = asus_led_register(asus, &asus->tled, "asus::touchpad", METHOD_TLED);
508 if (r)
509 goto error;
510 r = asus_led_register(asus, &asus->rled, "asus::record", METHOD_RLED);
511 if (r)
512 goto error;
513 r = asus_led_register(asus, &asus->pled, "asus::phone", METHOD_PLED);
514 if (r)
515 goto error;
516 r = asus_led_register(asus, &asus->gled, "asus::gaming", METHOD_GLED);
517 if (r)
518 goto error;
519 if (!acpi_check_handle(asus->handle, METHOD_KBD_LIGHT_SET, NULL) &&
520 !acpi_check_handle(asus->handle, METHOD_KBD_LIGHT_GET, NULL)) {
521 struct asus_led *led = &asus->kled;
522 struct led_classdev *cdev = &led->led;
523
524 led->asus = asus;
525
526 INIT_WORK(&led->work, asus_kled_cdev_update);
527 cdev->name = "asus::kbd_backlight";
528 cdev->brightness_set = asus_kled_cdev_set;
529 cdev->brightness_get = asus_kled_cdev_get;
530 cdev->max_brightness = 3;
531 r = led_classdev_register(&asus->platform_device->dev, cdev);
532 }
533error:
534 if (r)
535 asus_led_exit(asus);
536 return r;
537}
538
539/*
540 * Backlight device
541 */
542static int asus_lcd_status(struct asus_laptop *asus)
543{
544 return asus->lcd_state;
545}
546
547static int asus_lcd_set(struct asus_laptop *asus, int value)
516{ 548{
517 int lcd = 0; 549 int lcd = 0;
518 acpi_status status = 0; 550 acpi_status status = 0;
519 551
520 lcd = value ? 1 : 0; 552 lcd = !!value;
521 553
522 if (lcd == get_lcd_state()) 554 if (lcd == asus_lcd_status(asus))
523 return 0; 555 return 0;
524 556
525 if (lcd_switch_handle) { 557 if (!lcd_switch_handle)
526 status = acpi_evaluate_object(lcd_switch_handle, 558 return -ENODEV;
527 NULL, NULL, NULL); 559
560 status = acpi_evaluate_object(lcd_switch_handle,
561 NULL, NULL, NULL);
528 562
529 if (ACPI_FAILURE(status)) 563 if (ACPI_FAILURE(status)) {
530 pr_warning("Error switching LCD\n"); 564 pr_warning("Error switching LCD\n");
565 return -ENODEV;
531 } 566 }
532 567
533 write_status(NULL, lcd, LCD_ON); 568 asus->lcd_state = lcd;
534 return 0; 569 return 0;
535} 570}
536 571
537static void lcd_blank(int blank) 572static void lcd_blank(struct asus_laptop *asus, int blank)
538{ 573{
539 struct backlight_device *bd = asus_backlight_device; 574 struct backlight_device *bd = asus->backlight_device;
575
576 asus->lcd_state = (blank == FB_BLANK_UNBLANK);
540 577
541 if (bd) { 578 if (bd) {
542 bd->props.power = blank; 579 bd->props.power = blank;
@@ -544,44 +581,91 @@ static void lcd_blank(int blank)
544 } 581 }
545} 582}
546 583
547static int read_brightness(struct backlight_device *bd) 584static int asus_read_brightness(struct backlight_device *bd)
548{ 585{
586 struct asus_laptop *asus = bl_get_data(bd);
549 unsigned long long value; 587 unsigned long long value;
550 acpi_status rv = AE_OK; 588 acpi_status rv = AE_OK;
551 589
552 rv = acpi_evaluate_integer(brightness_get_handle, NULL, NULL, &value); 590 rv = acpi_evaluate_integer(asus->handle, METHOD_BRIGHTNESS_GET,
591 NULL, &value);
553 if (ACPI_FAILURE(rv)) 592 if (ACPI_FAILURE(rv))
554 pr_warning("Error reading brightness\n"); 593 pr_warning("Error reading brightness\n");
555 594
556 return value; 595 return value;
557} 596}
558 597
559static int set_brightness(struct backlight_device *bd, int value) 598static int asus_set_brightness(struct backlight_device *bd, int value)
560{ 599{
561 int ret = 0; 600 struct asus_laptop *asus = bl_get_data(bd);
562
563 value = (0 < value) ? ((15 < value) ? 15 : value) : 0;
564 /* 0 <= value <= 15 */
565 601
566 if (write_acpi_int(brightness_set_handle, NULL, value, NULL)) { 602 if (write_acpi_int(asus->handle, METHOD_BRIGHTNESS_SET, value)) {
567 pr_warning("Error changing brightness\n"); 603 pr_warning("Error changing brightness\n");
568 ret = -EIO; 604 return -EIO;
569 } 605 }
570 606 return 0;
571 return ret;
572} 607}
573 608
574static int update_bl_status(struct backlight_device *bd) 609static int update_bl_status(struct backlight_device *bd)
575{ 610{
611 struct asus_laptop *asus = bl_get_data(bd);
576 int rv; 612 int rv;
577 int value = bd->props.brightness; 613 int value = bd->props.brightness;
578 614
579 rv = set_brightness(bd, value); 615 rv = asus_set_brightness(bd, value);
580 if (rv) 616 if (rv)
581 return rv; 617 return rv;
582 618
583 value = (bd->props.power == FB_BLANK_UNBLANK) ? 1 : 0; 619 value = (bd->props.power == FB_BLANK_UNBLANK) ? 1 : 0;
584 return set_lcd_state(value); 620 return asus_lcd_set(asus, value);
621}
622
623static struct backlight_ops asusbl_ops = {
624 .get_brightness = asus_read_brightness,
625 .update_status = update_bl_status,
626};
627
628static int asus_backlight_notify(struct asus_laptop *asus)
629{
630 struct backlight_device *bd = asus->backlight_device;
631 int old = bd->props.brightness;
632
633 backlight_force_update(bd, BACKLIGHT_UPDATE_HOTKEY);
634
635 return old;
636}
637
638static int asus_backlight_init(struct asus_laptop *asus)
639{
640 struct backlight_device *bd;
641 struct device *dev = &asus->platform_device->dev;
642
643 if (!acpi_check_handle(asus->handle, METHOD_BRIGHTNESS_GET, NULL) &&
644 !acpi_check_handle(asus->handle, METHOD_BRIGHTNESS_SET, NULL) &&
645 lcd_switch_handle) {
646 bd = backlight_device_register(ASUS_LAPTOP_FILE, dev,
647 asus, &asusbl_ops);
648 if (IS_ERR(bd)) {
649 pr_err("Could not register asus backlight device\n");
650 asus->backlight_device = NULL;
651 return PTR_ERR(bd);
652 }
653
654 asus->backlight_device = bd;
655
656 bd->props.max_brightness = 15;
657 bd->props.power = FB_BLANK_UNBLANK;
658 bd->props.brightness = asus_read_brightness(bd);
659 backlight_update_status(bd);
660 }
661 return 0;
662}
663
664static void asus_backlight_exit(struct asus_laptop *asus)
665{
666 if (asus->backlight_device)
667 backlight_device_unregister(asus->backlight_device);
668 asus->backlight_device = NULL;
585} 669}
586 670
587/* 671/*
@@ -596,25 +680,26 @@ static int update_bl_status(struct backlight_device *bd)
596static ssize_t show_infos(struct device *dev, 680static ssize_t show_infos(struct device *dev,
597 struct device_attribute *attr, char *page) 681 struct device_attribute *attr, char *page)
598{ 682{
683 struct asus_laptop *asus = dev_get_drvdata(dev);
599 int len = 0; 684 int len = 0;
600 unsigned long long temp; 685 unsigned long long temp;
601 char buf[16]; /* enough for all info */ 686 char buf[16]; /* enough for all info */
602 acpi_status rv = AE_OK; 687 acpi_status rv = AE_OK;
603 688
604 /* 689 /*
605 * We use the easy way, we don't care of off and count, so we don't set eof 690 * We use the easy way, we don't care of off and count,
606 * to 1 691 * so we don't set eof to 1
607 */ 692 */
608 693
609 len += sprintf(page, ASUS_HOTK_NAME " " ASUS_LAPTOP_VERSION "\n"); 694 len += sprintf(page, ASUS_LAPTOP_NAME " " ASUS_LAPTOP_VERSION "\n");
610 len += sprintf(page + len, "Model reference : %s\n", hotk->name); 695 len += sprintf(page + len, "Model reference : %s\n", asus->name);
611 /* 696 /*
612 * The SFUN method probably allows the original driver to get the list 697 * The SFUN method probably allows the original driver to get the list
613 * of features supported by a given model. For now, 0x0100 or 0x0800 698 * of features supported by a given model. For now, 0x0100 or 0x0800
614 * bit signifies that the laptop is equipped with a Wi-Fi MiniPCI card. 699 * bit signifies that the laptop is equipped with a Wi-Fi MiniPCI card.
615 * The significance of others is yet to be found. 700 * The significance of others is yet to be found.
616 */ 701 */
617 rv = acpi_evaluate_integer(hotk->handle, "SFUN", NULL, &temp); 702 rv = acpi_evaluate_integer(asus->handle, "SFUN", NULL, &temp);
618 if (!ACPI_FAILURE(rv)) 703 if (!ACPI_FAILURE(rv))
619 len += sprintf(page + len, "SFUN value : %#x\n", 704 len += sprintf(page + len, "SFUN value : %#x\n",
620 (uint) temp); 705 (uint) temp);
@@ -624,7 +709,7 @@ static ssize_t show_infos(struct device *dev,
624 * The significance of others is yet to be found. 709 * The significance of others is yet to be found.
 625 * If we don't find the method, we assume the devices are present. 710 * If we don't find the method, we assume the devices are present.
626 */ 711 */
627 rv = acpi_evaluate_integer(hotk->handle, "HRWS", NULL, &temp); 712 rv = acpi_evaluate_integer(asus->handle, "HRWS", NULL, &temp);
628 if (!ACPI_FAILURE(rv)) 713 if (!ACPI_FAILURE(rv))
629 len += sprintf(page + len, "HRWS value : %#x\n", 714 len += sprintf(page + len, "HRWS value : %#x\n",
630 (uint) temp); 715 (uint) temp);
@@ -635,26 +720,26 @@ static ssize_t show_infos(struct device *dev,
635 * Note: since not all the laptops provide this method, errors are 720 * Note: since not all the laptops provide this method, errors are
636 * silently ignored. 721 * silently ignored.
637 */ 722 */
638 rv = acpi_evaluate_integer(hotk->handle, "ASYM", NULL, &temp); 723 rv = acpi_evaluate_integer(asus->handle, "ASYM", NULL, &temp);
639 if (!ACPI_FAILURE(rv)) 724 if (!ACPI_FAILURE(rv))
640 len += sprintf(page + len, "ASYM value : %#x\n", 725 len += sprintf(page + len, "ASYM value : %#x\n",
641 (uint) temp); 726 (uint) temp);
642 if (asus_info) { 727 if (asus->dsdt_info) {
643 snprintf(buf, 16, "%d", asus_info->length); 728 snprintf(buf, 16, "%d", asus->dsdt_info->length);
644 len += sprintf(page + len, "DSDT length : %s\n", buf); 729 len += sprintf(page + len, "DSDT length : %s\n", buf);
645 snprintf(buf, 16, "%d", asus_info->checksum); 730 snprintf(buf, 16, "%d", asus->dsdt_info->checksum);
646 len += sprintf(page + len, "DSDT checksum : %s\n", buf); 731 len += sprintf(page + len, "DSDT checksum : %s\n", buf);
647 snprintf(buf, 16, "%d", asus_info->revision); 732 snprintf(buf, 16, "%d", asus->dsdt_info->revision);
648 len += sprintf(page + len, "DSDT revision : %s\n", buf); 733 len += sprintf(page + len, "DSDT revision : %s\n", buf);
649 snprintf(buf, 7, "%s", asus_info->oem_id); 734 snprintf(buf, 7, "%s", asus->dsdt_info->oem_id);
650 len += sprintf(page + len, "OEM id : %s\n", buf); 735 len += sprintf(page + len, "OEM id : %s\n", buf);
651 snprintf(buf, 9, "%s", asus_info->oem_table_id); 736 snprintf(buf, 9, "%s", asus->dsdt_info->oem_table_id);
652 len += sprintf(page + len, "OEM table id : %s\n", buf); 737 len += sprintf(page + len, "OEM table id : %s\n", buf);
653 snprintf(buf, 16, "%x", asus_info->oem_revision); 738 snprintf(buf, 16, "%x", asus->dsdt_info->oem_revision);
654 len += sprintf(page + len, "OEM revision : 0x%s\n", buf); 739 len += sprintf(page + len, "OEM revision : 0x%s\n", buf);
655 snprintf(buf, 5, "%s", asus_info->asl_compiler_id); 740 snprintf(buf, 5, "%s", asus->dsdt_info->asl_compiler_id);
656 len += sprintf(page + len, "ASL comp vendor id : %s\n", buf); 741 len += sprintf(page + len, "ASL comp vendor id : %s\n", buf);
657 snprintf(buf, 16, "%x", asus_info->asl_compiler_revision); 742 snprintf(buf, 16, "%x", asus->dsdt_info->asl_compiler_revision);
658 len += sprintf(page + len, "ASL comp revision : 0x%s\n", buf); 743 len += sprintf(page + len, "ASL comp revision : 0x%s\n", buf);
659 } 744 }
660 745
@@ -672,8 +757,9 @@ static int parse_arg(const char *buf, unsigned long count, int *val)
672 return count; 757 return count;
673} 758}
674 759
675static ssize_t store_status(const char *buf, size_t count, 760static ssize_t sysfs_acpi_set(struct asus_laptop *asus,
676 acpi_handle handle, int mask) 761 const char *buf, size_t count,
762 const char *method)
677{ 763{
678 int rv, value; 764 int rv, value;
679 int out = 0; 765 int out = 0;
@@ -682,8 +768,8 @@ static ssize_t store_status(const char *buf, size_t count,
682 if (rv > 0) 768 if (rv > 0)
683 out = value ? 1 : 0; 769 out = value ? 1 : 0;
684 770
685 write_status(handle, out, mask); 771 if (write_acpi_int(asus->handle, method, value))
686 772 return -ENODEV;
687 return rv; 773 return rv;
688} 774}
689 775
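One thing to note when reading the sysfs_acpi_set() hunk above: the
helper still normalizes the parsed value into out, but it is the raw
value that gets passed to write_acpi_int(), so out is now effectively
unused. The WLAN and Bluetooth store paths below therefore hand the
user's integer to the ACPI method as-is.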
@@ -693,67 +779,116 @@ static ssize_t store_status(const char *buf, size_t count,
693static ssize_t show_ledd(struct device *dev, 779static ssize_t show_ledd(struct device *dev,
694 struct device_attribute *attr, char *buf) 780 struct device_attribute *attr, char *buf)
695{ 781{
696 return sprintf(buf, "0x%08x\n", hotk->ledd_status); 782 struct asus_laptop *asus = dev_get_drvdata(dev);
783
784 return sprintf(buf, "0x%08x\n", asus->ledd_status);
697} 785}
698 786
699static ssize_t store_ledd(struct device *dev, struct device_attribute *attr, 787static ssize_t store_ledd(struct device *dev, struct device_attribute *attr,
700 const char *buf, size_t count) 788 const char *buf, size_t count)
701{ 789{
790 struct asus_laptop *asus = dev_get_drvdata(dev);
702 int rv, value; 791 int rv, value;
703 792
704 rv = parse_arg(buf, count, &value); 793 rv = parse_arg(buf, count, &value);
705 if (rv > 0) { 794 if (rv > 0) {
706 if (write_acpi_int(ledd_set_handle, NULL, value, NULL)) 795 if (write_acpi_int(asus->handle, METHOD_LEDD, value))
707 pr_warning("LED display write failed\n"); 796 pr_warning("LED display write failed\n");
708 else 797 else
709 hotk->ledd_status = (u32) value; 798 asus->ledd_status = (u32) value;
710 } 799 }
711 return rv; 800 return rv;
712} 801}
713 802
714/* 803/*
804 * Wireless
805 */
806static int asus_wireless_status(struct asus_laptop *asus, int mask)
807{
808 unsigned long long status;
809 acpi_status rv = AE_OK;
810
811 if (!asus->have_rsts)
812 return (asus->wireless_status & mask) ? 1 : 0;
813
814 rv = acpi_evaluate_integer(asus->handle, METHOD_WL_STATUS,
815 NULL, &status);
816 if (ACPI_FAILURE(rv)) {
817 pr_warning("Error reading Wireless status\n");
818 return -EINVAL;
819 }
820 return !!(status & mask);
821}
822
823/*
715 * WLAN 824 * WLAN
716 */ 825 */
826static int asus_wlan_set(struct asus_laptop *asus, int status)
827{
828 if (write_acpi_int(asus->handle, METHOD_WLAN, !!status)) {
829 pr_warning("Error setting wlan status to %d", status);
830 return -EIO;
831 }
832 return 0;
833}
834
717static ssize_t show_wlan(struct device *dev, 835static ssize_t show_wlan(struct device *dev,
718 struct device_attribute *attr, char *buf) 836 struct device_attribute *attr, char *buf)
719{ 837{
720 return sprintf(buf, "%d\n", read_status(WL_ON)); 838 struct asus_laptop *asus = dev_get_drvdata(dev);
839
840 return sprintf(buf, "%d\n", asus_wireless_status(asus, WL_RSTS));
721} 841}
722 842
723static ssize_t store_wlan(struct device *dev, struct device_attribute *attr, 843static ssize_t store_wlan(struct device *dev, struct device_attribute *attr,
724 const char *buf, size_t count) 844 const char *buf, size_t count)
725{ 845{
726 return store_status(buf, count, wl_switch_handle, WL_ON); 846 struct asus_laptop *asus = dev_get_drvdata(dev);
847
848 return sysfs_acpi_set(asus, buf, count, METHOD_WLAN);
727} 849}
728 850
729/* 851/*
730 * Bluetooth 852 * Bluetooth
731 */ 853 */
854static int asus_bluetooth_set(struct asus_laptop *asus, int status)
855{
856 if (write_acpi_int(asus->handle, METHOD_BLUETOOTH, !!status)) {
857 pr_warning("Error setting bluetooth status to %d", status);
858 return -EIO;
859 }
860 return 0;
861}
862
732static ssize_t show_bluetooth(struct device *dev, 863static ssize_t show_bluetooth(struct device *dev,
733 struct device_attribute *attr, char *buf) 864 struct device_attribute *attr, char *buf)
734{ 865{
735 return sprintf(buf, "%d\n", read_status(BT_ON)); 866 struct asus_laptop *asus = dev_get_drvdata(dev);
867
868 return sprintf(buf, "%d\n", asus_wireless_status(asus, BT_RSTS));
736} 869}
737 870
738static ssize_t store_bluetooth(struct device *dev, 871static ssize_t store_bluetooth(struct device *dev,
739 struct device_attribute *attr, const char *buf, 872 struct device_attribute *attr, const char *buf,
740 size_t count) 873 size_t count)
741{ 874{
742 return store_status(buf, count, bt_switch_handle, BT_ON); 875 struct asus_laptop *asus = dev_get_drvdata(dev);
876
877 return sysfs_acpi_set(asus, buf, count, METHOD_BLUETOOTH);
743} 878}
744 879
745/* 880/*
746 * Display 881 * Display
747 */ 882 */
748static void set_display(int value) 883static void asus_set_display(struct asus_laptop *asus, int value)
749{ 884{
750 /* no sanity check needed for now */ 885 /* no sanity check needed for now */
751 if (write_acpi_int(display_set_handle, NULL, value, NULL)) 886 if (write_acpi_int(asus->handle, METHOD_SWITCH_DISPLAY, value))
752 pr_warning("Error setting display\n"); 887 pr_warning("Error setting display\n");
753 return; 888 return;
754} 889}
755 890
756static int read_display(void) 891static int read_display(struct asus_laptop *asus)
757{ 892{
758 unsigned long long value = 0; 893 unsigned long long value = 0;
759 acpi_status rv = AE_OK; 894 acpi_status rv = AE_OK;
@@ -769,7 +904,7 @@ static int read_display(void)
769 pr_warning("Error reading display status\n"); 904 pr_warning("Error reading display status\n");
770 } 905 }
771 906
772 value &= 0x0F; /* needed for some models, shouldn't hurt others */ 907 value &= 0x0F; /* needed for some models, shouldn't hurt others */
773 908
774 return value; 909 return value;
775} 910}
@@ -781,7 +916,11 @@ static int read_display(void)
781static ssize_t show_disp(struct device *dev, 916static ssize_t show_disp(struct device *dev,
782 struct device_attribute *attr, char *buf) 917 struct device_attribute *attr, char *buf)
783{ 918{
784 return sprintf(buf, "%d\n", read_display()); 919 struct asus_laptop *asus = dev_get_drvdata(dev);
920
921 if (!display_get_handle)
922 return -ENODEV;
923 return sprintf(buf, "%d\n", read_display(asus));
785} 924}
786 925
787/* 926/*
@@ -794,65 +933,72 @@ static ssize_t show_disp(struct device *dev,
794static ssize_t store_disp(struct device *dev, struct device_attribute *attr, 933static ssize_t store_disp(struct device *dev, struct device_attribute *attr,
795 const char *buf, size_t count) 934 const char *buf, size_t count)
796{ 935{
936 struct asus_laptop *asus = dev_get_drvdata(dev);
797 int rv, value; 937 int rv, value;
798 938
799 rv = parse_arg(buf, count, &value); 939 rv = parse_arg(buf, count, &value);
800 if (rv > 0) 940 if (rv > 0)
801 set_display(value); 941 asus_set_display(asus, value);
802 return rv; 942 return rv;
803} 943}
804 944
805/* 945/*
806 * Light Sens 946 * Light Sens
807 */ 947 */
808static void set_light_sens_switch(int value) 948static void asus_als_switch(struct asus_laptop *asus, int value)
809{ 949{
810 if (write_acpi_int(ls_switch_handle, NULL, value, NULL)) 950 if (write_acpi_int(asus->handle, METHOD_ALS_CONTROL, value))
811 pr_warning("Error setting light sensor switch\n"); 951 pr_warning("Error setting light sensor switch\n");
812 hotk->light_switch = value; 952 asus->light_switch = value;
813} 953}
814 954
815static ssize_t show_lssw(struct device *dev, 955static ssize_t show_lssw(struct device *dev,
816 struct device_attribute *attr, char *buf) 956 struct device_attribute *attr, char *buf)
817{ 957{
818 return sprintf(buf, "%d\n", hotk->light_switch); 958 struct asus_laptop *asus = dev_get_drvdata(dev);
959
960 return sprintf(buf, "%d\n", asus->light_switch);
819} 961}
820 962
821static ssize_t store_lssw(struct device *dev, struct device_attribute *attr, 963static ssize_t store_lssw(struct device *dev, struct device_attribute *attr,
822 const char *buf, size_t count) 964 const char *buf, size_t count)
823{ 965{
966 struct asus_laptop *asus = dev_get_drvdata(dev);
824 int rv, value; 967 int rv, value;
825 968
826 rv = parse_arg(buf, count, &value); 969 rv = parse_arg(buf, count, &value);
827 if (rv > 0) 970 if (rv > 0)
828 set_light_sens_switch(value ? 1 : 0); 971 asus_als_switch(asus, value ? 1 : 0);
829 972
830 return rv; 973 return rv;
831} 974}
832 975
833static void set_light_sens_level(int value) 976static void asus_als_level(struct asus_laptop *asus, int value)
834{ 977{
835 if (write_acpi_int(ls_level_handle, NULL, value, NULL)) 978 if (write_acpi_int(asus->handle, METHOD_ALS_LEVEL, value))
836 pr_warning("Error setting light sensor level\n"); 979 pr_warning("Error setting light sensor level\n");
837 hotk->light_level = value; 980 asus->light_level = value;
838} 981}
839 982
840static ssize_t show_lslvl(struct device *dev, 983static ssize_t show_lslvl(struct device *dev,
841 struct device_attribute *attr, char *buf) 984 struct device_attribute *attr, char *buf)
842{ 985{
843 return sprintf(buf, "%d\n", hotk->light_level); 986 struct asus_laptop *asus = dev_get_drvdata(dev);
987
988 return sprintf(buf, "%d\n", asus->light_level);
844} 989}
845 990
846static ssize_t store_lslvl(struct device *dev, struct device_attribute *attr, 991static ssize_t store_lslvl(struct device *dev, struct device_attribute *attr,
847 const char *buf, size_t count) 992 const char *buf, size_t count)
848{ 993{
994 struct asus_laptop *asus = dev_get_drvdata(dev);
849 int rv, value; 995 int rv, value;
850 996
851 rv = parse_arg(buf, count, &value); 997 rv = parse_arg(buf, count, &value);
852 if (rv > 0) { 998 if (rv > 0) {
853 value = (0 < value) ? ((15 < value) ? 15 : value) : 0; 999 value = (0 < value) ? ((15 < value) ? 15 : value) : 0;
854 /* 0 <= value <= 15 */ 1000 /* 0 <= value <= 15 */
855 set_light_sens_level(value); 1001 asus_als_level(asus, value);
856 } 1002 }
857 1003
858 return rv; 1004 return rv;
@@ -861,197 +1007,309 @@ static ssize_t store_lslvl(struct device *dev, struct device_attribute *attr,
861/* 1007/*
862 * GPS 1008 * GPS
863 */ 1009 */
1010static int asus_gps_status(struct asus_laptop *asus)
1011{
1012 unsigned long long status;
1013 acpi_status rv = AE_OK;
1014
1015 rv = acpi_evaluate_integer(asus->handle, METHOD_GPS_STATUS,
1016 NULL, &status);
1017 if (ACPI_FAILURE(rv)) {
1018 pr_warning("Error reading GPS status\n");
1019 return -ENODEV;
1020 }
1021 return !!status;
1022}
1023
1024static int asus_gps_switch(struct asus_laptop *asus, int status)
1025{
1026 const char *meth = status ? METHOD_GPS_ON : METHOD_GPS_OFF;
1027
1028 if (write_acpi_int(asus->handle, meth, 0x02))
1029 return -ENODEV;
1030 return 0;
1031}
1032
864static ssize_t show_gps(struct device *dev, 1033static ssize_t show_gps(struct device *dev,
865 struct device_attribute *attr, char *buf) 1034 struct device_attribute *attr, char *buf)
866{ 1035{
867 return sprintf(buf, "%d\n", read_status(GPS_ON)); 1036 struct asus_laptop *asus = dev_get_drvdata(dev);
1037
1038 return sprintf(buf, "%d\n", asus_gps_status(asus));
868} 1039}
869 1040
870static ssize_t store_gps(struct device *dev, struct device_attribute *attr, 1041static ssize_t store_gps(struct device *dev, struct device_attribute *attr,
871 const char *buf, size_t count) 1042 const char *buf, size_t count)
872{ 1043{
873 return store_status(buf, count, NULL, GPS_ON); 1044 struct asus_laptop *asus = dev_get_drvdata(dev);
1045 int rv, value;
1046 int ret;
1047
1048 rv = parse_arg(buf, count, &value);
1049 if (rv <= 0)
1050 return -EINVAL;
1051 ret = asus_gps_switch(asus, !!value);
1052 if (ret)
1053 return ret;
1054 rfkill_set_sw_state(asus->gps_rfkill, !value);
1055 return rv;
874} 1056}
875 1057
876/* 1058/*
877 * Hotkey functions 1059 * rfkill
878 */ 1060 */
879static struct key_entry *asus_get_entry_by_scancode(int code) 1061static int asus_gps_rfkill_set(void *data, bool blocked)
880{ 1062{
881 struct key_entry *key; 1063 acpi_handle handle = data;
882
883 for (key = asus_keymap; key->type != KE_END; key++)
884 if (code == key->code)
885 return key;
886 1064
887 return NULL; 1065 return asus_gps_switch(handle, !blocked);
888} 1066}
889 1067
890static struct key_entry *asus_get_entry_by_keycode(int code) 1068static const struct rfkill_ops asus_gps_rfkill_ops = {
891{ 1069 .set_block = asus_gps_rfkill_set,
892 struct key_entry *key; 1070};
893
894 for (key = asus_keymap; key->type != KE_END; key++)
895 if (code == key->keycode && key->type == KE_KEY)
896 return key;
897 1071
898 return NULL; 1072static void asus_rfkill_exit(struct asus_laptop *asus)
1073{
1074 if (asus->gps_rfkill) {
1075 rfkill_unregister(asus->gps_rfkill);
1076 rfkill_destroy(asus->gps_rfkill);
1077 asus->gps_rfkill = NULL;
1078 }
899} 1079}
900 1080
901static int asus_getkeycode(struct input_dev *dev, int scancode, int *keycode) 1081static int asus_rfkill_init(struct asus_laptop *asus)
902{ 1082{
903 struct key_entry *key = asus_get_entry_by_scancode(scancode); 1083 int result;
904 1084
905 if (key && key->type == KE_KEY) { 1085 if (acpi_check_handle(asus->handle, METHOD_GPS_ON, NULL) ||
906 *keycode = key->keycode; 1086 acpi_check_handle(asus->handle, METHOD_GPS_OFF, NULL) ||
1087 acpi_check_handle(asus->handle, METHOD_GPS_STATUS, NULL))
907 return 0; 1088 return 0;
1089
1090 asus->gps_rfkill = rfkill_alloc("asus-gps", &asus->platform_device->dev,
1091 RFKILL_TYPE_GPS,
1092 &asus_gps_rfkill_ops, NULL);
1093 if (!asus->gps_rfkill)
1094 return -EINVAL;
1095
1096 result = rfkill_register(asus->gps_rfkill);
1097 if (result) {
1098 rfkill_destroy(asus->gps_rfkill);
1099 asus->gps_rfkill = NULL;
908 } 1100 }
909 1101
910 return -EINVAL; 1102 return result;
911} 1103}
912 1104
913static int asus_setkeycode(struct input_dev *dev, int scancode, int keycode) 1105/*
1106 * Input device (i.e. hotkeys)
1107 */
1108static void asus_input_notify(struct asus_laptop *asus, int event)
914{ 1109{
915 struct key_entry *key; 1110 if (asus->inputdev)
916 int old_keycode; 1111 sparse_keymap_report_event(asus->inputdev, event, 1, true);
1112}
917 1113
918 if (keycode < 0 || keycode > KEY_MAX) 1114static int asus_input_init(struct asus_laptop *asus)
919 return -EINVAL; 1115{
1116 struct input_dev *input;
1117 int error;
920 1118
921 key = asus_get_entry_by_scancode(scancode); 1119 input = input_allocate_device();
922 if (key && key->type == KE_KEY) { 1120 if (!input) {
923 old_keycode = key->keycode; 1121 pr_info("Unable to allocate input device\n");
924 key->keycode = keycode;
925 set_bit(keycode, dev->keybit);
926 if (!asus_get_entry_by_keycode(old_keycode))
927 clear_bit(old_keycode, dev->keybit);
928 return 0; 1122 return 0;
929 } 1123 }
1124 input->name = "Asus Laptop extra buttons";
1125 input->phys = ASUS_LAPTOP_FILE "/input0";
1126 input->id.bustype = BUS_HOST;
1127 input->dev.parent = &asus->platform_device->dev;
1128 input_set_drvdata(input, asus);
1129
1130 error = sparse_keymap_setup(input, asus_keymap, NULL);
1131 if (error) {
1132 pr_err("Unable to setup input device keymap\n");
1133 goto err_keymap;
1134 }
1135 error = input_register_device(input);
1136 if (error) {
1137 pr_info("Unable to register input device\n");
1138 goto err_device;
1139 }
1140
1141 asus->inputdev = input;
1142 return 0;
930 1143
931 return -EINVAL; 1144err_keymap:
1145 sparse_keymap_free(input);
1146err_device:
1147 input_free_device(input);
1148 return error;
932} 1149}
933 1150
934static void asus_hotk_notify(struct acpi_device *device, u32 event) 1151static void asus_input_exit(struct asus_laptop *asus)
935{ 1152{
936 static struct key_entry *key; 1153 if (asus->inputdev) {
937 u16 count; 1154 sparse_keymap_free(asus->inputdev);
1155 input_unregister_device(asus->inputdev);
1156 }
1157}
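
asus_input_init() above is a straight port to the sparse-keymap helpers. A condensed sketch of the same alloc/setup/register/unwind flow, using an invented two-entry keymap (scancodes and key names are placeholders):

#include <linux/input.h>
#include <linux/input/sparse-keymap.h>

static const struct key_entry demo_keymap[] = {
        { KE_KEY, 0x30, { KEY_VOLUMEUP } },
        { KE_KEY, 0x31, { KEY_VOLUMEDOWN } },
        { KE_END, 0 },
};

static int demo_input_init(struct device *parent, struct input_dev **out)
{
        struct input_dev *input;
        int error;

        input = input_allocate_device();
        if (!input)
                return -ENOMEM;
        input->name = "demo extra buttons";
        input->dev.parent = parent;

        error = sparse_keymap_setup(input, demo_keymap, NULL);
        if (error)
                goto err_free;
        error = input_register_device(input);
        if (error)
                goto err_keymap;

        *out = input;
        return 0;

err_keymap:
        sparse_keymap_free(input);
err_free:
        input_free_device(input);
        return error;
}

Events are then delivered with sparse_keymap_report_event(input, scancode, 1, true), as the notify path above does.
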
938 1158
939 /* TODO Find a better way to handle events count. */ 1159/*
940 if (!hotk) 1160 * ACPI driver
941 return; 1161 */
1162static void asus_acpi_notify(struct acpi_device *device, u32 event)
1163{
1164 struct asus_laptop *asus = acpi_driver_data(device);
1165 u16 count;
942 1166
943 /* 1167 /*
944 * We need to tell the backlight device when the backlight power is 1168 * We need to tell the backlight device when the backlight power is
945 * switched 1169 * switched
946 */ 1170 */
947 if (event == ATKD_LCD_ON) { 1171 if (event == ATKD_LCD_ON)
948 write_status(NULL, 1, LCD_ON); 1172 lcd_blank(asus, FB_BLANK_UNBLANK);
949 lcd_blank(FB_BLANK_UNBLANK); 1173 else if (event == ATKD_LCD_OFF)
950 } else if (event == ATKD_LCD_OFF) { 1174 lcd_blank(asus, FB_BLANK_POWERDOWN);
951 write_status(NULL, 0, LCD_ON);
952 lcd_blank(FB_BLANK_POWERDOWN);
953 }
954 1175
955 count = hotk->event_count[event % 128]++; 1176 /* TODO Find a better way to handle events count. */
956 acpi_bus_generate_proc_event(hotk->device, event, count); 1177 count = asus->event_count[event % 128]++;
957 acpi_bus_generate_netlink_event(hotk->device->pnp.device_class, 1178 acpi_bus_generate_proc_event(asus->device, event, count);
958 dev_name(&hotk->device->dev), event, 1179 acpi_bus_generate_netlink_event(asus->device->pnp.device_class,
1180 dev_name(&asus->device->dev), event,
959 count); 1181 count);
960 1182
961 if (hotk->inputdev) { 1183 /* Brightness events are special */
962 key = asus_get_entry_by_scancode(event); 1184 if (event >= ATKD_BR_MIN && event <= ATKD_BR_MAX) {
963 if (!key) 1185
964 return ; 1186 /* Ignore them completely if the acpi video driver is used */
965 1187 if (asus->backlight_device != NULL) {
966 switch (key->type) { 1188 /* Update the backlight device. */
967 case KE_KEY: 1189 asus_backlight_notify(asus);
968 input_report_key(hotk->inputdev, key->keycode, 1);
969 input_sync(hotk->inputdev);
970 input_report_key(hotk->inputdev, key->keycode, 0);
971 input_sync(hotk->inputdev);
972 break;
973 } 1190 }
 1191 return;
974 } 1192 }
1193 asus_input_notify(asus, event);
975} 1194}
976 1195
977#define ASUS_CREATE_DEVICE_ATTR(_name) \ 1196static DEVICE_ATTR(infos, S_IRUGO, show_infos, NULL);
978 struct device_attribute dev_attr_##_name = { \ 1197static DEVICE_ATTR(wlan, S_IRUGO | S_IWUSR, show_wlan, store_wlan);
979 .attr = { \ 1198static DEVICE_ATTR(bluetooth, S_IRUGO | S_IWUSR, show_bluetooth,
980 .name = __stringify(_name), \ 1199 store_bluetooth);
981 .mode = 0 }, \ 1200static DEVICE_ATTR(display, S_IRUGO | S_IWUSR, show_disp, store_disp);
982 .show = NULL, \ 1201static DEVICE_ATTR(ledd, S_IRUGO | S_IWUSR, show_ledd, store_ledd);
983 .store = NULL, \ 1202static DEVICE_ATTR(ls_level, S_IRUGO | S_IWUSR, show_lslvl, store_lslvl);
1203static DEVICE_ATTR(ls_switch, S_IRUGO | S_IWUSR, show_lssw, store_lssw);
1204static DEVICE_ATTR(gps, S_IRUGO | S_IWUSR, show_gps, store_gps);
1205
1206static void asus_sysfs_exit(struct asus_laptop *asus)
1207{
1208 struct platform_device *device = asus->platform_device;
1209
1210 device_remove_file(&device->dev, &dev_attr_infos);
1211 device_remove_file(&device->dev, &dev_attr_wlan);
1212 device_remove_file(&device->dev, &dev_attr_bluetooth);
1213 device_remove_file(&device->dev, &dev_attr_display);
1214 device_remove_file(&device->dev, &dev_attr_ledd);
1215 device_remove_file(&device->dev, &dev_attr_ls_switch);
1216 device_remove_file(&device->dev, &dev_attr_ls_level);
1217 device_remove_file(&device->dev, &dev_attr_gps);
1218}
1219
1220static int asus_sysfs_init(struct asus_laptop *asus)
1221{
1222 struct platform_device *device = asus->platform_device;
1223 int err;
1224
1225 err = device_create_file(&device->dev, &dev_attr_infos);
1226 if (err)
1227 return err;
1228
1229 if (!acpi_check_handle(asus->handle, METHOD_WLAN, NULL)) {
1230 err = device_create_file(&device->dev, &dev_attr_wlan);
1231 if (err)
1232 return err;
984 } 1233 }
985 1234
986#define ASUS_SET_DEVICE_ATTR(_name, _mode, _show, _store) \ 1235 if (!acpi_check_handle(asus->handle, METHOD_BLUETOOTH, NULL)) {
987 do { \ 1236 err = device_create_file(&device->dev, &dev_attr_bluetooth);
988 dev_attr_##_name.attr.mode = _mode; \ 1237 if (err)
989 dev_attr_##_name.show = _show; \ 1238 return err;
990 dev_attr_##_name.store = _store; \ 1239 }
991 } while(0)
992
993static ASUS_CREATE_DEVICE_ATTR(infos);
994static ASUS_CREATE_DEVICE_ATTR(wlan);
995static ASUS_CREATE_DEVICE_ATTR(bluetooth);
996static ASUS_CREATE_DEVICE_ATTR(display);
997static ASUS_CREATE_DEVICE_ATTR(ledd);
998static ASUS_CREATE_DEVICE_ATTR(ls_switch);
999static ASUS_CREATE_DEVICE_ATTR(ls_level);
1000static ASUS_CREATE_DEVICE_ATTR(gps);
1001
1002static struct attribute *asuspf_attributes[] = {
1003 &dev_attr_infos.attr,
1004 &dev_attr_wlan.attr,
1005 &dev_attr_bluetooth.attr,
1006 &dev_attr_display.attr,
1007 &dev_attr_ledd.attr,
1008 &dev_attr_ls_switch.attr,
1009 &dev_attr_ls_level.attr,
1010 &dev_attr_gps.attr,
1011 NULL
1012};
1013 1240
1014static struct attribute_group asuspf_attribute_group = { 1241 if (!acpi_check_handle(asus->handle, METHOD_SWITCH_DISPLAY, NULL)) {
1015 .attrs = asuspf_attributes 1242 err = device_create_file(&device->dev, &dev_attr_display);
1016}; 1243 if (err)
1244 return err;
1245 }
1017 1246
1018static struct platform_driver asuspf_driver = { 1247 if (!acpi_check_handle(asus->handle, METHOD_LEDD, NULL)) {
1019 .driver = { 1248 err = device_create_file(&device->dev, &dev_attr_ledd);
1020 .name = ASUS_HOTK_FILE, 1249 if (err)
1021 .owner = THIS_MODULE, 1250 return err;
1022 } 1251 }
1023};
1024 1252
1025static struct platform_device *asuspf_device; 1253 if (!acpi_check_handle(asus->handle, METHOD_ALS_CONTROL, NULL) &&
1254 !acpi_check_handle(asus->handle, METHOD_ALS_LEVEL, NULL)) {
1255 err = device_create_file(&device->dev, &dev_attr_ls_switch);
1256 if (err)
1257 return err;
1258 err = device_create_file(&device->dev, &dev_attr_ls_level);
1259 if (err)
1260 return err;
1261 }
1026 1262
1027static void asus_hotk_add_fs(void) 1263 if (!acpi_check_handle(asus->handle, METHOD_GPS_ON, NULL) &&
1028{ 1264 !acpi_check_handle(asus->handle, METHOD_GPS_OFF, NULL) &&
1029 ASUS_SET_DEVICE_ATTR(infos, 0444, show_infos, NULL); 1265 !acpi_check_handle(asus->handle, METHOD_GPS_STATUS, NULL)) {
1266 err = device_create_file(&device->dev, &dev_attr_gps);
1267 if (err)
1268 return err;
1269 }
1030 1270
1031 if (wl_switch_handle) 1271 return err;
1032 ASUS_SET_DEVICE_ATTR(wlan, 0644, show_wlan, store_wlan); 1272}
1273
1274static int asus_platform_init(struct asus_laptop *asus)
1275{
1276 int err;
1033 1277
1034 if (bt_switch_handle) 1278 asus->platform_device = platform_device_alloc(ASUS_LAPTOP_FILE, -1);
1035 ASUS_SET_DEVICE_ATTR(bluetooth, 0644, 1279 if (!asus->platform_device)
1036 show_bluetooth, store_bluetooth); 1280 return -ENOMEM;
1281 platform_set_drvdata(asus->platform_device, asus);
1037 1282
1038 if (display_set_handle && display_get_handle) 1283 err = platform_device_add(asus->platform_device);
1039 ASUS_SET_DEVICE_ATTR(display, 0644, show_disp, store_disp); 1284 if (err)
1040 else if (display_set_handle) 1285 goto fail_platform_device;
1041 ASUS_SET_DEVICE_ATTR(display, 0200, NULL, store_disp);
1042 1286
1043 if (ledd_set_handle) 1287 err = asus_sysfs_init(asus);
1044 ASUS_SET_DEVICE_ATTR(ledd, 0644, show_ledd, store_ledd); 1288 if (err)
1289 goto fail_sysfs;
1290 return 0;
1045 1291
1046 if (ls_switch_handle && ls_level_handle) { 1292fail_sysfs:
1047 ASUS_SET_DEVICE_ATTR(ls_level, 0644, show_lslvl, store_lslvl); 1293 asus_sysfs_exit(asus);
1048 ASUS_SET_DEVICE_ATTR(ls_switch, 0644, show_lssw, store_lssw); 1294 platform_device_del(asus->platform_device);
1049 } 1295fail_platform_device:
1296 platform_device_put(asus->platform_device);
1297 return err;
1298}
1050 1299
1051 if (gps_status_handle && gps_on_handle && gps_off_handle) 1300static void asus_platform_exit(struct asus_laptop *asus)
1052 ASUS_SET_DEVICE_ATTR(gps, 0644, show_gps, store_gps); 1301{
1302 asus_sysfs_exit(asus);
1303 platform_device_unregister(asus->platform_device);
1053} 1304}
1054 1305
1306static struct platform_driver platform_driver = {
1307 .driver = {
1308 .name = ASUS_LAPTOP_FILE,
1309 .owner = THIS_MODULE,
1310 }
1311};
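
asus_platform_init() above relies on the split platform-device API: platform_device_put() frees a device that was never (or unsuccessfully) added, while platform_device_del()/platform_device_unregister() take over once platform_device_add() succeeds. A minimal sketch of that pattern, with a hypothetical device name:

#include <linux/err.h>
#include <linux/platform_device.h>

static struct platform_device *demo_pdev_create(void *drvdata)
{
        struct platform_device *pdev;
        int err;

        pdev = platform_device_alloc("demo-laptop", -1);
        if (!pdev)
                return ERR_PTR(-ENOMEM);
        platform_set_drvdata(pdev, drvdata);

        err = platform_device_add(pdev);
        if (err) {
                platform_device_put(pdev);      /* frees the allocation */
                return ERR_PTR(err);
        }
        return pdev;    /* from here on, use platform_device_unregister() */
}
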
1312
1055static int asus_handle_init(char *name, acpi_handle * handle, 1313static int asus_handle_init(char *name, acpi_handle * handle,
1056 char **paths, int num_paths) 1314 char **paths, int num_paths)
1057{ 1315{
@@ -1073,10 +1331,11 @@ static int asus_handle_init(char *name, acpi_handle * handle,
1073 ARRAY_SIZE(object##_paths)) 1331 ARRAY_SIZE(object##_paths))
1074 1332
1075/* 1333/*
1076 * This function is used to initialize the hotk with right values. In this 1334 * This function is used to initialize the context with right values. In this
1077 * method, we can make all the detection we want, and modify the hotk struct 1335 * method, we can make all the detection we want, and modify the asus_laptop
1336 * struct
1078 */ 1337 */
1079static int asus_hotk_get_info(void) 1338static int asus_laptop_get_info(struct asus_laptop *asus)
1080{ 1339{
1081 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; 1340 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
1082 union acpi_object *model = NULL; 1341 union acpi_object *model = NULL;
@@ -1089,22 +1348,21 @@ static int asus_hotk_get_info(void)
1089 * models, but late enough to allow acpi_bus_register_driver() to fail 1348 * models, but late enough to allow acpi_bus_register_driver() to fail
1090 * before doing anything ACPI-specific. Should we encounter a machine, 1349 * before doing anything ACPI-specific. Should we encounter a machine,
1091 * which needs special handling (i.e. its hotkey device has a different 1350 * which needs special handling (i.e. its hotkey device has a different
1092 * HID), this bit will be moved. A global variable asus_info contains 1351 * HID), this bit will be moved.
1093 * the DSDT header.
1094 */ 1352 */
1095 status = acpi_get_table(ACPI_SIG_DSDT, 1, &asus_info); 1353 status = acpi_get_table(ACPI_SIG_DSDT, 1, &asus->dsdt_info);
1096 if (ACPI_FAILURE(status)) 1354 if (ACPI_FAILURE(status))
1097 pr_warning("Couldn't get the DSDT table header\n"); 1355 pr_warning("Couldn't get the DSDT table header\n");
1098 1356
1099 /* We have to write 0 on init this far for all ASUS models */ 1357 /* We have to write 0 on init this far for all ASUS models */
1100 if (write_acpi_int(hotk->handle, "INIT", 0, &buffer)) { 1358 if (write_acpi_int_ret(asus->handle, "INIT", 0, &buffer)) {
1101 pr_err("Hotkey initialization failed\n"); 1359 pr_err("Hotkey initialization failed\n");
1102 return -ENODEV; 1360 return -ENODEV;
1103 } 1361 }
1104 1362
1105 /* This needs to be called for some laptops to init properly */ 1363 /* This needs to be called for some laptops to init properly */
1106 status = 1364 status =
1107 acpi_evaluate_integer(hotk->handle, "BSTS", NULL, &bsts_result); 1365 acpi_evaluate_integer(asus->handle, "BSTS", NULL, &bsts_result);
1108 if (ACPI_FAILURE(status)) 1366 if (ACPI_FAILURE(status))
1109 pr_warning("Error calling BSTS\n"); 1367 pr_warning("Error calling BSTS\n");
1110 else if (bsts_result) 1368 else if (bsts_result)
@@ -1112,8 +1370,8 @@ static int asus_hotk_get_info(void)
1112 (uint) bsts_result); 1370 (uint) bsts_result);
1113 1371
1114 /* This too ... */ 1372 /* This too ... */
1115 write_acpi_int(hotk->handle, "CWAP", wapf, NULL); 1373 if (write_acpi_int(asus->handle, "CWAP", wapf))
1116 1374 pr_err("Error calling CWAP(%d)\n", wapf);
1117 /* 1375 /*
1118 * Try to match the object returned by INIT to the specific model. 1376 * Try to match the object returned by INIT to the specific model.
1119 * Handle every possible object (or the lack of thereof) the DSDT 1377 * Handle every possible object (or the lack of thereof) the DSDT
@@ -1134,397 +1392,210 @@ static int asus_hotk_get_info(void)
1134 break; 1392 break;
1135 } 1393 }
1136 } 1394 }
1137 hotk->name = kstrdup(string, GFP_KERNEL); 1395 asus->name = kstrdup(string, GFP_KERNEL);
1138 if (!hotk->name) 1396 if (!asus->name)
1139 return -ENOMEM; 1397 return -ENOMEM;
1140 1398
1141 if (*string) 1399 if (*string)
1142 pr_notice(" %s model detected\n", string); 1400 pr_notice(" %s model detected\n", string);
1143 1401
1144 ASUS_HANDLE_INIT(mled_set);
1145 ASUS_HANDLE_INIT(tled_set);
1146 ASUS_HANDLE_INIT(rled_set);
1147 ASUS_HANDLE_INIT(pled_set);
1148 ASUS_HANDLE_INIT(gled_set);
1149
1150 ASUS_HANDLE_INIT(ledd_set);
1151
1152 ASUS_HANDLE_INIT(kled_set);
1153 ASUS_HANDLE_INIT(kled_get);
1154
1155 /* 1402 /*
 1156 * The HWRS method returns information about the hardware. 1403 * The HWRS method returns information about the hardware.
1157 * 0x80 bit is for WLAN, 0x100 for Bluetooth. 1404 * 0x80 bit is for WLAN, 0x100 for Bluetooth.
 1158 * The significance of the other bits is yet to be found. 1405 * The significance of the other bits is yet to be found.
1159 * If we don't find the method, we assume the device are present.
1160 */ 1406 */
1161 status = 1407 status =
1162 acpi_evaluate_integer(hotk->handle, "HRWS", NULL, &hwrs_result); 1408 acpi_evaluate_integer(asus->handle, "HRWS", NULL, &hwrs_result);
1163 if (ACPI_FAILURE(status)) 1409 if (!ACPI_FAILURE(status))
 1164 hwrs_result = WL_HWRS | BT_HWRS; 1410 pr_notice(" HRWS returned %x\n", (int)hwrs_result);
1165
1166 if (hwrs_result & WL_HWRS)
1167 ASUS_HANDLE_INIT(wl_switch);
1168 if (hwrs_result & BT_HWRS)
1169 ASUS_HANDLE_INIT(bt_switch);
1170
1171 ASUS_HANDLE_INIT(wireless_status);
1172 1411
1173 ASUS_HANDLE_INIT(brightness_set); 1412 if (!acpi_check_handle(asus->handle, METHOD_WL_STATUS, NULL))
1174 ASUS_HANDLE_INIT(brightness_get); 1413 asus->have_rsts = true;
1175 1414
1415 /* Scheduled for removal */
1176 ASUS_HANDLE_INIT(lcd_switch); 1416 ASUS_HANDLE_INIT(lcd_switch);
1177
1178 ASUS_HANDLE_INIT(display_set);
1179 ASUS_HANDLE_INIT(display_get); 1417 ASUS_HANDLE_INIT(display_get);
1180 1418
1181 /*
1182 * There is a lot of models with "ALSL", but a few get
1183 * a real light sens, so we need to check it.
1184 */
1185 if (!ASUS_HANDLE_INIT(ls_switch))
1186 ASUS_HANDLE_INIT(ls_level);
1187
1188 ASUS_HANDLE_INIT(gps_on);
1189 ASUS_HANDLE_INIT(gps_off);
1190 ASUS_HANDLE_INIT(gps_status);
1191
1192 kfree(model); 1419 kfree(model);
1193 1420
1194 return AE_OK; 1421 return AE_OK;
1195} 1422}
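
The INIT, BSTS and CWAP calls above all reduce to evaluating an ACPI method with one integer argument. A sketch of that evaluation using the stock ACPICA helpers; the demo_eval_int() wrapper itself is invented, only acpi_evaluate_integer() and the argument-list types are real:

#include <linux/errno.h>
#include <acpi/acpi_bus.h>

static int demo_eval_int(acpi_handle handle, const char *method,
                         u64 arg, unsigned long long *result)
{
        union acpi_object in = {
                .integer = { .type = ACPI_TYPE_INTEGER, .value = arg },
        };
        struct acpi_object_list params = { .count = 1, .pointer = &in };
        acpi_status status;

        status = acpi_evaluate_integer(handle, (acpi_string)method,
                                       &params, result);
        return ACPI_FAILURE(status) ? -EIO : 0;
}
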
1196 1423
1197static int asus_input_init(void) 1424static bool asus_device_present;
1198{
1199 const struct key_entry *key;
1200 int result;
1201 1425
1202 hotk->inputdev = input_allocate_device(); 1426static int __devinit asus_acpi_init(struct asus_laptop *asus)
1203 if (!hotk->inputdev) {
1204 pr_info("Unable to allocate input device\n");
1205 return 0;
1206 }
1207 hotk->inputdev->name = "Asus Laptop extra buttons";
1208 hotk->inputdev->phys = ASUS_HOTK_FILE "/input0";
1209 hotk->inputdev->id.bustype = BUS_HOST;
1210 hotk->inputdev->getkeycode = asus_getkeycode;
1211 hotk->inputdev->setkeycode = asus_setkeycode;
1212
1213 for (key = asus_keymap; key->type != KE_END; key++) {
1214 switch (key->type) {
1215 case KE_KEY:
1216 set_bit(EV_KEY, hotk->inputdev->evbit);
1217 set_bit(key->keycode, hotk->inputdev->keybit);
1218 break;
1219 }
1220 }
1221 result = input_register_device(hotk->inputdev);
1222 if (result) {
1223 pr_info("Unable to register input device\n");
1224 input_free_device(hotk->inputdev);
1225 }
1226 return result;
1227}
1228
1229static int asus_hotk_check(void)
1230{ 1427{
1231 int result = 0; 1428 int result = 0;
1232 1429
1233 result = acpi_bus_get_status(hotk->device); 1430 result = acpi_bus_get_status(asus->device);
1234 if (result) 1431 if (result)
1235 return result; 1432 return result;
1236 1433 if (!asus->device->status.present) {
1237 if (hotk->device->status.present) {
1238 result = asus_hotk_get_info();
1239 } else {
1240 pr_err("Hotkey device not present, aborting\n"); 1434 pr_err("Hotkey device not present, aborting\n");
1241 return -EINVAL; 1435 return -ENODEV;
1242 } 1436 }
1243 1437
1244 return result; 1438 result = asus_laptop_get_info(asus);
1245}
1246
1247static int asus_hotk_found;
1248
1249static int asus_hotk_add(struct acpi_device *device)
1250{
1251 int result;
1252
1253 pr_notice("Asus Laptop Support version %s\n",
1254 ASUS_LAPTOP_VERSION);
1255
1256 hotk = kzalloc(sizeof(struct asus_hotk), GFP_KERNEL);
1257 if (!hotk)
1258 return -ENOMEM;
1259
1260 hotk->handle = device->handle;
1261 strcpy(acpi_device_name(device), ASUS_HOTK_DEVICE_NAME);
1262 strcpy(acpi_device_class(device), ASUS_HOTK_CLASS);
1263 device->driver_data = hotk;
1264 hotk->device = device;
1265
1266 result = asus_hotk_check();
1267 if (result) 1439 if (result)
1268 goto end; 1440 return result;
1269
1270 asus_hotk_add_fs();
1271
1272 asus_hotk_found = 1;
1273 1441
1274 /* WLED and BLED are on by default */ 1442 /* WLED and BLED are on by default */
1275 write_status(bt_switch_handle, 1, BT_ON); 1443 if (bluetooth_status >= 0)
1276 write_status(wl_switch_handle, 1, WL_ON); 1444 asus_bluetooth_set(asus, !!bluetooth_status);
1277
1278 /* If the h/w switch is off, we need to check the real status */
1279 write_status(NULL, read_status(BT_ON), BT_ON);
1280 write_status(NULL, read_status(WL_ON), WL_ON);
1281 1445
1282 /* LCD Backlight is on by default */ 1446 if (wlan_status >= 0)
1283 write_status(NULL, 1, LCD_ON); 1447 asus_wlan_set(asus, !!wlan_status);
1284 1448
1285 /* Keyboard Backlight is on by default */ 1449 /* Keyboard Backlight is on by default */
1286 if (kled_set_handle) 1450 if (!acpi_check_handle(asus->handle, METHOD_KBD_LIGHT_SET, NULL))
1287 set_kled_lvl(1); 1451 asus_kled_set(asus, 1);
1288 1452
1289 /* LED display is off by default */ 1453 /* LED display is off by default */
1290 hotk->ledd_status = 0xFFF; 1454 asus->ledd_status = 0xFFF;
1291 1455
1292 /* Set initial values of light sensor and level */ 1456 /* Set initial values of light sensor and level */
1293 hotk->light_switch = 0; /* Default to light sensor disabled */ 1457 asus->light_switch = 0; /* Default to light sensor disabled */
1294 hotk->light_level = 5; /* level 5 for sensor sensitivity */ 1458 asus->light_level = 5; /* level 5 for sensor sensitivity */
1295 1459
1296 if (ls_switch_handle) 1460 if (!acpi_check_handle(asus->handle, METHOD_ALS_CONTROL, NULL) &&
1297 set_light_sens_switch(hotk->light_switch); 1461 !acpi_check_handle(asus->handle, METHOD_ALS_LEVEL, NULL)) {
1298 1462 asus_als_switch(asus, asus->light_switch);
1299 if (ls_level_handle) 1463 asus_als_level(asus, asus->light_level);
1300 set_light_sens_level(hotk->light_level);
1301
1302 /* GPS is on by default */
1303 write_status(NULL, 1, GPS_ON);
1304
1305end:
1306 if (result) {
1307 kfree(hotk->name);
1308 kfree(hotk);
1309 } 1464 }
1310 1465
 1466 asus->lcd_state = 1; /* LCD should be on when the module loads */
1311 return result; 1467 return result;
1312} 1468}
1313 1469
1314static int asus_hotk_remove(struct acpi_device *device, int type) 1470static int __devinit asus_acpi_add(struct acpi_device *device)
1315{
1316 kfree(hotk->name);
1317 kfree(hotk);
1318
1319 return 0;
1320}
1321
1322static void asus_backlight_exit(void)
1323{ 1471{
1324 if (asus_backlight_device) 1472 struct asus_laptop *asus;
1325 backlight_device_unregister(asus_backlight_device); 1473 int result;
1326}
1327
1328#define ASUS_LED_UNREGISTER(object) \
1329 if (object##_led.dev) \
1330 led_classdev_unregister(&object##_led)
1331 1474
1332static void asus_led_exit(void) 1475 pr_notice("Asus Laptop Support version %s\n",
1333{ 1476 ASUS_LAPTOP_VERSION);
1334 destroy_workqueue(led_workqueue); 1477 asus = kzalloc(sizeof(struct asus_laptop), GFP_KERNEL);
1335 ASUS_LED_UNREGISTER(mled); 1478 if (!asus)
1336 ASUS_LED_UNREGISTER(tled); 1479 return -ENOMEM;
1337 ASUS_LED_UNREGISTER(pled); 1480 asus->handle = device->handle;
1338 ASUS_LED_UNREGISTER(rled); 1481 strcpy(acpi_device_name(device), ASUS_LAPTOP_DEVICE_NAME);
1339 ASUS_LED_UNREGISTER(gled); 1482 strcpy(acpi_device_class(device), ASUS_LAPTOP_CLASS);
1340 ASUS_LED_UNREGISTER(kled); 1483 device->driver_data = asus;
1341} 1484 asus->device = device;
1342 1485
1343static void asus_input_exit(void) 1486 result = asus_acpi_init(asus);
1344{ 1487 if (result)
1345 if (hotk->inputdev) 1488 goto fail_platform;
1346 input_unregister_device(hotk->inputdev);
1347}
1348 1489
1349static void __exit asus_laptop_exit(void) 1490 /*
1350{ 1491 * Register the platform device first. It is used as a parent for the
1351 asus_backlight_exit(); 1492 * sub-devices below.
1352 asus_led_exit(); 1493 */
1353 asus_input_exit(); 1494 result = asus_platform_init(asus);
1495 if (result)
1496 goto fail_platform;
1354 1497
1355 acpi_bus_unregister_driver(&asus_hotk_driver); 1498 if (!acpi_video_backlight_support()) {
1356 sysfs_remove_group(&asuspf_device->dev.kobj, &asuspf_attribute_group); 1499 result = asus_backlight_init(asus);
1357 platform_device_unregister(asuspf_device); 1500 if (result)
1358 platform_driver_unregister(&asuspf_driver); 1501 goto fail_backlight;
1359} 1502 } else
1503 pr_info("Backlight controlled by ACPI video driver\n");
1360 1504
1361static int asus_backlight_init(struct device *dev) 1505 result = asus_input_init(asus);
1362{ 1506 if (result)
1363 struct backlight_device *bd; 1507 goto fail_input;
1364 1508
1365 if (brightness_set_handle && lcd_switch_handle) { 1509 result = asus_led_init(asus);
1366 bd = backlight_device_register(ASUS_HOTK_FILE, dev, 1510 if (result)
1367 NULL, &asusbl_ops); 1511 goto fail_led;
1368 if (IS_ERR(bd)) {
1369 pr_err("Could not register asus backlight device\n");
1370 asus_backlight_device = NULL;
1371 return PTR_ERR(bd);
1372 }
1373 1512
1374 asus_backlight_device = bd; 1513 result = asus_rfkill_init(asus);
1514 if (result)
1515 goto fail_rfkill;
1375 1516
1376 bd->props.max_brightness = 15; 1517 asus_device_present = true;
1377 bd->props.brightness = read_brightness(NULL);
1378 bd->props.power = FB_BLANK_UNBLANK;
1379 backlight_update_status(bd);
1380 }
1381 return 0; 1518 return 0;
1382}
1383 1519
1384static int asus_led_register(acpi_handle handle, 1520fail_rfkill:
1385 struct led_classdev *ldev, struct device *dev) 1521 asus_led_exit(asus);
1386{ 1522fail_led:
1387 if (!handle) 1523 asus_input_exit(asus);
1388 return 0; 1524fail_input:
1525 asus_backlight_exit(asus);
1526fail_backlight:
1527 asus_platform_exit(asus);
1528fail_platform:
1529 kfree(asus->name);
1530 kfree(asus);
1389 1531
1390 return led_classdev_register(dev, ldev); 1532 return result;
1391} 1533}
1392 1534
1393#define ASUS_LED_REGISTER(object, device) \ 1535static int asus_acpi_remove(struct acpi_device *device, int type)
1394 asus_led_register(object##_set_handle, &object##_led, device)
1395
1396static int asus_led_init(struct device *dev)
1397{ 1536{
1398 int rv; 1537 struct asus_laptop *asus = acpi_driver_data(device);
1399
1400 rv = ASUS_LED_REGISTER(mled, dev);
1401 if (rv)
1402 goto out;
1403
1404 rv = ASUS_LED_REGISTER(tled, dev);
1405 if (rv)
1406 goto out1;
1407
1408 rv = ASUS_LED_REGISTER(rled, dev);
1409 if (rv)
1410 goto out2;
1411
1412 rv = ASUS_LED_REGISTER(pled, dev);
1413 if (rv)
1414 goto out3;
1415
1416 rv = ASUS_LED_REGISTER(gled, dev);
1417 if (rv)
1418 goto out4;
1419 1538
1420 if (kled_set_handle && kled_get_handle) 1539 asus_backlight_exit(asus);
1421 rv = ASUS_LED_REGISTER(kled, dev); 1540 asus_rfkill_exit(asus);
1422 if (rv) 1541 asus_led_exit(asus);
1423 goto out5; 1542 asus_input_exit(asus);
1424 1543 asus_platform_exit(asus);
1425 led_workqueue = create_singlethread_workqueue("led_workqueue");
1426 if (!led_workqueue)
1427 goto out6;
1428 1544
1545 kfree(asus->name);
1546 kfree(asus);
1429 return 0; 1547 return 0;
1430out6:
1431 rv = -ENOMEM;
1432 ASUS_LED_UNREGISTER(kled);
1433out5:
1434 ASUS_LED_UNREGISTER(gled);
1435out4:
1436 ASUS_LED_UNREGISTER(pled);
1437out3:
1438 ASUS_LED_UNREGISTER(rled);
1439out2:
1440 ASUS_LED_UNREGISTER(tled);
1441out1:
1442 ASUS_LED_UNREGISTER(mled);
1443out:
1444 return rv;
1445} 1548}
1446 1549
1550static const struct acpi_device_id asus_device_ids[] = {
1551 {"ATK0100", 0},
1552 {"ATK0101", 0},
1553 {"", 0},
1554};
1555MODULE_DEVICE_TABLE(acpi, asus_device_ids);
1556
1557static struct acpi_driver asus_acpi_driver = {
1558 .name = ASUS_LAPTOP_NAME,
1559 .class = ASUS_LAPTOP_CLASS,
1560 .owner = THIS_MODULE,
1561 .ids = asus_device_ids,
1562 .flags = ACPI_DRIVER_ALL_NOTIFY_EVENTS,
1563 .ops = {
1564 .add = asus_acpi_add,
1565 .remove = asus_acpi_remove,
1566 .notify = asus_acpi_notify,
1567 },
1568};
1569
1447static int __init asus_laptop_init(void) 1570static int __init asus_laptop_init(void)
1448{ 1571{
1449 int result; 1572 int result;
1450 1573
1451 result = acpi_bus_register_driver(&asus_hotk_driver); 1574 result = platform_driver_register(&platform_driver);
1452 if (result < 0) 1575 if (result < 0)
1453 return result; 1576 return result;
1454 1577
1455 /* 1578 result = acpi_bus_register_driver(&asus_acpi_driver);
1456 * This is a bit of a kludge. We only want this module loaded 1579 if (result < 0)
1457 * for ASUS systems, but there's currently no way to probe the 1580 goto fail_acpi_driver;
1458 * ACPI namespace for ASUS HIDs. So we just return failure if 1581 if (!asus_device_present) {
1459 * we didn't find one, which will cause the module to be 1582 result = -ENODEV;
1460 * unloaded. 1583 goto fail_no_device;
1461 */
1462 if (!asus_hotk_found) {
1463 acpi_bus_unregister_driver(&asus_hotk_driver);
1464 return -ENODEV;
1465 }
1466
1467 result = asus_input_init();
1468 if (result)
1469 goto fail_input;
1470
1471 /* Register platform stuff */
1472 result = platform_driver_register(&asuspf_driver);
1473 if (result)
1474 goto fail_platform_driver;
1475
1476 asuspf_device = platform_device_alloc(ASUS_HOTK_FILE, -1);
1477 if (!asuspf_device) {
1478 result = -ENOMEM;
1479 goto fail_platform_device1;
1480 } 1584 }
1481
1482 result = platform_device_add(asuspf_device);
1483 if (result)
1484 goto fail_platform_device2;
1485
1486 result = sysfs_create_group(&asuspf_device->dev.kobj,
1487 &asuspf_attribute_group);
1488 if (result)
1489 goto fail_sysfs;
1490
1491 result = asus_led_init(&asuspf_device->dev);
1492 if (result)
1493 goto fail_led;
1494
1495 if (!acpi_video_backlight_support()) {
1496 result = asus_backlight_init(&asuspf_device->dev);
1497 if (result)
1498 goto fail_backlight;
1499 } else
1500 pr_info("Brightness ignored, must be controlled by "
1501 "ACPI video driver\n");
1502
1503 return 0; 1585 return 0;
1504 1586
1505fail_backlight: 1587fail_no_device:
1506 asus_led_exit(); 1588 acpi_bus_unregister_driver(&asus_acpi_driver);
1507 1589fail_acpi_driver:
1508fail_led: 1590 platform_driver_unregister(&platform_driver);
1509 sysfs_remove_group(&asuspf_device->dev.kobj,
1510 &asuspf_attribute_group);
1511
1512fail_sysfs:
1513 platform_device_del(asuspf_device);
1514
1515fail_platform_device2:
1516 platform_device_put(asuspf_device);
1517
1518fail_platform_device1:
1519 platform_driver_unregister(&asuspf_driver);
1520
1521fail_platform_driver:
1522 asus_input_exit();
1523
1524fail_input:
1525
1526 return result; 1591 return result;
1527} 1592}
1528 1593
1594static void __exit asus_laptop_exit(void)
1595{
1596 acpi_bus_unregister_driver(&asus_acpi_driver);
1597 platform_driver_unregister(&platform_driver);
1598}
1599
1529module_init(asus_laptop_init); 1600module_init(asus_laptop_init);
1530module_exit(asus_laptop_exit); 1601module_exit(asus_laptop_exit);
diff --git a/drivers/platform/x86/asus_acpi.c b/drivers/platform/x86/asus_acpi.c
index c1d2aeeea948..1381430e1105 100644
--- a/drivers/platform/x86/asus_acpi.c
+++ b/drivers/platform/x86/asus_acpi.c
@@ -1225,9 +1225,8 @@ static int asus_model_match(char *model)
1225 else if (strncmp(model, "M2N", 3) == 0 || 1225 else if (strncmp(model, "M2N", 3) == 0 ||
1226 strncmp(model, "M3N", 3) == 0 || 1226 strncmp(model, "M3N", 3) == 0 ||
1227 strncmp(model, "M5N", 3) == 0 || 1227 strncmp(model, "M5N", 3) == 0 ||
1228 strncmp(model, "M6N", 3) == 0 ||
1229 strncmp(model, "S1N", 3) == 0 || 1228 strncmp(model, "S1N", 3) == 0 ||
1230 strncmp(model, "S5N", 3) == 0 || strncmp(model, "W1N", 3) == 0) 1229 strncmp(model, "S5N", 3) == 0)
1231 return xxN; 1230 return xxN;
1232 else if (strncmp(model, "M1", 2) == 0) 1231 else if (strncmp(model, "M1", 2) == 0)
1233 return M1A; 1232 return M1A;
diff --git a/drivers/platform/x86/classmate-laptop.c b/drivers/platform/x86/classmate-laptop.c
index 8cb20e45bad6..035a7dd65a3f 100644
--- a/drivers/platform/x86/classmate-laptop.c
+++ b/drivers/platform/x86/classmate-laptop.c
@@ -507,6 +507,10 @@ static int cmpc_keys_codes[] = {
507 KEY_BRIGHTNESSDOWN, 507 KEY_BRIGHTNESSDOWN,
508 KEY_BRIGHTNESSUP, 508 KEY_BRIGHTNESSUP,
509 KEY_VENDOR, 509 KEY_VENDOR,
510 KEY_UNKNOWN,
511 KEY_CAMERA,
512 KEY_BACK,
513 KEY_FORWARD,
510 KEY_MAX 514 KEY_MAX
511}; 515};
512 516
diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
index b7f4d2705916..ef614979afe9 100644
--- a/drivers/platform/x86/dell-laptop.c
+++ b/drivers/platform/x86/dell-laptop.c
@@ -132,8 +132,8 @@ static struct dmi_system_id __devinitdata dell_blacklist[] = {
132}; 132};
133 133
134static struct calling_interface_buffer *buffer; 134static struct calling_interface_buffer *buffer;
135struct page *bufferpage; 135static struct page *bufferpage;
136DEFINE_MUTEX(buffer_mutex); 136static DEFINE_MUTEX(buffer_mutex);
137 137
138static int hwswitch_state; 138static int hwswitch_state;
139 139
@@ -580,6 +580,7 @@ static int __init dell_init(void)
580 580
581fail_backlight: 581fail_backlight:
582 i8042_remove_filter(dell_laptop_i8042_filter); 582 i8042_remove_filter(dell_laptop_i8042_filter);
583 cancel_delayed_work_sync(&dell_rfkill_work);
583fail_filter: 584fail_filter:
584 dell_cleanup_rfkill(); 585 dell_cleanup_rfkill();
585fail_rfkill: 586fail_rfkill:
@@ -597,12 +598,12 @@ fail_platform_driver:
597 598
598static void __exit dell_exit(void) 599static void __exit dell_exit(void)
599{ 600{
600 cancel_delayed_work_sync(&dell_rfkill_work);
601 i8042_remove_filter(dell_laptop_i8042_filter); 601 i8042_remove_filter(dell_laptop_i8042_filter);
602 cancel_delayed_work_sync(&dell_rfkill_work);
602 backlight_device_unregister(dell_backlight_device); 603 backlight_device_unregister(dell_backlight_device);
603 dell_cleanup_rfkill(); 604 dell_cleanup_rfkill();
604 if (platform_device) { 605 if (platform_device) {
605 platform_device_del(platform_device); 606 platform_device_unregister(platform_device);
606 platform_driver_unregister(&platform_driver); 607 platform_driver_unregister(&platform_driver);
607 } 608 }
608 kfree(da_tokens); 609 kfree(da_tokens);
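
The dell-laptop reordering matters because the i8042 filter is what queues dell_rfkill_work: cancelling the work before unhooking the filter leaves a window where a late keypress re-arms it after cancel_delayed_work_sync() returns. A generic sketch of the safe order, with placeholder names:

#include <linux/i8042.h>
#include <linux/serio.h>
#include <linux/workqueue.h>

static void demo_teardown(struct delayed_work *work,
                          bool (*filter)(unsigned char data, unsigned char str,
                                         struct serio *port))
{
        i8042_remove_filter(filter);    /* 1. no new work can be queued */
        cancel_delayed_work_sync(work); /* 2. "cancelled" now really holds */
}
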
diff --git a/drivers/platform/x86/eeepc-laptop.c b/drivers/platform/x86/eeepc-laptop.c
index e2be6bb33d92..9a844caa3756 100644
--- a/drivers/platform/x86/eeepc-laptop.c
+++ b/drivers/platform/x86/eeepc-laptop.c
@@ -578,6 +578,8 @@ static void eeepc_rfkill_hotplug(struct eeepc_laptop *eeepc)
578 struct pci_dev *dev; 578 struct pci_dev *dev;
579 struct pci_bus *bus; 579 struct pci_bus *bus;
580 bool blocked = eeepc_wlan_rfkill_blocked(eeepc); 580 bool blocked = eeepc_wlan_rfkill_blocked(eeepc);
581 bool absent;
582 u32 l;
581 583
582 if (eeepc->wlan_rfkill) 584 if (eeepc->wlan_rfkill)
583 rfkill_set_sw_state(eeepc->wlan_rfkill, blocked); 585 rfkill_set_sw_state(eeepc->wlan_rfkill, blocked);
@@ -591,6 +593,22 @@ static void eeepc_rfkill_hotplug(struct eeepc_laptop *eeepc)
591 goto out_unlock; 593 goto out_unlock;
592 } 594 }
593 595
596 if (pci_bus_read_config_dword(bus, 0, PCI_VENDOR_ID, &l)) {
597 pr_err("Unable to read PCI config space?\n");
598 goto out_unlock;
599 }
600 absent = (l == 0xffffffff);
601
602 if (blocked != absent) {
603 pr_warning("BIOS says wireless lan is %s, "
604 "but the pci device is %s\n",
605 blocked ? "blocked" : "unblocked",
606 absent ? "absent" : "present");
607 pr_warning("skipped wireless hotplug as probably "
608 "inappropriate for this model\n");
609 goto out_unlock;
610 }
611
594 if (!blocked) { 612 if (!blocked) {
595 dev = pci_get_slot(bus, 0); 613 dev = pci_get_slot(bus, 0);
596 if (dev) { 614 if (dev) {
@@ -1277,7 +1295,8 @@ static void eeepc_dmi_check(struct eeepc_laptop *eeepc)
1277 * hotplug code. In fact, current hotplug code seems to unplug another 1295 * hotplug code. In fact, current hotplug code seems to unplug another
1278 * device... 1296 * device...
1279 */ 1297 */
1280 if (strcmp(model, "1005HA") == 0 || strcmp(model, "1201N") == 0) { 1298 if (strcmp(model, "1005HA") == 0 || strcmp(model, "1201N") == 0 ||
1299 strcmp(model, "1005PE") == 0) {
1281 eeepc->hotplug_disabled = true; 1300 eeepc->hotplug_disabled = true;
1282 pr_info("wlan hotplug disabled\n"); 1301 pr_info("wlan hotplug disabled\n");
1283 } 1302 }
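
The eeepc hotplug check added above reads the vendor/device dword of the slot's function 0: an empty slot reads back all-ones for every config dword. A sketch of that probe, assuming the same devfn 0 the patch uses:

#include <linux/pci.h>

static bool demo_slot_absent(struct pci_bus *bus)
{
        u32 l;

        if (pci_bus_read_config_dword(bus, PCI_DEVFN(0, 0), PCI_VENDOR_ID, &l))
                return true;    /* config access failed: treat as absent */

        return l == 0xffffffff;
}
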
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index eb603f1d55ca..e7b0c3bcef89 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -286,6 +286,7 @@ struct ibm_init_struct {
286 char param[32]; 286 char param[32];
287 287
288 int (*init) (struct ibm_init_struct *); 288 int (*init) (struct ibm_init_struct *);
289 mode_t base_procfs_mode;
289 struct ibm_struct *data; 290 struct ibm_struct *data;
290}; 291};
291 292
@@ -2082,6 +2083,7 @@ static struct attribute_set *hotkey_dev_attributes;
2082 2083
2083static void tpacpi_driver_event(const unsigned int hkey_event); 2084static void tpacpi_driver_event(const unsigned int hkey_event);
2084static void hotkey_driver_event(const unsigned int scancode); 2085static void hotkey_driver_event(const unsigned int scancode);
2086static void hotkey_poll_setup(const bool may_warn);
2085 2087
2086/* HKEY.MHKG() return bits */ 2088/* HKEY.MHKG() return bits */
2087#define TP_HOTKEY_TABLET_MASK (1 << 3) 2089#define TP_HOTKEY_TABLET_MASK (1 << 3)
@@ -2264,6 +2266,8 @@ static int tpacpi_hotkey_driver_mask_set(const u32 mask)
2264 2266
2265 rc = hotkey_mask_set((hotkey_acpi_mask | hotkey_driver_mask) & 2267 rc = hotkey_mask_set((hotkey_acpi_mask | hotkey_driver_mask) &
2266 ~hotkey_source_mask); 2268 ~hotkey_source_mask);
2269 hotkey_poll_setup(true);
2270
2267 mutex_unlock(&hotkey_mutex); 2271 mutex_unlock(&hotkey_mutex);
2268 2272
2269 return rc; 2273 return rc;
@@ -2548,7 +2552,7 @@ static void hotkey_poll_stop_sync(void)
2548} 2552}
2549 2553
2550/* call with hotkey_mutex held */ 2554/* call with hotkey_mutex held */
2551static void hotkey_poll_setup(bool may_warn) 2555static void hotkey_poll_setup(const bool may_warn)
2552{ 2556{
2553 const u32 poll_driver_mask = hotkey_driver_mask & hotkey_source_mask; 2557 const u32 poll_driver_mask = hotkey_driver_mask & hotkey_source_mask;
2554 const u32 poll_user_mask = hotkey_user_mask & hotkey_source_mask; 2558 const u32 poll_user_mask = hotkey_user_mask & hotkey_source_mask;
@@ -2579,7 +2583,7 @@ static void hotkey_poll_setup(bool may_warn)
2579 } 2583 }
2580} 2584}
2581 2585
2582static void hotkey_poll_setup_safe(bool may_warn) 2586static void hotkey_poll_setup_safe(const bool may_warn)
2583{ 2587{
2584 mutex_lock(&hotkey_mutex); 2588 mutex_lock(&hotkey_mutex);
2585 hotkey_poll_setup(may_warn); 2589 hotkey_poll_setup(may_warn);
@@ -2597,7 +2601,11 @@ static void hotkey_poll_set_freq(unsigned int freq)
2597 2601
2598#else /* CONFIG_THINKPAD_ACPI_HOTKEY_POLL */ 2602#else /* CONFIG_THINKPAD_ACPI_HOTKEY_POLL */
2599 2603
2600static void hotkey_poll_setup_safe(bool __unused) 2604static void hotkey_poll_setup(const bool __unused)
2605{
2606}
2607
2608static void hotkey_poll_setup_safe(const bool __unused)
2601{ 2609{
2602} 2610}
2603 2611
@@ -2607,16 +2615,11 @@ static int hotkey_inputdev_open(struct input_dev *dev)
2607{ 2615{
2608 switch (tpacpi_lifecycle) { 2616 switch (tpacpi_lifecycle) {
2609 case TPACPI_LIFE_INIT: 2617 case TPACPI_LIFE_INIT:
2610 /*
2611 * hotkey_init will call hotkey_poll_setup_safe
2612 * at the appropriate moment
2613 */
2614 return 0;
2615 case TPACPI_LIFE_EXITING:
2616 return -EBUSY;
2617 case TPACPI_LIFE_RUNNING: 2618 case TPACPI_LIFE_RUNNING:
2618 hotkey_poll_setup_safe(false); 2619 hotkey_poll_setup_safe(false);
2619 return 0; 2620 return 0;
2621 case TPACPI_LIFE_EXITING:
2622 return -EBUSY;
2620 } 2623 }
2621 2624
2622 /* Should only happen if tpacpi_lifecycle is corrupt */ 2625 /* Should only happen if tpacpi_lifecycle is corrupt */
@@ -2627,7 +2630,7 @@ static int hotkey_inputdev_open(struct input_dev *dev)
2627static void hotkey_inputdev_close(struct input_dev *dev) 2630static void hotkey_inputdev_close(struct input_dev *dev)
2628{ 2631{
2629 /* disable hotkey polling when possible */ 2632 /* disable hotkey polling when possible */
2630 if (tpacpi_lifecycle == TPACPI_LIFE_RUNNING && 2633 if (tpacpi_lifecycle != TPACPI_LIFE_EXITING &&
2631 !(hotkey_source_mask & hotkey_driver_mask)) 2634 !(hotkey_source_mask & hotkey_driver_mask))
2632 hotkey_poll_setup_safe(false); 2635 hotkey_poll_setup_safe(false);
2633} 2636}
@@ -3655,13 +3658,19 @@ static void hotkey_notify(struct ibm_struct *ibm, u32 event)
3655 break; 3658 break;
3656 case 3: 3659 case 3:
3657 /* 0x3000-0x3FFF: bay-related wakeups */ 3660 /* 0x3000-0x3FFF: bay-related wakeups */
3658 if (hkey == TP_HKEY_EV_BAYEJ_ACK) { 3661 switch (hkey) {
3662 case TP_HKEY_EV_BAYEJ_ACK:
3659 hotkey_autosleep_ack = 1; 3663 hotkey_autosleep_ack = 1;
3660 printk(TPACPI_INFO 3664 printk(TPACPI_INFO
3661 "bay ejected\n"); 3665 "bay ejected\n");
3662 hotkey_wakeup_hotunplug_complete_notify_change(); 3666 hotkey_wakeup_hotunplug_complete_notify_change();
3663 known_ev = true; 3667 known_ev = true;
3664 } else { 3668 break;
3669 case TP_HKEY_EV_OPTDRV_EJ:
3670 /* FIXME: kick libata if SATA link offline */
3671 known_ev = true;
3672 break;
3673 default:
3665 known_ev = false; 3674 known_ev = false;
3666 } 3675 }
3667 break; 3676 break;
@@ -3870,7 +3879,7 @@ enum {
3870 TP_ACPI_BLUETOOTH_HWPRESENT = 0x01, /* Bluetooth hw available */ 3879 TP_ACPI_BLUETOOTH_HWPRESENT = 0x01, /* Bluetooth hw available */
3871 TP_ACPI_BLUETOOTH_RADIOSSW = 0x02, /* Bluetooth radio enabled */ 3880 TP_ACPI_BLUETOOTH_RADIOSSW = 0x02, /* Bluetooth radio enabled */
3872 TP_ACPI_BLUETOOTH_RESUMECTRL = 0x04, /* Bluetooth state at resume: 3881 TP_ACPI_BLUETOOTH_RESUMECTRL = 0x04, /* Bluetooth state at resume:
3873 off / last state */ 3882 0 = disable, 1 = enable */
3874}; 3883};
3875 3884
3876enum { 3885enum {
@@ -3916,10 +3925,11 @@ static int bluetooth_set_status(enum tpacpi_rfkill_state state)
3916 } 3925 }
3917#endif 3926#endif
3918 3927
3919 /* We make sure to keep TP_ACPI_BLUETOOTH_RESUMECTRL off */
3920 status = TP_ACPI_BLUETOOTH_RESUMECTRL;
3921 if (state == TPACPI_RFK_RADIO_ON) 3928 if (state == TPACPI_RFK_RADIO_ON)
3922 status |= TP_ACPI_BLUETOOTH_RADIOSSW; 3929 status = TP_ACPI_BLUETOOTH_RADIOSSW
3930 | TP_ACPI_BLUETOOTH_RESUMECTRL;
3931 else
3932 status = 0;
3923 3933
3924 if (!acpi_evalf(hkey_handle, NULL, "SBDC", "vd", status)) 3934 if (!acpi_evalf(hkey_handle, NULL, "SBDC", "vd", status))
3925 return -EIO; 3935 return -EIO;
@@ -4070,7 +4080,7 @@ enum {
4070 TP_ACPI_WANCARD_HWPRESENT = 0x01, /* Wan hw available */ 4080 TP_ACPI_WANCARD_HWPRESENT = 0x01, /* Wan hw available */
4071 TP_ACPI_WANCARD_RADIOSSW = 0x02, /* Wan radio enabled */ 4081 TP_ACPI_WANCARD_RADIOSSW = 0x02, /* Wan radio enabled */
4072 TP_ACPI_WANCARD_RESUMECTRL = 0x04, /* Wan state at resume: 4082 TP_ACPI_WANCARD_RESUMECTRL = 0x04, /* Wan state at resume:
4073 off / last state */ 4083 0 = disable, 1 = enable */
4074}; 4084};
4075 4085
4076#define TPACPI_RFK_WWAN_SW_NAME "tpacpi_wwan_sw" 4086#define TPACPI_RFK_WWAN_SW_NAME "tpacpi_wwan_sw"
@@ -4107,10 +4117,11 @@ static int wan_set_status(enum tpacpi_rfkill_state state)
4107 } 4117 }
4108#endif 4118#endif
4109 4119
4110 /* We make sure to set TP_ACPI_WANCARD_RESUMECTRL */
4111 status = TP_ACPI_WANCARD_RESUMECTRL;
4112 if (state == TPACPI_RFK_RADIO_ON) 4120 if (state == TPACPI_RFK_RADIO_ON)
4113 status |= TP_ACPI_WANCARD_RADIOSSW; 4121 status = TP_ACPI_WANCARD_RADIOSSW
4122 | TP_ACPI_WANCARD_RESUMECTRL;
4123 else
4124 status = 0;
4114 4125
4115 if (!acpi_evalf(hkey_handle, NULL, "SWAN", "vd", status)) 4126 if (!acpi_evalf(hkey_handle, NULL, "SWAN", "vd", status))
4116 return -EIO; 4127 return -EIO;
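
The bluetooth and WWAN hunks stop forcing RESUMECTRL on unconditionally; the resume behaviour now follows the requested radio state. A sketch of the resulting status-word logic, with the bit values copied from the driver enums above and the helper name invented:

#include <linux/types.h>

#define DEMO_RADIOSSW   0x02    /* radio enabled */
#define DEMO_RESUMECTRL 0x04    /* state to restore at resume */

static inline u32 demo_radio_status_word(bool radio_on)
{
        return radio_on ? (DEMO_RADIOSSW | DEMO_RESUMECTRL) : 0;
}
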
@@ -4619,6 +4630,10 @@ static int video_read(struct seq_file *m)
4619 return 0; 4630 return 0;
4620 } 4631 }
4621 4632
4633 /* Even reads can crash X.org, so... */
4634 if (!capable(CAP_SYS_ADMIN))
4635 return -EPERM;
4636
4622 status = video_outputsw_get(); 4637 status = video_outputsw_get();
4623 if (status < 0) 4638 if (status < 0)
4624 return status; 4639 return status;
@@ -4652,6 +4667,10 @@ static int video_write(char *buf)
4652 if (video_supported == TPACPI_VIDEO_NONE) 4667 if (video_supported == TPACPI_VIDEO_NONE)
4653 return -ENODEV; 4668 return -ENODEV;
4654 4669
4670 /* Even reads can crash X.org, let alone writes... */
4671 if (!capable(CAP_SYS_ADMIN))
4672 return -EPERM;
4673
4655 enable = 0; 4674 enable = 0;
4656 disable = 0; 4675 disable = 0;
4657 4676
@@ -6133,13 +6152,13 @@ static const struct tpacpi_quirk brightness_quirk_table[] __initconst = {
6133 TPACPI_Q_IBM('1', 'Y', TPACPI_BRGHT_Q_EC), /* T43/p ATI */ 6152 TPACPI_Q_IBM('1', 'Y', TPACPI_BRGHT_Q_EC), /* T43/p ATI */
6134 6153
6135 /* Models with ATI GPUs that can use ECNVRAM */ 6154 /* Models with ATI GPUs that can use ECNVRAM */
6136 TPACPI_Q_IBM('1', 'R', TPACPI_BRGHT_Q_EC), 6155 TPACPI_Q_IBM('1', 'R', TPACPI_BRGHT_Q_EC), /* R50,51 T40-42 */
6137 TPACPI_Q_IBM('1', 'Q', TPACPI_BRGHT_Q_ASK|TPACPI_BRGHT_Q_EC), 6156 TPACPI_Q_IBM('1', 'Q', TPACPI_BRGHT_Q_ASK|TPACPI_BRGHT_Q_EC),
6138 TPACPI_Q_IBM('7', '6', TPACPI_BRGHT_Q_ASK|TPACPI_BRGHT_Q_EC), 6157 TPACPI_Q_IBM('7', '6', TPACPI_BRGHT_Q_EC), /* R52 */
6139 TPACPI_Q_IBM('7', '8', TPACPI_BRGHT_Q_ASK|TPACPI_BRGHT_Q_EC), 6158 TPACPI_Q_IBM('7', '8', TPACPI_BRGHT_Q_ASK|TPACPI_BRGHT_Q_EC),
6140 6159
6141 /* Models with Intel Extreme Graphics 2 */ 6160 /* Models with Intel Extreme Graphics 2 */
6142 TPACPI_Q_IBM('1', 'U', TPACPI_BRGHT_Q_NOEC), 6161 TPACPI_Q_IBM('1', 'U', TPACPI_BRGHT_Q_NOEC), /* X40 */
6143 TPACPI_Q_IBM('1', 'V', TPACPI_BRGHT_Q_ASK|TPACPI_BRGHT_Q_EC), 6162 TPACPI_Q_IBM('1', 'V', TPACPI_BRGHT_Q_ASK|TPACPI_BRGHT_Q_EC),
6144 TPACPI_Q_IBM('1', 'W', TPACPI_BRGHT_Q_ASK|TPACPI_BRGHT_Q_EC), 6163 TPACPI_Q_IBM('1', 'W', TPACPI_BRGHT_Q_ASK|TPACPI_BRGHT_Q_EC),
6145 6164
@@ -6522,7 +6541,8 @@ static int volume_set_status(const u8 status)
6522 return volume_set_status_ec(status); 6541 return volume_set_status_ec(status);
6523} 6542}
6524 6543
6525static int volume_set_mute_ec(const bool mute) 6544/* returns < 0 on error, 0 on no change, 1 on change */
6545static int __volume_set_mute_ec(const bool mute)
6526{ 6546{
6527 int rc; 6547 int rc;
6528 u8 s, n; 6548 u8 s, n;
@@ -6537,22 +6557,37 @@ static int volume_set_mute_ec(const bool mute)
6537 n = (mute) ? s | TP_EC_AUDIO_MUTESW_MSK : 6557 n = (mute) ? s | TP_EC_AUDIO_MUTESW_MSK :
6538 s & ~TP_EC_AUDIO_MUTESW_MSK; 6558 s & ~TP_EC_AUDIO_MUTESW_MSK;
6539 6559
6540 if (n != s) 6560 if (n != s) {
6541 rc = volume_set_status_ec(n); 6561 rc = volume_set_status_ec(n);
6562 if (!rc)
6563 rc = 1;
6564 }
6542 6565
6543unlock: 6566unlock:
6544 mutex_unlock(&volume_mutex); 6567 mutex_unlock(&volume_mutex);
6545 return rc; 6568 return rc;
6546} 6569}
6547 6570
6571static int volume_alsa_set_mute(const bool mute)
6572{
6573 dbg_printk(TPACPI_DBG_MIXER, "ALSA: trying to %smute\n",
6574 (mute) ? "" : "un");
6575 return __volume_set_mute_ec(mute);
6576}
6577
6548static int volume_set_mute(const bool mute) 6578static int volume_set_mute(const bool mute)
6549{ 6579{
6580 int rc;
6581
6550 dbg_printk(TPACPI_DBG_MIXER, "trying to %smute\n", 6582 dbg_printk(TPACPI_DBG_MIXER, "trying to %smute\n",
6551 (mute) ? "" : "un"); 6583 (mute) ? "" : "un");
6552 return volume_set_mute_ec(mute); 6584
6585 rc = __volume_set_mute_ec(mute);
6586 return (rc < 0) ? rc : 0;
6553} 6587}
6554 6588
6555static int volume_set_volume_ec(const u8 vol) 6589/* returns < 0 on error, 0 on no change, 1 on change */
6590static int __volume_set_volume_ec(const u8 vol)
6556{ 6591{
6557 int rc; 6592 int rc;
6558 u8 s, n; 6593 u8 s, n;
@@ -6569,19 +6604,22 @@ static int volume_set_volume_ec(const u8 vol)
6569 6604
6570 n = (s & ~TP_EC_AUDIO_LVL_MSK) | vol; 6605 n = (s & ~TP_EC_AUDIO_LVL_MSK) | vol;
6571 6606
6572 if (n != s) 6607 if (n != s) {
6573 rc = volume_set_status_ec(n); 6608 rc = volume_set_status_ec(n);
6609 if (!rc)
6610 rc = 1;
6611 }
6574 6612
6575unlock: 6613unlock:
6576 mutex_unlock(&volume_mutex); 6614 mutex_unlock(&volume_mutex);
6577 return rc; 6615 return rc;
6578} 6616}
6579 6617
6580static int volume_set_volume(const u8 vol) 6618static int volume_alsa_set_volume(const u8 vol)
6581{ 6619{
6582 dbg_printk(TPACPI_DBG_MIXER, 6620 dbg_printk(TPACPI_DBG_MIXER,
6583 "trying to set volume level to %hu\n", vol); 6621 "ALSA: trying to set volume level to %hu\n", vol);
6584 return volume_set_volume_ec(vol); 6622 return __volume_set_volume_ec(vol);
6585} 6623}
6586 6624
6587static void volume_alsa_notify_change(void) 6625static void volume_alsa_notify_change(void)
@@ -6628,7 +6666,7 @@ static int volume_alsa_vol_get(struct snd_kcontrol *kcontrol,
6628static int volume_alsa_vol_put(struct snd_kcontrol *kcontrol, 6666static int volume_alsa_vol_put(struct snd_kcontrol *kcontrol,
6629 struct snd_ctl_elem_value *ucontrol) 6667 struct snd_ctl_elem_value *ucontrol)
6630{ 6668{
6631 return volume_set_volume(ucontrol->value.integer.value[0]); 6669 return volume_alsa_set_volume(ucontrol->value.integer.value[0]);
6632} 6670}
6633 6671
6634#define volume_alsa_mute_info snd_ctl_boolean_mono_info 6672#define volume_alsa_mute_info snd_ctl_boolean_mono_info
@@ -6651,7 +6689,7 @@ static int volume_alsa_mute_get(struct snd_kcontrol *kcontrol,
6651static int volume_alsa_mute_put(struct snd_kcontrol *kcontrol, 6689static int volume_alsa_mute_put(struct snd_kcontrol *kcontrol,
6652 struct snd_ctl_elem_value *ucontrol) 6690 struct snd_ctl_elem_value *ucontrol)
6653{ 6691{
6654 return volume_set_mute(!ucontrol->value.integer.value[0]); 6692 return volume_alsa_set_mute(!ucontrol->value.integer.value[0]);
6655} 6693}
6656 6694
6657static struct snd_kcontrol_new volume_alsa_control_vol __devinitdata = { 6695static struct snd_kcontrol_new volume_alsa_control_vol __devinitdata = {
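
The new __volume_set_*_ec() return convention exists because ALSA 'put' callbacks must return 1 when the control value changed, 0 when it did not, and a negative errno on failure. A sketch of a put callback honoring that contract, with the EC read-modify-write reduced to a placeholder variable:

#include <sound/control.h>

static bool demo_muted;

/* Stand-in for the hardware store: <0 error, 0 unchanged, 1 changed. */
static int demo_set_mute(bool mute)
{
        if (mute == demo_muted)
                return 0;
        demo_muted = mute;
        return 1;       /* tells ALSA to notify listeners of the change */
}

static int demo_mute_put(struct snd_kcontrol *kcontrol,
                         struct snd_ctl_elem_value *ucontrol)
{
        /* Switch controls are "on = audible", hence the inversion. */
        return demo_set_mute(!ucontrol->value.integer.value[0]);
}
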
@@ -8477,9 +8515,10 @@ static int __init ibm_init(struct ibm_init_struct *iibm)
8477 "%s installed\n", ibm->name); 8515 "%s installed\n", ibm->name);
8478 8516
8479 if (ibm->read) { 8517 if (ibm->read) {
8480 mode_t mode; 8518 mode_t mode = iibm->base_procfs_mode;
8481 8519
8482 mode = S_IRUGO; 8520 if (!mode)
8521 mode = S_IRUGO;
8483 if (ibm->write) 8522 if (ibm->write)
8484 mode |= S_IWUSR; 8523 mode |= S_IWUSR;
8485 entry = proc_create_data(ibm->name, mode, proc_dir, 8524 entry = proc_create_data(ibm->name, mode, proc_dir,
@@ -8670,6 +8709,7 @@ static struct ibm_init_struct ibms_init[] __initdata = {
8670#ifdef CONFIG_THINKPAD_ACPI_VIDEO 8709#ifdef CONFIG_THINKPAD_ACPI_VIDEO
8671 { 8710 {
8672 .init = video_init, 8711 .init = video_init,
8712 .base_procfs_mode = S_IRUSR,
8673 .data = &video_driver_data, 8713 .data = &video_driver_data,
8674 }, 8714 },
8675#endif 8715#endif
@@ -9032,6 +9072,9 @@ static int __init thinkpad_acpi_module_init(void)
9032 return ret; 9072 return ret;
9033 } 9073 }
9034 } 9074 }
9075
9076 tpacpi_lifecycle = TPACPI_LIFE_RUNNING;
9077
9035 ret = input_register_device(tpacpi_inputdev); 9078 ret = input_register_device(tpacpi_inputdev);
9036 if (ret < 0) { 9079 if (ret < 0) {
9037 printk(TPACPI_ERR "unable to register input device\n"); 9080 printk(TPACPI_ERR "unable to register input device\n");
@@ -9041,7 +9084,6 @@ static int __init thinkpad_acpi_module_init(void)
9041 tp_features.input_device_registered = 1; 9084 tp_features.input_device_registered = 1;
9042 } 9085 }
9043 9086
9044 tpacpi_lifecycle = TPACPI_LIFE_RUNNING;
9045 return 0; 9087 return 0;
9046} 9088}
9047 9089
diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
index 26c211724acf..405b969734d6 100644
--- a/drivers/platform/x86/toshiba_acpi.c
+++ b/drivers/platform/x86/toshiba_acpi.c
@@ -814,21 +814,23 @@ static void toshiba_acpi_notify(acpi_handle handle, u32 event, void *context)
814 if (hci_result == HCI_SUCCESS) { 814 if (hci_result == HCI_SUCCESS) {
815 if (value == 0x100) 815 if (value == 0x100)
816 continue; 816 continue;
817 else if (value & 0x80) { 817 /* act on key press; ignore key release */
818 key = toshiba_acpi_get_entry_by_scancode 818 if (value & 0x80)
819 (value & ~0x80); 819 continue;
820 if (!key) { 820
821 printk(MY_INFO "Unknown key %x\n", 821 key = toshiba_acpi_get_entry_by_scancode
822 value & ~0x80); 822 (value);
823 continue; 823 if (!key) {
824 } 824 printk(MY_INFO "Unknown key %x\n",
825 input_report_key(toshiba_acpi.hotkey_dev, 825 value);
826 key->keycode, 1); 826 continue;
827 input_sync(toshiba_acpi.hotkey_dev);
828 input_report_key(toshiba_acpi.hotkey_dev,
829 key->keycode, 0);
830 input_sync(toshiba_acpi.hotkey_dev);
831 } 827 }
828 input_report_key(toshiba_acpi.hotkey_dev,
829 key->keycode, 1);
830 input_sync(toshiba_acpi.hotkey_dev);
831 input_report_key(toshiba_acpi.hotkey_dev,
832 key->keycode, 0);
833 input_sync(toshiba_acpi.hotkey_dev);
832 } else if (hci_result == HCI_NOT_SUPPORTED) { 834 } else if (hci_result == HCI_NOT_SUPPORTED) {
833 /* This is a workaround for an unresolved issue on 835 /* This is a workaround for an unresolved issue on
834 * some machines where system events sporadically 836 * some machines where system events sporadically
diff --git a/drivers/power/Kconfig b/drivers/power/Kconfig
index d4b3d67f0548..bf1467213954 100644
--- a/drivers/power/Kconfig
+++ b/drivers/power/Kconfig
@@ -98,10 +98,10 @@ config BATTERY_WM97XX
98 Say Y to enable support for battery measured by WM97xx aux port. 98 Say Y to enable support for battery measured by WM97xx aux port.
99 99
100config BATTERY_BQ27x00 100config BATTERY_BQ27x00
101 tristate "BQ27200 battery driver" 101 tristate "BQ27x00 battery driver"
102 depends on I2C 102 depends on I2C
103 help 103 help
104 Say Y here to enable support for batteries with BQ27200(I2C) chip. 104 Say Y here to enable support for batteries with BQ27x00 (I2C) chips.
105 105
106config BATTERY_DA9030 106config BATTERY_DA9030
107 tristate "DA9030 battery driver" 107 tristate "DA9030 battery driver"
diff --git a/drivers/power/bq27x00_battery.c b/drivers/power/bq27x00_battery.c
index 62bb98124e26..bece33ed873c 100644
--- a/drivers/power/bq27x00_battery.c
+++ b/drivers/power/bq27x00_battery.c
@@ -26,13 +26,22 @@
26#include <linux/i2c.h> 26#include <linux/i2c.h>
27#include <asm/unaligned.h> 27#include <asm/unaligned.h>
28 28
29#define DRIVER_VERSION "1.0.0" 29#define DRIVER_VERSION "1.1.0"
30 30
31#define BQ27x00_REG_TEMP 0x06 31#define BQ27x00_REG_TEMP 0x06
32#define BQ27x00_REG_VOLT 0x08 32#define BQ27x00_REG_VOLT 0x08
33#define BQ27x00_REG_RSOC 0x0B /* Relative State-of-Charge */
34#define BQ27x00_REG_AI 0x14 33#define BQ27x00_REG_AI 0x14
35#define BQ27x00_REG_FLAGS 0x0A 34#define BQ27x00_REG_FLAGS 0x0A
35#define BQ27x00_REG_TTE 0x16
36#define BQ27x00_REG_TTF 0x18
37#define BQ27x00_REG_TTECP 0x26
38
39#define BQ27000_REG_RSOC 0x0B /* Relative State-of-Charge */
40#define BQ27000_FLAG_CHGS BIT(7)
41
42#define BQ27500_REG_SOC 0x2c
43#define BQ27500_FLAG_DSC BIT(0)
44#define BQ27500_FLAG_FC BIT(9)
36 45
37/* If the system has several batteries we need a different name for each 46/* If the system has several batteries we need a different name for each
38 * of them... 47 * of them...
@@ -46,25 +55,28 @@ struct bq27x00_access_methods {
46 struct bq27x00_device_info *di); 55 struct bq27x00_device_info *di);
47}; 56};
48 57
58enum bq27x00_chip { BQ27000, BQ27500 };
59
49struct bq27x00_device_info { 60struct bq27x00_device_info {
50 struct device *dev; 61 struct device *dev;
51 int id; 62 int id;
52 int voltage_uV;
53 int current_uA;
54 int temp_C;
55 int charge_rsoc;
56 struct bq27x00_access_methods *bus; 63 struct bq27x00_access_methods *bus;
57 struct power_supply bat; 64 struct power_supply bat;
65 enum bq27x00_chip chip;
58 66
59 struct i2c_client *client; 67 struct i2c_client *client;
60}; 68};
61 69
62static enum power_supply_property bq27x00_battery_props[] = { 70static enum power_supply_property bq27x00_battery_props[] = {
71 POWER_SUPPLY_PROP_STATUS,
63 POWER_SUPPLY_PROP_PRESENT, 72 POWER_SUPPLY_PROP_PRESENT,
64 POWER_SUPPLY_PROP_VOLTAGE_NOW, 73 POWER_SUPPLY_PROP_VOLTAGE_NOW,
65 POWER_SUPPLY_PROP_CURRENT_NOW, 74 POWER_SUPPLY_PROP_CURRENT_NOW,
66 POWER_SUPPLY_PROP_CAPACITY, 75 POWER_SUPPLY_PROP_CAPACITY,
67 POWER_SUPPLY_PROP_TEMP, 76 POWER_SUPPLY_PROP_TEMP,
77 POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW,
78 POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG,
79 POWER_SUPPLY_PROP_TIME_TO_FULL_NOW,
68}; 80};
69 81
70/* 82/*
@@ -74,16 +86,11 @@ static enum power_supply_property bq27x00_battery_props[] = {
74static int bq27x00_read(u8 reg, int *rt_value, int b_single, 86static int bq27x00_read(u8 reg, int *rt_value, int b_single,
75 struct bq27x00_device_info *di) 87 struct bq27x00_device_info *di)
76{ 88{
77 int ret; 89 return di->bus->read(reg, rt_value, b_single, di);
78
79 ret = di->bus->read(reg, rt_value, b_single, di);
80 *rt_value = be16_to_cpu(*rt_value);
81
82 return ret;
83} 90}
84 91
85/* 92/*
86 * Return the battery temperature in Celsius degrees 93 * Return the battery temperature in tenths of degree Celsius
87 * Or < 0 if something fails. 94 * Or < 0 if something fails.
88 */ 95 */
89static int bq27x00_battery_temperature(struct bq27x00_device_info *di) 96static int bq27x00_battery_temperature(struct bq27x00_device_info *di)
@@ -97,7 +104,10 @@ static int bq27x00_battery_temperature(struct bq27x00_device_info *di)
97 return ret; 104 return ret;
98 } 105 }
99 106
100 return (temp >> 2) - 273; 107 if (di->chip == BQ27500)
108 return temp - 2731;
109 else
110 return ((temp >> 2) - 273) * 10;
101} 111}
102 112
103/* 113/*
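
The temperature rework is a unit conversion: the bq27000 reports quarter-kelvins, the bq27500 tenths of a kelvin, and the power-supply class expects tenths of a degree Celsius. A sketch of the arithmetic, mirroring the patch (2731 is 273.1 K expressed in tenths):

#include <linux/types.h>

static int demo_temp_decidegree_c(int raw, bool is_bq27500)
{
        if (is_bq27500)
                return raw - 2731;              /* 0.1 K -> 0.1 degC */
        return ((raw >> 2) - 273) * 10;         /* 0.25 K -> 0.1 degC */
}
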
@@ -115,7 +125,7 @@ static int bq27x00_battery_voltage(struct bq27x00_device_info *di)
115 return ret; 125 return ret;
116 } 126 }
117 127
118 return volt; 128 return volt * 1000;
119} 129}
120 130
121/* 131/*
@@ -134,16 +144,23 @@ static int bq27x00_battery_current(struct bq27x00_device_info *di)
134 dev_err(di->dev, "error reading current\n"); 144 dev_err(di->dev, "error reading current\n");
135 return 0; 145 return 0;
136 } 146 }
137 ret = bq27x00_read(BQ27x00_REG_FLAGS, &flags, 0, di); 147
138 if (ret < 0) { 148 if (di->chip == BQ27500) {
139 dev_err(di->dev, "error reading flags\n"); 149 /* bq27500 returns signed value */
140 return 0; 150 curr = (int)(s16)curr;
141 } 151 } else {
142 if ((flags & (1 << 7)) != 0) { 152 ret = bq27x00_read(BQ27x00_REG_FLAGS, &flags, 0, di);
143 dev_dbg(di->dev, "negative current!\n"); 153 if (ret < 0) {
144 return -curr; 154 dev_err(di->dev, "error reading flags\n");
155 return 0;
156 }
157 if (flags & BQ27000_FLAG_CHGS) {
158 dev_dbg(di->dev, "negative current!\n");
159 curr = -curr;
160 }
145 } 161 }
146 return curr; 162
163 return curr * 1000;
147} 164}
148 165
149/* 166/*
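
Current handling now splits per chip: the bq27500 register is a signed 16-bit value, while the bq27000 reports a magnitude whose sign comes from its CHGS flag; both are then scaled from mA to uA. A sketch mirroring that logic, with the flag value copied from the patch:

#include <linux/types.h>

static int demo_current_ua(int raw, int flags, bool is_bq27500)
{
        int curr = raw;

        if (is_bq27500)
                curr = (int)(s16)raw;   /* register is signed 16-bit */
        else if (flags & (1 << 7))      /* bq27000 CHGS set: negate */
                curr = -curr;

        return curr * 1000;             /* mA -> uA */
}
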
@@ -155,13 +172,70 @@ static int bq27x00_battery_rsoc(struct bq27x00_device_info *di)
155 int ret; 172 int ret;
156 int rsoc = 0; 173 int rsoc = 0;
157 174
158 ret = bq27x00_read(BQ27x00_REG_RSOC, &rsoc, 1, di); 175 if (di->chip == BQ27500)
176 ret = bq27x00_read(BQ27500_REG_SOC, &rsoc, 0, di);
177 else
178 ret = bq27x00_read(BQ27000_REG_RSOC, &rsoc, 1, di);
159 if (ret) { 179 if (ret) {
160 dev_err(di->dev, "error reading relative State-of-Charge\n"); 180 dev_err(di->dev, "error reading relative State-of-Charge\n");
161 return ret; 181 return ret;
162 } 182 }
163 183
164 return rsoc >> 8; 184 return rsoc;
185}
186
187static int bq27x00_battery_status(struct bq27x00_device_info *di,
188 union power_supply_propval *val)
189{
190 int flags = 0;
191 int status;
192 int ret;
193
194 ret = bq27x00_read(BQ27x00_REG_FLAGS, &flags, 0, di);
195 if (ret < 0) {
196 dev_err(di->dev, "error reading flags\n");
197 return ret;
198 }
199
200 if (di->chip == BQ27500) {
201 if (flags & BQ27500_FLAG_FC)
202 status = POWER_SUPPLY_STATUS_FULL;
203 else if (flags & BQ27500_FLAG_DSC)
204 status = POWER_SUPPLY_STATUS_DISCHARGING;
205 else
206 status = POWER_SUPPLY_STATUS_CHARGING;
207 } else {
208 if (flags & BQ27000_FLAG_CHGS)
209 status = POWER_SUPPLY_STATUS_CHARGING;
210 else
211 status = POWER_SUPPLY_STATUS_DISCHARGING;
212 }
213
214 val->intval = status;
215 return 0;
216}
217
218/*
219 * Read a time register.
220 * Return < 0 if something fails.
221 */
222static int bq27x00_battery_time(struct bq27x00_device_info *di, int reg,
223 union power_supply_propval *val)
224{
225 int tval = 0;
226 int ret;
227
228 ret = bq27x00_read(reg, &tval, 0, di);
229 if (ret) {
230 dev_err(di->dev, "error reading register %02x\n", reg);
231 return ret;
232 }
233
234 if (tval == 65535)
235 return -ENODATA;
236
237 val->intval = tval * 60;
238 return 0;
165} 239}
166 240
167#define to_bq27x00_device_info(x) container_of((x), \ 241#define to_bq27x00_device_info(x) container_of((x), \
@@ -171,9 +245,13 @@ static int bq27x00_battery_get_property(struct power_supply *psy,
171 enum power_supply_property psp, 245 enum power_supply_property psp,
172 union power_supply_propval *val) 246 union power_supply_propval *val)
173{ 247{
248 int ret = 0;
174 struct bq27x00_device_info *di = to_bq27x00_device_info(psy); 249 struct bq27x00_device_info *di = to_bq27x00_device_info(psy);
175 250
176 switch (psp) { 251 switch (psp) {
252 case POWER_SUPPLY_PROP_STATUS:
253 ret = bq27x00_battery_status(di, val);
254 break;
177 case POWER_SUPPLY_PROP_VOLTAGE_NOW: 255 case POWER_SUPPLY_PROP_VOLTAGE_NOW:
178 case POWER_SUPPLY_PROP_PRESENT: 256 case POWER_SUPPLY_PROP_PRESENT:
179 val->intval = bq27x00_battery_voltage(di); 257 val->intval = bq27x00_battery_voltage(di);
@@ -189,11 +267,20 @@ static int bq27x00_battery_get_property(struct power_supply *psy,
189 case POWER_SUPPLY_PROP_TEMP: 267 case POWER_SUPPLY_PROP_TEMP:
190 val->intval = bq27x00_battery_temperature(di); 268 val->intval = bq27x00_battery_temperature(di);
191 break; 269 break;
270 case POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW:
271 ret = bq27x00_battery_time(di, BQ27x00_REG_TTE, val);
272 break;
273 case POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG:
274 ret = bq27x00_battery_time(di, BQ27x00_REG_TTECP, val);
275 break;
276 case POWER_SUPPLY_PROP_TIME_TO_FULL_NOW:
277 ret = bq27x00_battery_time(di, BQ27x00_REG_TTF, val);
278 break;
192 default: 279 default:
193 return -EINVAL; 280 return -EINVAL;
194 } 281 }
195 282
196 return 0; 283 return ret;
197} 284}
198 285
199static void bq27x00_powersupply_init(struct bq27x00_device_info *di) 286static void bq27x00_powersupply_init(struct bq27x00_device_info *di)
@@ -206,10 +293,10 @@ static void bq27x00_powersupply_init(struct bq27x00_device_info *di)
206} 293}
207 294
208/* 295/*
209 * BQ27200 specific code 296 * i2c specific code
210 */ 297 */
211 298
212static int bq27200_read(u8 reg, int *rt_value, int b_single, 299static int bq27x00_read_i2c(u8 reg, int *rt_value, int b_single,
213 struct bq27x00_device_info *di) 300 struct bq27x00_device_info *di)
214{ 301{
215 struct i2c_client *client = di->client; 302 struct i2c_client *client = di->client;
@@ -238,7 +325,7 @@ static int bq27200_read(u8 reg, int *rt_value, int b_single,
238 err = i2c_transfer(client->adapter, msg, 1); 325 err = i2c_transfer(client->adapter, msg, 1);
239 if (err >= 0) { 326 if (err >= 0) {
240 if (!b_single) 327 if (!b_single)
241 *rt_value = get_unaligned_be16(data); 328 *rt_value = get_unaligned_le16(data);
242 else 329 else
243 *rt_value = data[0]; 330 *rt_value = data[0];
244 331
@@ -248,7 +335,7 @@ static int bq27200_read(u8 reg, int *rt_value, int b_single,
248 return err; 335 return err;
249} 336}
250 337
251static int bq27200_battery_probe(struct i2c_client *client, 338static int bq27x00_battery_probe(struct i2c_client *client,
252 const struct i2c_device_id *id) 339 const struct i2c_device_id *id)
253{ 340{
254 char *name; 341 char *name;
@@ -267,7 +354,7 @@ static int bq27200_battery_probe(struct i2c_client *client,
267 if (retval < 0) 354 if (retval < 0)
268 return retval; 355 return retval;
269 356
270 name = kasprintf(GFP_KERNEL, "bq27200-%d", num); 357 name = kasprintf(GFP_KERNEL, "%s-%d", id->name, num);
271 if (!name) { 358 if (!name) {
272 dev_err(&client->dev, "failed to allocate device name\n"); 359 dev_err(&client->dev, "failed to allocate device name\n");
273 retval = -ENOMEM; 360 retval = -ENOMEM;
@@ -281,6 +368,7 @@ static int bq27200_battery_probe(struct i2c_client *client,
281 goto batt_failed_2; 368 goto batt_failed_2;
282 } 369 }
283 di->id = num; 370 di->id = num;
371 di->chip = id->driver_data;
284 372
285 bus = kzalloc(sizeof(*bus), GFP_KERNEL); 373 bus = kzalloc(sizeof(*bus), GFP_KERNEL);
286 if (!bus) { 374 if (!bus) {
@@ -293,7 +381,7 @@ static int bq27200_battery_probe(struct i2c_client *client,
293 i2c_set_clientdata(client, di); 381 i2c_set_clientdata(client, di);
294 di->dev = &client->dev; 382 di->dev = &client->dev;
295 di->bat.name = name; 383 di->bat.name = name;
296 bus->read = &bq27200_read; 384 bus->read = &bq27x00_read_i2c;
297 di->bus = bus; 385 di->bus = bus;
298 di->client = client; 386 di->client = client;
299 387
@@ -323,7 +411,7 @@ batt_failed_1:
323 return retval; 411 return retval;
324} 412}
325 413
326static int bq27200_battery_remove(struct i2c_client *client) 414static int bq27x00_battery_remove(struct i2c_client *client)
327{ 415{
328 struct bq27x00_device_info *di = i2c_get_clientdata(client); 416 struct bq27x00_device_info *di = i2c_get_clientdata(client);
329 417
@@ -344,27 +432,28 @@ static int bq27200_battery_remove(struct i2c_client *client)
344 * Module stuff 432 * Module stuff
345 */ 433 */
346 434
347static const struct i2c_device_id bq27200_id[] = { 435static const struct i2c_device_id bq27x00_id[] = {
348 { "bq27200", 0 }, 436 { "bq27200", BQ27000 }, /* bq27200 is same as bq27000, but with i2c */
437 { "bq27500", BQ27500 },
349 {}, 438 {},
350}; 439};
351 440
352static struct i2c_driver bq27200_battery_driver = { 441static struct i2c_driver bq27x00_battery_driver = {
353 .driver = { 442 .driver = {
354 .name = "bq27200-battery", 443 .name = "bq27x00-battery",
355 }, 444 },
356 .probe = bq27200_battery_probe, 445 .probe = bq27x00_battery_probe,
357 .remove = bq27200_battery_remove, 446 .remove = bq27x00_battery_remove,
358 .id_table = bq27200_id, 447 .id_table = bq27x00_id,
359}; 448};
360 449
361static int __init bq27x00_battery_init(void) 450static int __init bq27x00_battery_init(void)
362{ 451{
363 int ret; 452 int ret;
364 453
365 ret = i2c_add_driver(&bq27200_battery_driver); 454 ret = i2c_add_driver(&bq27x00_battery_driver);
366 if (ret) 455 if (ret)
367 printk(KERN_ERR "Unable to register BQ27200 driver\n"); 456 printk(KERN_ERR "Unable to register BQ27x00 driver\n");
368 457
369 return ret; 458 return ret;
370} 459}
@@ -372,7 +461,7 @@ module_init(bq27x00_battery_init);
372 461
373static void __exit bq27x00_battery_exit(void) 462static void __exit bq27x00_battery_exit(void)
374{ 463{
375 i2c_del_driver(&bq27200_battery_driver); 464 i2c_del_driver(&bq27x00_battery_driver);
376} 465}
377module_exit(bq27x00_battery_exit); 466module_exit(bq27x00_battery_exit);
378 467
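
The net effect of the bq27x00 changes above is that every property is now
reported in the canonical units of the power supply class: microvolts for
VOLTAGE_NOW, microamps for CURRENT_NOW, tenths of a degree Celsius for
TEMP, and seconds for the TIME_TO_* properties. A minimal user-space
sketch that reads one converted value back through sysfs; the instance
name "bq27200-0" and the path are assumptions for illustration (the name
comes from kasprintf("%s-%d", id->name, num) in the probe above):

#include <stdio.h>

int main(void)
{
	/* Path assumed for illustration only. */
	FILE *f = fopen("/sys/class/power_supply/bq27200-0/voltage_now", "r");
	long uv;

	if (!f)
		return 1;
	if (fscanf(f, "%ld", &uv) != 1) {
		fclose(f);
		return 1;
	}
	fclose(f);
	/* The driver now reports microvolts (volt * 1000 from a mV register). */
	printf("battery voltage: %ld.%06ld V\n", uv / 1000000, uv % 1000000);
	return 0;
}
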
diff --git a/drivers/power/da9030_battery.c b/drivers/power/da9030_battery.c
index 3364198134a1..a2e71f7b27fb 100644
--- a/drivers/power/da9030_battery.c
+++ b/drivers/power/da9030_battery.c
@@ -509,7 +509,7 @@ static int da9030_battery_probe(struct platform_device *pdev)
509 509
510 charger->master = pdev->dev.parent; 510 charger->master = pdev->dev.parent;
511 511
512 /* 10 seconds between monotor runs unless platfrom defines other 512 /* 10 seconds between monitor runs unless platform defines other
513 interval */ 513 interval */
514 charger->interval = msecs_to_jiffies( 514 charger->interval = msecs_to_jiffies(
515 (pdata->batmon_interval ? : 10) * 1000); 515 (pdata->batmon_interval ? : 10) * 1000);
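
The "? :" with an omitted middle operand in the hunk above is a GNU C
extension: "a ?: b" evaluates a once and yields it if non-zero, otherwise
b. A small standalone sketch of the equivalence (function and values are
illustrative):

#include <stdio.h>

static int monitor_interval(int configured_secs)
{
	/* GNU extension: same as
	 * configured_secs ? configured_secs : 10,
	 * but configured_secs is evaluated only once. */
	return configured_secs ? : 10;
}

int main(void)
{
	printf("%d %d\n", monitor_interval(0), monitor_interval(30)); /* 10 30 */
	return 0;
}
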
diff --git a/drivers/power/wm97xx_battery.c b/drivers/power/wm97xx_battery.c
index 6ea3cb5837c7..23eed356a854 100644
--- a/drivers/power/wm97xx_battery.c
+++ b/drivers/power/wm97xx_battery.c
@@ -26,7 +26,7 @@
26 26
27static DEFINE_MUTEX(bat_lock); 27static DEFINE_MUTEX(bat_lock);
28static struct work_struct bat_work; 28static struct work_struct bat_work;
29struct mutex work_lock; 29static struct mutex work_lock;
30static int bat_status = POWER_SUPPLY_STATUS_UNKNOWN; 30static int bat_status = POWER_SUPPLY_STATUS_UNKNOWN;
31static struct wm97xx_batt_info *gpdata; 31static struct wm97xx_batt_info *gpdata;
32static enum power_supply_property *prop; 32static enum power_supply_property *prop;
@@ -203,7 +203,7 @@ static int __devinit wm97xx_bat_probe(struct platform_device *dev)
203 goto err2; 203 goto err2;
204 ret = request_irq(gpio_to_irq(pdata->charge_gpio), 204 ret = request_irq(gpio_to_irq(pdata->charge_gpio),
205 wm97xx_chrg_irq, IRQF_DISABLED, 205 wm97xx_chrg_irq, IRQF_DISABLED,
206 "AC Detect", 0); 206 "AC Detect", dev);
207 if (ret) 207 if (ret)
208 goto err2; 208 goto err2;
209 props++; /* POWER_SUPPLY_PROP_STATUS */ 209 props++; /* POWER_SUPPLY_PROP_STATUS */
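
The wm97xx change above is more than style: the last argument to
request_irq() is the dev_id cookie that is handed to the handler and that
free_irq() later uses to identify which handler to remove on a shared
line, so passing 0 would have made a matching free_irq(irq, dev)
impossible. A hedged sketch of the pairing (demo_irq, gpio and pdev are
illustrative names, not from this driver):

static irqreturn_t demo_irq(int irq, void *dev_id)
{
	struct platform_device *pdev = dev_id;	/* same cookie we registered */

	dev_dbg(&pdev->dev, "charger state changed\n");
	return IRQ_HANDLED;
}

/* in probe: */
ret = request_irq(gpio_to_irq(gpio), demo_irq, IRQF_DISABLED,
		  "AC Detect", pdev);

/* in remove: the identical cookie must be passed back */
free_irq(gpio_to_irq(gpio), pdev);
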
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index 262f62eec837..834b48441829 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -27,6 +27,17 @@ config REGULATOR_DEBUG
27 help 27 help
28 Say yes here to enable debugging support. 28 Say yes here to enable debugging support.
29 29
30config REGULATOR_DUMMY
31 bool "Provide a dummy regulator if regulator lookups fail"
32 help
33 If this option is enabled then when a regulator lookup fails
34 and the board has not specified that it has provided full
35	  constraints then the regulator core will provide an
36	  always-enabled dummy regulator, allowing consumer
37	  drivers to continue.
38
39 A warning will be generated when this substitution is done.
40
30config REGULATOR_FIXED_VOLTAGE 41config REGULATOR_FIXED_VOLTAGE
31 tristate "Fixed voltage regulator support" 42 tristate "Fixed voltage regulator support"
32 help 43 help
@@ -69,6 +80,13 @@ config REGULATOR_MAX1586
69 regulator via I2C bus. The provided regulator is suitable 80 regulator via I2C bus. The provided regulator is suitable
70 for PXA27x chips to control VCC_CORE and VCC_USIM voltages. 81 for PXA27x chips to control VCC_CORE and VCC_USIM voltages.
71 82
83config REGULATOR_MAX8649
84 tristate "Maxim 8649 voltage regulator"
85 depends on I2C
86 help
87 This driver controls a Maxim 8649 voltage output regulator via
88 I2C bus.
89
72config REGULATOR_MAX8660 90config REGULATOR_MAX8660
73 tristate "Maxim 8660/8661 voltage regulator" 91 tristate "Maxim 8660/8661 voltage regulator"
74 depends on I2C 92 depends on I2C
@@ -91,19 +109,26 @@ config REGULATOR_WM831X
91 of PMIC devices. 109 of PMIC devices.
92 110
93config REGULATOR_WM8350 111config REGULATOR_WM8350
94 tristate "Wolfson Microelectroncis WM8350 AudioPlus PMIC" 112 tristate "Wolfson Microelectronics WM8350 AudioPlus PMIC"
95 depends on MFD_WM8350 113 depends on MFD_WM8350
96 help 114 help
97 This driver provides support for the voltage and current regulators 115 This driver provides support for the voltage and current regulators
98 of the WM8350 AudioPlus PMIC. 116 of the WM8350 AudioPlus PMIC.
99 117
100config REGULATOR_WM8400 118config REGULATOR_WM8400
101 tristate "Wolfson Microelectroncis WM8400 AudioPlus PMIC" 119 tristate "Wolfson Microelectronics WM8400 AudioPlus PMIC"
102 depends on MFD_WM8400 120 depends on MFD_WM8400
103 help 121 help
104 This driver provides support for the voltage regulators of the 122 This driver provides support for the voltage regulators of the
105 WM8400 AudioPlus PMIC. 123 WM8400 AudioPlus PMIC.
106 124
125config REGULATOR_WM8994
126 tristate "Wolfson Microelectronics WM8994 CODEC"
127 depends on MFD_WM8994
128 help
129 This driver provides support for the voltage regulators on the
130 WM8994 CODEC.
131
107config REGULATOR_DA903X 132config REGULATOR_DA903X
108 tristate "Support regulators on Dialog Semiconductor DA9030/DA9034 PMIC" 133 tristate "Support regulators on Dialog Semiconductor DA9030/DA9034 PMIC"
109 depends on PMIC_DA903X 134 depends on PMIC_DA903X
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
index b3c806c79415..e845b66ad59c 100644
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -9,15 +9,18 @@ obj-$(CONFIG_REGULATOR_VIRTUAL_CONSUMER) += virtual.o
9obj-$(CONFIG_REGULATOR_USERSPACE_CONSUMER) += userspace-consumer.o 9obj-$(CONFIG_REGULATOR_USERSPACE_CONSUMER) += userspace-consumer.o
10 10
11obj-$(CONFIG_REGULATOR_BQ24022) += bq24022.o 11obj-$(CONFIG_REGULATOR_BQ24022) += bq24022.o
12obj-$(CONFIG_REGULATOR_DUMMY) += dummy.o
12obj-$(CONFIG_REGULATOR_LP3971) += lp3971.o 13obj-$(CONFIG_REGULATOR_LP3971) += lp3971.o
13obj-$(CONFIG_REGULATOR_MAX1586) += max1586.o 14obj-$(CONFIG_REGULATOR_MAX1586) += max1586.o
14obj-$(CONFIG_REGULATOR_TWL4030) += twl-regulator.o 15obj-$(CONFIG_REGULATOR_TWL4030) += twl-regulator.o
16obj-$(CONFIG_REGULATOR_MAX8649) += max8649.o
15obj-$(CONFIG_REGULATOR_MAX8660) += max8660.o 17obj-$(CONFIG_REGULATOR_MAX8660) += max8660.o
16obj-$(CONFIG_REGULATOR_WM831X) += wm831x-dcdc.o 18obj-$(CONFIG_REGULATOR_WM831X) += wm831x-dcdc.o
17obj-$(CONFIG_REGULATOR_WM831X) += wm831x-isink.o 19obj-$(CONFIG_REGULATOR_WM831X) += wm831x-isink.o
18obj-$(CONFIG_REGULATOR_WM831X) += wm831x-ldo.o 20obj-$(CONFIG_REGULATOR_WM831X) += wm831x-ldo.o
19obj-$(CONFIG_REGULATOR_WM8350) += wm8350-regulator.o 21obj-$(CONFIG_REGULATOR_WM8350) += wm8350-regulator.o
20obj-$(CONFIG_REGULATOR_WM8400) += wm8400-regulator.o 22obj-$(CONFIG_REGULATOR_WM8400) += wm8400-regulator.o
23obj-$(CONFIG_REGULATOR_WM8994) += wm8994-regulator.o
21obj-$(CONFIG_REGULATOR_DA903X) += da903x.o 24obj-$(CONFIG_REGULATOR_DA903X) += da903x.o
22obj-$(CONFIG_REGULATOR_PCF50633) += pcf50633-regulator.o 25obj-$(CONFIG_REGULATOR_PCF50633) += pcf50633-regulator.o
23obj-$(CONFIG_REGULATOR_PCAP) += pcap-regulator.o 26obj-$(CONFIG_REGULATOR_PCAP) += pcap-regulator.o
diff --git a/drivers/regulator/ab3100.c b/drivers/regulator/ab3100.c
index b349db4504b7..7de950959ed2 100644
--- a/drivers/regulator/ab3100.c
+++ b/drivers/regulator/ab3100.c
@@ -561,7 +561,7 @@ ab3100_regulator_desc[AB3100_NUM_REGULATORS] = {
561 * for all the different regulators. 561 * for all the different regulators.
562 */ 562 */
563 563
564static int __init ab3100_regulators_probe(struct platform_device *pdev) 564static int __devinit ab3100_regulators_probe(struct platform_device *pdev)
565{ 565{
566 struct ab3100_platform_data *plfdata = pdev->dev.platform_data; 566 struct ab3100_platform_data *plfdata = pdev->dev.platform_data;
567 struct ab3100 *ab3100 = platform_get_drvdata(pdev); 567 struct ab3100 *ab3100 = platform_get_drvdata(pdev);
@@ -641,7 +641,7 @@ static int __init ab3100_regulators_probe(struct platform_device *pdev)
641 return 0; 641 return 0;
642} 642}
643 643
644static int __exit ab3100_regulators_remove(struct platform_device *pdev) 644static int __devexit ab3100_regulators_remove(struct platform_device *pdev)
645{ 645{
646 int i; 646 int i;
647 647
@@ -659,7 +659,7 @@ static struct platform_driver ab3100_regulators_driver = {
659 .owner = THIS_MODULE, 659 .owner = THIS_MODULE,
660 }, 660 },
661 .probe = ab3100_regulators_probe, 661 .probe = ab3100_regulators_probe,
662 .remove = __exit_p(ab3100_regulators_remove), 662 .remove = __devexit_p(ab3100_regulators_remove),
663}; 663};
664 664
665static __init int ab3100_regulators_init(void) 665static __init int ab3100_regulators_init(void)
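
The ab3100 annotations above (and the identical max1586/max8660 changes
further down) swap __init/__exit for __devinit/__devexit, since probe and
remove can run after boot for hot-pluggable devices. The __devexit_p()
wrapper exists so the remove pointer compiles away when the function
itself is discarded; simplified from <linux/init.h> of this era:

#if defined(MODULE) || defined(CONFIG_HOTPLUG)
#define __devexit_p(x)	x
#else
/* remove() can never run and its __devexit section was discarded,
 * so the driver struct must not keep a pointer to it */
#define __devexit_p(x)	NULL
#endif
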
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index b60a4c9f8f16..c7bbe30010f7 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -19,10 +19,13 @@
19#include <linux/err.h> 19#include <linux/err.h>
20#include <linux/mutex.h> 20#include <linux/mutex.h>
21#include <linux/suspend.h> 21#include <linux/suspend.h>
22#include <linux/delay.h>
22#include <linux/regulator/consumer.h> 23#include <linux/regulator/consumer.h>
23#include <linux/regulator/driver.h> 24#include <linux/regulator/driver.h>
24#include <linux/regulator/machine.h> 25#include <linux/regulator/machine.h>
25 26
27#include "dummy.h"
28
26#define REGULATOR_VERSION "0.5" 29#define REGULATOR_VERSION "0.5"
27 30
28static DEFINE_MUTEX(regulator_list_mutex); 31static DEFINE_MUTEX(regulator_list_mutex);
@@ -1084,6 +1087,13 @@ overflow_err:
1084 return NULL; 1087 return NULL;
1085} 1088}
1086 1089
1090static int _regulator_get_enable_time(struct regulator_dev *rdev)
1091{
1092 if (!rdev->desc->ops->enable_time)
1093 return 0;
1094 return rdev->desc->ops->enable_time(rdev);
1095}
1096
1087/* Internal regulator request function */ 1097/* Internal regulator request function */
1088static struct regulator *_regulator_get(struct device *dev, const char *id, 1098static struct regulator *_regulator_get(struct device *dev, const char *id,
1089 int exclusive) 1099 int exclusive)
@@ -1115,6 +1125,22 @@ static struct regulator *_regulator_get(struct device *dev, const char *id,
1115 goto found; 1125 goto found;
1116 } 1126 }
1117 } 1127 }
1128
1129#ifdef CONFIG_REGULATOR_DUMMY
1130 if (!devname)
1131 devname = "deviceless";
1132
1133 /* If the board didn't flag that it was fully constrained then
1134 * substitute in a dummy regulator so consumers can continue.
1135 */
1136 if (!has_full_constraints) {
1137 pr_warning("%s supply %s not found, using dummy regulator\n",
1138 devname, id);
1139 rdev = dummy_regulator_rdev;
1140 goto found;
1141 }
1142#endif
1143
1118 mutex_unlock(&regulator_list_mutex); 1144 mutex_unlock(&regulator_list_mutex);
1119 return regulator; 1145 return regulator;
1120 1146
@@ -1251,7 +1277,7 @@ static int _regulator_can_change_status(struct regulator_dev *rdev)
1251/* locks held by regulator_enable() */ 1277/* locks held by regulator_enable() */
1252static int _regulator_enable(struct regulator_dev *rdev) 1278static int _regulator_enable(struct regulator_dev *rdev)
1253{ 1279{
1254 int ret; 1280 int ret, delay;
1255 1281
1256 /* do we need to enable the supply regulator first */ 1282 /* do we need to enable the supply regulator first */
1257 if (rdev->supply) { 1283 if (rdev->supply) {
@@ -1275,13 +1301,34 @@ static int _regulator_enable(struct regulator_dev *rdev)
1275 if (!_regulator_can_change_status(rdev)) 1301 if (!_regulator_can_change_status(rdev))
1276 return -EPERM; 1302 return -EPERM;
1277 1303
1278 if (rdev->desc->ops->enable) { 1304 if (!rdev->desc->ops->enable)
1279 ret = rdev->desc->ops->enable(rdev);
1280 if (ret < 0)
1281 return ret;
1282 } else {
1283 return -EINVAL; 1305 return -EINVAL;
1306
1307 /* Query before enabling in case the enable time is
1308 * configuration dependent. */
1309 ret = _regulator_get_enable_time(rdev);
1310 if (ret >= 0) {
1311 delay = ret;
1312 } else {
1313 printk(KERN_WARNING
1314 "%s: enable_time() failed for %s: %d\n",
1315 __func__, rdev_get_name(rdev),
1316 ret);
1317 delay = 0;
1284 } 1318 }
1319
1320 /* Allow the regulator to ramp; it would be useful
1321 * to extend this for bulk operations so that the
1322 * regulators can ramp together. */
1323 ret = rdev->desc->ops->enable(rdev);
1324 if (ret < 0)
1325 return ret;
1326
1327 if (delay >= 1000)
1328 mdelay(delay / 1000);
1329 else if (delay)
1330 udelay(delay);
1331
1285 } else if (ret < 0) { 1332 } else if (ret < 0) {
1286 printk(KERN_ERR "%s: is_enabled() failed for %s: %d\n", 1333 printk(KERN_ERR "%s: is_enabled() failed for %s: %d\n",
1287 __func__, rdev_get_name(rdev), ret); 1334 __func__, rdev_get_name(rdev), ret);
@@ -1341,6 +1388,9 @@ static int _regulator_disable(struct regulator_dev *rdev)
1341 __func__, rdev_get_name(rdev)); 1388 __func__, rdev_get_name(rdev));
1342 return ret; 1389 return ret;
1343 } 1390 }
1391
1392 _notifier_call_chain(rdev, REGULATOR_EVENT_DISABLE,
1393 NULL);
1344 } 1394 }
1345 1395
1346 /* decrease our supplies ref count and disable if required */ 1396 /* decrease our supplies ref count and disable if required */
@@ -1399,8 +1449,8 @@ static int _regulator_force_disable(struct regulator_dev *rdev)
1399 return ret; 1449 return ret;
1400 } 1450 }
1401 /* notify other consumers that power has been forced off */ 1451 /* notify other consumers that power has been forced off */
1402 _notifier_call_chain(rdev, REGULATOR_EVENT_FORCE_DISABLE, 1452 _notifier_call_chain(rdev, REGULATOR_EVENT_FORCE_DISABLE |
1403 NULL); 1453 REGULATOR_EVENT_DISABLE, NULL);
1404 } 1454 }
1405 1455
1406 /* decrease our supplies ref count and disable if required */ 1456 /* decrease our supplies ref count and disable if required */
@@ -1434,9 +1484,9 @@ EXPORT_SYMBOL_GPL(regulator_force_disable);
1434 1484
1435static int _regulator_is_enabled(struct regulator_dev *rdev) 1485static int _regulator_is_enabled(struct regulator_dev *rdev)
1436{ 1486{
1437 /* sanity check */ 1487 /* If we don't know then assume that the regulator is always on */
1438 if (!rdev->desc->ops->is_enabled) 1488 if (!rdev->desc->ops->is_enabled)
1439 return -EINVAL; 1489 return 1;
1440 1490
1441 return rdev->desc->ops->is_enabled(rdev); 1491 return rdev->desc->ops->is_enabled(rdev);
1442} 1492}
@@ -2451,8 +2501,15 @@ EXPORT_SYMBOL_GPL(regulator_get_init_drvdata);
2451 2501
2452static int __init regulator_init(void) 2502static int __init regulator_init(void)
2453{ 2503{
2504 int ret;
2505
2454 printk(KERN_INFO "regulator: core version %s\n", REGULATOR_VERSION); 2506 printk(KERN_INFO "regulator: core version %s\n", REGULATOR_VERSION);
2455 return class_register(&regulator_class); 2507
2508 ret = class_register(&regulator_class);
2509
2510 regulator_dummy_init();
2511
2512 return ret;
2456} 2513}
2457 2514
2458/* init early to allow our consumers to complete system booting */ 2515/* init early to allow our consumers to complete system booting */
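
With the core.c changes above, _regulator_enable() now asks the driver
how long the output takes to ramp and sleeps (udelay() for
sub-millisecond values, mdelay() above that) before returning to the
consumer. A driver whose part has a fixed settling time only has to
report it; a minimal sketch, assuming a hypothetical part with a 300
microsecond ramp:

static int demo_enable_time(struct regulator_dev *rdev)
{
	return 300;	/* microseconds; the core delays this long
			 * after calling our enable() op */
}

static struct regulator_ops demo_ops = {
	/* ... enable, disable, is_enabled ... */
	.enable_time	= demo_enable_time,
};
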
diff --git a/drivers/regulator/dummy.c b/drivers/regulator/dummy.c
new file mode 100644
index 000000000000..c7410bde7b5d
--- /dev/null
+++ b/drivers/regulator/dummy.c
@@ -0,0 +1,66 @@
1/*
2 * dummy.c
3 *
4 * Copyright 2010 Wolfson Microelectronics PLC.
5 *
6 * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License as
10 * published by the Free Software Foundation; either version 2 of the
11 * License, or (at your option) any later version.
12 *
13 * This is useful for systems with mixed controllable and
14 * non-controllable regulators, as well as for allowing testing on
15 * systems with no controllable regulators.
16 */
17
18#include <linux/err.h>
19#include <linux/platform_device.h>
20#include <linux/regulator/driver.h>
21#include <linux/regulator/machine.h>
22
23#include "dummy.h"
24
25struct regulator_dev *dummy_regulator_rdev;
26
27static struct regulator_init_data dummy_initdata;
28
29static struct regulator_ops dummy_ops;
30
31static struct regulator_desc dummy_desc = {
32 .name = "dummy",
33 .id = -1,
34 .type = REGULATOR_VOLTAGE,
35 .owner = THIS_MODULE,
36 .ops = &dummy_ops,
37};
38
39static struct platform_device *dummy_pdev;
40
41void __init regulator_dummy_init(void)
42{
43 int ret;
44
45 dummy_pdev = platform_device_alloc("reg-dummy", -1);
46 if (!dummy_pdev) {
47 pr_err("Failed to allocate dummy regulator device\n");
48 return;
49 }
50
51 ret = platform_device_add(dummy_pdev);
52 if (ret != 0) {
53 pr_err("Failed to register dummy regulator device: %d\n", ret);
54 platform_device_put(dummy_pdev);
55 return;
56 }
57
58 dummy_regulator_rdev = regulator_register(&dummy_desc, NULL,
59 &dummy_initdata, NULL);
60 if (IS_ERR(dummy_regulator_rdev)) {
61 ret = PTR_ERR(dummy_regulator_rdev);
62 pr_err("Failed to register regulator: %d\n", ret);
63 platform_device_unregister(dummy_pdev);
64 return;
65 }
66}
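
Together with the relaxed _regulator_is_enabled() above (a missing
is_enabled op now means "always on"), the dummy regulator lets consumer
code run unmodified on boards that never wired up a real supply. A
consumer-side sketch, with "vcc" as an illustrative supply name:

struct regulator *vcc = regulator_get(dev, "vcc");

if (IS_ERR(vcc))
	return PTR_ERR(vcc);	/* only hit on fully-constrained boards */

/* On an unconstrained board with REGULATOR_DUMMY this is the dummy:
 * it reports as already enabled, so enable() succeeds and simply
 * bumps the use count. */
regulator_enable(vcc);
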
diff --git a/drivers/regulator/dummy.h b/drivers/regulator/dummy.h
new file mode 100644
index 000000000000..3921c0e24249
--- /dev/null
+++ b/drivers/regulator/dummy.h
@@ -0,0 +1,31 @@
1/*
2 * dummy.h
3 *
4 * Copyright 2010 Wolfson Microelectronics PLC.
5 *
6 * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License as
10 * published by the Free Software Foundation; either version 2 of the
11 * License, or (at your option) any later version.
12 *
13 * This is useful for systems with mixed controllable and
14 * non-controllable regulators, as well as for allowing testing on
15 * systems with no controllable regulators.
16 */
17
18#ifndef _DUMMY_H
19#define _DUMMY_H
20
21struct regulator_dev;
22
23extern struct regulator_dev *dummy_regulator_rdev;
24
25#ifdef CONFIG_REGULATOR_DUMMY
26void __init regulator_dummy_init(void);
27#else
28static inline void regulator_dummy_init(void) { }
29#endif
30
31#endif
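
The #ifdef in core.c keys off has_full_constraints, so a board that
really has described every supply keeps the strict failure behaviour
even with REGULATOR_DUMMY built in. A sketch of the opt-out in board
setup code (the machine-init function name is illustrative):

#include <linux/regulator/machine.h>

static void __init demo_board_init(void)
{
	/* ... register all regulators and their consumers ... */

	/* Declare that every supply is described; lookups that still
	 * fail will now return an error instead of the dummy. */
	regulator_has_full_constraints();
}
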
diff --git a/drivers/regulator/fixed.c b/drivers/regulator/fixed.c
index f9f516a3028a..d11f7622430b 100644
--- a/drivers/regulator/fixed.c
+++ b/drivers/regulator/fixed.c
@@ -24,14 +24,16 @@
24#include <linux/regulator/driver.h> 24#include <linux/regulator/driver.h>
25#include <linux/regulator/fixed.h> 25#include <linux/regulator/fixed.h>
26#include <linux/gpio.h> 26#include <linux/gpio.h>
27#include <linux/delay.h>
27 28
28struct fixed_voltage_data { 29struct fixed_voltage_data {
29 struct regulator_desc desc; 30 struct regulator_desc desc;
30 struct regulator_dev *dev; 31 struct regulator_dev *dev;
31 int microvolts; 32 int microvolts;
32 int gpio; 33 int gpio;
33 unsigned enable_high:1; 34 unsigned startup_delay;
34 unsigned is_enabled:1; 35 bool enable_high;
36 bool is_enabled;
35}; 37};
36 38
37static int fixed_voltage_is_enabled(struct regulator_dev *dev) 39static int fixed_voltage_is_enabled(struct regulator_dev *dev)
@@ -47,7 +49,7 @@ static int fixed_voltage_enable(struct regulator_dev *dev)
47 49
48 if (gpio_is_valid(data->gpio)) { 50 if (gpio_is_valid(data->gpio)) {
49 gpio_set_value_cansleep(data->gpio, data->enable_high); 51 gpio_set_value_cansleep(data->gpio, data->enable_high);
50 data->is_enabled = 1; 52 data->is_enabled = true;
51 } 53 }
52 54
53 return 0; 55 return 0;
@@ -59,12 +61,19 @@ static int fixed_voltage_disable(struct regulator_dev *dev)
59 61
60 if (gpio_is_valid(data->gpio)) { 62 if (gpio_is_valid(data->gpio)) {
61 gpio_set_value_cansleep(data->gpio, !data->enable_high); 63 gpio_set_value_cansleep(data->gpio, !data->enable_high);
62 data->is_enabled = 0; 64 data->is_enabled = false;
63 } 65 }
64 66
65 return 0; 67 return 0;
66} 68}
67 69
70static int fixed_voltage_enable_time(struct regulator_dev *dev)
71{
72 struct fixed_voltage_data *data = rdev_get_drvdata(dev);
73
74 return data->startup_delay;
75}
76
68static int fixed_voltage_get_voltage(struct regulator_dev *dev) 77static int fixed_voltage_get_voltage(struct regulator_dev *dev)
69{ 78{
70 struct fixed_voltage_data *data = rdev_get_drvdata(dev); 79 struct fixed_voltage_data *data = rdev_get_drvdata(dev);
@@ -87,11 +96,12 @@ static struct regulator_ops fixed_voltage_ops = {
87 .is_enabled = fixed_voltage_is_enabled, 96 .is_enabled = fixed_voltage_is_enabled,
88 .enable = fixed_voltage_enable, 97 .enable = fixed_voltage_enable,
89 .disable = fixed_voltage_disable, 98 .disable = fixed_voltage_disable,
99 .enable_time = fixed_voltage_enable_time,
90 .get_voltage = fixed_voltage_get_voltage, 100 .get_voltage = fixed_voltage_get_voltage,
91 .list_voltage = fixed_voltage_list_voltage, 101 .list_voltage = fixed_voltage_list_voltage,
92}; 102};
93 103
94static int regulator_fixed_voltage_probe(struct platform_device *pdev) 104static int __devinit reg_fixed_voltage_probe(struct platform_device *pdev)
95{ 105{
96 struct fixed_voltage_config *config = pdev->dev.platform_data; 106 struct fixed_voltage_config *config = pdev->dev.platform_data;
97 struct fixed_voltage_data *drvdata; 107 struct fixed_voltage_data *drvdata;
@@ -117,6 +127,7 @@ static int regulator_fixed_voltage_probe(struct platform_device *pdev)
117 127
118 drvdata->microvolts = config->microvolts; 128 drvdata->microvolts = config->microvolts;
119 drvdata->gpio = config->gpio; 129 drvdata->gpio = config->gpio;
130 drvdata->startup_delay = config->startup_delay;
120 131
121 if (gpio_is_valid(config->gpio)) { 132 if (gpio_is_valid(config->gpio)) {
122 drvdata->enable_high = config->enable_high; 133 drvdata->enable_high = config->enable_high;
@@ -163,7 +174,7 @@ static int regulator_fixed_voltage_probe(struct platform_device *pdev)
163 /* Regulator without GPIO control is considered 174 /* Regulator without GPIO control is considered
164 * always enabled 175 * always enabled
165 */ 176 */
166 drvdata->is_enabled = 1; 177 drvdata->is_enabled = true;
167 } 178 }
168 179
169 drvdata->dev = regulator_register(&drvdata->desc, &pdev->dev, 180 drvdata->dev = regulator_register(&drvdata->desc, &pdev->dev,
@@ -191,7 +202,7 @@ err:
191 return ret; 202 return ret;
192} 203}
193 204
194static int regulator_fixed_voltage_remove(struct platform_device *pdev) 205static int __devexit reg_fixed_voltage_remove(struct platform_device *pdev)
195{ 206{
196 struct fixed_voltage_data *drvdata = platform_get_drvdata(pdev); 207 struct fixed_voltage_data *drvdata = platform_get_drvdata(pdev);
197 208
@@ -205,10 +216,11 @@ static int regulator_fixed_voltage_remove(struct platform_device *pdev)
205} 216}
206 217
207static struct platform_driver regulator_fixed_voltage_driver = { 218static struct platform_driver regulator_fixed_voltage_driver = {
208 .probe = regulator_fixed_voltage_probe, 219 .probe = reg_fixed_voltage_probe,
209 .remove = regulator_fixed_voltage_remove, 220 .remove = __devexit_p(reg_fixed_voltage_remove),
210 .driver = { 221 .driver = {
211 .name = "reg-fixed-voltage", 222 .name = "reg-fixed-voltage",
223 .owner = THIS_MODULE,
212 }, 224 },
213}; 225};
214 226
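
The new startup_delay field reaches the core through the enable_time()
op added above. A board-code sketch, assuming a hypothetical 3.3V rail
gated by GPIO 42 that needs 500 microseconds to come up (all names and
numbers are illustrative):

static struct regulator_init_data demo_init_data = {
	.constraints = {
		.valid_ops_mask	= REGULATOR_CHANGE_STATUS,
	},
};

static struct fixed_voltage_config demo_cfg = {
	.supply_name	= "vcc-demo",
	.microvolts	= 3300000,
	.gpio		= 42,
	.enable_high	= 1,
	.startup_delay	= 500,		/* us, returned by enable_time() */
	.init_data	= &demo_init_data,
};

static struct platform_device demo_fixed_reg = {
	.name	= "reg-fixed-voltage",
	.id	= 0,
	.dev	= { .platform_data = &demo_cfg },
};
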
diff --git a/drivers/regulator/lp3971.c b/drivers/regulator/lp3971.c
index 4f33a0f4a179..f5532ed79272 100644
--- a/drivers/regulator/lp3971.c
+++ b/drivers/regulator/lp3971.c
@@ -54,7 +54,7 @@ static int lp3971_set_bits(struct lp3971 *lp3971, u8 reg, u16 mask, u16 val);
54#define LP3971_BUCK2_BASE 0x29 54#define LP3971_BUCK2_BASE 0x29
55#define LP3971_BUCK3_BASE 0x32 55#define LP3971_BUCK3_BASE 0x32
56 56
57const static int buck_base_addr[] = { 57static const int buck_base_addr[] = {
58 LP3971_BUCK1_BASE, 58 LP3971_BUCK1_BASE,
59 LP3971_BUCK2_BASE, 59 LP3971_BUCK2_BASE,
60 LP3971_BUCK3_BASE, 60 LP3971_BUCK3_BASE,
@@ -63,7 +63,7 @@ const static int buck_base_addr[] = {
63#define LP3971_BUCK_TARGET_VOL1_REG(x) (buck_base_addr[x]) 63#define LP3971_BUCK_TARGET_VOL1_REG(x) (buck_base_addr[x])
64#define LP3971_BUCK_TARGET_VOL2_REG(x) (buck_base_addr[x]+1) 64#define LP3971_BUCK_TARGET_VOL2_REG(x) (buck_base_addr[x]+1)
65 65
66const static int buck_voltage_map[] = { 66static const int buck_voltage_map[] = {
67 0, 800, 850, 900, 950, 1000, 1050, 1100, 67 0, 800, 850, 900, 950, 1000, 1050, 1100,
68 1150, 1200, 1250, 1300, 1350, 1400, 1450, 1500, 68 1150, 1200, 1250, 1300, 1350, 1400, 1450, 1500,
69 1550, 1600, 1650, 1700, 1800, 1900, 2500, 2800, 69 1550, 1600, 1650, 1700, 1800, 1900, 2500, 2800,
@@ -96,17 +96,17 @@ const static int buck_voltage_map[] = {
96#define LDO_VOL_CONTR_SHIFT(x) ((x & 1) << 2) 96#define LDO_VOL_CONTR_SHIFT(x) ((x & 1) << 2)
97#define LDO_VOL_CONTR_MASK 0x0f 97#define LDO_VOL_CONTR_MASK 0x0f
98 98
99const static int ldo45_voltage_map[] = { 99static const int ldo45_voltage_map[] = {
100 1000, 1050, 1100, 1150, 1200, 1250, 1300, 1350, 100 1000, 1050, 1100, 1150, 1200, 1250, 1300, 1350,
101 1400, 1500, 1800, 1900, 2500, 2800, 3000, 3300, 101 1400, 1500, 1800, 1900, 2500, 2800, 3000, 3300,
102}; 102};
103 103
104const static int ldo123_voltage_map[] = { 104static const int ldo123_voltage_map[] = {
105 1800, 1900, 2000, 2100, 2200, 2300, 2400, 2500, 105 1800, 1900, 2000, 2100, 2200, 2300, 2400, 2500,
106 2600, 2700, 2800, 2900, 3000, 3100, 3200, 3300, 106 2600, 2700, 2800, 2900, 3000, 3100, 3200, 3300,
107}; 107};
108 108
109const static int *ldo_voltage_map[] = { 109static const int *ldo_voltage_map[] = {
110 ldo123_voltage_map, /* LDO1 */ 110 ldo123_voltage_map, /* LDO1 */
111 ldo123_voltage_map, /* LDO2 */ 111 ldo123_voltage_map, /* LDO2 */
112 ldo123_voltage_map, /* LDO3 */ 112 ldo123_voltage_map, /* LDO3 */
@@ -431,20 +431,20 @@ static int lp3971_set_bits(struct lp3971 *lp3971, u8 reg, u16 mask, u16 val)
431 return ret; 431 return ret;
432} 432}
433 433
434static int setup_regulators(struct lp3971 *lp3971, 434static int __devinit setup_regulators(struct lp3971 *lp3971,
435 struct lp3971_platform_data *pdata) 435 struct lp3971_platform_data *pdata)
436{ 436{
437 int i, err; 437 int i, err;
438 int num_regulators = pdata->num_regulators; 438
439 lp3971->num_regulators = num_regulators; 439 lp3971->num_regulators = pdata->num_regulators;
440 lp3971->rdev = kzalloc(sizeof(struct regulator_dev *) * num_regulators, 440 lp3971->rdev = kcalloc(pdata->num_regulators,
441 GFP_KERNEL); 441 sizeof(struct regulator_dev *), GFP_KERNEL);
442 442
443 /* Instantiate the regulators */ 443 /* Instantiate the regulators */
444 for (i = 0; i < num_regulators; i++) { 444 for (i = 0; i < pdata->num_regulators; i++) {
445 int id = pdata->regulators[i].id; 445 struct lp3971_regulator_subdev *reg = &pdata->regulators[i];
446 lp3971->rdev[i] = regulator_register(&regulators[id], 446 lp3971->rdev[i] = regulator_register(&regulators[reg->id],
447 lp3971->dev, pdata->regulators[i].initdata, lp3971); 447 lp3971->dev, reg->initdata, lp3971);
448 448
449 if (IS_ERR(lp3971->rdev[i])) { 449 if (IS_ERR(lp3971->rdev[i])) {
450 err = PTR_ERR(lp3971->rdev[i]); 450 err = PTR_ERR(lp3971->rdev[i]);
@@ -455,10 +455,10 @@ static int setup_regulators(struct lp3971 *lp3971,
455 } 455 }
456 456
457 return 0; 457 return 0;
458
458error: 459error:
459 for (i = 0; i < num_regulators; i++) 460 while (--i >= 0)
460 if (lp3971->rdev[i]) 461 regulator_unregister(lp3971->rdev[i]);
461 regulator_unregister(lp3971->rdev[i]);
462 kfree(lp3971->rdev); 462 kfree(lp3971->rdev);
463 lp3971->rdev = NULL; 463 lp3971->rdev = NULL;
464 return err; 464 return err;
@@ -472,15 +472,17 @@ static int __devinit lp3971_i2c_probe(struct i2c_client *i2c,
472 int ret; 472 int ret;
473 u16 val; 473 u16 val;
474 474
475 lp3971 = kzalloc(sizeof(struct lp3971), GFP_KERNEL); 475 if (!pdata) {
476 if (lp3971 == NULL) { 476 dev_dbg(&i2c->dev, "No platform init data supplied\n");
477 ret = -ENOMEM; 477 return -ENODEV;
478 goto err;
479 } 478 }
480 479
480 lp3971 = kzalloc(sizeof(struct lp3971), GFP_KERNEL);
481 if (lp3971 == NULL)
482 return -ENOMEM;
483
481 lp3971->i2c = i2c; 484 lp3971->i2c = i2c;
482 lp3971->dev = &i2c->dev; 485 lp3971->dev = &i2c->dev;
483 i2c_set_clientdata(i2c, lp3971);
484 486
485 mutex_init(&lp3971->io_lock); 487 mutex_init(&lp3971->io_lock);
486 488
@@ -493,19 +495,15 @@ static int __devinit lp3971_i2c_probe(struct i2c_client *i2c,
493 goto err_detect; 495 goto err_detect;
494 } 496 }
495 497
496 if (pdata) { 498 ret = setup_regulators(lp3971, pdata);
497 ret = setup_regulators(lp3971, pdata); 499 if (ret < 0)
498 if (ret < 0) 500 goto err_detect;
499 goto err_detect;
500 } else
501 dev_warn(lp3971->dev, "No platform init data supplied\n");
502 501
502 i2c_set_clientdata(i2c, lp3971);
503 return 0; 503 return 0;
504 504
505err_detect: 505err_detect:
506 i2c_set_clientdata(i2c, NULL);
507 kfree(lp3971); 506 kfree(lp3971);
508err:
509 return ret; 507 return ret;
510} 508}
511 509
@@ -513,11 +511,13 @@ static int __devexit lp3971_i2c_remove(struct i2c_client *i2c)
513{ 511{
514 struct lp3971 *lp3971 = i2c_get_clientdata(i2c); 512 struct lp3971 *lp3971 = i2c_get_clientdata(i2c);
515 int i; 513 int i;
514
515 i2c_set_clientdata(i2c, NULL);
516
516 for (i = 0; i < lp3971->num_regulators; i++) 517 for (i = 0; i < lp3971->num_regulators; i++)
517 if (lp3971->rdev[i]) 518 regulator_unregister(lp3971->rdev[i]);
518 regulator_unregister(lp3971->rdev[i]); 519
519 kfree(lp3971->rdev); 520 kfree(lp3971->rdev);
520 i2c_set_clientdata(i2c, NULL);
521 kfree(lp3971); 521 kfree(lp3971);
522 522
523 return 0; 523 return 0;
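
The reworked lp3971 error path is the standard partial-unwind idiom: on
a failure at index i, only the 0..i-1 registrations that actually
succeeded are torn down, whereas the old loop could call
regulator_unregister() on the failing slot's ERR_PTR value. In isolation
the pattern looks like this (a sketch; descs, init and drvdata are
assumed to be set up by the caller):

	for (i = 0; i < n; i++) {
		rdev[i] = regulator_register(&descs[i], dev, init[i], drvdata);
		if (IS_ERR(rdev[i])) {
			err = PTR_ERR(rdev[i]);
			goto error;
		}
	}
	return 0;

error:
	while (--i >= 0)	/* unwinds i-1 down to 0; skips rdev[i] */
		regulator_unregister(rdev[i]);
	return err;
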
diff --git a/drivers/regulator/max1586.c b/drivers/regulator/max1586.c
index 2c082d3ef484..a49fc952c9a9 100644
--- a/drivers/regulator/max1586.c
+++ b/drivers/regulator/max1586.c
@@ -179,8 +179,8 @@ static struct regulator_desc max1586_reg[] = {
179 }, 179 },
180}; 180};
181 181
182static int max1586_pmic_probe(struct i2c_client *client, 182static int __devinit max1586_pmic_probe(struct i2c_client *client,
183 const struct i2c_device_id *i2c_id) 183 const struct i2c_device_id *i2c_id)
184{ 184{
185 struct regulator_dev **rdev; 185 struct regulator_dev **rdev;
186 struct max1586_platform_data *pdata = client->dev.platform_data; 186 struct max1586_platform_data *pdata = client->dev.platform_data;
@@ -235,7 +235,7 @@ out:
235 return ret; 235 return ret;
236} 236}
237 237
238static int max1586_pmic_remove(struct i2c_client *client) 238static int __devexit max1586_pmic_remove(struct i2c_client *client)
239{ 239{
240 struct regulator_dev **rdev = i2c_get_clientdata(client); 240 struct regulator_dev **rdev = i2c_get_clientdata(client);
241 int i; 241 int i;
@@ -257,9 +257,10 @@ MODULE_DEVICE_TABLE(i2c, max1586_id);
257 257
258static struct i2c_driver max1586_pmic_driver = { 258static struct i2c_driver max1586_pmic_driver = {
259 .probe = max1586_pmic_probe, 259 .probe = max1586_pmic_probe,
260 .remove = max1586_pmic_remove, 260 .remove = __devexit_p(max1586_pmic_remove),
261 .driver = { 261 .driver = {
262 .name = "max1586", 262 .name = "max1586",
263 .owner = THIS_MODULE,
263 }, 264 },
264 .id_table = max1586_id, 265 .id_table = max1586_id,
265}; 266};
diff --git a/drivers/regulator/max8649.c b/drivers/regulator/max8649.c
new file mode 100644
index 000000000000..3ebdf698c648
--- /dev/null
+++ b/drivers/regulator/max8649.c
@@ -0,0 +1,408 @@
1/*
2 * Regulators driver for Maxim max8649
3 *
4 * Copyright (C) 2009-2010 Marvell International Ltd.
5 * Haojian Zhuang <haojian.zhuang@marvell.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11#include <linux/kernel.h>
12#include <linux/module.h>
13#include <linux/err.h>
14#include <linux/i2c.h>
15#include <linux/platform_device.h>
16#include <linux/regulator/driver.h>
17#include <linux/regulator/max8649.h>
18
19#define MAX8649_DCDC_VMIN 750000 /* uV */
20#define MAX8649_DCDC_VMAX 1380000 /* uV */
21#define MAX8649_DCDC_STEP 10000 /* uV */
22#define MAX8649_VOL_MASK 0x3f
23
24/* Registers */
25#define MAX8649_MODE0 0x00
26#define MAX8649_MODE1 0x01
27#define MAX8649_MODE2 0x02
28#define MAX8649_MODE3 0x03
29#define MAX8649_CONTROL 0x04
30#define MAX8649_SYNC 0x05
31#define MAX8649_RAMP 0x06
32#define MAX8649_CHIP_ID1 0x08
33#define MAX8649_CHIP_ID2 0x09
34
35/* Bits */
36#define MAX8649_EN_PD (1 << 7)
37#define MAX8649_VID0_PD (1 << 6)
38#define MAX8649_VID1_PD (1 << 5)
39#define MAX8649_VID_MASK (3 << 5)
40
41#define MAX8649_FORCE_PWM (1 << 7)
42#define MAX8649_SYNC_EXTCLK (1 << 6)
43
44#define MAX8649_EXT_MASK (3 << 6)
45
46#define MAX8649_RAMP_MASK (7 << 5)
47#define MAX8649_RAMP_DOWN (1 << 1)
48
49struct max8649_regulator_info {
50 struct regulator_dev *regulator;
51 struct i2c_client *i2c;
52 struct device *dev;
53 struct mutex io_lock;
54
55 int vol_reg;
56 unsigned mode:2; /* bit[1:0] = VID1, VID0 */
57 unsigned extclk_freq:2;
58 unsigned extclk:1;
59 unsigned ramp_timing:3;
60 unsigned ramp_down:1;
61};
62
63/* I2C operations */
64
65static inline int max8649_read_device(struct i2c_client *i2c,
66 int reg, int bytes, void *dest)
67{
68 unsigned char data;
69 int ret;
70
71 data = (unsigned char)reg;
72 ret = i2c_master_send(i2c, &data, 1);
73 if (ret < 0)
74 return ret;
75 ret = i2c_master_recv(i2c, dest, bytes);
76 if (ret < 0)
77 return ret;
78 return 0;
79}
80
81static inline int max8649_write_device(struct i2c_client *i2c,
82 int reg, int bytes, void *src)
83{
84 unsigned char buf[bytes + 1];
85 int ret;
86
87 buf[0] = (unsigned char)reg;
88 memcpy(&buf[1], src, bytes);
89
90 ret = i2c_master_send(i2c, buf, bytes + 1);
91 if (ret < 0)
92 return ret;
93 return 0;
94}
95
96static int max8649_reg_read(struct i2c_client *i2c, int reg)
97{
98 struct max8649_regulator_info *info = i2c_get_clientdata(i2c);
99 unsigned char data;
100 int ret;
101
102 mutex_lock(&info->io_lock);
103 ret = max8649_read_device(i2c, reg, 1, &data);
104 mutex_unlock(&info->io_lock);
105
106 if (ret < 0)
107 return ret;
108 return (int)data;
109}
110
111static int max8649_set_bits(struct i2c_client *i2c, int reg,
112 unsigned char mask, unsigned char data)
113{
114 struct max8649_regulator_info *info = i2c_get_clientdata(i2c);
115 unsigned char value;
116 int ret;
117
118 mutex_lock(&info->io_lock);
119 ret = max8649_read_device(i2c, reg, 1, &value);
120 if (ret < 0)
121 goto out;
122 value &= ~mask;
123 value |= data;
124 ret = max8649_write_device(i2c, reg, 1, &value);
125out:
126 mutex_unlock(&info->io_lock);
127 return ret;
128}
129
130static inline int check_range(int min_uV, int max_uV)
131{
132 if ((min_uV < MAX8649_DCDC_VMIN) || (max_uV > MAX8649_DCDC_VMAX)
133 || (min_uV > max_uV))
134 return -EINVAL;
135 return 0;
136}
137
138static int max8649_list_voltage(struct regulator_dev *rdev, unsigned index)
139{
140 return (MAX8649_DCDC_VMIN + index * MAX8649_DCDC_STEP);
141}
142
143static int max8649_get_voltage(struct regulator_dev *rdev)
144{
145 struct max8649_regulator_info *info = rdev_get_drvdata(rdev);
146 unsigned char data;
147 int ret;
148
149 ret = max8649_reg_read(info->i2c, info->vol_reg);
150 if (ret < 0)
151 return ret;
152 data = (unsigned char)ret & MAX8649_VOL_MASK;
153 return max8649_list_voltage(rdev, data);
154}
155
156static int max8649_set_voltage(struct regulator_dev *rdev,
157 int min_uV, int max_uV)
158{
159 struct max8649_regulator_info *info = rdev_get_drvdata(rdev);
160 unsigned char data, mask;
161
162 if (check_range(min_uV, max_uV)) {
163 dev_err(info->dev, "invalid voltage range (%d, %d) uV\n",
164 min_uV, max_uV);
165 return -EINVAL;
166 }
167 data = (min_uV - MAX8649_DCDC_VMIN + MAX8649_DCDC_STEP - 1)
168 / MAX8649_DCDC_STEP;
169 mask = MAX8649_VOL_MASK;
170
171 return max8649_set_bits(info->i2c, info->vol_reg, mask, data);
172}
173
174/* EN_PD means pulldown on EN input */
175static int max8649_enable(struct regulator_dev *rdev)
176{
177 struct max8649_regulator_info *info = rdev_get_drvdata(rdev);
178 return max8649_set_bits(info->i2c, MAX8649_CONTROL, MAX8649_EN_PD, 0);
179}
180
181/*
182 * Apply the internal pulldown resistor on the EN input pin.
183 * An external pulldown on EN would be preferable where available.
184 */
185static int max8649_disable(struct regulator_dev *rdev)
186{
187 struct max8649_regulator_info *info = rdev_get_drvdata(rdev);
188 return max8649_set_bits(info->i2c, MAX8649_CONTROL, MAX8649_EN_PD,
189 MAX8649_EN_PD);
190}
191
192static int max8649_is_enabled(struct regulator_dev *rdev)
193{
194 struct max8649_regulator_info *info = rdev_get_drvdata(rdev);
195 int ret;
196
197 ret = max8649_reg_read(info->i2c, MAX8649_CONTROL);
198 if (ret < 0)
199 return ret;
200 return !((unsigned char)ret & MAX8649_EN_PD);
201}
202
203static int max8649_enable_time(struct regulator_dev *rdev)
204{
205 struct max8649_regulator_info *info = rdev_get_drvdata(rdev);
206 int voltage, rate, ret;
207
208 /* get voltage */
209 ret = max8649_reg_read(info->i2c, info->vol_reg);
210 if (ret < 0)
211 return ret;
212 ret &= MAX8649_VOL_MASK;
213 voltage = max8649_list_voltage(rdev, (unsigned char)ret); /* uV */
214
215 /* get rate */
216 ret = max8649_reg_read(info->i2c, MAX8649_RAMP);
217 if (ret < 0)
218 return ret;
219 ret = (ret & MAX8649_RAMP_MASK) >> 5;
220 rate = (32 * 1000) >> ret; /* uV/uS */
221
222 return (voltage / rate);
223}
224
225static int max8649_set_mode(struct regulator_dev *rdev, unsigned int mode)
226{
227 struct max8649_regulator_info *info = rdev_get_drvdata(rdev);
228
229 switch (mode) {
230 case REGULATOR_MODE_FAST:
231 max8649_set_bits(info->i2c, info->vol_reg, MAX8649_FORCE_PWM,
232 MAX8649_FORCE_PWM);
233 break;
234 case REGULATOR_MODE_NORMAL:
235 max8649_set_bits(info->i2c, info->vol_reg,
236 MAX8649_FORCE_PWM, 0);
237 break;
238 default:
239 return -EINVAL;
240 }
241 return 0;
242}
243
244static unsigned int max8649_get_mode(struct regulator_dev *rdev)
245{
246 struct max8649_regulator_info *info = rdev_get_drvdata(rdev);
247 int ret;
248
249 ret = max8649_reg_read(info->i2c, info->vol_reg);
250 if (ret & MAX8649_FORCE_PWM)
251 return REGULATOR_MODE_FAST;
252 return REGULATOR_MODE_NORMAL;
253}
254
255static struct regulator_ops max8649_dcdc_ops = {
256 .set_voltage = max8649_set_voltage,
257 .get_voltage = max8649_get_voltage,
258 .list_voltage = max8649_list_voltage,
259 .enable = max8649_enable,
260 .disable = max8649_disable,
261 .is_enabled = max8649_is_enabled,
262 .enable_time = max8649_enable_time,
263 .set_mode = max8649_set_mode,
264 .get_mode = max8649_get_mode,
265
266};
267
268static struct regulator_desc dcdc_desc = {
269 .name = "max8649",
270 .ops = &max8649_dcdc_ops,
271 .type = REGULATOR_VOLTAGE,
272 .n_voltages = 1 << 6,
273 .owner = THIS_MODULE,
274};
275
276static int __devinit max8649_regulator_probe(struct i2c_client *client,
277 const struct i2c_device_id *id)
278{
279 struct max8649_platform_data *pdata = client->dev.platform_data;
280 struct max8649_regulator_info *info = NULL;
281 unsigned char data;
282 int ret;
283
284 info = kzalloc(sizeof(struct max8649_regulator_info), GFP_KERNEL);
285 if (!info) {
286		dev_err(&client->dev, "Not enough memory\n");
287 return -ENOMEM;
288 }
289
290 info->i2c = client;
291 info->dev = &client->dev;
292 mutex_init(&info->io_lock);
293 i2c_set_clientdata(client, info);
294
295 info->mode = pdata->mode;
296 switch (info->mode) {
297 case 0:
298 info->vol_reg = MAX8649_MODE0;
299 break;
300 case 1:
301 info->vol_reg = MAX8649_MODE1;
302 break;
303 case 2:
304 info->vol_reg = MAX8649_MODE2;
305 break;
306 case 3:
307 info->vol_reg = MAX8649_MODE3;
308 break;
309 default:
310 break;
311 }
312
313 ret = max8649_reg_read(info->i2c, MAX8649_CHIP_ID1);
314 if (ret < 0) {
315 dev_err(info->dev, "Failed to detect ID of MAX8649:%d\n",
316 ret);
317 goto out;
318 }
319 dev_info(info->dev, "Detected MAX8649 (ID:%x)\n", ret);
320
321 /* enable VID0 & VID1 */
322 max8649_set_bits(info->i2c, MAX8649_CONTROL, MAX8649_VID_MASK, 0);
323
324 /* enable/disable external clock synchronization */
325 info->extclk = pdata->extclk;
326 data = (info->extclk) ? MAX8649_SYNC_EXTCLK : 0;
327 max8649_set_bits(info->i2c, info->vol_reg, MAX8649_SYNC_EXTCLK, data);
328 if (info->extclk) {
329 /* set external clock frequency */
330 info->extclk_freq = pdata->extclk_freq;
331 max8649_set_bits(info->i2c, MAX8649_SYNC, MAX8649_EXT_MASK,
332 info->extclk_freq);
333 }
334
335 if (pdata->ramp_timing) {
336 info->ramp_timing = pdata->ramp_timing;
337 max8649_set_bits(info->i2c, MAX8649_RAMP, MAX8649_RAMP_MASK,
338 info->ramp_timing << 5);
339 }
340
341 info->ramp_down = pdata->ramp_down;
342 if (info->ramp_down) {
343 max8649_set_bits(info->i2c, MAX8649_RAMP, MAX8649_RAMP_DOWN,
344 MAX8649_RAMP_DOWN);
345 }
346
347 info->regulator = regulator_register(&dcdc_desc, &client->dev,
348 pdata->regulator, info);
349 if (IS_ERR(info->regulator)) {
350 dev_err(info->dev, "failed to register regulator %s\n",
351 dcdc_desc.name);
352 ret = PTR_ERR(info->regulator);
353 goto out;
354 }
355
356	dev_info(info->dev, "MAX8649 regulator device detected.\n");
357 return 0;
358out:
359 kfree(info);
360 return ret;
361}
362
363static int __devexit max8649_regulator_remove(struct i2c_client *client)
364{
365 struct max8649_regulator_info *info = i2c_get_clientdata(client);
366
367 if (info) {
368 if (info->regulator)
369 regulator_unregister(info->regulator);
370 kfree(info);
371 }
372 i2c_set_clientdata(client, NULL);
373
374 return 0;
375}
376
377static const struct i2c_device_id max8649_id[] = {
378 { "max8649", 0 },
379 { }
380};
381MODULE_DEVICE_TABLE(i2c, max8649_id);
382
383static struct i2c_driver max8649_driver = {
384 .probe = max8649_regulator_probe,
385 .remove = __devexit_p(max8649_regulator_remove),
386 .driver = {
387 .name = "max8649",
388 },
389 .id_table = max8649_id,
390};
391
392static int __init max8649_init(void)
393{
394 return i2c_add_driver(&max8649_driver);
395}
396subsys_initcall(max8649_init);
397
398static void __exit max8649_exit(void)
399{
400 i2c_del_driver(&max8649_driver);
401}
402module_exit(max8649_exit);
403
404/* Module information */
405MODULE_DESCRIPTION("MAXIM 8649 voltage regulator driver");
406MODULE_AUTHOR("Haojian Zhuang <haojian.zhuang@marvell.com>");
407MODULE_LICENSE("GPL");
408
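
max8649_enable_time() above derives the settling time from the
programmed target voltage and the RAMP register: the fastest setting
ramps at 32 mV/us and each increment of the 3-bit field halves the rate.
Worked numbers, as a sketch (demo_ramp_us is illustrative, not part of
the driver):

static int demo_ramp_us(int target_uv, unsigned int ramp_field)
{
	int rate = (32 * 1000) >> ramp_field;	/* uV/us, as in the driver */

	return target_uv / rate;
}

/*
 * demo_ramp_us(1380000, 0) == 43   -> the core uses udelay(43)
 * demo_ramp_us(1380000, 7) == 5520 -> the core uses mdelay(5)
 */
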
diff --git a/drivers/regulator/max8660.c b/drivers/regulator/max8660.c
index acc2fb7b6087..f12f1bb62138 100644
--- a/drivers/regulator/max8660.c
+++ b/drivers/regulator/max8660.c
@@ -345,8 +345,8 @@ static struct regulator_desc max8660_reg[] = {
345 }, 345 },
346}; 346};
347 347
348static int max8660_probe(struct i2c_client *client, 348static int __devinit max8660_probe(struct i2c_client *client,
349 const struct i2c_device_id *i2c_id) 349 const struct i2c_device_id *i2c_id)
350{ 350{
351 struct regulator_dev **rdev; 351 struct regulator_dev **rdev;
352 struct max8660_platform_data *pdata = client->dev.platform_data; 352 struct max8660_platform_data *pdata = client->dev.platform_data;
@@ -354,7 +354,7 @@ static int max8660_probe(struct i2c_client *client,
354 int boot_on, i, id, ret = -EINVAL; 354 int boot_on, i, id, ret = -EINVAL;
355 355
356 if (pdata->num_subdevs > MAX8660_V_END) { 356 if (pdata->num_subdevs > MAX8660_V_END) {
357 dev_err(&client->dev, "Too much regulators found!\n"); 357 dev_err(&client->dev, "Too many regulators found!\n");
358 goto out; 358 goto out;
359 } 359 }
360 360
@@ -462,7 +462,7 @@ out:
462 return ret; 462 return ret;
463} 463}
464 464
465static int max8660_remove(struct i2c_client *client) 465static int __devexit max8660_remove(struct i2c_client *client)
466{ 466{
467 struct regulator_dev **rdev = i2c_get_clientdata(client); 467 struct regulator_dev **rdev = i2c_get_clientdata(client);
468 int i; 468 int i;
@@ -485,9 +485,10 @@ MODULE_DEVICE_TABLE(i2c, max8660_id);
485 485
486static struct i2c_driver max8660_driver = { 486static struct i2c_driver max8660_driver = {
487 .probe = max8660_probe, 487 .probe = max8660_probe,
488 .remove = max8660_remove, 488 .remove = __devexit_p(max8660_remove),
489 .driver = { 489 .driver = {
490 .name = "max8660", 490 .name = "max8660",
491 .owner = THIS_MODULE,
491 }, 492 },
492 .id_table = max8660_id, 493 .id_table = max8660_id,
493}; 494};
diff --git a/drivers/regulator/mc13783-regulator.c b/drivers/regulator/mc13783-regulator.c
index 39c495300045..f7b81845a196 100644
--- a/drivers/regulator/mc13783-regulator.c
+++ b/drivers/regulator/mc13783-regulator.c
@@ -2,6 +2,7 @@
2 * Regulator Driver for Freescale MC13783 PMIC 2 * Regulator Driver for Freescale MC13783 PMIC
3 * 3 *
4 * Copyright (C) 2008 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de> 4 * Copyright (C) 2008 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
5 * Copyright 2009 Alberto Panizzo <maramaopercheseimorto@gmail.com>
5 * 6 *
6 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as 8 * it under the terms of the GNU General Public License version 2 as
@@ -16,11 +17,44 @@
16#include <linux/init.h> 17#include <linux/init.h>
17#include <linux/err.h> 18#include <linux/err.h>
18 19
19#define MC13783_REG_SWITCHERS4 28
20#define MC13783_REG_SWITCHERS4_PLLEN (1 << 18)
21
22#define MC13783_REG_SWITCHERS5 29 20#define MC13783_REG_SWITCHERS5 29
23#define MC13783_REG_SWITCHERS5_SW3EN (1 << 20) 21#define MC13783_REG_SWITCHERS5_SW3EN (1 << 20)
22#define MC13783_REG_SWITCHERS5_SW3VSEL 18
23#define MC13783_REG_SWITCHERS5_SW3VSEL_M (3 << 18)
24
25#define MC13783_REG_REGULATORSETTING0 30
26#define MC13783_REG_REGULATORSETTING0_VIOLOVSEL 2
27#define MC13783_REG_REGULATORSETTING0_VDIGVSEL 4
28#define MC13783_REG_REGULATORSETTING0_VGENVSEL 6
29#define MC13783_REG_REGULATORSETTING0_VRFDIGVSEL 9
30#define MC13783_REG_REGULATORSETTING0_VRFREFVSEL 11
31#define MC13783_REG_REGULATORSETTING0_VRFCPVSEL 13
32#define MC13783_REG_REGULATORSETTING0_VSIMVSEL 14
33#define MC13783_REG_REGULATORSETTING0_VESIMVSEL 15
34#define MC13783_REG_REGULATORSETTING0_VCAMVSEL 16
35
36#define MC13783_REG_REGULATORSETTING0_VIOLOVSEL_M (3 << 2)
37#define MC13783_REG_REGULATORSETTING0_VDIGVSEL_M (3 << 4)
38#define MC13783_REG_REGULATORSETTING0_VGENVSEL_M (7 << 6)
39#define MC13783_REG_REGULATORSETTING0_VRFDIGVSEL_M (3 << 9)
40#define MC13783_REG_REGULATORSETTING0_VRFREFVSEL_M (3 << 11)
41#define MC13783_REG_REGULATORSETTING0_VRFCPVSEL_M (1 << 13)
42#define MC13783_REG_REGULATORSETTING0_VSIMVSEL_M (1 << 14)
43#define MC13783_REG_REGULATORSETTING0_VESIMVSEL_M (1 << 15)
44#define MC13783_REG_REGULATORSETTING0_VCAMVSEL_M (7 << 16)
45
46#define MC13783_REG_REGULATORSETTING1 31
47#define MC13783_REG_REGULATORSETTING1_VVIBVSEL 0
48#define MC13783_REG_REGULATORSETTING1_VRF1VSEL 2
49#define MC13783_REG_REGULATORSETTING1_VRF2VSEL 4
50#define MC13783_REG_REGULATORSETTING1_VMMC1VSEL 6
51#define MC13783_REG_REGULATORSETTING1_VMMC2VSEL 9
52
53#define MC13783_REG_REGULATORSETTING1_VVIBVSEL_M (3 << 0)
54#define MC13783_REG_REGULATORSETTING1_VRF1VSEL_M (3 << 2)
55#define MC13783_REG_REGULATORSETTING1_VRF2VSEL_M (3 << 4)
56#define MC13783_REG_REGULATORSETTING1_VMMC1VSEL_M (7 << 6)
57#define MC13783_REG_REGULATORSETTING1_VMMC2VSEL_M (7 << 9)
24 58
25#define MC13783_REG_REGULATORMODE0 32 59#define MC13783_REG_REGULATORMODE0 32
26#define MC13783_REG_REGULATORMODE0_VAUDIOEN (1 << 0) 60#define MC13783_REG_REGULATORMODE0_VAUDIOEN (1 << 0)
@@ -48,19 +82,107 @@
48#define MC13783_REG_POWERMISC_GPO2EN (1 << 8) 82#define MC13783_REG_POWERMISC_GPO2EN (1 << 8)
49#define MC13783_REG_POWERMISC_GPO3EN (1 << 10) 83#define MC13783_REG_POWERMISC_GPO3EN (1 << 10)
50#define MC13783_REG_POWERMISC_GPO4EN (1 << 12) 84#define MC13783_REG_POWERMISC_GPO4EN (1 << 12)
85#define MC13783_REG_POWERMISC_PWGT1SPIEN (1 << 15)
86#define MC13783_REG_POWERMISC_PWGT2SPIEN (1 << 16)
87
88#define MC13783_REG_POWERMISC_PWGTSPI_M (3 << 15)
89
51 90
52struct mc13783_regulator { 91struct mc13783_regulator {
53 struct regulator_desc desc; 92 struct regulator_desc desc;
54 int reg; 93 int reg;
55 int enable_bit; 94 int enable_bit;
95 int vsel_reg;
96 int vsel_shift;
97 int vsel_mask;
98 int const *voltages;
99};
100
101/* Voltage Values */
102static const int mc13783_sw3_val[] = {
103 5000000, 5000000, 5000000, 5500000,
104};
105
106static const int mc13783_vaudio_val[] = {
107 2775000,
108};
109
110static const int mc13783_viohi_val[] = {
111 2775000,
112};
113
114static const int mc13783_violo_val[] = {
115 1200000, 1300000, 1500000, 1800000,
116};
117
118static const int mc13783_vdig_val[] = {
119 1200000, 1300000, 1500000, 1800000,
120};
121
122static const int mc13783_vgen_val[] = {
123 1200000, 1300000, 1500000, 1800000,
124 1100000, 2000000, 2775000, 2400000,
125};
126
127static const int mc13783_vrfdig_val[] = {
128 1200000, 1500000, 1800000, 1875000,
129};
130
131static const int mc13783_vrfref_val[] = {
132 2475000, 2600000, 2700000, 2775000,
133};
134
135static const int mc13783_vrfcp_val[] = {
136 2700000, 2775000,
137};
138
139static const int mc13783_vsim_val[] = {
140 1800000, 2900000, 3000000,
141};
142
143static const int mc13783_vesim_val[] = {
144 1800000, 2900000,
145};
146
147static const int mc13783_vcam_val[] = {
148 1500000, 1800000, 2500000, 2550000,
149 2600000, 2750000, 2800000, 3000000,
150};
151
152static const int mc13783_vrfbg_val[] = {
153 1250000,
154};
155
156static const int mc13783_vvib_val[] = {
157 1300000, 1800000, 2000000, 3000000,
158};
159
160static const int mc13783_vmmc_val[] = {
161 1600000, 1800000, 2000000, 2600000,
162 2700000, 2800000, 2900000, 3000000,
163};
164
165static const int mc13783_vrf_val[] = {
166 1500000, 1875000, 2700000, 2775000,
167};
168
169static const int mc13783_gpo_val[] = {
170 3100000,
171};
172
173static const int mc13783_pwgtdrv_val[] = {
174 5500000,
56}; 175};
57 176
58static struct regulator_ops mc13783_regulator_ops; 177static struct regulator_ops mc13783_regulator_ops;
178static struct regulator_ops mc13783_fixed_regulator_ops;
179static struct regulator_ops mc13783_gpo_regulator_ops;
59 180
60#define MC13783_DEFINE(prefix, _name, _reg) \ 181#define MC13783_DEFINE(prefix, _name, _reg, _vsel_reg, _voltages) \
61 [MC13783_ ## prefix ## _ ## _name] = { \ 182 [MC13783_ ## prefix ## _ ## _name] = { \
62 .desc = { \ 183 .desc = { \
63 .name = #prefix "_" #_name, \ 184 .name = #prefix "_" #_name, \
185 .n_voltages = ARRAY_SIZE(_voltages), \
64 .ops = &mc13783_regulator_ops, \ 186 .ops = &mc13783_regulator_ops, \
65 .type = REGULATOR_VOLTAGE, \ 187 .type = REGULATOR_VOLTAGE, \
66 .id = MC13783_ ## prefix ## _ ## _name, \ 188 .id = MC13783_ ## prefix ## _ ## _name, \
@@ -68,40 +190,92 @@ static struct regulator_ops mc13783_regulator_ops;
68 }, \ 190 }, \
69 .reg = MC13783_REG_ ## _reg, \ 191 .reg = MC13783_REG_ ## _reg, \
70 .enable_bit = MC13783_REG_ ## _reg ## _ ## _name ## EN, \ 192 .enable_bit = MC13783_REG_ ## _reg ## _ ## _name ## EN, \
193 .vsel_reg = MC13783_REG_ ## _vsel_reg, \
194 .vsel_shift = MC13783_REG_ ## _vsel_reg ## _ ## _name ## VSEL,\
195 .vsel_mask = MC13783_REG_ ## _vsel_reg ## _ ## _name ## VSEL_M,\
196 .voltages = _voltages, \
197 }
198
199#define MC13783_FIXED_DEFINE(prefix, _name, _reg, _voltages) \
200 [MC13783_ ## prefix ## _ ## _name] = { \
201 .desc = { \
202 .name = #prefix "_" #_name, \
203 .n_voltages = ARRAY_SIZE(_voltages), \
204 .ops = &mc13783_fixed_regulator_ops, \
205 .type = REGULATOR_VOLTAGE, \
206 .id = MC13783_ ## prefix ## _ ## _name, \
207 .owner = THIS_MODULE, \
208 }, \
209 .reg = MC13783_REG_ ## _reg, \
210 .enable_bit = MC13783_REG_ ## _reg ## _ ## _name ## EN, \
211 .voltages = _voltages, \
71 } 212 }
72 213
73#define MC13783_DEFINE_SW(_name, _reg) MC13783_DEFINE(SW, _name, _reg) 214#define MC13783_GPO_DEFINE(prefix, _name, _reg, _voltages) \
74#define MC13783_DEFINE_REGU(_name, _reg) MC13783_DEFINE(REGU, _name, _reg) 215 [MC13783_ ## prefix ## _ ## _name] = { \
216 .desc = { \
217 .name = #prefix "_" #_name, \
218 .n_voltages = ARRAY_SIZE(_voltages), \
219 .ops = &mc13783_gpo_regulator_ops, \
220 .type = REGULATOR_VOLTAGE, \
221 .id = MC13783_ ## prefix ## _ ## _name, \
222 .owner = THIS_MODULE, \
223 }, \
224 .reg = MC13783_REG_ ## _reg, \
225 .enable_bit = MC13783_REG_ ## _reg ## _ ## _name ## EN, \
226 .voltages = _voltages, \
227 }
228
229#define MC13783_DEFINE_SW(_name, _reg, _vsel_reg, _voltages) \
230 MC13783_DEFINE(SW, _name, _reg, _vsel_reg, _voltages)
231#define MC13783_DEFINE_REGU(_name, _reg, _vsel_reg, _voltages) \
232 MC13783_DEFINE(REGU, _name, _reg, _vsel_reg, _voltages)
75 233
76static struct mc13783_regulator mc13783_regulators[] = { 234static struct mc13783_regulator mc13783_regulators[] = {
77 MC13783_DEFINE_SW(SW3, SWITCHERS5), 235 MC13783_DEFINE_SW(SW3, SWITCHERS5, SWITCHERS5, mc13783_sw3_val),
78 MC13783_DEFINE_SW(PLL, SWITCHERS4), 236
79 237 MC13783_FIXED_DEFINE(REGU, VAUDIO, REGULATORMODE0, mc13783_vaudio_val),
80 MC13783_DEFINE_REGU(VAUDIO, REGULATORMODE0), 238 MC13783_FIXED_DEFINE(REGU, VIOHI, REGULATORMODE0, mc13783_viohi_val),
81 MC13783_DEFINE_REGU(VIOHI, REGULATORMODE0), 239 MC13783_DEFINE_REGU(VIOLO, REGULATORMODE0, REGULATORSETTING0, \
82 MC13783_DEFINE_REGU(VIOLO, REGULATORMODE0), 240 mc13783_violo_val),
83 MC13783_DEFINE_REGU(VDIG, REGULATORMODE0), 241 MC13783_DEFINE_REGU(VDIG, REGULATORMODE0, REGULATORSETTING0, \
84 MC13783_DEFINE_REGU(VGEN, REGULATORMODE0), 242 mc13783_vdig_val),
85 MC13783_DEFINE_REGU(VRFDIG, REGULATORMODE0), 243 MC13783_DEFINE_REGU(VGEN, REGULATORMODE0, REGULATORSETTING0, \
86 MC13783_DEFINE_REGU(VRFREF, REGULATORMODE0), 244 mc13783_vgen_val),
87 MC13783_DEFINE_REGU(VRFCP, REGULATORMODE0), 245 MC13783_DEFINE_REGU(VRFDIG, REGULATORMODE0, REGULATORSETTING0, \
88 MC13783_DEFINE_REGU(VSIM, REGULATORMODE1), 246 mc13783_vrfdig_val),
89 MC13783_DEFINE_REGU(VESIM, REGULATORMODE1), 247 MC13783_DEFINE_REGU(VRFREF, REGULATORMODE0, REGULATORSETTING0, \
90 MC13783_DEFINE_REGU(VCAM, REGULATORMODE1), 248 mc13783_vrfref_val),
91 MC13783_DEFINE_REGU(VRFBG, REGULATORMODE1), 249 MC13783_DEFINE_REGU(VRFCP, REGULATORMODE0, REGULATORSETTING0, \
92 MC13783_DEFINE_REGU(VVIB, REGULATORMODE1), 250 mc13783_vrfcp_val),
93 MC13783_DEFINE_REGU(VRF1, REGULATORMODE1), 251 MC13783_DEFINE_REGU(VSIM, REGULATORMODE1, REGULATORSETTING0, \
94 MC13783_DEFINE_REGU(VRF2, REGULATORMODE1), 252 mc13783_vsim_val),
95 MC13783_DEFINE_REGU(VMMC1, REGULATORMODE1), 253 MC13783_DEFINE_REGU(VESIM, REGULATORMODE1, REGULATORSETTING0, \
96 MC13783_DEFINE_REGU(VMMC2, REGULATORMODE1), 254 mc13783_vesim_val),
97 MC13783_DEFINE_REGU(GPO1, POWERMISC), 255 MC13783_DEFINE_REGU(VCAM, REGULATORMODE1, REGULATORSETTING0, \
98 MC13783_DEFINE_REGU(GPO2, POWERMISC), 256 mc13783_vcam_val),
99 MC13783_DEFINE_REGU(GPO3, POWERMISC), 257 MC13783_FIXED_DEFINE(REGU, VRFBG, REGULATORMODE1, mc13783_vrfbg_val),
100 MC13783_DEFINE_REGU(GPO4, POWERMISC), 258 MC13783_DEFINE_REGU(VVIB, REGULATORMODE1, REGULATORSETTING1, \
259 mc13783_vvib_val),
260 MC13783_DEFINE_REGU(VRF1, REGULATORMODE1, REGULATORSETTING1, \
261 mc13783_vrf_val),
262 MC13783_DEFINE_REGU(VRF2, REGULATORMODE1, REGULATORSETTING1, \
263 mc13783_vrf_val),
264 MC13783_DEFINE_REGU(VMMC1, REGULATORMODE1, REGULATORSETTING1, \
265 mc13783_vmmc_val),
266 MC13783_DEFINE_REGU(VMMC2, REGULATORMODE1, REGULATORSETTING1, \
267 mc13783_vmmc_val),
268 MC13783_GPO_DEFINE(REGU, GPO1, POWERMISC, mc13783_gpo_val),
269 MC13783_GPO_DEFINE(REGU, GPO2, POWERMISC, mc13783_gpo_val),
270 MC13783_GPO_DEFINE(REGU, GPO3, POWERMISC, mc13783_gpo_val),
271 MC13783_GPO_DEFINE(REGU, GPO4, POWERMISC, mc13783_gpo_val),
272 MC13783_GPO_DEFINE(REGU, PWGT1SPI, POWERMISC, mc13783_pwgtdrv_val),
273 MC13783_GPO_DEFINE(REGU, PWGT2SPI, POWERMISC, mc13783_pwgtdrv_val),
101}; 274};
102 275
103struct mc13783_regulator_priv { 276struct mc13783_regulator_priv {
104 struct mc13783 *mc13783; 277 struct mc13783 *mc13783;
278 u32 powermisc_pwgt_state;
105 struct regulator_dev *regulators[]; 279 struct regulator_dev *regulators[];
106}; 280};
107 281
@@ -154,10 +328,241 @@ static int mc13783_regulator_is_enabled(struct regulator_dev *rdev)
154 return (val & mc13783_regulators[id].enable_bit) != 0; 328 return (val & mc13783_regulators[id].enable_bit) != 0;
155} 329}
156 330
331static int mc13783_regulator_list_voltage(struct regulator_dev *rdev,
332 unsigned selector)
333{
334 int id = rdev_get_id(rdev);
335
336 if (selector >= mc13783_regulators[id].desc.n_voltages)
337 return -EINVAL;
338
339 return mc13783_regulators[id].voltages[selector];
340}
341
342static int mc13783_get_best_voltage_index(struct regulator_dev *rdev,
343 int min_uV, int max_uV)
344{
345 int reg_id = rdev_get_id(rdev);
346 int i;
347 int bestmatch;
348 int bestindex;
349
350 /*
351 * Locate the minimum voltage fitting the criteria on
352 * this regulator. The switchable voltages are not
353 * in strict falling order so we need to check them
354 * all for the best match.
355 */
356 bestmatch = INT_MAX;
357 bestindex = -1;
358 for (i = 0; i < mc13783_regulators[reg_id].desc.n_voltages; i++) {
359 if (mc13783_regulators[reg_id].voltages[i] >= min_uV &&
360 mc13783_regulators[reg_id].voltages[i] < bestmatch) {
361 bestmatch = mc13783_regulators[reg_id].voltages[i];
362 bestindex = i;
363 }
364 }
365
366 if (bestindex < 0 || bestmatch > max_uV) {
367 dev_warn(&rdev->dev, "no possible value for %d<=x<=%d uV\n",
368 min_uV, max_uV);
369 return -EINVAL;
370 }
371 return bestindex;
372}
373
374static int mc13783_regulator_set_voltage(struct regulator_dev *rdev,
375 int min_uV, int max_uV)
376{
377 struct mc13783_regulator_priv *priv = rdev_get_drvdata(rdev);
378 int value, id = rdev_get_id(rdev);
379 int ret;
380
381 dev_dbg(rdev_get_dev(rdev), "%s id: %d min_uV: %d max_uV: %d\n",
382 __func__, id, min_uV, max_uV);
383
384 /* Find the best index */
385 value = mc13783_get_best_voltage_index(rdev, min_uV, max_uV);
386	dev_dbg(rdev_get_dev(rdev), "%s best value: %d\n", __func__, value);
387 if (value < 0)
388 return value;
389
390 mc13783_lock(priv->mc13783);
391 ret = mc13783_reg_rmw(priv->mc13783, mc13783_regulators[id].vsel_reg,
392 mc13783_regulators[id].vsel_mask,
393 value << mc13783_regulators[id].vsel_shift);
394 mc13783_unlock(priv->mc13783);
395
396 return ret;
397}
398
399static int mc13783_regulator_get_voltage(struct regulator_dev *rdev)
400{
401 struct mc13783_regulator_priv *priv = rdev_get_drvdata(rdev);
402 int ret, id = rdev_get_id(rdev);
403 unsigned int val;
404
405 dev_dbg(rdev_get_dev(rdev), "%s id: %d\n", __func__, id);
406
407 mc13783_lock(priv->mc13783);
408 ret = mc13783_reg_read(priv->mc13783,
409 mc13783_regulators[id].vsel_reg, &val);
410 mc13783_unlock(priv->mc13783);
411
412 if (ret)
413 return ret;
414
415 val = (val & mc13783_regulators[id].vsel_mask)
416 >> mc13783_regulators[id].vsel_shift;
417
418 dev_dbg(rdev_get_dev(rdev), "%s id: %d val: %d\n", __func__, id, val);
419
420	BUG_ON(val >= mc13783_regulators[id].desc.n_voltages);
421
422 return mc13783_regulators[id].voltages[val];
423}
424
157static struct regulator_ops mc13783_regulator_ops = { 425static struct regulator_ops mc13783_regulator_ops = {
158 .enable = mc13783_regulator_enable, 426 .enable = mc13783_regulator_enable,
159 .disable = mc13783_regulator_disable, 427 .disable = mc13783_regulator_disable,
160 .is_enabled = mc13783_regulator_is_enabled, 428 .is_enabled = mc13783_regulator_is_enabled,
429 .list_voltage = mc13783_regulator_list_voltage,
430 .set_voltage = mc13783_regulator_set_voltage,
431 .get_voltage = mc13783_regulator_get_voltage,
432};
433
434static int mc13783_fixed_regulator_set_voltage(struct regulator_dev *rdev,
435 int min_uV, int max_uV)
436{
437 int id = rdev_get_id(rdev);
438
439 dev_dbg(rdev_get_dev(rdev), "%s id: %d min_uV: %d max_uV: %d\n",
440 __func__, id, min_uV, max_uV);
441
442	if (min_uV <= mc13783_regulators[id].voltages[0] &&
443	    max_uV >= mc13783_regulators[id].voltages[0])
444 return 0;
445 else
446 return -EINVAL;
447}
448
449static int mc13783_fixed_regulator_get_voltage(struct regulator_dev *rdev)
450{
451 int id = rdev_get_id(rdev);
452
453 dev_dbg(rdev_get_dev(rdev), "%s id: %d\n", __func__, id);
454
455 return mc13783_regulators[id].voltages[0];
456}
457
458static struct regulator_ops mc13783_fixed_regulator_ops = {
459 .enable = mc13783_regulator_enable,
460 .disable = mc13783_regulator_disable,
461 .is_enabled = mc13783_regulator_is_enabled,
462 .list_voltage = mc13783_regulator_list_voltage,
463 .set_voltage = mc13783_fixed_regulator_set_voltage,
464 .get_voltage = mc13783_fixed_regulator_get_voltage,
465};
466
467static int mc13783_powermisc_rmw(struct mc13783_regulator_priv *priv, u32 mask,
468 u32 val)
469{
470 struct mc13783 *mc13783 = priv->mc13783;
471 int ret;
472 u32 valread;
473
474 BUG_ON(val & ~mask);
475
476 ret = mc13783_reg_read(mc13783, MC13783_REG_POWERMISC, &valread);
477 if (ret)
478 return ret;
479
480 /* Update the stored state for Power Gates. */
481 priv->powermisc_pwgt_state =
482 (priv->powermisc_pwgt_state & ~mask) | val;
483 priv->powermisc_pwgt_state &= MC13783_REG_POWERMISC_PWGTSPI_M;
484
485 /* Construct the new register value */
486 valread = (valread & ~mask) | val;
487 /* Overwrite the PWGTxEN with the stored version */
488 valread = (valread & ~MC13783_REG_POWERMISC_PWGTSPI_M) |
489 priv->powermisc_pwgt_state;
490
491 return mc13783_reg_write(mc13783, MC13783_REG_POWERMISC, valread);
492}
493
494static int mc13783_gpo_regulator_enable(struct regulator_dev *rdev)
495{
496 struct mc13783_regulator_priv *priv = rdev_get_drvdata(rdev);
497 int id = rdev_get_id(rdev);
498 int ret;
499 u32 en_val = mc13783_regulators[id].enable_bit;
500
501 dev_dbg(rdev_get_dev(rdev), "%s id: %d\n", __func__, id);
502
503 /* Power Gate enable value is 0 */
504 if (id == MC13783_REGU_PWGT1SPI ||
505 id == MC13783_REGU_PWGT2SPI)
506 en_val = 0;
507
508 mc13783_lock(priv->mc13783);
509 ret = mc13783_powermisc_rmw(priv, mc13783_regulators[id].enable_bit,
510 en_val);
511 mc13783_unlock(priv->mc13783);
512
513 return ret;
514}
515
516static int mc13783_gpo_regulator_disable(struct regulator_dev *rdev)
517{
518 struct mc13783_regulator_priv *priv = rdev_get_drvdata(rdev);
519 int id = rdev_get_id(rdev);
520 int ret;
521 u32 dis_val = 0;
522
523 dev_dbg(rdev_get_dev(rdev), "%s id: %d\n", __func__, id);
524
525 /* Power Gate disable value is 1 */
526 if (id == MC13783_REGU_PWGT1SPI ||
527 id == MC13783_REGU_PWGT2SPI)
528 dis_val = mc13783_regulators[id].enable_bit;
529
530 mc13783_lock(priv->mc13783);
531 ret = mc13783_powermisc_rmw(priv, mc13783_regulators[id].enable_bit,
532 dis_val);
533 mc13783_unlock(priv->mc13783);
534
535 return ret;
536}
537
538static int mc13783_gpo_regulator_is_enabled(struct regulator_dev *rdev)
539{
540 struct mc13783_regulator_priv *priv = rdev_get_drvdata(rdev);
541 int ret, id = rdev_get_id(rdev);
542 unsigned int val;
543
544 mc13783_lock(priv->mc13783);
545 ret = mc13783_reg_read(priv->mc13783, mc13783_regulators[id].reg, &val);
546 mc13783_unlock(priv->mc13783);
547
548 if (ret)
549 return ret;
550
551 /* Power Gates state is stored in powermisc_pwgt_state
552 * where the meaning of bits is negated */
553 val = (val & ~MC13783_REG_POWERMISC_PWGTSPI_M) |
554 (priv->powermisc_pwgt_state ^ MC13783_REG_POWERMISC_PWGTSPI_M);
555
556 return (val & mc13783_regulators[id].enable_bit) != 0;
557}
558
559static struct regulator_ops mc13783_gpo_regulator_ops = {
560 .enable = mc13783_gpo_regulator_enable,
561 .disable = mc13783_gpo_regulator_disable,
562 .is_enabled = mc13783_gpo_regulator_is_enabled,
563 .list_voltage = mc13783_regulator_list_voltage,
564 .set_voltage = mc13783_fixed_regulator_set_voltage,
565 .get_voltage = mc13783_fixed_regulator_get_voltage,
161}; 566};
162 567
163static int __devinit mc13783_regulator_probe(struct platform_device *pdev) 568static int __devinit mc13783_regulator_probe(struct platform_device *pdev)
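
The mc13783_get_best_voltage_index() logic above deserves a note: several of the
voltage tables (VGEN, for instance) are not sorted, so the driver cannot bisect;
it scans every entry for the smallest voltage at or above min_uV and only then
checks the ceiling. A minimal standalone sketch of the same selection — the
function name and user-space scaffolding here are illustrative, not part of the
patch:

	/*
	 * Sketch of the best-match scan in mc13783_get_best_voltage_index():
	 * pick the lowest entry inside [min_uV, max_uV] from an unsorted table.
	 */
	#include <limits.h>
	#include <stdio.h>

	static int best_voltage_index(const int *table, int n,
				      int min_uV, int max_uV)
	{
		int best = INT_MAX, idx = -1, i;

		for (i = 0; i < n; i++) {
			/* must clear the floor and beat the current best */
			if (table[i] >= min_uV && table[i] < best) {
				best = table[i];
				idx = i;
			}
		}
		/* nothing matched, or the closest match overshoots the ceiling */
		if (idx < 0 || best > max_uV)
			return -1;
		return idx;
	}

	int main(void)
	{
		/* unsorted, like the VGEN table above */
		const int vgen[] = { 1200000, 1300000, 1500000, 1800000,
				     1100000, 2000000, 2775000, 2400000 };

		printf("%d\n", best_voltage_index(vgen, 8, 1250000, 1600000));
		/* prints 1: 1300000 uV is the lowest entry in range */
		return 0;
	}
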
diff --git a/drivers/regulator/pcap-regulator.c b/drivers/regulator/pcap-regulator.c
index 33d7d899e030..29d0566379ae 100644
--- a/drivers/regulator/pcap-regulator.c
+++ b/drivers/regulator/pcap-regulator.c
@@ -288,16 +288,18 @@ static int __devexit pcap_regulator_remove(struct platform_device *pdev)
288 struct regulator_dev *rdev = platform_get_drvdata(pdev); 288 struct regulator_dev *rdev = platform_get_drvdata(pdev);
289 289
290 regulator_unregister(rdev); 290 regulator_unregister(rdev);
291 platform_set_drvdata(pdev, NULL);
291 292
292 return 0; 293 return 0;
293} 294}
294 295
295static struct platform_driver pcap_regulator_driver = { 296static struct platform_driver pcap_regulator_driver = {
296 .driver = { 297 .driver = {
297 .name = "pcap-regulator", 298 .name = "pcap-regulator",
299 .owner = THIS_MODULE,
298 }, 300 },
299 .probe = pcap_regulator_probe, 301 .probe = pcap_regulator_probe,
300 .remove = __devexit_p(pcap_regulator_remove), 302 .remove = __devexit_p(pcap_regulator_remove),
301}; 303};
302 304
303static int __init pcap_regulator_init(void) 305static int __init pcap_regulator_init(void)
diff --git a/drivers/regulator/tps65023-regulator.c b/drivers/regulator/tps65023-regulator.c
index 07fda0a75adf..1f183543bdbd 100644
--- a/drivers/regulator/tps65023-regulator.c
+++ b/drivers/regulator/tps65023-regulator.c
@@ -457,8 +457,8 @@ static struct regulator_ops tps65023_ldo_ops = {
457 .list_voltage = tps65023_ldo_list_voltage, 457 .list_voltage = tps65023_ldo_list_voltage,
458}; 458};
459 459
460static 460static int __devinit tps_65023_probe(struct i2c_client *client,
461int tps_65023_probe(struct i2c_client *client, const struct i2c_device_id *id) 461 const struct i2c_device_id *id)
462{ 462{
463 static int desc_id; 463 static int desc_id;
464 const struct tps_info *info = (void *)id->driver_data; 464 const struct tps_info *info = (void *)id->driver_data;
@@ -466,6 +466,7 @@ int tps_65023_probe(struct i2c_client *client, const struct i2c_device_id *id)
466 struct regulator_dev *rdev; 466 struct regulator_dev *rdev;
467 struct tps_pmic *tps; 467 struct tps_pmic *tps;
468 int i; 468 int i;
469 int error;
469 470
470 if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA)) 471 if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
471 return -EIO; 472 return -EIO;
@@ -475,7 +476,6 @@ int tps_65023_probe(struct i2c_client *client, const struct i2c_device_id *id)
475 * coming from the board-evm file. 476 * coming from the board-evm file.
476 */ 477 */
477 init_data = client->dev.platform_data; 478 init_data = client->dev.platform_data;
478
479 if (!init_data) 479 if (!init_data)
480 return -EIO; 480 return -EIO;
481 481
@@ -502,21 +502,12 @@ int tps_65023_probe(struct i2c_client *client, const struct i2c_device_id *id)
502 502
503 /* Register the regulators */ 503 /* Register the regulators */
504 rdev = regulator_register(&tps->desc[i], &client->dev, 504 rdev = regulator_register(&tps->desc[i], &client->dev,
505 init_data, tps); 505 init_data, tps);
506 if (IS_ERR(rdev)) { 506 if (IS_ERR(rdev)) {
507 dev_err(&client->dev, "failed to register %s\n", 507 dev_err(&client->dev, "failed to register %s\n",
508 id->name); 508 id->name);
509 509 error = PTR_ERR(rdev);
510 /* Unregister */ 510 goto fail;
511 while (i)
512 regulator_unregister(tps->rdev[--i]);
513
514 tps->client = NULL;
515
516 /* clear the client data in i2c */
517 i2c_set_clientdata(client, NULL);
518 kfree(tps);
519 return PTR_ERR(rdev);
520 } 511 }
521 512
522 /* Save regulator for cleanup */ 513 /* Save regulator for cleanup */
@@ -526,6 +517,13 @@ int tps_65023_probe(struct i2c_client *client, const struct i2c_device_id *id)
526 i2c_set_clientdata(client, tps); 517 i2c_set_clientdata(client, tps);
527 518
528 return 0; 519 return 0;
520
521 fail:
522 while (--i >= 0)
523 regulator_unregister(tps->rdev[i]);
524
525 kfree(tps);
526 return error;
529} 527}
530 528
531/** 529/**
@@ -539,13 +537,12 @@ static int __devexit tps_65023_remove(struct i2c_client *client)
539 struct tps_pmic *tps = i2c_get_clientdata(client); 537 struct tps_pmic *tps = i2c_get_clientdata(client);
540 int i; 538 int i;
541 539
540 /* clear the client data in i2c */
541 i2c_set_clientdata(client, NULL);
542
542 for (i = 0; i < TPS65023_NUM_REGULATOR; i++) 543 for (i = 0; i < TPS65023_NUM_REGULATOR; i++)
543 regulator_unregister(tps->rdev[i]); 544 regulator_unregister(tps->rdev[i]);
544 545
545 tps->client = NULL;
546
547 /* clear the client data in i2c */
548 i2c_set_clientdata(client, NULL);
549 kfree(tps); 546 kfree(tps);
550 547
551 return 0; 548 return 0;
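
Both TPS probe conversions (this one and the tps6507x hunks below) replace
open-coded cleanup inside the registration loop with a single unwind label.
The shape of that idiom as a standalone sketch — the resource functions are
stand-ins, not the driver's own:

	#include <stdio.h>

	#define NUM_RES 3

	static int register_one(int i)    { return i == 2 ? -1 : 0; } /* stand-in: #2 fails */
	static void unregister_one(int i) { printf("unregister %d\n", i); }

	static int probe(void)
	{
		int i, error;

		for (i = 0; i < NUM_RES; i++) {
			if (register_one(i)) {
				error = -1;
				goto fail;
			}
		}
		return 0;

	fail:
		/* unwind only what succeeded, in reverse; --i skips the failed slot */
		while (--i >= 0)
			unregister_one(i);
		return error;
	}

	int main(void)
	{
		return probe() ? 1 : 0;	/* prints "unregister 1" then "unregister 0" */
	}
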
diff --git a/drivers/regulator/tps6507x-regulator.c b/drivers/regulator/tps6507x-regulator.c
index f8a6dfbef751..c2a9539acd72 100644
--- a/drivers/regulator/tps6507x-regulator.c
+++ b/drivers/regulator/tps6507x-regulator.c
@@ -538,8 +538,8 @@ static struct regulator_ops tps6507x_ldo_ops = {
538 .list_voltage = tps6507x_ldo_list_voltage, 538 .list_voltage = tps6507x_ldo_list_voltage,
539}; 539};
540 540
541static 541static int __devinit tps_6507x_probe(struct i2c_client *client,
542int tps_6507x_probe(struct i2c_client *client, const struct i2c_device_id *id) 542 const struct i2c_device_id *id)
543{ 543{
544 static int desc_id; 544 static int desc_id;
545 const struct tps_info *info = (void *)id->driver_data; 545 const struct tps_info *info = (void *)id->driver_data;
@@ -547,6 +547,7 @@ int tps_6507x_probe(struct i2c_client *client, const struct i2c_device_id *id)
547 struct regulator_dev *rdev; 547 struct regulator_dev *rdev;
548 struct tps_pmic *tps; 548 struct tps_pmic *tps;
549 int i; 549 int i;
550 int error;
550 551
551 if (!i2c_check_functionality(client->adapter, 552 if (!i2c_check_functionality(client->adapter,
552 I2C_FUNC_SMBUS_BYTE_DATA)) 553 I2C_FUNC_SMBUS_BYTE_DATA))
@@ -557,7 +558,6 @@ int tps_6507x_probe(struct i2c_client *client, const struct i2c_device_id *id)
557 * coming from the board-evm file. 558 * coming from the board-evm file.
558 */ 559 */
559 init_data = client->dev.platform_data; 560 init_data = client->dev.platform_data;
560
561 if (!init_data) 561 if (!init_data)
562 return -EIO; 562 return -EIO;
563 563
@@ -586,18 +586,8 @@ int tps_6507x_probe(struct i2c_client *client, const struct i2c_device_id *id)
586 if (IS_ERR(rdev)) { 586 if (IS_ERR(rdev)) {
587 dev_err(&client->dev, "failed to register %s\n", 587 dev_err(&client->dev, "failed to register %s\n",
588 id->name); 588 id->name);
589 589 error = PTR_ERR(rdev);
590 /* Unregister */ 590 goto fail;
591 while (i)
592 regulator_unregister(tps->rdev[--i]);
593
594 tps->client = NULL;
595
596 /* clear the client data in i2c */
597 i2c_set_clientdata(client, NULL);
598
599 kfree(tps);
600 return PTR_ERR(rdev);
601 } 591 }
602 592
603 /* Save regulator for cleanup */ 593 /* Save regulator for cleanup */
@@ -607,6 +597,13 @@ int tps_6507x_probe(struct i2c_client *client, const struct i2c_device_id *id)
607 i2c_set_clientdata(client, tps); 597 i2c_set_clientdata(client, tps);
608 598
609 return 0; 599 return 0;
600
601fail:
602 while (--i >= 0)
603 regulator_unregister(tps->rdev[i]);
604
605 kfree(tps);
606 return error;
610} 607}
611 608
612/** 609/**
@@ -620,13 +617,12 @@ static int __devexit tps_6507x_remove(struct i2c_client *client)
620 struct tps_pmic *tps = i2c_get_clientdata(client); 617 struct tps_pmic *tps = i2c_get_clientdata(client);
621 int i; 618 int i;
622 619
620 /* clear the client data in i2c */
621 i2c_set_clientdata(client, NULL);
622
623 for (i = 0; i < TPS6507X_NUM_REGULATOR; i++) 623 for (i = 0; i < TPS6507X_NUM_REGULATOR; i++)
624 regulator_unregister(tps->rdev[i]); 624 regulator_unregister(tps->rdev[i]);
625 625
626 tps->client = NULL;
627
628 /* clear the client data in i2c */
629 i2c_set_clientdata(client, NULL);
630 kfree(tps); 626 kfree(tps);
631 627
632 return 0; 628 return 0;
diff --git a/drivers/regulator/twl-regulator.c b/drivers/regulator/twl-regulator.c
index 7e674859bd59..9729d760fb4d 100644
--- a/drivers/regulator/twl-regulator.c
+++ b/drivers/regulator/twl-regulator.c
@@ -519,19 +519,19 @@ static struct twlreg_info twl_regs[] = {
519 /* 6030 REG with base as PMC Slave Misc : 0x0030 */ 519 /* 6030 REG with base as PMC Slave Misc : 0x0030 */
520 /* Turnon-delay and remap configuration values for 6030 are not 520 /* Turnon-delay and remap configuration values for 6030 are not
521 verified since the specification is not public */ 521 verified since the specification is not public */
522 TWL6030_ADJUSTABLE_LDO(VAUX1_6030, 0x54, 1, 0, 0x08), 522 TWL6030_ADJUSTABLE_LDO(VAUX1_6030, 0x54, 1, 0, 0x21),
523 TWL6030_ADJUSTABLE_LDO(VAUX2_6030, 0x58, 2, 0, 0x08), 523 TWL6030_ADJUSTABLE_LDO(VAUX2_6030, 0x58, 2, 0, 0x21),
524 TWL6030_ADJUSTABLE_LDO(VAUX3_6030, 0x5c, 3, 0, 0x08), 524 TWL6030_ADJUSTABLE_LDO(VAUX3_6030, 0x5c, 3, 0, 0x21),
525 TWL6030_ADJUSTABLE_LDO(VMMC, 0x68, 4, 0, 0x08), 525 TWL6030_ADJUSTABLE_LDO(VMMC, 0x68, 4, 0, 0x21),
526 TWL6030_ADJUSTABLE_LDO(VPP, 0x6c, 5, 0, 0x08), 526 TWL6030_ADJUSTABLE_LDO(VPP, 0x6c, 5, 0, 0x21),
527 TWL6030_ADJUSTABLE_LDO(VUSIM, 0x74, 7, 0, 0x08), 527 TWL6030_ADJUSTABLE_LDO(VUSIM, 0x74, 7, 0, 0x21),
528 TWL6030_FIXED_LDO(VANA, 0x50, 2100, 15, 0, 0x08), 528 TWL6030_FIXED_LDO(VANA, 0x50, 2100, 15, 0, 0x21),
529 TWL6030_FIXED_LDO(VCXIO, 0x60, 1800, 16, 0, 0x08), 529 TWL6030_FIXED_LDO(VCXIO, 0x60, 1800, 16, 0, 0x21),
530 TWL6030_FIXED_LDO(VDAC, 0x64, 1800, 17, 0, 0x08), 530 TWL6030_FIXED_LDO(VDAC, 0x64, 1800, 17, 0, 0x21),
531 TWL6030_FIXED_LDO(VUSB, 0x70, 3300, 18, 0, 0x08) 531 TWL6030_FIXED_LDO(VUSB, 0x70, 3300, 18, 0, 0x21)
532}; 532};
533 533
534static int twlreg_probe(struct platform_device *pdev) 534static int __devinit twlreg_probe(struct platform_device *pdev)
535{ 535{
536 int i; 536 int i;
537 struct twlreg_info *info; 537 struct twlreg_info *info;
diff --git a/drivers/regulator/virtual.c b/drivers/regulator/virtual.c
index addc032c84bf..d96cecaac73d 100644
--- a/drivers/regulator/virtual.c
+++ b/drivers/regulator/virtual.c
@@ -19,7 +19,7 @@
19struct virtual_consumer_data { 19struct virtual_consumer_data {
20 struct mutex lock; 20 struct mutex lock;
21 struct regulator *regulator; 21 struct regulator *regulator;
22 int enabled; 22 bool enabled;
23 int min_uV; 23 int min_uV;
24 int max_uV; 24 int max_uV;
25 int min_uA; 25 int min_uA;
@@ -49,7 +49,7 @@ static void update_voltage_constraints(struct device *dev,
49 dev_dbg(dev, "Enabling regulator\n"); 49 dev_dbg(dev, "Enabling regulator\n");
50 ret = regulator_enable(data->regulator); 50 ret = regulator_enable(data->regulator);
51 if (ret == 0) 51 if (ret == 0)
52 data->enabled = 1; 52 data->enabled = true;
53 else 53 else
54 dev_err(dev, "regulator_enable() failed: %d\n", 54 dev_err(dev, "regulator_enable() failed: %d\n",
55 ret); 55 ret);
@@ -59,7 +59,7 @@ static void update_voltage_constraints(struct device *dev,
59 dev_dbg(dev, "Disabling regulator\n"); 59 dev_dbg(dev, "Disabling regulator\n");
60 ret = regulator_disable(data->regulator); 60 ret = regulator_disable(data->regulator);
61 if (ret == 0) 61 if (ret == 0)
62 data->enabled = 0; 62 data->enabled = false;
63 else 63 else
64 dev_err(dev, "regulator_disable() failed: %d\n", 64 dev_err(dev, "regulator_disable() failed: %d\n",
65 ret); 65 ret);
@@ -89,7 +89,7 @@ static void update_current_limit_constraints(struct device *dev,
89 dev_dbg(dev, "Enabling regulator\n"); 89 dev_dbg(dev, "Enabling regulator\n");
90 ret = regulator_enable(data->regulator); 90 ret = regulator_enable(data->regulator);
91 if (ret == 0) 91 if (ret == 0)
92 data->enabled = 1; 92 data->enabled = true;
93 else 93 else
94 dev_err(dev, "regulator_enable() failed: %d\n", 94 dev_err(dev, "regulator_enable() failed: %d\n",
95 ret); 95 ret);
@@ -99,7 +99,7 @@ static void update_current_limit_constraints(struct device *dev,
99 dev_dbg(dev, "Disabling regulator\n"); 99 dev_dbg(dev, "Disabling regulator\n");
100 ret = regulator_disable(data->regulator); 100 ret = regulator_disable(data->regulator);
101 if (ret == 0) 101 if (ret == 0)
102 data->enabled = 0; 102 data->enabled = false;
103 else 103 else
104 dev_err(dev, "regulator_disable() failed: %d\n", 104 dev_err(dev, "regulator_disable() failed: %d\n",
105 ret); 105 ret);
@@ -270,24 +270,28 @@ static DEVICE_ATTR(min_microamps, 0666, show_min_uA, set_min_uA);
270static DEVICE_ATTR(max_microamps, 0666, show_max_uA, set_max_uA); 270static DEVICE_ATTR(max_microamps, 0666, show_max_uA, set_max_uA);
271static DEVICE_ATTR(mode, 0666, show_mode, set_mode); 271static DEVICE_ATTR(mode, 0666, show_mode, set_mode);
272 272
273static struct device_attribute *attributes[] = { 273static struct attribute *regulator_virtual_attributes[] = {
274 &dev_attr_min_microvolts, 274 &dev_attr_min_microvolts.attr,
275 &dev_attr_max_microvolts, 275 &dev_attr_max_microvolts.attr,
276 &dev_attr_min_microamps, 276 &dev_attr_min_microamps.attr,
277 &dev_attr_max_microamps, 277 &dev_attr_max_microamps.attr,
278 &dev_attr_mode, 278 &dev_attr_mode.attr,
279 NULL
279}; 280};
280 281
281static int regulator_virtual_consumer_probe(struct platform_device *pdev) 282static const struct attribute_group regulator_virtual_attr_group = {
283 .attrs = regulator_virtual_attributes,
284};
285
286static int __devinit regulator_virtual_probe(struct platform_device *pdev)
282{ 287{
283 char *reg_id = pdev->dev.platform_data; 288 char *reg_id = pdev->dev.platform_data;
284 struct virtual_consumer_data *drvdata; 289 struct virtual_consumer_data *drvdata;
285 int ret, i; 290 int ret;
286 291
287 drvdata = kzalloc(sizeof(struct virtual_consumer_data), GFP_KERNEL); 292 drvdata = kzalloc(sizeof(struct virtual_consumer_data), GFP_KERNEL);
288 if (drvdata == NULL) { 293 if (drvdata == NULL)
289 return -ENOMEM; 294 return -ENOMEM;
290 }
291 295
292 mutex_init(&drvdata->lock); 296 mutex_init(&drvdata->lock);
293 297
@@ -299,13 +303,12 @@ static int regulator_virtual_consumer_probe(struct platform_device *pdev)
299 goto err; 303 goto err;
300 } 304 }
301 305
302 for (i = 0; i < ARRAY_SIZE(attributes); i++) { 306 ret = sysfs_create_group(&pdev->dev.kobj,
303 ret = device_create_file(&pdev->dev, attributes[i]); 307 &regulator_virtual_attr_group);
304 if (ret != 0) { 308 if (ret != 0) {
305 dev_err(&pdev->dev, "Failed to create attr %d: %d\n", 309 dev_err(&pdev->dev,
306 i, ret); 310 "Failed to create attribute group: %d\n", ret);
307 goto err_regulator; 311 goto err_regulator;
308 }
309 } 312 }
310 313
311 drvdata->mode = regulator_get_mode(drvdata->regulator); 314 drvdata->mode = regulator_get_mode(drvdata->regulator);
@@ -317,37 +320,36 @@ static int regulator_virtual_consumer_probe(struct platform_device *pdev)
317err_regulator: 320err_regulator:
318 regulator_put(drvdata->regulator); 321 regulator_put(drvdata->regulator);
319err: 322err:
320 for (i = 0; i < ARRAY_SIZE(attributes); i++)
321 device_remove_file(&pdev->dev, attributes[i]);
322 kfree(drvdata); 323 kfree(drvdata);
323 return ret; 324 return ret;
324} 325}
325 326
326static int regulator_virtual_consumer_remove(struct platform_device *pdev) 327static int __devexit regulator_virtual_remove(struct platform_device *pdev)
327{ 328{
328 struct virtual_consumer_data *drvdata = platform_get_drvdata(pdev); 329 struct virtual_consumer_data *drvdata = platform_get_drvdata(pdev);
329 int i;
330 330
331 for (i = 0; i < ARRAY_SIZE(attributes); i++) 331 sysfs_remove_group(&pdev->dev.kobj, &regulator_virtual_attr_group);
332 device_remove_file(&pdev->dev, attributes[i]); 332
333 if (drvdata->enabled) 333 if (drvdata->enabled)
334 regulator_disable(drvdata->regulator); 334 regulator_disable(drvdata->regulator);
335 regulator_put(drvdata->regulator); 335 regulator_put(drvdata->regulator);
336 336
337 kfree(drvdata); 337 kfree(drvdata);
338 338
339 platform_set_drvdata(pdev, NULL);
340
339 return 0; 341 return 0;
340} 342}
341 343
342static struct platform_driver regulator_virtual_consumer_driver = { 344static struct platform_driver regulator_virtual_consumer_driver = {
343 .probe = regulator_virtual_consumer_probe, 345 .probe = regulator_virtual_probe,
344 .remove = regulator_virtual_consumer_remove, 346 .remove = __devexit_p(regulator_virtual_remove),
345 .driver = { 347 .driver = {
346 .name = "reg-virt-consumer", 348 .name = "reg-virt-consumer",
349 .owner = THIS_MODULE,
347 }, 350 },
348}; 351};
349 352
350
351static int __init regulator_virtual_consumer_init(void) 353static int __init regulator_virtual_consumer_init(void)
352{ 354{
353 return platform_driver_register(&regulator_virtual_consumer_driver); 355 return platform_driver_register(&regulator_virtual_consumer_driver);
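
The virtual.c change above swaps N device_create_file() calls, each needing its
own partial-cleanup handling, for one attribute group. A kernel-style sketch of
the pattern — this fragment assumes the standard sysfs API and an existing
show_example() handler, and is illustrative rather than buildable on its own:

	static DEVICE_ATTR(example, 0444, show_example, NULL); /* show_example assumed */

	static struct attribute *example_attributes[] = {
		&dev_attr_example.attr,
		NULL				/* sysfs requires the NULL terminator */
	};

	static const struct attribute_group example_attr_group = {
		.attrs = example_attributes,
	};

	/* probe:  ret = sysfs_create_group(&dev->kobj, &example_attr_group);
	 * remove: sysfs_remove_group(&dev->kobj, &example_attr_group);
	 * Group creation is all-or-nothing, so no partial-cleanup loop is needed.
	 */
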
diff --git a/drivers/regulator/wm831x-dcdc.c b/drivers/regulator/wm831x-dcdc.c
index 0a6577577e8d..6e18e56d850b 100644
--- a/drivers/regulator/wm831x-dcdc.c
+++ b/drivers/regulator/wm831x-dcdc.c
@@ -600,6 +600,8 @@ static __devexit int wm831x_buckv_remove(struct platform_device *pdev)
600 struct wm831x_dcdc *dcdc = platform_get_drvdata(pdev); 600 struct wm831x_dcdc *dcdc = platform_get_drvdata(pdev);
601 struct wm831x *wm831x = dcdc->wm831x; 601 struct wm831x *wm831x = dcdc->wm831x;
602 602
603 platform_set_drvdata(pdev, NULL);
604
603 wm831x_free_irq(wm831x, platform_get_irq_byname(pdev, "HC"), dcdc); 605 wm831x_free_irq(wm831x, platform_get_irq_byname(pdev, "HC"), dcdc);
604 wm831x_free_irq(wm831x, platform_get_irq_byname(pdev, "UV"), dcdc); 606 wm831x_free_irq(wm831x, platform_get_irq_byname(pdev, "UV"), dcdc);
605 regulator_unregister(dcdc->regulator); 607 regulator_unregister(dcdc->regulator);
@@ -615,6 +617,7 @@ static struct platform_driver wm831x_buckv_driver = {
615 .remove = __devexit_p(wm831x_buckv_remove), 617 .remove = __devexit_p(wm831x_buckv_remove),
616 .driver = { 618 .driver = {
617 .name = "wm831x-buckv", 619 .name = "wm831x-buckv",
620 .owner = THIS_MODULE,
618 }, 621 },
619}; 622};
620 623
@@ -769,6 +772,8 @@ static __devexit int wm831x_buckp_remove(struct platform_device *pdev)
769 struct wm831x_dcdc *dcdc = platform_get_drvdata(pdev); 772 struct wm831x_dcdc *dcdc = platform_get_drvdata(pdev);
770 struct wm831x *wm831x = dcdc->wm831x; 773 struct wm831x *wm831x = dcdc->wm831x;
771 774
775 platform_set_drvdata(pdev, NULL);
776
772 wm831x_free_irq(wm831x, platform_get_irq_byname(pdev, "UV"), dcdc); 777 wm831x_free_irq(wm831x, platform_get_irq_byname(pdev, "UV"), dcdc);
773 regulator_unregister(dcdc->regulator); 778 regulator_unregister(dcdc->regulator);
774 kfree(dcdc); 779 kfree(dcdc);
@@ -781,6 +786,7 @@ static struct platform_driver wm831x_buckp_driver = {
781 .remove = __devexit_p(wm831x_buckp_remove), 786 .remove = __devexit_p(wm831x_buckp_remove),
782 .driver = { 787 .driver = {
783 .name = "wm831x-buckp", 788 .name = "wm831x-buckp",
789 .owner = THIS_MODULE,
784 }, 790 },
785}; 791};
786 792
@@ -895,6 +901,8 @@ static __devexit int wm831x_boostp_remove(struct platform_device *pdev)
895 struct wm831x_dcdc *dcdc = platform_get_drvdata(pdev); 901 struct wm831x_dcdc *dcdc = platform_get_drvdata(pdev);
896 struct wm831x *wm831x = dcdc->wm831x; 902 struct wm831x *wm831x = dcdc->wm831x;
897 903
904 platform_set_drvdata(pdev, NULL);
905
898 wm831x_free_irq(wm831x, platform_get_irq_byname(pdev, "UV"), dcdc); 906 wm831x_free_irq(wm831x, platform_get_irq_byname(pdev, "UV"), dcdc);
899 regulator_unregister(dcdc->regulator); 907 regulator_unregister(dcdc->regulator);
900 kfree(dcdc); 908 kfree(dcdc);
@@ -907,6 +915,7 @@ static struct platform_driver wm831x_boostp_driver = {
907 .remove = __devexit_p(wm831x_boostp_remove), 915 .remove = __devexit_p(wm831x_boostp_remove),
908 .driver = { 916 .driver = {
909 .name = "wm831x-boostp", 917 .name = "wm831x-boostp",
918 .owner = THIS_MODULE,
910 }, 919 },
911}; 920};
912 921
@@ -979,6 +988,8 @@ static __devexit int wm831x_epe_remove(struct platform_device *pdev)
979{ 988{
980 struct wm831x_dcdc *dcdc = platform_get_drvdata(pdev); 989 struct wm831x_dcdc *dcdc = platform_get_drvdata(pdev);
981 990
991 platform_set_drvdata(pdev, NULL);
992
982 regulator_unregister(dcdc->regulator); 993 regulator_unregister(dcdc->regulator);
983 kfree(dcdc); 994 kfree(dcdc);
984 995
@@ -990,6 +1001,7 @@ static struct platform_driver wm831x_epe_driver = {
990 .remove = __devexit_p(wm831x_epe_remove), 1001 .remove = __devexit_p(wm831x_epe_remove),
991 .driver = { 1002 .driver = {
992 .name = "wm831x-epe", 1003 .name = "wm831x-epe",
1004 .owner = THIS_MODULE,
993 }, 1005 },
994}; 1006};
995 1007
diff --git a/drivers/regulator/wm831x-isink.c b/drivers/regulator/wm831x-isink.c
index 48857008758c..ca0f6b6c384b 100644
--- a/drivers/regulator/wm831x-isink.c
+++ b/drivers/regulator/wm831x-isink.c
@@ -222,6 +222,8 @@ static __devexit int wm831x_isink_remove(struct platform_device *pdev)
222 struct wm831x_isink *isink = platform_get_drvdata(pdev); 222 struct wm831x_isink *isink = platform_get_drvdata(pdev);
223 struct wm831x *wm831x = isink->wm831x; 223 struct wm831x *wm831x = isink->wm831x;
224 224
225 platform_set_drvdata(pdev, NULL);
226
225 wm831x_free_irq(wm831x, platform_get_irq(pdev, 0), isink); 227 wm831x_free_irq(wm831x, platform_get_irq(pdev, 0), isink);
226 228
227 regulator_unregister(isink->regulator); 229 regulator_unregister(isink->regulator);
@@ -235,6 +237,7 @@ static struct platform_driver wm831x_isink_driver = {
235 .remove = __devexit_p(wm831x_isink_remove), 237 .remove = __devexit_p(wm831x_isink_remove),
236 .driver = { 238 .driver = {
237 .name = "wm831x-isink", 239 .name = "wm831x-isink",
240 .owner = THIS_MODULE,
238 }, 241 },
239}; 242};
240 243
diff --git a/drivers/regulator/wm831x-ldo.c b/drivers/regulator/wm831x-ldo.c
index 61e02ac2fda3..d2406c1519a1 100644
--- a/drivers/regulator/wm831x-ldo.c
+++ b/drivers/regulator/wm831x-ldo.c
@@ -371,6 +371,8 @@ static __devexit int wm831x_gp_ldo_remove(struct platform_device *pdev)
371 struct wm831x_ldo *ldo = platform_get_drvdata(pdev); 371 struct wm831x_ldo *ldo = platform_get_drvdata(pdev);
372 struct wm831x *wm831x = ldo->wm831x; 372 struct wm831x *wm831x = ldo->wm831x;
373 373
374 platform_set_drvdata(pdev, NULL);
375
374 wm831x_free_irq(wm831x, platform_get_irq_byname(pdev, "UV"), ldo); 376 wm831x_free_irq(wm831x, platform_get_irq_byname(pdev, "UV"), ldo);
375 regulator_unregister(ldo->regulator); 377 regulator_unregister(ldo->regulator);
376 kfree(ldo); 378 kfree(ldo);
@@ -383,6 +385,7 @@ static struct platform_driver wm831x_gp_ldo_driver = {
383 .remove = __devexit_p(wm831x_gp_ldo_remove), 385 .remove = __devexit_p(wm831x_gp_ldo_remove),
384 .driver = { 386 .driver = {
385 .name = "wm831x-ldo", 387 .name = "wm831x-ldo",
388 .owner = THIS_MODULE,
386 }, 389 },
387}; 390};
388 391
@@ -640,6 +643,7 @@ static struct platform_driver wm831x_aldo_driver = {
640 .remove = __devexit_p(wm831x_aldo_remove), 643 .remove = __devexit_p(wm831x_aldo_remove),
641 .driver = { 644 .driver = {
642 .name = "wm831x-aldo", 645 .name = "wm831x-aldo",
646 .owner = THIS_MODULE,
643 }, 647 },
644}; 648};
645 649
@@ -811,6 +815,7 @@ static struct platform_driver wm831x_alive_ldo_driver = {
811 .remove = __devexit_p(wm831x_alive_ldo_remove), 815 .remove = __devexit_p(wm831x_alive_ldo_remove),
812 .driver = { 816 .driver = {
813 .name = "wm831x-alive-ldo", 817 .name = "wm831x-alive-ldo",
818 .owner = THIS_MODULE,
814 }, 819 },
815}; 820};
816 821
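
Two recurring one-liners run through the wm831x hunks above: remove() callbacks
now clear the platform drvdata before tearing the device down, and every
platform_driver gains an .owner field. A kernel-style sketch of the resulting
shape — driver and struct names are illustrative, and the fragment is not
compilable outside the kernel tree:

	static int __devexit example_remove(struct platform_device *pdev)
	{
		struct example_priv *priv = platform_get_drvdata(pdev);

		platform_set_drvdata(pdev, NULL); /* no stale pointer after remove */
		kfree(priv);
		return 0;
	}

	static struct platform_driver example_driver = {
		.remove = __devexit_p(example_remove),
		.driver = {
			.name  = "example",
			.owner = THIS_MODULE, /* ties driver-core refs to the module */
		},
	};
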
diff --git a/drivers/regulator/wm8350-regulator.c b/drivers/regulator/wm8350-regulator.c
index e7b89e704af6..94227dd6ba7b 100644
--- a/drivers/regulator/wm8350-regulator.c
+++ b/drivers/regulator/wm8350-regulator.c
@@ -290,6 +290,51 @@ static int wm8350_isink_is_enabled(struct regulator_dev *rdev)
290 return -EINVAL; 290 return -EINVAL;
291} 291}
292 292
293static int wm8350_isink_enable_time(struct regulator_dev *rdev)
294{
295 struct wm8350 *wm8350 = rdev_get_drvdata(rdev);
296 int isink = rdev_get_id(rdev);
297 int reg;
298
299 switch (isink) {
300 case WM8350_ISINK_A:
301 reg = wm8350_reg_read(wm8350, WM8350_CSA_FLASH_CONTROL);
302 break;
303 case WM8350_ISINK_B:
304 reg = wm8350_reg_read(wm8350, WM8350_CSB_FLASH_CONTROL);
305 break;
306 default:
307 return -EINVAL;
308 }
309
310 if (reg & WM8350_CS1_FLASH_MODE) {
311 switch (reg & WM8350_CS1_ON_RAMP_MASK) {
312 case 0:
313 return 0;
314 case 1:
315 return 1950;
316 case 2:
317 return 3910;
318 case 3:
319 return 7800;
320 }
321 } else {
322 switch (reg & WM8350_CS1_ON_RAMP_MASK) {
323 case 0:
324 return 0;
325 case 1:
326 return 250000;
327 case 2:
328 return 500000;
329 case 3:
330 return 1000000;
331 }
332 }
333
334 return -EINVAL;
335}
336
337
293int wm8350_isink_set_flash(struct wm8350 *wm8350, int isink, u16 mode, 338int wm8350_isink_set_flash(struct wm8350 *wm8350, int isink, u16 mode,
294 u16 trigger, u16 duration, u16 on_ramp, u16 off_ramp, 339 u16 trigger, u16 duration, u16 on_ramp, u16 off_ramp,
295 u16 drive) 340 u16 drive)
@@ -1221,6 +1266,7 @@ static struct regulator_ops wm8350_isink_ops = {
1221 .enable = wm8350_isink_enable, 1266 .enable = wm8350_isink_enable,
1222 .disable = wm8350_isink_disable, 1267 .disable = wm8350_isink_disable,
1223 .is_enabled = wm8350_isink_is_enabled, 1268 .is_enabled = wm8350_isink_is_enabled,
1269 .enable_time = wm8350_isink_enable_time,
1224}; 1270};
1225 1271
1226static struct regulator_desc wm8350_reg[NUM_WM8350_REGULATORS] = { 1272static struct regulator_desc wm8350_reg[NUM_WM8350_REGULATORS] = {
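
The new wm8350_isink_enable_time() maps a two-bit on-ramp field to microseconds,
with a much shorter table when the sink is in flash mode. The same mapping as a
standalone table-lookup sketch — the values are copied from the hunk above, the
names are illustrative:

	#include <stdio.h>

	static int isink_enable_time_us(int flash_mode, unsigned int ramp)
	{
		/* values from the WM8350 on-ramp cases above */
		static const int flash_us[4]  = { 0, 1950, 3910, 7800 };
		static const int normal_us[4] = { 0, 250000, 500000, 1000000 };

		if (ramp > 3)
			return -1;
		return flash_mode ? flash_us[ramp] : normal_us[ramp];
	}

	int main(void)
	{
		printf("%d %d\n", isink_enable_time_us(1, 2),
				  isink_enable_time_us(0, 2));
		/* prints: 3910 500000 */
		return 0;
	}
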
diff --git a/drivers/regulator/wm8400-regulator.c b/drivers/regulator/wm8400-regulator.c
index d9a2c988c6e7..924c7eb29ee9 100644
--- a/drivers/regulator/wm8400-regulator.c
+++ b/drivers/regulator/wm8400-regulator.c
@@ -317,14 +317,17 @@ static struct regulator_desc regulators[] = {
317 317
318static int __devinit wm8400_regulator_probe(struct platform_device *pdev) 318static int __devinit wm8400_regulator_probe(struct platform_device *pdev)
319{ 319{
320 struct wm8400 *wm8400 = container_of(pdev, struct wm8400, regulators[pdev->id]);
320 struct regulator_dev *rdev; 321 struct regulator_dev *rdev;
321 322
322 rdev = regulator_register(&regulators[pdev->id], &pdev->dev, 323 rdev = regulator_register(&regulators[pdev->id], &pdev->dev,
323 pdev->dev.platform_data, dev_get_drvdata(&pdev->dev)); 324 pdev->dev.platform_data, wm8400);
324 325
325 if (IS_ERR(rdev)) 326 if (IS_ERR(rdev))
326 return PTR_ERR(rdev); 327 return PTR_ERR(rdev);
327 328
329 platform_set_drvdata(pdev, rdev);
330
328 return 0; 331 return 0;
329} 332}
330 333
@@ -332,6 +335,7 @@ static int __devexit wm8400_regulator_remove(struct platform_device *pdev)
332{ 335{
333 struct regulator_dev *rdev = platform_get_drvdata(pdev); 336 struct regulator_dev *rdev = platform_get_drvdata(pdev);
334 337
338 platform_set_drvdata(pdev, NULL);
335 regulator_unregister(rdev); 339 regulator_unregister(rdev);
336 340
337 return 0; 341 return 0;
@@ -370,7 +374,6 @@ int wm8400_register_regulator(struct device *dev, int reg,
370 wm8400->regulators[reg].id = reg; 374 wm8400->regulators[reg].id = reg;
371 wm8400->regulators[reg].dev.parent = dev; 375 wm8400->regulators[reg].dev.parent = dev;
372 wm8400->regulators[reg].dev.platform_data = initdata; 376 wm8400->regulators[reg].dev.platform_data = initdata;
373 dev_set_drvdata(&wm8400->regulators[reg].dev, wm8400);
374 377
375 return platform_device_register(&wm8400->regulators[reg]); 378 return platform_device_register(&wm8400->regulators[reg]);
376} 379}
diff --git a/drivers/regulator/wm8994-regulator.c b/drivers/regulator/wm8994-regulator.c
new file mode 100644
index 000000000000..95454a4637b7
--- /dev/null
+++ b/drivers/regulator/wm8994-regulator.c
@@ -0,0 +1,307 @@
1/*
2 * wm8994-regulator.c -- Regulator driver for the WM8994
3 *
4 * Copyright 2009 Wolfson Microelectronics PLC.
5 *
6 * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version.
12 */
13
14#include <linux/module.h>
15#include <linux/moduleparam.h>
16#include <linux/init.h>
17#include <linux/bitops.h>
18#include <linux/err.h>
19#include <linux/platform_device.h>
20#include <linux/regulator/driver.h>
21#include <linux/gpio.h>
22
23#include <linux/mfd/wm8994/core.h>
24#include <linux/mfd/wm8994/registers.h>
25#include <linux/mfd/wm8994/pdata.h>
26
27struct wm8994_ldo {
28 int enable;
29 bool is_enabled;
30 struct regulator_dev *regulator;
31 struct wm8994 *wm8994;
32};
33
34#define WM8994_LDO1_MAX_SELECTOR 0x7
35#define WM8994_LDO2_MAX_SELECTOR 0x3
36
37static int wm8994_ldo_enable(struct regulator_dev *rdev)
38{
39 struct wm8994_ldo *ldo = rdev_get_drvdata(rdev);
40
41 /* If we have no soft control assume that the LDO is always enabled. */
42 if (!ldo->enable)
43 return 0;
44
45 gpio_set_value(ldo->enable, 1);
46 ldo->is_enabled = true;
47
48 return 0;
49}
50
51static int wm8994_ldo_disable(struct regulator_dev *rdev)
52{
53 struct wm8994_ldo *ldo = rdev_get_drvdata(rdev);
54
55 /* If we have no soft control assume that the LDO is always enabled. */
56 if (!ldo->enable)
57 return -EINVAL;
58
59 gpio_set_value(ldo->enable, 0);
60 ldo->is_enabled = false;
61
62 return 0;
63}
64
65static int wm8994_ldo_is_enabled(struct regulator_dev *rdev)
66{
67 struct wm8994_ldo *ldo = rdev_get_drvdata(rdev);
68
69 return ldo->is_enabled;
70}
71
72static int wm8994_ldo_enable_time(struct regulator_dev *rdev)
73{
74 /* 3ms is fairly conservative but this shouldn't be too performance
75 * critical; can be tweaked per-system if required. */
76 return 3000;
77}
78
79static int wm8994_ldo1_list_voltage(struct regulator_dev *rdev,
80 unsigned int selector)
81{
82 if (selector > WM8994_LDO1_MAX_SELECTOR)
83 return -EINVAL;
84
85 return (selector * 100000) + 2400000;
86}
87
88static int wm8994_ldo1_get_voltage(struct regulator_dev *rdev)
89{
90 struct wm8994_ldo *ldo = rdev_get_drvdata(rdev);
91 int val;
92
93 val = wm8994_reg_read(ldo->wm8994, WM8994_LDO_1);
94 if (val < 0)
95 return val;
96
97 val = (val & WM8994_LDO1_VSEL_MASK) >> WM8994_LDO1_VSEL_SHIFT;
98
99 return wm8994_ldo1_list_voltage(rdev, val);
100}
101
102static int wm8994_ldo1_set_voltage(struct regulator_dev *rdev,
103 int min_uV, int max_uV)
104{
105 struct wm8994_ldo *ldo = rdev_get_drvdata(rdev);
106 int selector, v;
107
108 selector = (min_uV - 2400000) / 100000;
109 v = wm8994_ldo1_list_voltage(rdev, selector);
110 if (v < 0 || v > max_uV)
111 return -EINVAL;
112
113 selector <<= WM8994_LDO1_VSEL_SHIFT;
114
115 return wm8994_set_bits(ldo->wm8994, WM8994_LDO_1,
116 WM8994_LDO1_VSEL_MASK, selector);
117}
118
119static struct regulator_ops wm8994_ldo1_ops = {
120 .enable = wm8994_ldo_enable,
121 .disable = wm8994_ldo_disable,
122 .is_enabled = wm8994_ldo_is_enabled,
123 .enable_time = wm8994_ldo_enable_time,
124
125 .list_voltage = wm8994_ldo1_list_voltage,
126 .get_voltage = wm8994_ldo1_get_voltage,
127 .set_voltage = wm8994_ldo1_set_voltage,
128};
129
130static int wm8994_ldo2_list_voltage(struct regulator_dev *rdev,
131 unsigned int selector)
132{
133 if (selector > WM8994_LDO2_MAX_SELECTOR)
134 return -EINVAL;
135
136 return (selector * 100000) + 900000;
137}
138
139static int wm8994_ldo2_get_voltage(struct regulator_dev *rdev)
140{
141 struct wm8994_ldo *ldo = rdev_get_drvdata(rdev);
142 int val;
143
144 val = wm8994_reg_read(ldo->wm8994, WM8994_LDO_2);
145 if (val < 0)
146 return val;
147
148 val = (val & WM8994_LDO2_VSEL_MASK) >> WM8994_LDO2_VSEL_SHIFT;
149
150 return wm8994_ldo2_list_voltage(rdev, val);
151}
152
153static int wm8994_ldo2_set_voltage(struct regulator_dev *rdev,
154 int min_uV, int max_uV)
155{
156 struct wm8994_ldo *ldo = rdev_get_drvdata(rdev);
157 int selector, v;
158
159 selector = (min_uV - 900000) / 100000;
160 v = wm8994_ldo2_list_voltage(rdev, selector);
161 if (v < 0 || v > max_uV)
162 return -EINVAL;
163
164 selector <<= WM8994_LDO2_VSEL_SHIFT;
165
166 return wm8994_set_bits(ldo->wm8994, WM8994_LDO_2,
167 WM8994_LDO2_VSEL_MASK, selector);
168}
169
170static struct regulator_ops wm8994_ldo2_ops = {
171 .enable = wm8994_ldo_enable,
172 .disable = wm8994_ldo_disable,
173 .is_enabled = wm8994_ldo_is_enabled,
174 .enable_time = wm8994_ldo_enable_time,
175
176 .list_voltage = wm8994_ldo2_list_voltage,
177 .get_voltage = wm8994_ldo2_get_voltage,
178 .set_voltage = wm8994_ldo2_set_voltage,
179};
180
181static struct regulator_desc wm8994_ldo_desc[] = {
182 {
183 .name = "LDO1",
184 .id = 1,
185 .type = REGULATOR_VOLTAGE,
186 .n_voltages = WM8994_LDO1_MAX_SELECTOR + 1,
187 .ops = &wm8994_ldo1_ops,
188 .owner = THIS_MODULE,
189 },
190 {
191 .name = "LDO2",
192 .id = 2,
193 .type = REGULATOR_VOLTAGE,
194 .n_voltages = WM8994_LDO2_MAX_SELECTOR + 1,
195 .ops = &wm8994_ldo2_ops,
196 .owner = THIS_MODULE,
197 },
198};
199
200static __devinit int wm8994_ldo_probe(struct platform_device *pdev)
201{
202 struct wm8994 *wm8994 = dev_get_drvdata(pdev->dev.parent);
203 struct wm8994_pdata *pdata = wm8994->dev->platform_data;
204 int id = pdev->id % ARRAY_SIZE(pdata->ldo);
205 struct wm8994_ldo *ldo;
206 int ret;
207
208 dev_dbg(&pdev->dev, "Probing LDO%d\n", id + 1);
209
210 if (!pdata)
211 return -ENODEV;
212
213 ldo = kzalloc(sizeof(struct wm8994_ldo), GFP_KERNEL);
214 if (ldo == NULL) {
215 dev_err(&pdev->dev, "Unable to allocate private data\n");
216 return -ENOMEM;
217 }
218
219 ldo->wm8994 = wm8994;
220
221 ldo->is_enabled = true;
222
223 if (pdata->ldo[id].enable && gpio_is_valid(pdata->ldo[id].enable)) {
224 ldo->enable = pdata->ldo[id].enable;
225
226 ret = gpio_request(ldo->enable, "WM8994 LDO enable");
227 if (ret < 0) {
228 dev_err(&pdev->dev, "Failed to get enable GPIO: %d\n",
229 ret);
230 goto err;
231 }
232
233 ret = gpio_direction_output(ldo->enable, ldo->is_enabled);
234 if (ret < 0) {
235 dev_err(&pdev->dev, "Failed to set GPIO up: %d\n",
236 ret);
237 goto err_gpio;
238 }
239 }
240
241 ldo->regulator = regulator_register(&wm8994_ldo_desc[id], &pdev->dev,
242 pdata->ldo[id].init_data, ldo);
243 if (IS_ERR(ldo->regulator)) {
244 ret = PTR_ERR(ldo->regulator);
245 dev_err(wm8994->dev, "Failed to register LDO%d: %d\n",
246 id + 1, ret);
247 goto err_gpio;
248 }
249
250 platform_set_drvdata(pdev, ldo);
251
252 return 0;
253
254err_gpio:
255 if (gpio_is_valid(ldo->enable))
256 gpio_free(ldo->enable);
257err:
258 kfree(ldo);
259 return ret;
260}
261
262static __devexit int wm8994_ldo_remove(struct platform_device *pdev)
263{
264 struct wm8994_ldo *ldo = platform_get_drvdata(pdev);
265
266 platform_set_drvdata(pdev, NULL);
267
268 regulator_unregister(ldo->regulator);
269 if (gpio_is_valid(ldo->enable))
270 gpio_free(ldo->enable);
271 kfree(ldo);
272
273 return 0;
274}
275
276static struct platform_driver wm8994_ldo_driver = {
277 .probe = wm8994_ldo_probe,
278 .remove = __devexit_p(wm8994_ldo_remove),
279 .driver = {
280 .name = "wm8994-ldo",
281 .owner = THIS_MODULE,
282 },
283};
284
285static int __init wm8994_ldo_init(void)
286{
287 int ret;
288
289 ret = platform_driver_register(&wm8994_ldo_driver);
290 if (ret != 0)
291 pr_err("Failed to register Wm8994 GP LDO driver: %d\n", ret);
292
293 return ret;
294}
295subsys_initcall(wm8994_ldo_init);
296
297static void __exit wm8994_ldo_exit(void)
298{
299 platform_driver_unregister(&wm8994_ldo_driver);
300}
301module_exit(wm8994_ldo_exit);
302
303/* Module information */
304MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
305MODULE_DESCRIPTION("WM8994 LDO driver");
306MODULE_LICENSE("GPL");
307MODULE_ALIAS("platform:wm8994-ldo");
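
The new driver's LDO1 voltage map is purely linear: 2.4 V base, 100 mV steps,
eight selectors. A standalone sketch of both directions of that mapping; note
that uv_to_sel() here rounds up so the result never lands below min_uV, whereas
the driver as posted floors min_uV to the step. Macro names are illustrative:

	#include <stdio.h>

	#define BASE_UV	2400000		/* LDO1 values from the driver above */
	#define STEP_UV	 100000
	#define MAX_SEL	      7

	static int sel_to_uv(int sel)
	{
		return (sel < 0 || sel > MAX_SEL) ? -1 : BASE_UV + sel * STEP_UV;
	}

	static int uv_to_sel(int min_uV, int max_uV)
	{
		/* round up so the selected voltage never falls below min_uV */
		int sel = (min_uV - BASE_UV + STEP_UV - 1) / STEP_UV;
		int v;

		if (sel < 0)
			sel = 0;
		v = sel_to_uv(sel);
		return (v < 0 || v > max_uV) ? -1 : sel;
	}

	int main(void)
	{
		printf("%d %d\n", sel_to_uv(3), uv_to_sel(2450000, 2600000));
		/* prints: 2700000 1 */
		return 0;
	}
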
diff --git a/drivers/serial/68328serial.c b/drivers/serial/68328serial.c
index d935b2d04f93..ae0251ef6f4e 100644
--- a/drivers/serial/68328serial.c
+++ b/drivers/serial/68328serial.c
@@ -153,8 +153,6 @@ static int baud_table[] = {
153 0, 50, 75, 110, 134, 150, 200, 300, 600, 1200, 1800, 2400, 4800, 153 0, 50, 75, 110, 134, 150, 200, 300, 600, 1200, 1800, 2400, 4800,
154 9600, 19200, 38400, 57600, 115200, 0 }; 154 9600, 19200, 38400, 57600, 115200, 0 };
155 155
156#define BAUD_TABLE_SIZE (sizeof(baud_table)/sizeof(baud_table[0]))
157
158/* Sets or clears DTR/RTS on the requested line */ 156/* Sets or clears DTR/RTS on the requested line */
159static inline void m68k_rtsdtr(struct m68k_serial *ss, int set) 157static inline void m68k_rtsdtr(struct m68k_serial *ss, int set)
160{ 158{
@@ -1406,10 +1404,10 @@ static void m68328_set_baud(void)
1406 USTCNT = ustcnt & ~USTCNT_TXEN; 1404 USTCNT = ustcnt & ~USTCNT_TXEN;
1407 1405
1408again: 1406again:
1409 for (i = 0; i < sizeof(baud_table) / sizeof(baud_table[0]); i++) 1407 for (i = 0; i < ARRAY_SIZE(baud_table); i++)
1410 if (baud_table[i] == m68328_console_baud) 1408 if (baud_table[i] == m68328_console_baud)
1411 break; 1409 break;
1412 if (i >= sizeof(baud_table) / sizeof(baud_table[0])) { 1410 if (i >= ARRAY_SIZE(baud_table)) {
1413 m68328_console_baud = 9600; 1411 m68328_console_baud = 9600;
1414 goto again; 1412 goto again;
1415 } 1413 }
@@ -1435,7 +1433,7 @@ int m68328_console_setup(struct console *cp, char *arg)
1435 if (arg) 1433 if (arg)
1436 n = simple_strtoul(arg,NULL,0); 1434 n = simple_strtoul(arg,NULL,0);
1437 1435
1438 for (i = 0; i < BAUD_TABLE_SIZE; i++) 1436 for (i = 0; i < ARRAY_SIZE(baud_table); i++)
1439 if (baud_table[i] == n) 1437 if (baud_table[i] == n)
1440 break; 1438 break;
1441	if (i < BAUD_TABLE_SIZE) {	1439	if (i < ARRAY_SIZE(baud_table)) {
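
The 68328serial conversion above is the standard open-coded-sizeof cleanup: with
the BAUD_TABLE_SIZE macro removed, every remaining use must become
ARRAY_SIZE(baud_table). For reference, a simplified form of the macro it
converges on — the kernel's real version adds a compile-time check that the
argument is a true array, not a pointer:

	#include <stdio.h>

	#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

	int main(void)
	{
		static const int baud_table[] = { 9600, 19200, 38400,
						  57600, 115200 };

		printf("%zu\n", ARRAY_SIZE(baud_table));	/* prints 5 */
		return 0;
	}
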
diff --git a/drivers/serial/8250.c b/drivers/serial/8250.c
index a81ff7bc5fa1..7c4ebe6ee18b 100644
--- a/drivers/serial/8250.c
+++ b/drivers/serial/8250.c
@@ -2690,6 +2690,15 @@ static void __init serial8250_isa_init_ports(void)
2690 } 2690 }
2691} 2691}
2692 2692
2693static void
2694serial8250_init_fixed_type_port(struct uart_8250_port *up, unsigned int type)
2695{
2696 up->port.type = type;
2697 up->port.fifosize = uart_config[type].fifo_size;
2698 up->capabilities = uart_config[type].flags;
2699 up->tx_loadsz = uart_config[type].tx_loadsz;
2700}
2701
2693static void __init 2702static void __init
2694serial8250_register_ports(struct uart_driver *drv, struct device *dev) 2703serial8250_register_ports(struct uart_driver *drv, struct device *dev)
2695{ 2704{
@@ -2706,6 +2715,10 @@ serial8250_register_ports(struct uart_driver *drv, struct device *dev)
2706 struct uart_8250_port *up = &serial8250_ports[i]; 2715 struct uart_8250_port *up = &serial8250_ports[i];
2707 2716
2708 up->port.dev = dev; 2717 up->port.dev = dev;
2718
2719 if (up->port.flags & UPF_FIXED_TYPE)
2720 serial8250_init_fixed_type_port(up, up->port.type);
2721
2709 uart_add_one_port(drv, &up->port); 2722 uart_add_one_port(drv, &up->port);
2710 } 2723 }
2711} 2724}
@@ -3118,12 +3131,8 @@ int serial8250_register_port(struct uart_port *port)
3118 if (port->dev) 3131 if (port->dev)
3119 uart->port.dev = port->dev; 3132 uart->port.dev = port->dev;
3120 3133
3121 if (port->flags & UPF_FIXED_TYPE) { 3134 if (port->flags & UPF_FIXED_TYPE)
3122 uart->port.type = port->type; 3135 serial8250_init_fixed_type_port(uart, port->type);
3123 uart->port.fifosize = uart_config[port->type].fifo_size;
3124 uart->capabilities = uart_config[port->type].flags;
3125 uart->tx_loadsz = uart_config[port->type].tx_loadsz;
3126 }
3127 3136
3128 set_io_from_upio(&uart->port); 3137 set_io_from_upio(&uart->port);
3129 /* Possibly override default I/O functions. */ 3138 /* Possibly override default I/O functions. */
diff --git a/drivers/serial/8250_pci.c b/drivers/serial/8250_pci.c
index b28af13c45a1..01c012da4e26 100644
--- a/drivers/serial/8250_pci.c
+++ b/drivers/serial/8250_pci.c
@@ -760,7 +760,8 @@ static int pci_netmos_init(struct pci_dev *dev)
760 /* subdevice 0x00PS means <P> parallel, <S> serial */ 760 /* subdevice 0x00PS means <P> parallel, <S> serial */
761 unsigned int num_serial = dev->subsystem_device & 0xf; 761 unsigned int num_serial = dev->subsystem_device & 0xf;
762 762
763 if (dev->device == PCI_DEVICE_ID_NETMOS_9901) 763 if ((dev->device == PCI_DEVICE_ID_NETMOS_9901) ||
764 (dev->device == PCI_DEVICE_ID_NETMOS_9865))
764 return 0; 765 return 0;
765 if (dev->subsystem_vendor == PCI_VENDOR_ID_IBM && 766 if (dev->subsystem_vendor == PCI_VENDOR_ID_IBM &&
766 dev->subsystem_device == 0x0299) 767 dev->subsystem_device == 0x0299)
@@ -1479,6 +1480,7 @@ enum pci_board_num_t {
1479 1480
1480 pbn_b0_bt_1_115200, 1481 pbn_b0_bt_1_115200,
1481 pbn_b0_bt_2_115200, 1482 pbn_b0_bt_2_115200,
1483 pbn_b0_bt_4_115200,
1482 pbn_b0_bt_8_115200, 1484 pbn_b0_bt_8_115200,
1483 1485
1484 pbn_b0_bt_1_460800, 1486 pbn_b0_bt_1_460800,
@@ -1703,6 +1705,12 @@ static struct pciserial_board pci_boards[] __devinitdata = {
1703 .base_baud = 115200, 1705 .base_baud = 115200,
1704 .uart_offset = 8, 1706 .uart_offset = 8,
1705 }, 1707 },
1708 [pbn_b0_bt_4_115200] = {
1709 .flags = FL_BASE0|FL_BASE_BARS,
1710 .num_ports = 4,
1711 .base_baud = 115200,
1712 .uart_offset = 8,
1713 },
1706 [pbn_b0_bt_8_115200] = { 1714 [pbn_b0_bt_8_115200] = {
1707 .flags = FL_BASE0|FL_BASE_BARS, 1715 .flags = FL_BASE0|FL_BASE_BARS,
1708 .num_ports = 8, 1716 .num_ports = 8,
@@ -3191,6 +3199,15 @@ static struct pci_device_id serial_pci_tbl[] = {
3191 0x1208, 0x0004, 0, 0, 3199 0x1208, 0x0004, 0, 0,
3192 pbn_b0_4_921600 }, 3200 pbn_b0_4_921600 },
3193 3201
3202 { PCI_VENDOR_ID_KORENIX, PCI_DEVICE_ID_KORENIX_JETCARDF2,
3203 0x1204, 0x0004, 0, 0,
3204 pbn_b0_4_921600 },
3205 { PCI_VENDOR_ID_KORENIX, PCI_DEVICE_ID_KORENIX_JETCARDF2,
3206 0x1208, 0x0004, 0, 0,
3207 pbn_b0_4_921600 },
3208 { PCI_VENDOR_ID_KORENIX, PCI_DEVICE_ID_KORENIX_JETCARDF3,
3209 0x1208, 0x0004, 0, 0,
3210 pbn_b0_4_921600 },
3194 /* 3211 /*
3195 * Dell Remote Access Card 4 - Tim_T_Murphy@Dell.com 3212 * Dell Remote Access Card 4 - Tim_T_Murphy@Dell.com
3196 */ 3213 */
@@ -3649,6 +3666,18 @@ static struct pci_device_id serial_pci_tbl[] = {
3649 0, 0, pbn_b0_1_115200 }, 3666 0, 0, pbn_b0_1_115200 },
3650 3667
3651 /* 3668 /*
3669 * Best Connectivity PCI Multi I/O cards
3670 */
3671
3672 { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9865,
3673 0xA000, 0x1000,
3674 0, 0, pbn_b0_1_115200 },
3675
3676 { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9865,
3677 0xA000, 0x3004,
3678 0, 0, pbn_b0_bt_4_115200 },
3679
3680 /*
3652 * These entries match devices with class COMMUNICATION_SERIAL, 3681 * These entries match devices with class COMMUNICATION_SERIAL,
3653 * COMMUNICATION_MODEM or COMMUNICATION_MULTISERIAL 3682 * COMMUNICATION_MODEM or COMMUNICATION_MULTISERIAL
3654 */ 3683 */
diff --git a/drivers/serial/Kconfig b/drivers/serial/Kconfig
index 888a0ce91c4b..746e07033dce 100644
--- a/drivers/serial/Kconfig
+++ b/drivers/serial/Kconfig
@@ -1418,42 +1418,37 @@ config SERIAL_BFIN_SPORT
1418 To compile this driver as a module, choose M here: the 1418 To compile this driver as a module, choose M here: the
1419 module will be called bfin_sport_uart. 1419 module will be called bfin_sport_uart.
1420 1420
1421choice 1421config SERIAL_BFIN_SPORT_CONSOLE
1422 prompt "Baud rate for Blackfin SPORT UART" 1422 bool "Console on Blackfin sport emulated uart"
1423 depends on SERIAL_BFIN_SPORT 1423 depends on SERIAL_BFIN_SPORT=y
1424 default SERIAL_SPORT_BAUD_RATE_57600 1424 select SERIAL_CORE_CONSOLE
1425 help
1426 Choose a baud rate for the SPORT UART, other uart settings are
1427 8 bit, 1 stop bit, no parity, no flow control.
1428
1429config SERIAL_SPORT_BAUD_RATE_115200
1430 bool "115200"
1431
1432config SERIAL_SPORT_BAUD_RATE_57600
1433 bool "57600"
1434 1425
1435config SERIAL_SPORT_BAUD_RATE_38400 1426config SERIAL_BFIN_SPORT0_UART
1436 bool "38400" 1427 bool "Enable UART over SPORT0"
1428 depends on SERIAL_BFIN_SPORT && !(BF542 || BF542M || BF544 || BF544M)
1429 help
1430 Enable UART over SPORT0
1437 1431
1438config SERIAL_SPORT_BAUD_RATE_19200 1432config SERIAL_BFIN_SPORT1_UART
1439 bool "19200" 1433 bool "Enable UART over SPORT1"
1434 depends on SERIAL_BFIN_SPORT
1435 help
1436 Enable UART over SPORT1
1440 1437
1441config SERIAL_SPORT_BAUD_RATE_9600 1438config SERIAL_BFIN_SPORT2_UART
1442 bool "9600" 1439 bool "Enable UART over SPORT2"
1443endchoice 1440 depends on SERIAL_BFIN_SPORT && (BF54x || BF538 || BF539)
1441 help
1442 Enable UART over SPORT2
1444 1443
1445config SPORT_BAUD_RATE 1444config SERIAL_BFIN_SPORT3_UART
1446 int 1445 bool "Enable UART over SPORT3"
1447 depends on SERIAL_BFIN_SPORT 1446 depends on SERIAL_BFIN_SPORT && (BF54x || BF538 || BF539)
1448 default 115200 if (SERIAL_SPORT_BAUD_RATE_115200) 1447 help
1449 default 57600 if (SERIAL_SPORT_BAUD_RATE_57600) 1448 Enable UART over SPORT3
1450 default 38400 if (SERIAL_SPORT_BAUD_RATE_38400)
1451 default 19200 if (SERIAL_SPORT_BAUD_RATE_19200)
1452 default 9600 if (SERIAL_SPORT_BAUD_RATE_9600)
1453 1449
1454config SERIAL_TIMBERDALE 1450config SERIAL_TIMBERDALE
1455 tristate "Support for timberdale UART" 1451 tristate "Support for timberdale UART"
1456 depends on MFD_TIMBERDALE
1457 select SERIAL_CORE 1452 select SERIAL_CORE
1458 ---help--- 1453 ---help---
1459 Add support for UART controller on timberdale. 1454 Add support for UART controller on timberdale.
diff --git a/drivers/serial/atmel_serial.c b/drivers/serial/atmel_serial.c
index 9d948bccafaf..2c9bf9b68327 100644
--- a/drivers/serial/atmel_serial.c
+++ b/drivers/serial/atmel_serial.c
@@ -1213,6 +1213,24 @@ static int atmel_verify_port(struct uart_port *port, struct serial_struct *ser)
1213 return ret; 1213 return ret;
1214} 1214}
1215 1215
1216#ifdef CONFIG_CONSOLE_POLL
1217static int atmel_poll_get_char(struct uart_port *port)
1218{
1219 while (!(UART_GET_CSR(port) & ATMEL_US_RXRDY))
1220 cpu_relax();
1221
1222 return UART_GET_CHAR(port);
1223}
1224
1225static void atmel_poll_put_char(struct uart_port *port, unsigned char ch)
1226{
1227 while (!(UART_GET_CSR(port) & ATMEL_US_TXRDY))
1228 cpu_relax();
1229
1230 UART_PUT_CHAR(port, ch);
1231}
1232#endif
1233
1216static struct uart_ops atmel_pops = { 1234static struct uart_ops atmel_pops = {
1217 .tx_empty = atmel_tx_empty, 1235 .tx_empty = atmel_tx_empty,
1218 .set_mctrl = atmel_set_mctrl, 1236 .set_mctrl = atmel_set_mctrl,
@@ -1232,6 +1250,10 @@ static struct uart_ops atmel_pops = {
1232 .config_port = atmel_config_port, 1250 .config_port = atmel_config_port,
1233 .verify_port = atmel_verify_port, 1251 .verify_port = atmel_verify_port,
1234 .pm = atmel_serial_pm, 1252 .pm = atmel_serial_pm,
1253#ifdef CONFIG_CONSOLE_POLL
1254 .poll_get_char = atmel_poll_get_char,
1255 .poll_put_char = atmel_poll_put_char,
1256#endif
1235}; 1257};
1236 1258
1237/* 1259/*
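
The atmel hunks above wire up the CONFIG_CONSOLE_POLL hooks that kgdboc uses to drive the port with interrupts disabled: both callbacks simply spin on the channel status register until the UART is ready. A self-contained toy model of that contract (the fake CSR and its bit names are invented for illustration):

#include <stdio.h>

/* Toy model of the polled-I/O contract: both hooks busy-wait on a
 * status register, so they work with interrupts off. The "hardware"
 * below is a fake channel status register; names are hypothetical. */
#define RXRDY 0x01
#define TXRDY 0x02

static unsigned csr = RXRDY | TXRDY;	/* fake channel status register */
static int rx_data = 'g';		/* fake receive holding register */

static int poll_get_char(void)
{
	while (!(csr & RXRDY))
		;	/* cpu_relax() in the kernel version */
	return rx_data;
}

static void poll_put_char(int ch)
{
	while (!(csr & TXRDY))
		;
	putchar(ch);	/* stands in for UART_PUT_CHAR() */
}

int main(void)
{
	poll_put_char(poll_get_char());	/* echo one polled character */
	putchar('\n');
	return 0;
}
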
diff --git a/drivers/serial/bcm63xx_uart.c b/drivers/serial/bcm63xx_uart.c
index 37ad0c449937..a1a0e55d0807 100644
--- a/drivers/serial/bcm63xx_uart.c
+++ b/drivers/serial/bcm63xx_uart.c
@@ -35,7 +35,7 @@
35#include <bcm63xx_regs.h> 35#include <bcm63xx_regs.h>
36#include <bcm63xx_io.h> 36#include <bcm63xx_io.h>
37 37
38#define BCM63XX_NR_UARTS 1 38#define BCM63XX_NR_UARTS 2
39 39
40static struct uart_port ports[BCM63XX_NR_UARTS]; 40static struct uart_port ports[BCM63XX_NR_UARTS];
41 41
@@ -784,7 +784,7 @@ static struct uart_driver bcm_uart_driver = {
784 .dev_name = "ttyS", 784 .dev_name = "ttyS",
785 .major = TTY_MAJOR, 785 .major = TTY_MAJOR,
786 .minor = 64, 786 .minor = 64,
787 .nr = 1, 787 .nr = BCM63XX_NR_UARTS,
788 .cons = BCM63XX_CONSOLE, 788 .cons = BCM63XX_CONSOLE,
789}; 789};
790 790
@@ -826,11 +826,12 @@ static int __devinit bcm_uart_probe(struct platform_device *pdev)
826 port->dev = &pdev->dev; 826 port->dev = &pdev->dev;
827 port->fifosize = 16; 827 port->fifosize = 16;
828 port->uartclk = clk_get_rate(clk) / 2; 828 port->uartclk = clk_get_rate(clk) / 2;
829 port->line = pdev->id;
829 clk_put(clk); 830 clk_put(clk);
830 831
831 ret = uart_add_one_port(&bcm_uart_driver, port); 832 ret = uart_add_one_port(&bcm_uart_driver, port);
832 if (ret) { 833 if (ret) {
833 kfree(port); 834 ports[pdev->id].membase = 0;
834 return ret; 835 return ret;
835 } 836 }
836 platform_set_drvdata(pdev, port); 837 platform_set_drvdata(pdev, port);
diff --git a/drivers/serial/bfin_5xx.c b/drivers/serial/bfin_5xx.c
index 50abb7e557f4..fcf273e3f48c 100644
--- a/drivers/serial/bfin_5xx.c
+++ b/drivers/serial/bfin_5xx.c
@@ -14,6 +14,7 @@
14 14
15#include <linux/module.h> 15#include <linux/module.h>
16#include <linux/ioport.h> 16#include <linux/ioport.h>
17#include <linux/io.h>
17#include <linux/init.h> 18#include <linux/init.h>
18#include <linux/console.h> 19#include <linux/console.h>
19#include <linux/sysrq.h> 20#include <linux/sysrq.h>
@@ -237,7 +238,8 @@ static void bfin_serial_rx_chars(struct bfin_serial_port *uart)
237 238
238#if defined(CONFIG_KGDB_SERIAL_CONSOLE) || \ 239#if defined(CONFIG_KGDB_SERIAL_CONSOLE) || \
239 defined(CONFIG_KGDB_SERIAL_CONSOLE_MODULE) 240 defined(CONFIG_KGDB_SERIAL_CONSOLE_MODULE)
240 if (kgdb_connected && kgdboc_port_line == uart->port.line) 241 if (kgdb_connected && kgdboc_port_line == uart->port.line
242 && kgdboc_break_enabled)
241 if (ch == 0x3) {/* Ctrl + C */ 243 if (ch == 0x3) {/* Ctrl + C */
242 kgdb_breakpoint(); 244 kgdb_breakpoint();
243 return; 245 return;
@@ -488,6 +490,7 @@ void bfin_serial_rx_dma_timeout(struct bfin_serial_port *uart)
488{ 490{
489 int x_pos, pos; 491 int x_pos, pos;
490 492
493 dma_disable_irq(uart->tx_dma_channel);
491 dma_disable_irq(uart->rx_dma_channel); 494 dma_disable_irq(uart->rx_dma_channel);
492 spin_lock_bh(&uart->port.lock); 495 spin_lock_bh(&uart->port.lock);
493 496
@@ -521,6 +524,7 @@ void bfin_serial_rx_dma_timeout(struct bfin_serial_port *uart)
521 } 524 }
522 525
523 spin_unlock_bh(&uart->port.lock); 526 spin_unlock_bh(&uart->port.lock);
527 dma_enable_irq(uart->tx_dma_channel);
524 dma_enable_irq(uart->rx_dma_channel); 528 dma_enable_irq(uart->rx_dma_channel);
525 529
526 mod_timer(&(uart->rx_dma_timer), jiffies + DMA_RX_FLUSH_JIFFIES); 530 mod_timer(&(uart->rx_dma_timer), jiffies + DMA_RX_FLUSH_JIFFIES);
@@ -746,15 +750,6 @@ static int bfin_serial_startup(struct uart_port *port)
746 Status interrupt.\n"); 750 Status interrupt.\n");
747 } 751 }
748 752
749 if (uart->cts_pin >= 0) {
750 gpio_request(uart->cts_pin, DRIVER_NAME);
751 gpio_direction_output(uart->cts_pin, 1);
752 }
753 if (uart->rts_pin >= 0) {
754 gpio_request(uart->rts_pin, DRIVER_NAME);
755 gpio_direction_output(uart->rts_pin, 0);
756 }
757
758 /* CTS RTS PINs are negative assertive. */ 753 /* CTS RTS PINs are negative assertive. */
759 UART_PUT_MCR(uart, ACTS); 754 UART_PUT_MCR(uart, ACTS);
760 UART_SET_IER(uart, EDSSI); 755 UART_SET_IER(uart, EDSSI);
@@ -801,10 +796,6 @@ static void bfin_serial_shutdown(struct uart_port *port)
801 gpio_free(uart->rts_pin); 796 gpio_free(uart->rts_pin);
802#endif 797#endif
803#ifdef CONFIG_SERIAL_BFIN_HARD_CTSRTS 798#ifdef CONFIG_SERIAL_BFIN_HARD_CTSRTS
804 if (uart->cts_pin >= 0)
805 gpio_free(uart->cts_pin);
806 if (uart->rts_pin >= 0)
807 gpio_free(uart->rts_pin);
808 if (UART_GET_IER(uart) & EDSSI) 799 if (UART_GET_IER(uart) & EDSSI)
809 free_irq(uart->status_irq, uart); 800 free_irq(uart->status_irq, uart);
810#endif 801#endif
@@ -1409,8 +1400,7 @@ static int bfin_serial_remove(struct platform_device *dev)
1409 continue; 1400 continue;
1410 uart_remove_one_port(&bfin_serial_reg, &bfin_serial_ports[i].port); 1401 uart_remove_one_port(&bfin_serial_reg, &bfin_serial_ports[i].port);
1411 bfin_serial_ports[i].port.dev = NULL; 1402 bfin_serial_ports[i].port.dev = NULL;
1412#if defined(CONFIG_SERIAL_BFIN_CTSRTS) || \ 1403#if defined(CONFIG_SERIAL_BFIN_CTSRTS)
1413 defined(CONFIG_SERIAL_BFIN_HARD_CTSRTS)
1414 gpio_free(bfin_serial_ports[i].cts_pin); 1404 gpio_free(bfin_serial_ports[i].cts_pin);
1415 gpio_free(bfin_serial_ports[i].rts_pin); 1405 gpio_free(bfin_serial_ports[i].rts_pin);
1416#endif 1406#endif
diff --git a/drivers/serial/bfin_sport_uart.c b/drivers/serial/bfin_sport_uart.c
index 088bb35475f1..7c72888fbf94 100644
--- a/drivers/serial/bfin_sport_uart.c
+++ b/drivers/serial/bfin_sport_uart.c
@@ -1,27 +1,11 @@
1/* 1/*
2 * File: linux/drivers/serial/bfin_sport_uart.c 2 * Blackfin On-Chip Sport Emulated UART Driver
3 * 3 *
4 * Based on: drivers/serial/bfin_5xx.c by Aubrey Li. 4 * Copyright 2006-2009 Analog Devices Inc.
5 * Author: Roy Huang <roy.huang@analog.com>
6 * 5 *
7 * Created: Nov 22, 2006 6 * Enter bugs at http://blackfin.uclinux.org/
8 * Copyright: (c) 2006-2007 Analog Devices Inc.
9 * Description: this driver enable SPORTs on Blackfin emulate UART.
10 * 7 *
11 * This program is free software; you can redistribute it and/or modify 8 * Licensed under the GPL-2 or later.
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2 of the License, or
14 * (at your option) any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, see the file COPYING, or write
23 * to the Free Software Foundation, Inc.,
24 * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
25 */ 9 */
26 10
27/* 11/*
@@ -29,39 +13,18 @@
29 * http://www.analog.com/UploadedFiles/Application_Notes/399447663EE191.pdf 13 * http://www.analog.com/UploadedFiles/Application_Notes/399447663EE191.pdf
30 * This application note describes how to implement a UART on a SHARC DSP, 14 * This application note describes how to implement a UART on a SHARC DSP,
31 * but this driver is implemented on a Blackfin processor. 15 * but this driver is implemented on a Blackfin processor.
16 * Transmit Frame Sync is not used by this driver to transfer data out.
32 */ 17 */
33 18
34/* After reset, there is a prelude of low level pulse when transmit data first 19/* #define DEBUG */
35 * time. No addtional pulse in following transmit.
36 * According to document:
37 * The SPORTs are ready to start transmitting or receiving data no later than
38 * three serial clock cycles after they are enabled in the SPORTx_TCR1 or
39 * SPORTx_RCR1 register. No serial clock cycles are lost from this point on.
40 * The first internal frame sync will occur one frame sync delay after the
41 * SPORTs are ready. External frame syncs can occur as soon as the SPORT is
42 * ready.
43 */
44 20
45/* Thanks to Axel Alatalo <axel@rubico.se> for fixing sport rx bug. Sometimes 21#define DRV_NAME "bfin-sport-uart"
46 * sport receives data incorrectly. The following is Axel's words. 22#define DEVICE_NAME "ttySS"
47 * As EE-191, sport rx samples 3 times of the UART baudrate and takes the 23#define pr_fmt(fmt) DRV_NAME ": " fmt
48 * middle smaple of every 3 samples as the data bit. For a 8-N-1 UART setting,
49 * 30 samples will be required for a byte. If transmitter sends a 1/3 bit short
50 * byte due to buadrate drift, then the 30th sample of a byte, this sample is
51 * also the third sample of the stop bit, will happens on the immediately
52 * following start bit which will be thrown away and missed. Thus since parts
53 * of the startbit will be missed and the receiver will begin to drift, the
54 * effect accumulates over time until synchronization is lost.
55 * If only require 2 samples of the stopbit (by sampling in total 29 samples),
56 * then a to short byte as in the case above will be tolerated. Then the 1/3
57 * early startbit will trigger a framesync since the last read is complete
58 * after only 2/3 stopbit and framesync is active during the last 1/3 looking
59 * for a possible early startbit. */
60
61//#define DEBUG
62 24
63#include <linux/module.h> 25#include <linux/module.h>
64#include <linux/ioport.h> 26#include <linux/ioport.h>
27#include <linux/io.h>
65#include <linux/init.h> 28#include <linux/init.h>
66#include <linux/console.h> 29#include <linux/console.h>
67#include <linux/sysrq.h> 30#include <linux/sysrq.h>
@@ -75,23 +38,36 @@
75 38
76#include "bfin_sport_uart.h" 39#include "bfin_sport_uart.h"
77 40
41#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
78unsigned short bfin_uart_pin_req_sport0[] = 42unsigned short bfin_uart_pin_req_sport0[] =
79 {P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS, \ 43 {P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS, \
80 P_SPORT0_DRPRI, P_SPORT0_RSCLK, P_SPORT0_DRSEC, P_SPORT0_DTSEC, 0}; 44 P_SPORT0_DRPRI, P_SPORT0_RSCLK, P_SPORT0_DRSEC, P_SPORT0_DTSEC, 0};
81 45#endif
46#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART
82unsigned short bfin_uart_pin_req_sport1[] = 47unsigned short bfin_uart_pin_req_sport1[] =
83 {P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS, \ 48 {P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS, \
84 P_SPORT1_DRPRI, P_SPORT1_RSCLK, P_SPORT1_DRSEC, P_SPORT1_DTSEC, 0}; 49 P_SPORT1_DRPRI, P_SPORT1_RSCLK, P_SPORT1_DRSEC, P_SPORT1_DTSEC, 0};
85 50#endif
86#define DRV_NAME "bfin-sport-uart" 51#ifdef CONFIG_SERIAL_BFIN_SPORT2_UART
52unsigned short bfin_uart_pin_req_sport2[] =
53 {P_SPORT2_TFS, P_SPORT2_DTPRI, P_SPORT2_TSCLK, P_SPORT2_RFS, \
54 P_SPORT2_DRPRI, P_SPORT2_RSCLK, P_SPORT2_DRSEC, P_SPORT2_DTSEC, 0};
55#endif
56#ifdef CONFIG_SERIAL_BFIN_SPORT3_UART
57unsigned short bfin_uart_pin_req_sport3[] =
58 {P_SPORT3_TFS, P_SPORT3_DTPRI, P_SPORT3_TSCLK, P_SPORT3_RFS, \
59 P_SPORT3_DRPRI, P_SPORT3_RSCLK, P_SPORT3_DRSEC, P_SPORT3_DTSEC, 0};
60#endif
87 61
88struct sport_uart_port { 62struct sport_uart_port {
89 struct uart_port port; 63 struct uart_port port;
90 char *name;
91
92 int tx_irq;
93 int rx_irq;
94 int err_irq; 64 int err_irq;
65 unsigned short csize;
66 unsigned short rxmask;
67 unsigned short txmask1;
68 unsigned short txmask2;
69 unsigned char stopb;
70/* unsigned char parib; */
95}; 71};
96 72
97static void sport_uart_tx_chars(struct sport_uart_port *up); 73static void sport_uart_tx_chars(struct sport_uart_port *up);
@@ -99,36 +75,42 @@ static void sport_stop_tx(struct uart_port *port);
99 75
100static inline void tx_one_byte(struct sport_uart_port *up, unsigned int value) 76static inline void tx_one_byte(struct sport_uart_port *up, unsigned int value)
101{ 77{
102 pr_debug("%s value:%x\n", __func__, value); 78 pr_debug("%s value:%x, mask1=0x%x, mask2=0x%x\n", __func__, value,
103 /* Place a Start and Stop bit */ 79 up->txmask1, up->txmask2);
80
81 /* Place Start and Stop bits */
104 __asm__ __volatile__ ( 82 __asm__ __volatile__ (
105 "R2 = b#01111111100;" 83 "%[val] <<= 1;"
106 "R3 = b#10000000001;" 84 "%[val] = %[val] & %[mask1];"
107 "%0 <<= 2;" 85 "%[val] = %[val] | %[mask2];"
108 "%0 = %0 & R2;" 86 : [val]"+d"(value)
109 "%0 = %0 | R3;" 87 : [mask1]"d"(up->txmask1), [mask2]"d"(up->txmask2)
110 : "=d"(value) 88 : "ASTAT"
111 : "d"(value)
112 : "ASTAT", "R2", "R3"
113 ); 89 );
114 pr_debug("%s value:%x\n", __func__, value); 90 pr_debug("%s value:%x\n", __func__, value);
115 91
116 SPORT_PUT_TX(up, value); 92 SPORT_PUT_TX(up, value);
117} 93}
118 94
119static inline unsigned int rx_one_byte(struct sport_uart_port *up) 95static inline unsigned char rx_one_byte(struct sport_uart_port *up)
120{ 96{
121 unsigned int value, extract; 97 unsigned int value;
98 unsigned char extract;
122 u32 tmp_mask1, tmp_mask2, tmp_shift, tmp; 99 u32 tmp_mask1, tmp_mask2, tmp_shift, tmp;
123 100
124 value = SPORT_GET_RX32(up); 101 if ((up->csize + up->stopb) > 7)
125 pr_debug("%s value:%x\n", __func__, value); 102 value = SPORT_GET_RX32(up);
103 else
104 value = SPORT_GET_RX(up);
105
106 pr_debug("%s value:%x, cs=%d, mask=0x%x\n", __func__, value,
107 up->csize, up->rxmask);
126 108
127 /* Extract 8 bits data */ 109 /* Extract data */
128 __asm__ __volatile__ ( 110 __asm__ __volatile__ (
129 "%[extr] = 0;" 111 "%[extr] = 0;"
130 "%[mask1] = 0x1801(Z);" 112 "%[mask1] = %[rxmask];"
131 "%[mask2] = 0x0300(Z);" 113 "%[mask2] = 0x0200(Z);"
132 "%[shift] = 0;" 114 "%[shift] = 0;"
133 "LSETUP(.Lloop_s, .Lloop_e) LC0 = %[lc];" 115 "LSETUP(.Lloop_s, .Lloop_e) LC0 = %[lc];"
134 ".Lloop_s:" 116 ".Lloop_s:"
@@ -138,9 +120,9 @@ static inline unsigned int rx_one_byte(struct sport_uart_port *up)
138 "%[mask1] = %[mask1] - %[mask2];" 120 "%[mask1] = %[mask1] - %[mask2];"
139 ".Lloop_e:" 121 ".Lloop_e:"
140 "%[shift] += 1;" 122 "%[shift] += 1;"
141 : [val]"=d"(value), [extr]"=d"(extract), [shift]"=d"(tmp_shift), [tmp]"=d"(tmp), 123 : [extr]"=&d"(extract), [shift]"=&d"(tmp_shift), [tmp]"=&d"(tmp),
142 [mask1]"=d"(tmp_mask1), [mask2]"=d"(tmp_mask2) 124 [mask1]"=&d"(tmp_mask1), [mask2]"=&d"(tmp_mask2)
143 : "d"(value), [lc]"a"(8) 125 : [val]"d"(value), [rxmask]"d"(up->rxmask), [lc]"a"(up->csize)
144 : "ASTAT", "LB0", "LC0", "LT0" 126 : "ASTAT", "LB0", "LC0", "LT0"
145 ); 127 );
146 128
@@ -148,29 +130,28 @@ static inline unsigned int rx_one_byte(struct sport_uart_port *up)
148 return extract; 130 return extract;
149} 131}
150 132
151static int sport_uart_setup(struct sport_uart_port *up, int sclk, int baud_rate) 133static int sport_uart_setup(struct sport_uart_port *up, int size, int baud_rate)
152{ 134{
153 int tclkdiv, tfsdiv, rclkdiv; 135 int tclkdiv, rclkdiv;
136 unsigned int sclk = get_sclk();
154 137
155 /* Set TCR1 and TCR2 */ 138 /* Set TCR1 and TCR2, TFSR is not enabled for uart */
156 SPORT_PUT_TCR1(up, (LATFS | ITFS | TFSR | TLSBIT | ITCLK)); 139 SPORT_PUT_TCR1(up, (ITFS | TLSBIT | ITCLK));
157 SPORT_PUT_TCR2(up, 10); 140 SPORT_PUT_TCR2(up, size + 1);
158 pr_debug("%s TCR1:%x, TCR2:%x\n", __func__, SPORT_GET_TCR1(up), SPORT_GET_TCR2(up)); 141 pr_debug("%s TCR1:%x, TCR2:%x\n", __func__, SPORT_GET_TCR1(up), SPORT_GET_TCR2(up));
159 142
160 /* Set RCR1 and RCR2 */ 143 /* Set RCR1 and RCR2 */
161 SPORT_PUT_RCR1(up, (RCKFE | LARFS | LRFS | RFSR | IRCLK)); 144 SPORT_PUT_RCR1(up, (RCKFE | LARFS | LRFS | RFSR | IRCLK));
162 SPORT_PUT_RCR2(up, 28); 145 SPORT_PUT_RCR2(up, (size + 1) * 2 - 1);
163 pr_debug("%s RCR1:%x, RCR2:%x\n", __func__, SPORT_GET_RCR1(up), SPORT_GET_RCR2(up)); 146 pr_debug("%s RCR1:%x, RCR2:%x\n", __func__, SPORT_GET_RCR1(up), SPORT_GET_RCR2(up));
164 147
165 tclkdiv = sclk/(2 * baud_rate) - 1; 148 tclkdiv = sclk / (2 * baud_rate) - 1;
166 tfsdiv = 12; 149 rclkdiv = sclk / (2 * baud_rate * 2) - 1;
167 rclkdiv = sclk/(2 * baud_rate * 3) - 1;
168 SPORT_PUT_TCLKDIV(up, tclkdiv); 150 SPORT_PUT_TCLKDIV(up, tclkdiv);
169 SPORT_PUT_TFSDIV(up, tfsdiv);
170 SPORT_PUT_RCLKDIV(up, rclkdiv); 151 SPORT_PUT_RCLKDIV(up, rclkdiv);
171 SSYNC(); 152 SSYNC();
172 pr_debug("%s sclk:%d, baud_rate:%d, tclkdiv:%d, tfsdiv:%d, rclkdiv:%d\n", 153 pr_debug("%s sclk:%d, baud_rate:%d, tclkdiv:%d, rclkdiv:%d\n",
173 __func__, sclk, baud_rate, tclkdiv, tfsdiv, rclkdiv); 154 __func__, sclk, baud_rate, tclkdiv, rclkdiv);
174 155
175 return 0; 156 return 0;
176} 157}
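
The rewritten sport_uart_setup() drops the transmit frame sync and programs the receive side for 2x oversampling (RCR2 holds (size + 1) * 2 - 1 samples per word), so rclkdiv is derived from twice the baud rate. A worked example of the divisor arithmetic, using a hypothetical 133 MHz system clock rather than any value from the patch:

#include <stdio.h>

/* Worked example of the divisor math above; the 133 MHz sclk and
 * 57600 baud are hypothetical sample values, not taken from the patch. */
int main(void)
{
	unsigned int sclk = 133000000;
	unsigned int baud = 57600;
	int tclkdiv = sclk / (2 * baud) - 1;	 /* TX: one clock per bit */
	int rclkdiv = sclk / (2 * baud * 2) - 1; /* RX: 2x oversampling */

	printf("tclkdiv=%d rclkdiv=%d\n", tclkdiv, rclkdiv); /* 1153, 576 */
	return 0;
}
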
@@ -181,23 +162,29 @@ static irqreturn_t sport_uart_rx_irq(int irq, void *dev_id)
181 struct tty_struct *tty = up->port.state->port.tty; 162 struct tty_struct *tty = up->port.state->port.tty;
182 unsigned int ch; 163 unsigned int ch;
183 164
184 do { 165 spin_lock(&up->port.lock);
166
167 while (SPORT_GET_STAT(up) & RXNE) {
185 ch = rx_one_byte(up); 168 ch = rx_one_byte(up);
186 up->port.icount.rx++; 169 up->port.icount.rx++;
187 170
188 if (uart_handle_sysrq_char(&up->port, ch)) 171 if (!uart_handle_sysrq_char(&up->port, ch))
189 ;
190 else
191 tty_insert_flip_char(tty, ch, TTY_NORMAL); 172 tty_insert_flip_char(tty, ch, TTY_NORMAL);
192 } while (SPORT_GET_STAT(up) & RXNE); 173 }
193 tty_flip_buffer_push(tty); 174 tty_flip_buffer_push(tty);
194 175
176 spin_unlock(&up->port.lock);
177
195 return IRQ_HANDLED; 178 return IRQ_HANDLED;
196} 179}
197 180
198static irqreturn_t sport_uart_tx_irq(int irq, void *dev_id) 181static irqreturn_t sport_uart_tx_irq(int irq, void *dev_id)
199{ 182{
200 sport_uart_tx_chars(dev_id); 183 struct sport_uart_port *up = dev_id;
184
185 spin_lock(&up->port.lock);
186 sport_uart_tx_chars(up);
187 spin_unlock(&up->port.lock);
201 188
202 return IRQ_HANDLED; 189 return IRQ_HANDLED;
203} 190}
@@ -208,6 +195,8 @@ static irqreturn_t sport_uart_err_irq(int irq, void *dev_id)
208 struct tty_struct *tty = up->port.state->port.tty; 195 struct tty_struct *tty = up->port.state->port.tty;
209 unsigned int stat = SPORT_GET_STAT(up); 196 unsigned int stat = SPORT_GET_STAT(up);
210 197
198 spin_lock(&up->port.lock);
199
211 /* Overflow in RX FIFO */ 200 /* Overflow in RX FIFO */
212 if (stat & ROVF) { 201 if (stat & ROVF) {
213 up->port.icount.overrun++; 202 up->port.icount.overrun++;
@@ -216,15 +205,16 @@ static irqreturn_t sport_uart_err_irq(int irq, void *dev_id)
216 } 205 }
217 /* These should not happen */ 206 /* These should not happen */
218 if (stat & (TOVF | TUVF | RUVF)) { 207 if (stat & (TOVF | TUVF | RUVF)) {
219 printk(KERN_ERR "SPORT Error:%s %s %s\n", 208 pr_err("SPORT Error:%s %s %s\n",
220 (stat & TOVF)?"TX overflow":"", 209 (stat & TOVF) ? "TX overflow" : "",
221 (stat & TUVF)?"TX underflow":"", 210 (stat & TUVF) ? "TX underflow" : "",
222 (stat & RUVF)?"RX underflow":""); 211 (stat & RUVF) ? "RX underflow" : "");
223 SPORT_PUT_TCR1(up, SPORT_GET_TCR1(up) & ~TSPEN); 212 SPORT_PUT_TCR1(up, SPORT_GET_TCR1(up) & ~TSPEN);
224 SPORT_PUT_RCR1(up, SPORT_GET_RCR1(up) & ~RSPEN); 213 SPORT_PUT_RCR1(up, SPORT_GET_RCR1(up) & ~RSPEN);
225 } 214 }
226 SSYNC(); 215 SSYNC();
227 216
217 spin_unlock(&up->port.lock);
228 return IRQ_HANDLED; 218 return IRQ_HANDLED;
229} 219}
230 220
@@ -232,60 +222,37 @@ static irqreturn_t sport_uart_err_irq(int irq, void *dev_id)
232static int sport_startup(struct uart_port *port) 222static int sport_startup(struct uart_port *port)
233{ 223{
234 struct sport_uart_port *up = (struct sport_uart_port *)port; 224 struct sport_uart_port *up = (struct sport_uart_port *)port;
235 char buffer[20]; 225 int ret;
236 int retval;
237 226
238 pr_debug("%s enter\n", __func__); 227 pr_debug("%s enter\n", __func__);
239 snprintf(buffer, 20, "%s rx", up->name); 228 ret = request_irq(up->port.irq, sport_uart_rx_irq, 0,
240 retval = request_irq(up->rx_irq, sport_uart_rx_irq, IRQF_SAMPLE_RANDOM, buffer, up); 229 "SPORT_UART_RX", up);
241 if (retval) { 230 if (ret) {
242 printk(KERN_ERR "Unable to request interrupt %s\n", buffer); 231 dev_err(port->dev, "unable to request SPORT RX interrupt\n");
243 return retval; 232 return ret;
244 } 233 }
245 234
246 snprintf(buffer, 20, "%s tx", up->name); 235 ret = request_irq(up->port.irq+1, sport_uart_tx_irq, 0,
247 retval = request_irq(up->tx_irq, sport_uart_tx_irq, IRQF_SAMPLE_RANDOM, buffer, up); 236 "SPORT_UART_TX", up);
248 if (retval) { 237 if (ret) {
249 printk(KERN_ERR "Unable to request interrupt %s\n", buffer); 238 dev_err(port->dev, "unable to request SPORT TX interrupt\n");
250 goto fail1; 239 goto fail1;
251 } 240 }
252 241
253 snprintf(buffer, 20, "%s err", up->name); 242 ret = request_irq(up->err_irq, sport_uart_err_irq, 0,
254 retval = request_irq(up->err_irq, sport_uart_err_irq, IRQF_SAMPLE_RANDOM, buffer, up); 243 "SPORT_UART_STATUS", up);
255 if (retval) { 244 if (ret) {
256 printk(KERN_ERR "Unable to request interrupt %s\n", buffer); 245 dev_err(port->dev, "unable to request SPORT status interrupt\n");
257 goto fail2; 246 goto fail2;
258 } 247 }
259 248
260 if (port->line) {
261 if (peripheral_request_list(bfin_uart_pin_req_sport1, DRV_NAME))
262 goto fail3;
263 } else {
264 if (peripheral_request_list(bfin_uart_pin_req_sport0, DRV_NAME))
265 goto fail3;
266 }
267
268 sport_uart_setup(up, get_sclk(), port->uartclk);
269
270 /* Enable receive interrupt */
271 SPORT_PUT_RCR1(up, (SPORT_GET_RCR1(up) | RSPEN));
272 SSYNC();
273
274 return 0; 249 return 0;
250 fail2:
251 free_irq(up->port.irq+1, up);
252 fail1:
253 free_irq(up->port.irq, up);
275 254
276 255 return ret;
277fail3:
278 printk(KERN_ERR DRV_NAME
279 ": Requesting Peripherals failed\n");
280
281 free_irq(up->err_irq, up);
282fail2:
283 free_irq(up->tx_irq, up);
284fail1:
285 free_irq(up->rx_irq, up);
286
287 return retval;
288
289} 256}
290 257
291static void sport_uart_tx_chars(struct sport_uart_port *up) 258static void sport_uart_tx_chars(struct sport_uart_port *up)
@@ -344,20 +311,17 @@ static void sport_set_mctrl(struct uart_port *port, unsigned int mctrl)
344static void sport_stop_tx(struct uart_port *port) 311static void sport_stop_tx(struct uart_port *port)
345{ 312{
346 struct sport_uart_port *up = (struct sport_uart_port *)port; 313 struct sport_uart_port *up = (struct sport_uart_port *)port;
347 unsigned int stat;
348 314
349 pr_debug("%s enter\n", __func__); 315 pr_debug("%s enter\n", __func__);
350 316
351 stat = SPORT_GET_STAT(up);
352 while(!(stat & TXHRE)) {
353 udelay(1);
354 stat = SPORT_GET_STAT(up);
355 }
356 /* Although the hold register is empty, last byte is still in shift 317 /* Although the hold register is empty, last byte is still in shift
357 * register and not sent out yet. If baud rate is lower than default, 318 * register and not sent out yet. So, put dummy data into the TX FIFO.
358 * delay should be longer. For example, if the baud rate is 9600, 319 * Then, sport tx stops when the last byte is shifted out and the dummy
359 * the delay must be at least 2ms by experience */ 320 * data is moved into the shift register.
360 udelay(500); 321 */
322 SPORT_PUT_TX(up, 0xffff);
323 while (!(SPORT_GET_STAT(up) & TXHRE))
324 cpu_relax();
361 325
362 SPORT_PUT_TCR1(up, (SPORT_GET_TCR1(up) & ~TSPEN)); 326 SPORT_PUT_TCR1(up, (SPORT_GET_TCR1(up) & ~TSPEN));
363 SSYNC(); 327 SSYNC();
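
The dummy write works because TXHRE only reports the transmit hold register empty while the last real byte can still be sitting in the shift register; queueing one throwaway word means TXHRE next asserts exactly when the real byte has fully left the wire. A toy two-stage model (hold register feeding a shift register), purely illustrative:

#include <stdio.h>

/* Toy two-stage TX model, illustrating why sport_stop_tx() queues a
 * dummy word: TXHRE only reports the hold register empty, not the
 * shift register. -1 means a stage is empty. */
static int hold = -1, shift = -1;

static int txhre(void) { return hold < 0; }

static void clock_one_word(void)	/* shift register finishes a word */
{
	if (shift >= 0)
		printf("wire <- 0x%x\n", shift);
	shift = hold;			/* hold register moves down */
	hold = -1;
}

int main(void)
{
	hold = 0x55;			/* last real byte */
	clock_one_word();		/* 0x55 now in the shift register */

	/* Without a dummy: TXHRE is already true here, but 0x55 has not
	 * left the wire yet - disabling TX now would truncate it. */
	hold = 0xffff;			/* the dummy word */
	while (!txhre())
		clock_one_word();	/* by now 0x55 is fully shifted out */
	printf("safe to clear TSPEN\n");
	return 0;
}
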
@@ -370,6 +334,7 @@ static void sport_start_tx(struct uart_port *port)
370 struct sport_uart_port *up = (struct sport_uart_port *)port; 334 struct sport_uart_port *up = (struct sport_uart_port *)port;
371 335
372 pr_debug("%s enter\n", __func__); 336 pr_debug("%s enter\n", __func__);
337
373 /* Write data into the SPORT FIFO before enabling SPORT to transmit */ 338 /* Write data into the SPORT FIFO before enabling SPORT to transmit */
374 sport_uart_tx_chars(up); 339 sport_uart_tx_chars(up);
375 340
@@ -403,37 +368,24 @@ static void sport_shutdown(struct uart_port *port)
403{ 368{
404 struct sport_uart_port *up = (struct sport_uart_port *)port; 369 struct sport_uart_port *up = (struct sport_uart_port *)port;
405 370
406 pr_debug("%s enter\n", __func__); 371 dev_dbg(port->dev, "%s enter\n", __func__);
407 372
408 /* Disable sport */ 373 /* Disable sport */
409 SPORT_PUT_TCR1(up, (SPORT_GET_TCR1(up) & ~TSPEN)); 374 SPORT_PUT_TCR1(up, (SPORT_GET_TCR1(up) & ~TSPEN));
410 SPORT_PUT_RCR1(up, (SPORT_GET_RCR1(up) & ~RSPEN)); 375 SPORT_PUT_RCR1(up, (SPORT_GET_RCR1(up) & ~RSPEN));
411 SSYNC(); 376 SSYNC();
412 377
413 if (port->line) { 378 free_irq(up->port.irq, up);
414 peripheral_free_list(bfin_uart_pin_req_sport1); 379 free_irq(up->port.irq+1, up);
415 } else {
416 peripheral_free_list(bfin_uart_pin_req_sport0);
417 }
418
419 free_irq(up->rx_irq, up);
420 free_irq(up->tx_irq, up);
421 free_irq(up->err_irq, up); 380 free_irq(up->err_irq, up);
422} 381}
423 382
424static void sport_set_termios(struct uart_port *port,
425 struct ktermios *termios, struct ktermios *old)
426{
427 pr_debug("%s enter, c_cflag:%08x\n", __func__, termios->c_cflag);
428 uart_update_timeout(port, CS8 ,port->uartclk);
429}
430
431static const char *sport_type(struct uart_port *port) 383static const char *sport_type(struct uart_port *port)
432{ 384{
433 struct sport_uart_port *up = (struct sport_uart_port *)port; 385 struct sport_uart_port *up = (struct sport_uart_port *)port;
434 386
435 pr_debug("%s enter\n", __func__); 387 pr_debug("%s enter\n", __func__);
436 return up->name; 388 return up->port.type == PORT_BFIN_SPORT ? "BFIN-SPORT-UART" : NULL;
437} 389}
438 390
439static void sport_release_port(struct uart_port *port) 391static void sport_release_port(struct uart_port *port)
@@ -461,6 +413,110 @@ static int sport_verify_port(struct uart_port *port, struct serial_struct *ser)
461 return 0; 413 return 0;
462} 414}
463 415
416static void sport_set_termios(struct uart_port *port,
417 struct ktermios *termios, struct ktermios *old)
418{
419 struct sport_uart_port *up = (struct sport_uart_port *)port;
420 unsigned long flags;
421 int i;
422
423 pr_debug("%s enter, c_cflag:%08x\n", __func__, termios->c_cflag);
424
425 switch (termios->c_cflag & CSIZE) {
426 case CS8:
427 up->csize = 8;
428 break;
429 case CS7:
430 up->csize = 7;
431 break;
432 case CS6:
433 up->csize = 6;
434 break;
435 case CS5:
436 up->csize = 5;
437 break;
438 default:
439 pr_warning("requested word length not supported\n");
440 }
441
442 if (termios->c_cflag & CSTOPB) {
443 up->stopb = 1;
444 }
445 if (termios->c_cflag & PARENB) {
446 pr_warning("PARENB is not supported yet\n");
447 /* up->parib = 1; */
448 }
449
450 port->read_status_mask = OE;
451 if (termios->c_iflag & INPCK)
452 port->read_status_mask |= (FE | PE);
453 if (termios->c_iflag & (BRKINT | PARMRK))
454 port->read_status_mask |= BI;
455
456 /*
457 * Characters to ignore
458 */
459 port->ignore_status_mask = 0;
460 if (termios->c_iflag & IGNPAR)
461 port->ignore_status_mask |= FE | PE;
462 if (termios->c_iflag & IGNBRK) {
463 port->ignore_status_mask |= BI;
464 /*
465 * If we're ignoring parity and break indicators,
466 * ignore overruns too (for real raw support).
467 */
468 if (termios->c_iflag & IGNPAR)
469 port->ignore_status_mask |= OE;
470 }
471
472 /* RX extract mask */
473 up->rxmask = 0x01 | (((up->csize + up->stopb) * 2 - 1) << 0x8);
474 /* TX masks, 8 bit data and 1 bit stop for example:
475 * mask1 = b#0111111110
476 * mask2 = b#1000000000
477 */
478 for (i = 0, up->txmask1 = 0; i < up->csize; i++)
479 up->txmask1 |= (1<<i);
480 up->txmask2 = (1<<i);
481 if (up->stopb) {
482 ++i;
483 up->txmask2 |= (1<<i);
484 }
485 up->txmask1 <<= 1;
486 up->txmask2 <<= 1;
487 /* uart baud rate */
488 port->uartclk = uart_get_baud_rate(port, termios, old, 0, get_sclk()/16);
489
490 spin_lock_irqsave(&up->port.lock, flags);
491
492 /* Disable UART */
493 SPORT_PUT_TCR1(up, SPORT_GET_TCR1(up) & ~TSPEN);
494 SPORT_PUT_RCR1(up, SPORT_GET_RCR1(up) & ~RSPEN);
495
496 sport_uart_setup(up, up->csize + up->stopb, port->uartclk);
497
498 /* Drive the TX line high after configuration; one dummy word is
499 * needed to stop the sport after shifting out one byte
500 */
501 SPORT_PUT_TX(up, 0xffff);
502 SPORT_PUT_TX(up, 0xffff);
503 SPORT_PUT_TCR1(up, (SPORT_GET_TCR1(up) | TSPEN));
504 SSYNC();
505 while (!(SPORT_GET_STAT(up) & TXHRE))
506 cpu_relax();
507 SPORT_PUT_TCR1(up, SPORT_GET_TCR1(up) & ~TSPEN);
508 SSYNC();
509
510 /* Port speed changed, update the per-port timeout. */
511 uart_update_timeout(port, termios->c_cflag, port->uartclk);
512
513 /* Enable sport rx */
514 SPORT_PUT_RCR1(up, SPORT_GET_RCR1(up) | RSPEN);
515 SSYNC();
516
517 spin_unlock_irqrestore(&up->port.lock, flags);
518}
519
464struct uart_ops sport_uart_ops = { 520struct uart_ops sport_uart_ops = {
465 .tx_empty = sport_tx_empty, 521 .tx_empty = sport_tx_empty,
466 .set_mctrl = sport_set_mctrl, 522 .set_mctrl = sport_set_mctrl,
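
The mask setup in sport_set_termios() lets tx_one_byte() frame a character with one shift, one AND, and one OR. The sketch below replays that computation for 8-N-1 and frames a sample byte; it is stand-alone code that only reproduces the arithmetic:

#include <stdio.h>

/* Replays the txmask1/txmask2 computation from sport_set_termios()
 * for 8 data bits + 1 stop bit, then frames one character the way
 * tx_one_byte() does. Only the arithmetic is taken from the patch. */
int main(void)
{
	unsigned csize = 8, stopb = 1, i;
	unsigned txmask1 = 0, txmask2;

	for (i = 0; i < csize; i++)
		txmask1 |= 1u << i;	/* data-bit positions */
	txmask2 = 1u << i;		/* first trailing high bit */
	if (stopb)
		txmask2 |= 1u << ++i;	/* extra stop bit */
	txmask1 <<= 1;			/* make room for the start bit */
	txmask2 <<= 1;

	unsigned ch = 0x55;
	unsigned frame = ((ch << 1) & txmask1) | txmask2;

	/* Prints txmask1=0x1fe txmask2=0x600 frame=0x6aa: bit 0 is the
	 * low start bit, bits 1-8 the data (LSB first), top bits high. */
	printf("txmask1=%#x txmask2=%#x frame=%#x\n", txmask1, txmask2, frame);
	return 0;
}
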
@@ -480,138 +536,319 @@ struct uart_ops sport_uart_ops = {
480 .verify_port = sport_verify_port, 536 .verify_port = sport_verify_port,
481}; 537};
482 538
483static struct sport_uart_port sport_uart_ports[] = { 539#define BFIN_SPORT_UART_MAX_PORTS 4
484 { /* SPORT 0 */ 540
485 .name = "SPORT0", 541static struct sport_uart_port *bfin_sport_uart_ports[BFIN_SPORT_UART_MAX_PORTS];
486 .tx_irq = IRQ_SPORT0_TX, 542
487 .rx_irq = IRQ_SPORT0_RX, 543#ifdef CONFIG_SERIAL_BFIN_SPORT_CONSOLE
488 .err_irq= IRQ_SPORT0_ERROR, 544static int __init
489 .port = { 545sport_uart_console_setup(struct console *co, char *options)
490 .type = PORT_BFIN_SPORT, 546{
491 .iotype = UPIO_MEM, 547 struct sport_uart_port *up;
492 .membase = (void __iomem *)SPORT0_TCR1, 548 int baud = 57600;
493 .mapbase = SPORT0_TCR1, 549 int bits = 8;
494 .irq = IRQ_SPORT0_RX, 550 int parity = 'n';
495 .uartclk = CONFIG_SPORT_BAUD_RATE, 551 int flow = 'n';
496 .fifosize = 8, 552
497 .ops = &sport_uart_ops, 553 /* Check whether an invalid uart number has been specified */
498 .line = 0, 554 if (co->index < 0 || co->index >= BFIN_SPORT_UART_MAX_PORTS)
499 }, 555 return -ENODEV;
500 }, { /* SPORT 1 */ 556
501 .name = "SPORT1", 557 up = bfin_sport_uart_ports[co->index];
502 .tx_irq = IRQ_SPORT1_TX, 558 if (!up)
503 .rx_irq = IRQ_SPORT1_RX, 559 return -ENODEV;
504 .err_irq= IRQ_SPORT1_ERROR, 560
505 .port = { 561 if (options)
506 .type = PORT_BFIN_SPORT, 562 uart_parse_options(options, &baud, &parity, &bits, &flow);
507 .iotype = UPIO_MEM, 563
508 .membase = (void __iomem *)SPORT1_TCR1, 564 return uart_set_options(&up->port, co, baud, parity, bits, flow);
509 .mapbase = SPORT1_TCR1, 565}
510 .irq = IRQ_SPORT1_RX, 566
511 .uartclk = CONFIG_SPORT_BAUD_RATE, 567static void sport_uart_console_putchar(struct uart_port *port, int ch)
512 .fifosize = 8, 568{
513 .ops = &sport_uart_ops, 569 struct sport_uart_port *up = (struct sport_uart_port *)port;
514 .line = 1, 570
515 }, 571 while (SPORT_GET_STAT(up) & TXF)
572 barrier();
573
574 tx_one_byte(up, ch);
575}
576
577/*
578 * Interrupts are disabled on entering
579 */
580static void
581sport_uart_console_write(struct console *co, const char *s, unsigned int count)
582{
583 struct sport_uart_port *up = bfin_sport_uart_ports[co->index];
584 unsigned long flags;
585
586 spin_lock_irqsave(&up->port.lock, flags);
587
588 if (SPORT_GET_TCR1(up) & TSPEN)
589 uart_console_write(&up->port, s, count, sport_uart_console_putchar);
590 else {
591 /* dummy data to start sport */
592 while (SPORT_GET_STAT(up) & TXF)
593 barrier();
594 SPORT_PUT_TX(up, 0xffff);
595 /* Enable transmit, then an interrupt will be generated */
596 SPORT_PUT_TCR1(up, (SPORT_GET_TCR1(up) | TSPEN));
597 SSYNC();
598
599 uart_console_write(&up->port, s, count, sport_uart_console_putchar);
600
601 /* Although the hold register is empty, last byte is still in shift
602 * register and not sent out yet. So, put dummy data into the TX FIFO.
603 * Then, sport tx stops when the last byte is shifted out and the dummy
604 * data is moved into the shift register.
605 */
606 while (SPORT_GET_STAT(up) & TXF)
607 barrier();
608 SPORT_PUT_TX(up, 0xffff);
609 while (!(SPORT_GET_STAT(up) & TXHRE))
610 barrier();
611
612 /* Stop sport tx transfer */
613 SPORT_PUT_TCR1(up, (SPORT_GET_TCR1(up) & ~TSPEN));
614 SSYNC();
516 } 615 }
616
617 spin_unlock_irqrestore(&up->port.lock, flags);
618}
619
620static struct uart_driver sport_uart_reg;
621
622static struct console sport_uart_console = {
623 .name = DEVICE_NAME,
624 .write = sport_uart_console_write,
625 .device = uart_console_device,
626 .setup = sport_uart_console_setup,
627 .flags = CON_PRINTBUFFER,
628 .index = -1,
629 .data = &sport_uart_reg,
517}; 630};
518 631
632#define SPORT_UART_CONSOLE (&sport_uart_console)
633#else
634#define SPORT_UART_CONSOLE NULL
635#endif /* CONFIG_SERIAL_BFIN_SPORT_CONSOLE */
636
637
519static struct uart_driver sport_uart_reg = { 638static struct uart_driver sport_uart_reg = {
520 .owner = THIS_MODULE, 639 .owner = THIS_MODULE,
521 .driver_name = "SPORT-UART", 640 .driver_name = DRV_NAME,
522 .dev_name = "ttySS", 641 .dev_name = DEVICE_NAME,
523 .major = 204, 642 .major = 204,
524 .minor = 84, 643 .minor = 84,
525 .nr = ARRAY_SIZE(sport_uart_ports), 644 .nr = BFIN_SPORT_UART_MAX_PORTS,
526 .cons = NULL, 645 .cons = SPORT_UART_CONSOLE,
527}; 646};
528 647
529static int sport_uart_suspend(struct platform_device *dev, pm_message_t state) 648#ifdef CONFIG_PM
649static int sport_uart_suspend(struct device *dev)
530{ 650{
531 struct sport_uart_port *sport = platform_get_drvdata(dev); 651 struct sport_uart_port *sport = dev_get_drvdata(dev);
532 652
533 pr_debug("%s enter\n", __func__); 653 dev_dbg(dev, "%s enter\n", __func__);
534 if (sport) 654 if (sport)
535 uart_suspend_port(&sport_uart_reg, &sport->port); 655 uart_suspend_port(&sport_uart_reg, &sport->port);
536 656
537 return 0; 657 return 0;
538} 658}
539 659
540static int sport_uart_resume(struct platform_device *dev) 660static int sport_uart_resume(struct device *dev)
541{ 661{
542 struct sport_uart_port *sport = platform_get_drvdata(dev); 662 struct sport_uart_port *sport = dev_get_drvdata(dev);
543 663
544 pr_debug("%s enter\n", __func__); 664 dev_dbg(dev, "%s enter\n", __func__);
545 if (sport) 665 if (sport)
546 uart_resume_port(&sport_uart_reg, &sport->port); 666 uart_resume_port(&sport_uart_reg, &sport->port);
547 667
548 return 0; 668 return 0;
549} 669}
550 670
551static int sport_uart_probe(struct platform_device *dev) 671static struct dev_pm_ops bfin_sport_uart_dev_pm_ops = {
672 .suspend = sport_uart_suspend,
673 .resume = sport_uart_resume,
674};
675#endif
676
677static int __devinit sport_uart_probe(struct platform_device *pdev)
552{ 678{
553 pr_debug("%s enter\n", __func__); 679 struct resource *res;
554 sport_uart_ports[dev->id].port.dev = &dev->dev; 680 struct sport_uart_port *sport;
555 uart_add_one_port(&sport_uart_reg, &sport_uart_ports[dev->id].port); 681 int ret = 0;
556 platform_set_drvdata(dev, &sport_uart_ports[dev->id]);
557 682
558 return 0; 683 dev_dbg(&pdev->dev, "%s enter\n", __func__);
684
685 if (pdev->id < 0 || pdev->id >= BFIN_SPORT_UART_MAX_PORTS) {
686 dev_err(&pdev->dev, "Wrong sport uart platform device id.\n");
687 return -ENOENT;
688 }
689
690 if (bfin_sport_uart_ports[pdev->id] == NULL) {
691 bfin_sport_uart_ports[pdev->id] =
692 kmalloc(sizeof(struct sport_uart_port), GFP_KERNEL);
693 sport = bfin_sport_uart_ports[pdev->id];
694 if (!sport) {
695 dev_err(&pdev->dev,
696 "Fail to kmalloc sport_uart_port\n");
697 return -ENOMEM;
698 }
699
700 ret = peripheral_request_list(
701 (unsigned short *)pdev->dev.platform_data, DRV_NAME);
702 if (ret) {
703 dev_err(&pdev->dev,
704 "Fail to request SPORT peripherals\n");
705 goto out_error_free_mem;
706 }
707
708 spin_lock_init(&sport->port.lock);
709 sport->port.fifosize = SPORT_TX_FIFO_SIZE;
710 sport->port.ops = &sport_uart_ops;
711 sport->port.line = pdev->id;
712 sport->port.iotype = UPIO_MEM;
713 sport->port.flags = UPF_BOOT_AUTOCONF;
714
715 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
716 if (res == NULL) {
717 dev_err(&pdev->dev, "Cannot get IORESOURCE_MEM\n");
718 ret = -ENOENT;
719 goto out_error_free_peripherals;
720 }
721
722 sport->port.membase = ioremap(res->start,
723 resource_size(res));
724 if (!sport->port.membase) {
725 dev_err(&pdev->dev, "Cannot map sport IO\n");
726 ret = -ENXIO;
727 goto out_error_free_peripherals;
728 }
729
730 sport->port.irq = platform_get_irq(pdev, 0);
731 if (sport->port.irq < 0) {
732 dev_err(&pdev->dev, "No sport RX/TX IRQ specified\n");
733 ret = -ENOENT;
734 goto out_error_unmap;
735 }
736
737 sport->err_irq = platform_get_irq(pdev, 1);
738 if (sport->err_irq < 0) {
739 dev_err(&pdev->dev, "No sport status IRQ specified\n");
740 ret = -ENOENT;
741 goto out_error_unmap;
742 }
743 }
744
745#ifdef CONFIG_SERIAL_BFIN_SPORT_CONSOLE
746 if (!is_early_platform_device(pdev)) {
747#endif
748 sport = bfin_sport_uart_ports[pdev->id];
749 sport->port.dev = &pdev->dev;
750 dev_set_drvdata(&pdev->dev, sport);
751 ret = uart_add_one_port(&sport_uart_reg, &sport->port);
752#ifdef CONFIG_SERIAL_BFIN_SPORT_CONSOLE
753 }
754#endif
755 if (!ret)
756 return 0;
757
758 if (sport) {
759out_error_unmap:
760 iounmap(sport->port.membase);
761out_error_free_peripherals:
762 peripheral_free_list(
763 (unsigned short *)pdev->dev.platform_data);
764out_error_free_mem:
765 kfree(sport);
766 bfin_sport_uart_ports[pdev->id] = NULL;
767 }
768
769 return ret;
559} 770}
560 771
561static int sport_uart_remove(struct platform_device *dev) 772static int __devexit sport_uart_remove(struct platform_device *pdev)
562{ 773{
563 struct sport_uart_port *sport = platform_get_drvdata(dev); 774 struct sport_uart_port *sport = platform_get_drvdata(pdev);
564 775
565 pr_debug("%s enter\n", __func__); 776 dev_dbg(&pdev->dev, "%s enter\n", __func__);
566 platform_set_drvdata(dev, NULL); 777 dev_set_drvdata(&pdev->dev, NULL);
567 778
568 if (sport) 779 if (sport) {
569 uart_remove_one_port(&sport_uart_reg, &sport->port); 780 uart_remove_one_port(&sport_uart_reg, &sport->port);
781 iounmap(sport->port.membase);
782 peripheral_free_list(
783 (unsigned short *)pdev->dev.platform_data);
784 kfree(sport);
785 bfin_sport_uart_ports[pdev->id] = NULL;
786 }
570 787
571 return 0; 788 return 0;
572} 789}
573 790
574static struct platform_driver sport_uart_driver = { 791static struct platform_driver sport_uart_driver = {
575 .probe = sport_uart_probe, 792 .probe = sport_uart_probe,
576 .remove = sport_uart_remove, 793 .remove = __devexit_p(sport_uart_remove),
577 .suspend = sport_uart_suspend,
578 .resume = sport_uart_resume,
579 .driver = { 794 .driver = {
580 .name = DRV_NAME, 795 .name = DRV_NAME,
796#ifdef CONFIG_PM
797 .pm = &bfin_sport_uart_dev_pm_ops,
798#endif
581 }, 799 },
582}; 800};
583 801
802#ifdef CONFIG_SERIAL_BFIN_SPORT_CONSOLE
803static __initdata struct early_platform_driver early_sport_uart_driver = {
804 .class_str = DRV_NAME,
805 .pdrv = &sport_uart_driver,
806 .requested_id = EARLY_PLATFORM_ID_UNSET,
807};
808
809static int __init sport_uart_rs_console_init(void)
810{
811 early_platform_driver_register(&early_sport_uart_driver, DRV_NAME);
812
813 early_platform_driver_probe(DRV_NAME, BFIN_SPORT_UART_MAX_PORTS, 0);
814
815 register_console(&sport_uart_console);
816
817 return 0;
818}
819console_initcall(sport_uart_rs_console_init);
820#endif
821
584static int __init sport_uart_init(void) 822static int __init sport_uart_init(void)
585{ 823{
586 int ret; 824 int ret;
587 825
588 pr_debug("%s enter\n", __func__); 826 pr_info("Serial: Blackfin uart over sport driver\n");
827
589 ret = uart_register_driver(&sport_uart_reg); 828 ret = uart_register_driver(&sport_uart_reg);
590 if (ret != 0) { 829 if (ret) {
591 printk(KERN_ERR "Failed to register %s:%d\n", 830 pr_err("failed to register %s:%d\n",
592 sport_uart_reg.driver_name, ret); 831 sport_uart_reg.driver_name, ret);
593 return ret; 832 return ret;
594 } 833 }
595 834
596 ret = platform_driver_register(&sport_uart_driver); 835 ret = platform_driver_register(&sport_uart_driver);
597 if (ret != 0) { 836 if (ret) {
598 printk(KERN_ERR "Failed to register sport uart driver:%d\n", ret); 837 pr_err("failed to register sport uart driver:%d\n", ret);
599 uart_unregister_driver(&sport_uart_reg); 838 uart_unregister_driver(&sport_uart_reg);
600 } 839 }
601 840
602
603 pr_debug("%s exit\n", __func__);
604 return ret; 841 return ret;
605} 842}
843module_init(sport_uart_init);
606 844
607static void __exit sport_uart_exit(void) 845static void __exit sport_uart_exit(void)
608{ 846{
609 pr_debug("%s enter\n", __func__);
610 platform_driver_unregister(&sport_uart_driver); 847 platform_driver_unregister(&sport_uart_driver);
611 uart_unregister_driver(&sport_uart_reg); 848 uart_unregister_driver(&sport_uart_reg);
612} 849}
613
614module_init(sport_uart_init);
615module_exit(sport_uart_exit); 850module_exit(sport_uart_exit);
616 851
852MODULE_AUTHOR("Sonic Zhang, Roy Huang");
853MODULE_DESCRIPTION("Blackfin serial over SPORT driver");
617MODULE_LICENSE("GPL"); 854MODULE_LICENSE("GPL");
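
The new probe acquires memory, peripheral pins, the register mapping, and IRQs in order and unwinds in reverse through a label ladder (here unusually nested inside the trailing if (sport) block). A minimal stand-alone model of that unwind idiom, with placeholder resources:

#include <stdio.h>

/* Minimal model of the probe()-style unwind ladder: acquire resources
 * in order, release in reverse on failure. The three "resources" are
 * stand-ins, not the driver's. */
static int get_res(const char *name, int fail)
{
	if (fail) {
		fprintf(stderr, "failed to get %s\n", name);
		return -1;
	}
	printf("got %s\n", name);
	return 0;
}

int main(void)
{
	int ret = -1;

	if (get_res("memory", 0))
		goto out;
	if (get_res("peripherals", 0))
		goto out_free_mem;
	if (get_res("irq", 1))		/* simulate a late failure */
		goto out_free_peripherals;
	return 0;

out_free_peripherals:
	printf("release peripherals\n");
out_free_mem:
	printf("release memory\n");
out:
	return ret;
}
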
diff --git a/drivers/serial/bfin_sport_uart.h b/drivers/serial/bfin_sport_uart.h
index 671d41cc1a3f..abe03614e4df 100644
--- a/drivers/serial/bfin_sport_uart.h
+++ b/drivers/serial/bfin_sport_uart.h
@@ -1,29 +1,23 @@
1/* 1/*
2 * File: linux/drivers/serial/bfin_sport_uart.h 2 * Blackfin On-Chip Sport Emulated UART Driver
3 * 3 *
4 * Based on: include/asm-blackfin/mach-533/bfin_serial_5xx.h 4 * Copyright 2006-2008 Analog Devices Inc.
5 * Author: Roy Huang <roy.huang>analog.com>
6 * 5 *
7 * Created: Nov 22, 2006 6 * Enter bugs at http://blackfin.uclinux.org/
8 * Copyright: (C) Analog Device Inc.
9 * Description: this driver enable SPORTs on Blackfin emulate UART.
10 * 7 *
11 * This program is free software; you can redistribute it and/or modify 8 * Licensed under the GPL-2 or later.
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2 of the License, or
14 * (at your option) any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, see the file COPYING, or write
23 * to the Free Software Foundation, Inc.,
24 * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
25 */ 9 */
26 10
11/*
12 * This driver and the hardware it supports are described in ADI application note EE-191.
13 * http://www.analog.com/UploadedFiles/Application_Notes/399447663EE191.pdf
14 * This application note describes how to implement a UART on a SHARC DSP,
15 * but this driver is implemented on a Blackfin processor.
16 * Transmit Frame Sync is not used by this driver to transfer data out.
17 */
18
19#ifndef _BFIN_SPORT_UART_H
20#define _BFIN_SPORT_UART_H
27 21
28#define OFFSET_TCR1 0x00 /* Transmit Configuration 1 Register */ 22#define OFFSET_TCR1 0x00 /* Transmit Configuration 1 Register */
29#define OFFSET_TCR2 0x04 /* Transmit Configuration 2 Register */ 23#define OFFSET_TCR2 0x04 /* Transmit Configuration 2 Register */
@@ -61,3 +55,7 @@
61#define SPORT_PUT_RCLKDIV(sport, v) bfin_write16(((sport)->port.membase + OFFSET_RCLKDIV), v) 55#define SPORT_PUT_RCLKDIV(sport, v) bfin_write16(((sport)->port.membase + OFFSET_RCLKDIV), v)
62#define SPORT_PUT_RFSDIV(sport, v) bfin_write16(((sport)->port.membase + OFFSET_RFSDIV), v) 56#define SPORT_PUT_RFSDIV(sport, v) bfin_write16(((sport)->port.membase + OFFSET_RFSDIV), v)
63#define SPORT_PUT_STAT(sport, v) bfin_write16(((sport)->port.membase + OFFSET_STAT), v) 57#define SPORT_PUT_STAT(sport, v) bfin_write16(((sport)->port.membase + OFFSET_STAT), v)
58
59#define SPORT_TX_FIFO_SIZE 8
60
61#endif /* _BFIN_SPORT_UART_H */
diff --git a/drivers/serial/icom.c b/drivers/serial/icom.c
index 0028b6f89ce6..53a468227056 100644
--- a/drivers/serial/icom.c
+++ b/drivers/serial/icom.c
@@ -751,7 +751,6 @@ static void recv_interrupt(u16 port_int_reg, struct icom_port *icom_port)
751 trace(icom_port, "FID_STATUS", status); 751 trace(icom_port, "FID_STATUS", status);
752 count = cpu_to_le16(icom_port->statStg->rcv[rcv_buff].leLength); 752 count = cpu_to_le16(icom_port->statStg->rcv[rcv_buff].leLength);
753 753
754 count = tty_buffer_request_room(tty, count);
755 trace(icom_port, "RCV_COUNT", count); 754 trace(icom_port, "RCV_COUNT", count);
756 755
757 trace(icom_port, "REAL_COUNT", count); 756 trace(icom_port, "REAL_COUNT", count);
@@ -1654,4 +1653,6 @@ MODULE_DESCRIPTION("IBM iSeries Serial IOA driver");
1654MODULE_SUPPORTED_DEVICE 1653MODULE_SUPPORTED_DEVICE
1655 ("IBM iSeries 2745, 2771, 2772, 2742, 2793 and 2805 Communications adapters"); 1654 ("IBM iSeries 2745, 2771, 2772, 2742, 2793 and 2805 Communications adapters");
1656MODULE_LICENSE("GPL"); 1655MODULE_LICENSE("GPL");
1657 1656MODULE_FIRMWARE("icom_call_setup.bin");
1657MODULE_FIRMWARE("icom_res_dce.bin");
1658MODULE_FIRMWARE("icom_asc.bin");
diff --git a/drivers/serial/imx.c b/drivers/serial/imx.c
index 60d665a17a88..d00fcf8e6c70 100644
--- a/drivers/serial/imx.c
+++ b/drivers/serial/imx.c
@@ -1279,7 +1279,7 @@ static int serial_imx_probe(struct platform_device *pdev)
1279 sport->use_irda = 1; 1279 sport->use_irda = 1;
1280#endif 1280#endif
1281 1281
1282 if (pdata->init) { 1282 if (pdata && pdata->init) {
1283 ret = pdata->init(pdev); 1283 ret = pdata->init(pdev);
1284 if (ret) 1284 if (ret)
1285 goto clkput; 1285 goto clkput;
@@ -1292,7 +1292,7 @@ static int serial_imx_probe(struct platform_device *pdev)
1292 1292
1293 return 0; 1293 return 0;
1294deinit: 1294deinit:
1295 if (pdata->exit) 1295 if (pdata && pdata->exit)
1296 pdata->exit(pdev); 1296 pdata->exit(pdev);
1297clkput: 1297clkput:
1298 clk_put(sport->clk); 1298 clk_put(sport->clk);
@@ -1321,7 +1321,7 @@ static int serial_imx_remove(struct platform_device *pdev)
1321 1321
1322 clk_disable(sport->clk); 1322 clk_disable(sport->clk);
1323 1323
1324 if (pdata->exit) 1324 if (pdata && pdata->exit)
1325 pdata->exit(pdev); 1325 pdata->exit(pdev);
1326 1326
1327 iounmap(sport->port.membase); 1327 iounmap(sport->port.membase);
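
The imx fix guards every platform-data dereference because a board may register the device with no pdata at all, and the init/exit hooks are optional even when it exists. A small model of the null-guarded optional-hook pattern (types and names invented):

#include <stdio.h>

/* Models the imx fix above: platform data and its init/exit hooks are
 * all optional, so every use is guarded. Types here are stand-ins. */
struct pdata {
	int (*init)(void);
	void (*exit)(void);
};

static int probe(const struct pdata *pdata)
{
	if (pdata && pdata->init) {	/* a NULL pdata must not oops */
		int ret = pdata->init();
		if (ret)
			return ret;
	}
	printf("probed\n");
	return 0;
}

int main(void)
{
	probe(NULL);			/* board registered no pdata */
	struct pdata p = { NULL, NULL };
	probe(&p);			/* pdata present, hooks absent */
	return 0;
}
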
diff --git a/drivers/serial/ioc3_serial.c b/drivers/serial/ioc3_serial.c
index 85dc0410ac1a..23ba6b40b3ac 100644
--- a/drivers/serial/ioc3_serial.c
+++ b/drivers/serial/ioc3_serial.c
@@ -1411,8 +1411,7 @@ static int receive_chars(struct uart_port *the_port)
1411 read_count = do_read(the_port, ch, MAX_CHARS); 1411 read_count = do_read(the_port, ch, MAX_CHARS);
1412 if (read_count > 0) { 1412 if (read_count > 0) {
1413 flip = 1; 1413 flip = 1;
1414 read_room = tty_buffer_request_room(tty, read_count); 1414 read_room = tty_insert_flip_string(tty, ch, read_count);
1415 tty_insert_flip_string(tty, ch, read_room);
1416 the_port->icount.rx += read_count; 1415 the_port->icount.rx += read_count;
1417 } 1416 }
1418 spin_unlock_irqrestore(&the_port->lock, pflags); 1417 spin_unlock_irqrestore(&the_port->lock, pflags);
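
The ioc3 change drops the separate room request because tty_insert_flip_string() already returns the number of bytes it actually copied. A stand-alone model of an insert primitive that reports the accepted count, which makes a prior "request room" call redundant (the fixed buffer stands in for the flip buffer):

#include <stdio.h>
#include <string.h>

/* An insert primitive that returns how many bytes were accepted; the
 * caller no longer needs to ask for room first. */
static char buf[8];
static size_t used;

static size_t insert_string(const char *s, size_t count)
{
	size_t room = sizeof(buf) - used;
	size_t n = count < room ? count : room;

	memcpy(buf + used, s, n);
	used += n;
	return n;		/* caller learns how much really fit */
}

int main(void)
{
	size_t n = insert_string("0123456789", 10);
	printf("accepted %zu of 10 bytes\n", n);	/* accepted 8 */
	return 0;
}
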
diff --git a/drivers/serial/jsm/jsm_driver.c b/drivers/serial/jsm/jsm_driver.c
index 108c3e0471fd..12cb5e446a4f 100644
--- a/drivers/serial/jsm/jsm_driver.c
+++ b/drivers/serial/jsm/jsm_driver.c
@@ -179,6 +179,7 @@ static int __devinit jsm_probe_one(struct pci_dev *pdev, const struct pci_device
179 179
180 return 0; 180 return 0;
181 out_free_irq: 181 out_free_irq:
182 jsm_remove_uart_port(brd);
182 free_irq(brd->irq, brd); 183 free_irq(brd->irq, brd);
183 out_iounmap: 184 out_iounmap:
184 iounmap(brd->re_map_membase); 185 iounmap(brd->re_map_membase);
diff --git a/drivers/serial/jsm/jsm_tty.c b/drivers/serial/jsm/jsm_tty.c
index cd95e215550d..5673ca9dfdc8 100644
--- a/drivers/serial/jsm/jsm_tty.c
+++ b/drivers/serial/jsm/jsm_tty.c
@@ -432,7 +432,7 @@ int __devinit jsm_tty_init(struct jsm_board *brd)
432 432
433int jsm_uart_port_init(struct jsm_board *brd) 433int jsm_uart_port_init(struct jsm_board *brd)
434{ 434{
435 int i; 435 int i, rc;
436 unsigned int line; 436 unsigned int line;
437 struct jsm_channel *ch; 437 struct jsm_channel *ch;
438 438
@@ -467,8 +467,11 @@ int jsm_uart_port_init(struct jsm_board *brd)
467 } else 467 } else
468 set_bit(line, linemap); 468 set_bit(line, linemap);
469 brd->channels[i]->uart_port.line = line; 469 brd->channels[i]->uart_port.line = line;
470 if (uart_add_one_port (&jsm_uart_driver, &brd->channels[i]->uart_port)) 470 rc = uart_add_one_port (&jsm_uart_driver, &brd->channels[i]->uart_port);
471 printk(KERN_INFO "jsm: add device failed\n"); 471 if (rc) {
472 printk(KERN_INFO "jsm: Port %d failed. Aborting...\n", i);
473 return rc;
474 }
472 else 475 else
473 printk(KERN_INFO "jsm: Port %d added\n", i); 476 printk(KERN_INFO "jsm: Port %d added\n", i);
474 } 477 }
diff --git a/drivers/serial/msm_serial.c b/drivers/serial/msm_serial.c
index b05c5aa02cb4..ecdc0facf7ee 100644
--- a/drivers/serial/msm_serial.c
+++ b/drivers/serial/msm_serial.c
@@ -691,6 +691,7 @@ static int __init msm_serial_probe(struct platform_device *pdev)
691 struct msm_port *msm_port; 691 struct msm_port *msm_port;
692 struct resource *resource; 692 struct resource *resource;
693 struct uart_port *port; 693 struct uart_port *port;
694 int irq;
694 695
695 if (unlikely(pdev->id < 0 || pdev->id >= UART_NR)) 696 if (unlikely(pdev->id < 0 || pdev->id >= UART_NR))
696 return -ENXIO; 697 return -ENXIO;
@@ -711,9 +712,10 @@ static int __init msm_serial_probe(struct platform_device *pdev)
711 return -ENXIO; 712 return -ENXIO;
712 port->mapbase = resource->start; 713 port->mapbase = resource->start;
713 714
714 port->irq = platform_get_irq(pdev, 0); 715 irq = platform_get_irq(pdev, 0);
715 if (unlikely(port->irq < 0)) 716 if (unlikely(irq < 0))
716 return -ENXIO; 717 return -ENXIO;
718 port->irq = irq;
717 719
718 platform_set_drvdata(pdev, port); 720 platform_set_drvdata(pdev, port);
719 721
diff --git a/drivers/serial/timbuart.c b/drivers/serial/timbuart.c
index 34b31da01d09..7bf10264a6ac 100644
--- a/drivers/serial/timbuart.c
+++ b/drivers/serial/timbuart.c
@@ -421,7 +421,7 @@ static struct uart_driver timbuart_driver = {
421 421
422static int timbuart_probe(struct platform_device *dev) 422static int timbuart_probe(struct platform_device *dev)
423{ 423{
424 int err; 424 int err, irq;
425 struct timbuart_port *uart; 425 struct timbuart_port *uart;
426 struct resource *iomem; 426 struct resource *iomem;
427 427
@@ -453,11 +453,12 @@ static int timbuart_probe(struct platform_device *dev)
453 uart->port.mapbase = iomem->start; 453 uart->port.mapbase = iomem->start;
454 uart->port.membase = NULL; 454 uart->port.membase = NULL;
455 455
456 uart->port.irq = platform_get_irq(dev, 0); 456 irq = platform_get_irq(dev, 0);
457 if (uart->port.irq < 0) { 457 if (irq < 0) {
458 err = -EINVAL; 458 err = -EINVAL;
459 goto err_register; 459 goto err_register;
460 } 460 }
461 uart->port.irq = irq;
461 462
462 tasklet_init(&uart->tasklet, timbuart_tasklet, (unsigned long)uart); 463 tasklet_init(&uart->tasklet, timbuart_tasklet, (unsigned long)uart);
463 464
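
The msm and timbuart hunks fix the same pitfall: uart_port.irq is unsigned, so storing platform_get_irq()'s return value there before checking it makes the error test "irq < 0" always false. A short demonstration (the -6 stands in for -ENXIO):

#include <stdio.h>

/* Demonstrates the bug both hunks above fix: assigning a negative
 * error to an unsigned field before the check silently loses the
 * sign, so the unsigned comparison below never fires (compilers
 * typically warn that it is always false). */
int main(void)
{
	unsigned int port_irq;	/* like struct uart_port.irq */
	int ret = -6;		/* pretend platform_get_irq() failed */

	port_irq = ret;
	if (port_irq < 0)	/* never true: unsigned comparison */
		printf("error caught\n");
	else
		printf("bogus irq %u used!\n", port_irq);

	if (ret < 0)		/* check the signed value instead */
		printf("error caught with a signed temporary\n");
	return 0;
}
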
diff --git a/drivers/staging/usbip/vhci_sysfs.c b/drivers/staging/usbip/vhci_sysfs.c
index d8992d10d555..f6e34e03c8e4 100644
--- a/drivers/staging/usbip/vhci_sysfs.c
+++ b/drivers/staging/usbip/vhci_sysfs.c
@@ -144,7 +144,7 @@ static int valid_args(__u32 rhport, enum usb_device_speed speed)
144 case USB_SPEED_LOW: 144 case USB_SPEED_LOW:
145 case USB_SPEED_FULL: 145 case USB_SPEED_FULL:
146 case USB_SPEED_HIGH: 146 case USB_SPEED_HIGH:
147 case USB_SPEED_VARIABLE: 147 case USB_SPEED_WIRELESS:
148 break; 148 break;
149 default: 149 default:
150 usbip_uerr("speed %d\n", speed); 150 usbip_uerr("speed %d\n", speed);
diff --git a/drivers/usb/Kconfig b/drivers/usb/Kconfig
index 4f5bb5698f5d..6a58cb1330c1 100644
--- a/drivers/usb/Kconfig
+++ b/drivers/usb/Kconfig
@@ -21,6 +21,7 @@ config USB_ARCH_HAS_HCD
21 default y if USB_ARCH_HAS_EHCI 21 default y if USB_ARCH_HAS_EHCI
22 default y if PCMCIA && !M32R # sl811_cs 22 default y if PCMCIA && !M32R # sl811_cs
23 default y if ARM # SL-811 23 default y if ARM # SL-811
24 default y if BLACKFIN # SL-811
24 default y if SUPERH # r8a66597-hcd 25 default y if SUPERH # r8a66597-hcd
25 default PCI 26 default PCI
26 27
@@ -39,6 +40,7 @@ config USB_ARCH_HAS_OHCI
39 default y if ARCH_PNX4008 && I2C 40 default y if ARCH_PNX4008 && I2C
40 default y if MFD_TC6393XB 41 default y if MFD_TC6393XB
41 default y if ARCH_W90X900 42 default y if ARCH_W90X900
43 default y if ARCH_DAVINCI_DA8XX
42 # PPC: 44 # PPC:
43 default y if STB03xxx 45 default y if STB03xxx
44 default y if PPC_MPC52xx 46 default y if PPC_MPC52xx
diff --git a/drivers/usb/Makefile b/drivers/usb/Makefile
index be3c9b80bc9f..80b4008c89ba 100644
--- a/drivers/usb/Makefile
+++ b/drivers/usb/Makefile
@@ -21,6 +21,7 @@ obj-$(CONFIG_USB_U132_HCD) += host/
21obj-$(CONFIG_USB_R8A66597_HCD) += host/ 21obj-$(CONFIG_USB_R8A66597_HCD) += host/
22obj-$(CONFIG_USB_HWA_HCD) += host/ 22obj-$(CONFIG_USB_HWA_HCD) += host/
23obj-$(CONFIG_USB_ISP1760_HCD) += host/ 23obj-$(CONFIG_USB_ISP1760_HCD) += host/
24obj-$(CONFIG_USB_IMX21_HCD) += host/
24 25
25obj-$(CONFIG_USB_C67X00_HCD) += c67x00/ 26obj-$(CONFIG_USB_C67X00_HCD) += c67x00/
26 27
diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
index 56802d2e994b..c89990f5e018 100644
--- a/drivers/usb/atm/cxacru.c
+++ b/drivers/usb/atm/cxacru.c
@@ -5,6 +5,7 @@
5 * Copyright (C) 2004 David Woodhouse, Duncan Sands, Roman Kagan 5 * Copyright (C) 2004 David Woodhouse, Duncan Sands, Roman Kagan
6 * Copyright (C) 2005 Duncan Sands, Roman Kagan (rkagan % mail ! ru) 6 * Copyright (C) 2005 Duncan Sands, Roman Kagan (rkagan % mail ! ru)
7 * Copyright (C) 2007 Simon Arlott 7 * Copyright (C) 2007 Simon Arlott
8 * Copyright (C) 2009 Simon Arlott
8 * 9 *
9 * This program is free software; you can redistribute it and/or modify it 10 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License as published by the Free 11 * under the terms of the GNU General Public License as published by the Free
@@ -43,7 +44,7 @@
43#include "usbatm.h" 44#include "usbatm.h"
44 45
45#define DRIVER_AUTHOR "Roman Kagan, David Woodhouse, Duncan Sands, Simon Arlott" 46#define DRIVER_AUTHOR "Roman Kagan, David Woodhouse, Duncan Sands, Simon Arlott"
46#define DRIVER_VERSION "0.3" 47#define DRIVER_VERSION "0.4"
47#define DRIVER_DESC "Conexant AccessRunner ADSL USB modem driver" 48#define DRIVER_DESC "Conexant AccessRunner ADSL USB modem driver"
48 49
49static const char cxacru_driver_name[] = "cxacru"; 50static const char cxacru_driver_name[] = "cxacru";
@@ -52,6 +53,7 @@ static const char cxacru_driver_name[] = "cxacru";
52#define CXACRU_EP_DATA 0x02 /* Bulk in/out */ 53#define CXACRU_EP_DATA 0x02 /* Bulk in/out */
53 54
54#define CMD_PACKET_SIZE 64 /* Should be maxpacket(ep)? */ 55#define CMD_PACKET_SIZE 64 /* Should be maxpacket(ep)? */
56#define CMD_MAX_CONFIG ((CMD_PACKET_SIZE / 4 - 1) / 2)
55 57
56/* Addresses */ 58/* Addresses */
57#define PLLFCLK_ADDR 0x00350068 59#define PLLFCLK_ADDR 0x00350068
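
For reference, the arithmetic behind the new CMD_MAX_CONFIG bound (this
derivation is not part of the patch): a 64-byte command packet holds 16 le32
words, the first word carries the entry count, and each configuration entry is
an index/value pair of two words, so at most seven entries fit per packet:

	/* sketch: (CMD_PACKET_SIZE / 4 - 1) / 2  ==  (16 - 1) / 2  ==  7 */
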
@@ -105,6 +107,26 @@ enum cxacru_cm_request {
105 CM_REQUEST_MAX, 107 CM_REQUEST_MAX,
106}; 108};
107 109
110/* commands for interaction with the flash memory
111 *
112 * read: response is the contents of the first 60 bytes of flash memory
113 * write: request contains the 60 bytes of data to write to flash memory
114 * response is the contents of the first 60 bytes of flash memory
115 *
116 * layout: PP PP VV VV MM MM MM MM MM MM ?? ?? SS SS SS SS SS SS SS SS
117 * SS SS SS SS SS SS SS SS 00 00 00 00 00 00 00 00 00 00 00 00
118 * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
119 *
120 * P: le16 USB Product ID
121 * V: le16 USB Vendor ID
122 * M: be48 MAC Address
123 * S: le16 ASCII Serial Number
124 */
125enum cxacru_cm_flash {
126 CM_FLASH_READ = 0xa1,
127 CM_FLASH_WRITE = 0xa2
128};
129
108/* reply codes to the commands above */ 130/* reply codes to the commands above */
109enum cxacru_cm_status { 131enum cxacru_cm_status {
110 CM_STATUS_UNDEFINED, 132 CM_STATUS_UNDEFINED,
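
Read as a C structure, the 60-byte flash block documented above would look
roughly like this; the struct and field names are invented for illustration,
only the offsets and widths come from the layout comment:

	struct cxacru_flash_layout {	/* hypothetical, 60 bytes */
		__le16	usb_product_id;	/* PP PP */
		__le16	usb_vendor_id;	/* VV VV */
		u8	mac_addr[6];	/* MM .. MM, big-endian */
		u8	unknown[2];	/* ?? ?? */
		u8	serial[16];	/* SS .. SS, ASCII */
		u8	reserved[32];	/* zero-filled */
	} __packed;
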
@@ -196,23 +218,32 @@ static DEVICE_ATTR(_name, S_IRUGO, cxacru_sysfs_show_##_name, NULL)
196static DEVICE_ATTR(_name, S_IWUSR | S_IRUGO, \ 218static DEVICE_ATTR(_name, S_IWUSR | S_IRUGO, \
197 cxacru_sysfs_show_##_name, cxacru_sysfs_store_##_name) 219 cxacru_sysfs_show_##_name, cxacru_sysfs_store_##_name)
198 220
221#define CXACRU_SET_INIT(_name) \
222static DEVICE_ATTR(_name, S_IWUSR, \
223 NULL, cxacru_sysfs_store_##_name)
224
199#define CXACRU_ATTR_INIT(_value, _type, _name) \ 225#define CXACRU_ATTR_INIT(_value, _type, _name) \
200static ssize_t cxacru_sysfs_show_##_name(struct device *dev, \ 226static ssize_t cxacru_sysfs_show_##_name(struct device *dev, \
201 struct device_attribute *attr, char *buf) \ 227 struct device_attribute *attr, char *buf) \
202{ \ 228{ \
203 struct usb_interface *intf = to_usb_interface(dev); \ 229 struct cxacru_data *instance = to_usbatm_driver_data(\
204 struct usbatm_data *usbatm_instance = usb_get_intfdata(intf); \ 230 to_usb_interface(dev)); \
205 struct cxacru_data *instance = usbatm_instance->driver_data; \ 231\
232 if (instance == NULL) \
233 return -ENODEV; \
234\
206 return cxacru_sysfs_showattr_##_type(instance->card_info[_value], buf); \ 235 return cxacru_sysfs_showattr_##_type(instance->card_info[_value], buf); \
207} \ 236} \
208CXACRU__ATTR_INIT(_name) 237CXACRU__ATTR_INIT(_name)
209 238
210#define CXACRU_ATTR_CREATE(_v, _t, _name) CXACRU_DEVICE_CREATE_FILE(_name) 239#define CXACRU_ATTR_CREATE(_v, _t, _name) CXACRU_DEVICE_CREATE_FILE(_name)
211#define CXACRU_CMD_CREATE(_name) CXACRU_DEVICE_CREATE_FILE(_name) 240#define CXACRU_CMD_CREATE(_name) CXACRU_DEVICE_CREATE_FILE(_name)
241#define CXACRU_SET_CREATE(_name) CXACRU_DEVICE_CREATE_FILE(_name)
212#define CXACRU__ATTR_CREATE(_name) CXACRU_DEVICE_CREATE_FILE(_name) 242#define CXACRU__ATTR_CREATE(_name) CXACRU_DEVICE_CREATE_FILE(_name)
213 243
214#define CXACRU_ATTR_REMOVE(_v, _t, _name) CXACRU_DEVICE_REMOVE_FILE(_name) 244#define CXACRU_ATTR_REMOVE(_v, _t, _name) CXACRU_DEVICE_REMOVE_FILE(_name)
215#define CXACRU_CMD_REMOVE(_name) CXACRU_DEVICE_REMOVE_FILE(_name) 245#define CXACRU_CMD_REMOVE(_name) CXACRU_DEVICE_REMOVE_FILE(_name)
246#define CXACRU_SET_REMOVE(_name) CXACRU_DEVICE_REMOVE_FILE(_name)
216#define CXACRU__ATTR_REMOVE(_name) CXACRU_DEVICE_REMOVE_FILE(_name) 247#define CXACRU__ATTR_REMOVE(_name) CXACRU_DEVICE_REMOVE_FILE(_name)
217 248
218static ssize_t cxacru_sysfs_showattr_u32(u32 value, char *buf) 249static ssize_t cxacru_sysfs_showattr_u32(u32 value, char *buf)
@@ -267,12 +298,12 @@ static ssize_t cxacru_sysfs_showattr_LINE(u32 value, char *buf)
267static ssize_t cxacru_sysfs_showattr_MODU(u32 value, char *buf) 298static ssize_t cxacru_sysfs_showattr_MODU(u32 value, char *buf)
268{ 299{
269 static char *str[] = { 300 static char *str[] = {
270 NULL, 301 "",
271 "ANSI T1.413", 302 "ANSI T1.413",
272 "ITU-T G.992.1 (G.DMT)", 303 "ITU-T G.992.1 (G.DMT)",
273 "ITU-T G.992.2 (G.LITE)" 304 "ITU-T G.992.2 (G.LITE)"
274 }; 305 };
275 if (unlikely(value >= ARRAY_SIZE(str) || str[value] == NULL)) 306 if (unlikely(value >= ARRAY_SIZE(str)))
276 return snprintf(buf, PAGE_SIZE, "%u\n", value); 307 return snprintf(buf, PAGE_SIZE, "%u\n", value);
277 return snprintf(buf, PAGE_SIZE, "%s\n", str[value]); 308 return snprintf(buf, PAGE_SIZE, "%s\n", str[value]);
278} 309}
@@ -288,22 +319,28 @@ static ssize_t cxacru_sysfs_showattr_MODU(u32 value, char *buf)
288static ssize_t cxacru_sysfs_show_mac_address(struct device *dev, 319static ssize_t cxacru_sysfs_show_mac_address(struct device *dev,
289 struct device_attribute *attr, char *buf) 320 struct device_attribute *attr, char *buf)
290{ 321{
291 struct usb_interface *intf = to_usb_interface(dev); 322 struct cxacru_data *instance = to_usbatm_driver_data(
292 struct usbatm_data *usbatm_instance = usb_get_intfdata(intf); 323 to_usb_interface(dev));
293 struct atm_dev *atm_dev = usbatm_instance->atm_dev;
294 324
295 return snprintf(buf, PAGE_SIZE, "%pM\n", atm_dev->esi); 325 if (instance == NULL || instance->usbatm->atm_dev == NULL)
326 return -ENODEV;
327
328 return snprintf(buf, PAGE_SIZE, "%pM\n",
329 instance->usbatm->atm_dev->esi);
296} 330}
297 331
298static ssize_t cxacru_sysfs_show_adsl_state(struct device *dev, 332static ssize_t cxacru_sysfs_show_adsl_state(struct device *dev,
299 struct device_attribute *attr, char *buf) 333 struct device_attribute *attr, char *buf)
300{ 334{
301 struct usb_interface *intf = to_usb_interface(dev);
302 struct usbatm_data *usbatm_instance = usb_get_intfdata(intf);
303 struct cxacru_data *instance = usbatm_instance->driver_data;
304 u32 value = instance->card_info[CXINF_LINE_STARTABLE];
305
306 static char *str[] = { "running", "stopped" }; 335 static char *str[] = { "running", "stopped" };
336 struct cxacru_data *instance = to_usbatm_driver_data(
337 to_usb_interface(dev));
338 u32 value;
339
340 if (instance == NULL)
341 return -ENODEV;
342
343 value = instance->card_info[CXINF_LINE_STARTABLE];
307 if (unlikely(value >= ARRAY_SIZE(str))) 344 if (unlikely(value >= ARRAY_SIZE(str)))
308 return snprintf(buf, PAGE_SIZE, "%u\n", value); 345 return snprintf(buf, PAGE_SIZE, "%u\n", value);
309 return snprintf(buf, PAGE_SIZE, "%s\n", str[value]); 346 return snprintf(buf, PAGE_SIZE, "%s\n", str[value]);
@@ -312,9 +349,8 @@ static ssize_t cxacru_sysfs_show_adsl_state(struct device *dev,
312static ssize_t cxacru_sysfs_store_adsl_state(struct device *dev, 349static ssize_t cxacru_sysfs_store_adsl_state(struct device *dev,
313 struct device_attribute *attr, const char *buf, size_t count) 350 struct device_attribute *attr, const char *buf, size_t count)
314{ 351{
315 struct usb_interface *intf = to_usb_interface(dev); 352 struct cxacru_data *instance = to_usbatm_driver_data(
316 struct usbatm_data *usbatm_instance = usb_get_intfdata(intf); 353 to_usb_interface(dev));
317 struct cxacru_data *instance = usbatm_instance->driver_data;
318 int ret; 354 int ret;
319 int poll = -1; 355 int poll = -1;
320 char str_cmd[8]; 356 char str_cmd[8];
@@ -328,13 +364,16 @@ static ssize_t cxacru_sysfs_store_adsl_state(struct device *dev,
328 return -EINVAL; 364 return -EINVAL;
329 ret = 0; 365 ret = 0;
330 366
367 if (instance == NULL)
368 return -ENODEV;
369
331 if (mutex_lock_interruptible(&instance->adsl_state_serialize)) 370 if (mutex_lock_interruptible(&instance->adsl_state_serialize))
332 return -ERESTARTSYS; 371 return -ERESTARTSYS;
333 372
334 if (!strcmp(str_cmd, "stop") || !strcmp(str_cmd, "restart")) { 373 if (!strcmp(str_cmd, "stop") || !strcmp(str_cmd, "restart")) {
335 ret = cxacru_cm(instance, CM_REQUEST_CHIP_ADSL_LINE_STOP, NULL, 0, NULL, 0); 374 ret = cxacru_cm(instance, CM_REQUEST_CHIP_ADSL_LINE_STOP, NULL, 0, NULL, 0);
336 if (ret < 0) { 375 if (ret < 0) {
337 atm_err(usbatm_instance, "change adsl state:" 376 atm_err(instance->usbatm, "change adsl state:"
338 " CHIP_ADSL_LINE_STOP returned %d\n", ret); 377 " CHIP_ADSL_LINE_STOP returned %d\n", ret);
339 378
340 ret = -EIO; 379 ret = -EIO;
@@ -354,7 +393,7 @@ static ssize_t cxacru_sysfs_store_adsl_state(struct device *dev,
354 if (!strcmp(str_cmd, "start") || !strcmp(str_cmd, "restart")) { 393 if (!strcmp(str_cmd, "start") || !strcmp(str_cmd, "restart")) {
355 ret = cxacru_cm(instance, CM_REQUEST_CHIP_ADSL_LINE_START, NULL, 0, NULL, 0); 394 ret = cxacru_cm(instance, CM_REQUEST_CHIP_ADSL_LINE_START, NULL, 0, NULL, 0);
356 if (ret < 0) { 395 if (ret < 0) {
357 atm_err(usbatm_instance, "change adsl state:" 396 atm_err(instance->usbatm, "change adsl state:"
358 " CHIP_ADSL_LINE_START returned %d\n", ret); 397 " CHIP_ADSL_LINE_START returned %d\n", ret);
359 398
360 ret = -EIO; 399 ret = -EIO;
@@ -407,6 +446,72 @@ static ssize_t cxacru_sysfs_store_adsl_state(struct device *dev,
407 return ret; 446 return ret;
408} 447}
409 448
449/* CM_REQUEST_CARD_DATA_GET times out, so no show attribute */
450
451static ssize_t cxacru_sysfs_store_adsl_config(struct device *dev,
452 struct device_attribute *attr, const char *buf, size_t count)
453{
454 struct cxacru_data *instance = to_usbatm_driver_data(
455 to_usb_interface(dev));
456 int len = strlen(buf);
457 int ret, pos, num;
458 __le32 data[CMD_PACKET_SIZE / 4];
459
460 if (!capable(CAP_NET_ADMIN))
461 return -EACCES;
462
463 if (instance == NULL)
464 return -ENODEV;
465
466 pos = 0;
467 num = 0;
468 while (pos < len) {
469 int tmp;
470 u32 index;
471 u32 value;
472
473 ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp);
474 if (ret < 2)
475 return -EINVAL;
 476 if (index > 0x7f) /* index is u32, so it cannot be negative */
477 return -EINVAL;
478 pos += tmp;
479
480 /* skip trailing newline */
481 if (buf[pos] == '\n' && pos == len-1)
482 pos++;
483
484 data[num * 2 + 1] = cpu_to_le32(index);
485 data[num * 2 + 2] = cpu_to_le32(value);
486 num++;
487
 488 /* send the buffered config values when the data
 489 * buffer is full or the input is exhausted
 490 */
491 if (pos >= len || num >= CMD_MAX_CONFIG) {
492 char log[CMD_MAX_CONFIG * 12 + 1]; /* %02x=%08x */
493
494 data[0] = cpu_to_le32(num);
495 ret = cxacru_cm(instance, CM_REQUEST_CARD_DATA_SET,
496 (u8 *) data, 4 + num * 8, NULL, 0);
497 if (ret < 0) {
498 atm_err(instance->usbatm,
499 "set card data returned %d\n", ret);
500 return -EIO;
501 }
502
503 for (tmp = 0; tmp < num; tmp++)
504 snprintf(log + tmp*12, 13, " %02x=%08x",
505 le32_to_cpu(data[tmp * 2 + 1]),
506 le32_to_cpu(data[tmp * 2 + 2]));
507 atm_info(instance->usbatm, "config%s\n", log);
508 num = 0;
509 }
510 }
511
512 return len;
513}
514
410/* 515/*
411 * All device attributes are included in CXACRU_ALL_FILES 516 * All device attributes are included in CXACRU_ALL_FILES
412 * so that the same list can be used multiple times: 517 * so that the same list can be used multiple times:
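
Since CM_REQUEST_CARD_DATA_GET times out, the new adsl_config attribute added
in the hunk above is write-only. A minimal userspace sketch of feeding it one
index=value pair follows; the sysfs path is illustrative, as the attribute
sits on the USB interface device reachable through the atm class device:

	#include <stdio.h>

	int main(void)
	{
		/* example path only; depends on the USB topology */
		FILE *f = fopen("/sys/class/atm/cxacru0/device/adsl_config", "w");

		if (!f)
			return 1;
		fprintf(f, "7a=1\n");	/* hex index (00-7f), '=', hex value */
		return fclose(f) != 0;
	}
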
@@ -442,7 +547,8 @@ CXACRU_ATTR_##_action(CXINF_MODULATION, MODU, modulation); \
442CXACRU_ATTR_##_action(CXINF_ADSL_HEADEND, u32, adsl_headend); \ 547CXACRU_ATTR_##_action(CXINF_ADSL_HEADEND, u32, adsl_headend); \
443CXACRU_ATTR_##_action(CXINF_ADSL_HEADEND_ENVIRONMENT, u32, adsl_headend_environment); \ 548CXACRU_ATTR_##_action(CXINF_ADSL_HEADEND_ENVIRONMENT, u32, adsl_headend_environment); \
444CXACRU_ATTR_##_action(CXINF_CONTROLLER_VERSION, u32, adsl_controller_version); \ 549CXACRU_ATTR_##_action(CXINF_CONTROLLER_VERSION, u32, adsl_controller_version); \
445CXACRU_CMD_##_action( adsl_state); 550CXACRU_CMD_##_action( adsl_state); \
551CXACRU_SET_##_action( adsl_config);
446 552
447CXACRU_ALL_FILES(INIT); 553CXACRU_ALL_FILES(INIT);
448 554
@@ -596,7 +702,7 @@ static int cxacru_cm_get_array(struct cxacru_data *instance, enum cxacru_cm_requ
596 len = ret / 4; 702 len = ret / 4;
597 for (offb = 0; offb < len; ) { 703 for (offb = 0; offb < len; ) {
598 int l = le32_to_cpu(buf[offb++]); 704 int l = le32_to_cpu(buf[offb++]);
599 if (l > stride || l > (len - offb) / 2) { 705 if (l < 0 || l > stride || l > (len - offb) / 2) {
600 if (printk_ratelimit()) 706 if (printk_ratelimit())
601 usb_err(instance->usbatm, "invalid data length from cm %#x: %d\n", 707 usb_err(instance->usbatm, "invalid data length from cm %#x: %d\n",
602 cm, l); 708 cm, l);
@@ -649,9 +755,6 @@ static int cxacru_atm_start(struct usbatm_data *usbatm_instance,
649{ 755{
650 struct cxacru_data *instance = usbatm_instance->driver_data; 756 struct cxacru_data *instance = usbatm_instance->driver_data;
651 struct usb_interface *intf = usbatm_instance->usb_intf; 757 struct usb_interface *intf = usbatm_instance->usb_intf;
652 /*
653 struct atm_dev *atm_dev = usbatm_instance->atm_dev;
654 */
655 int ret; 758 int ret;
656 int start_polling = 1; 759 int start_polling = 1;
657 760
@@ -697,6 +800,9 @@ static int cxacru_atm_start(struct usbatm_data *usbatm_instance,
697 mutex_unlock(&instance->poll_state_serialize); 800 mutex_unlock(&instance->poll_state_serialize);
698 mutex_unlock(&instance->adsl_state_serialize); 801 mutex_unlock(&instance->adsl_state_serialize);
699 802
803 printk(KERN_INFO "%s%d: %s %pM\n", atm_dev->type, atm_dev->number,
804 usbatm_instance->description, atm_dev->esi);
805
700 if (start_polling) 806 if (start_polling)
701 cxacru_poll_status(&instance->poll_work.work); 807 cxacru_poll_status(&instance->poll_work.work);
702 return 0; 808 return 0;
@@ -873,11 +979,9 @@ cleanup:
873 979
874static void cxacru_upload_firmware(struct cxacru_data *instance, 980static void cxacru_upload_firmware(struct cxacru_data *instance,
875 const struct firmware *fw, 981 const struct firmware *fw,
876 const struct firmware *bp, 982 const struct firmware *bp)
877 const struct firmware *cf)
878{ 983{
879 int ret; 984 int ret;
880 int off;
881 struct usbatm_data *usbatm = instance->usbatm; 985 struct usbatm_data *usbatm = instance->usbatm;
882 struct usb_device *usb_dev = usbatm->usb_dev; 986 struct usb_device *usb_dev = usbatm->usb_dev;
883 __le16 signature[] = { usb_dev->descriptor.idVendor, 987 __le16 signature[] = { usb_dev->descriptor.idVendor,
@@ -911,6 +1015,7 @@ static void cxacru_upload_firmware(struct cxacru_data *instance,
911 } 1015 }
912 1016
913 /* Firmware */ 1017 /* Firmware */
1018 usb_info(usbatm, "loading firmware\n");
914 ret = cxacru_fw(usb_dev, FW_WRITE_MEM, 0x2, 0x0, FW_ADDR, fw->data, fw->size); 1019 ret = cxacru_fw(usb_dev, FW_WRITE_MEM, 0x2, 0x0, FW_ADDR, fw->data, fw->size);
915 if (ret) { 1020 if (ret) {
916 usb_err(usbatm, "Firmware upload failed: %d\n", ret); 1021 usb_err(usbatm, "Firmware upload failed: %d\n", ret);
@@ -919,6 +1024,7 @@ static void cxacru_upload_firmware(struct cxacru_data *instance,
919 1024
920 /* Boot ROM patch */ 1025 /* Boot ROM patch */
921 if (instance->modem_type->boot_rom_patch) { 1026 if (instance->modem_type->boot_rom_patch) {
1027 usb_info(usbatm, "loading boot ROM patch\n");
922 ret = cxacru_fw(usb_dev, FW_WRITE_MEM, 0x2, 0x0, BR_ADDR, bp->data, bp->size); 1028 ret = cxacru_fw(usb_dev, FW_WRITE_MEM, 0x2, 0x0, BR_ADDR, bp->data, bp->size);
923 if (ret) { 1029 if (ret) {
924 usb_err(usbatm, "Boot ROM patching failed: %d\n", ret); 1030 usb_err(usbatm, "Boot ROM patching failed: %d\n", ret);
@@ -933,6 +1039,7 @@ static void cxacru_upload_firmware(struct cxacru_data *instance,
933 return; 1039 return;
934 } 1040 }
935 1041
1042 usb_info(usbatm, "starting device\n");
936 if (instance->modem_type->boot_rom_patch) { 1043 if (instance->modem_type->boot_rom_patch) {
937 val = cpu_to_le32(BR_ADDR); 1044 val = cpu_to_le32(BR_ADDR);
938 ret = cxacru_fw(usb_dev, FW_WRITE_MEM, 0x2, 0x0, BR_STACK_ADDR, (u8 *) &val, 4); 1045 ret = cxacru_fw(usb_dev, FW_WRITE_MEM, 0x2, 0x0, BR_STACK_ADDR, (u8 *) &val, 4);
@@ -958,26 +1065,6 @@ static void cxacru_upload_firmware(struct cxacru_data *instance,
958 usb_err(usbatm, "modem failed to initialize: %d\n", ret); 1065 usb_err(usbatm, "modem failed to initialize: %d\n", ret);
959 return; 1066 return;
960 } 1067 }
961
962 /* Load config data (le32), doing one packet at a time */
963 if (cf)
964 for (off = 0; off < cf->size / 4; ) {
965 __le32 buf[CMD_PACKET_SIZE / 4 - 1];
966 int i, len = min_t(int, cf->size / 4 - off, CMD_PACKET_SIZE / 4 / 2 - 1);
967 buf[0] = cpu_to_le32(len);
968 for (i = 0; i < len; i++, off++) {
969 buf[i * 2 + 1] = cpu_to_le32(off);
970 memcpy(buf + i * 2 + 2, cf->data + off * 4, 4);
971 }
972 ret = cxacru_cm(instance, CM_REQUEST_CARD_DATA_SET,
973 (u8 *) buf, len, NULL, 0);
974 if (ret < 0) {
975 usb_err(usbatm, "load config data failed: %d\n", ret);
976 return;
977 }
978 }
979
980 msleep_interruptible(4000);
981} 1068}
982 1069
983static int cxacru_find_firmware(struct cxacru_data *instance, 1070static int cxacru_find_firmware(struct cxacru_data *instance,
@@ -1003,7 +1090,7 @@ static int cxacru_find_firmware(struct cxacru_data *instance,
1003static int cxacru_heavy_init(struct usbatm_data *usbatm_instance, 1090static int cxacru_heavy_init(struct usbatm_data *usbatm_instance,
1004 struct usb_interface *usb_intf) 1091 struct usb_interface *usb_intf)
1005{ 1092{
1006 const struct firmware *fw, *bp, *cf; 1093 const struct firmware *fw, *bp;
1007 struct cxacru_data *instance = usbatm_instance->driver_data; 1094 struct cxacru_data *instance = usbatm_instance->driver_data;
1008 1095
1009 int ret = cxacru_find_firmware(instance, "fw", &fw); 1096 int ret = cxacru_find_firmware(instance, "fw", &fw);
@@ -1021,13 +1108,8 @@ static int cxacru_heavy_init(struct usbatm_data *usbatm_instance,
1021 } 1108 }
1022 } 1109 }
1023 1110
1024 if (cxacru_find_firmware(instance, "cf", &cf)) /* optional */ 1111 cxacru_upload_firmware(instance, fw, bp);
1025 cf = NULL;
1026
1027 cxacru_upload_firmware(instance, fw, bp, cf);
1028 1112
1029 if (cf)
1030 release_firmware(cf);
1031 if (instance->modem_type->boot_rom_patch) 1113 if (instance->modem_type->boot_rom_patch)
1032 release_firmware(bp); 1114 release_firmware(bp);
1033 release_firmware(fw); 1115 release_firmware(fw);
diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
index fbea8563df1e..9b53e8df4648 100644
--- a/drivers/usb/atm/usbatm.c
+++ b/drivers/usb/atm/usbatm.c
@@ -1333,6 +1333,7 @@ void usbatm_usb_disconnect(struct usb_interface *intf)
1333 if (instance->atm_dev) { 1333 if (instance->atm_dev) {
1334 sysfs_remove_link(&instance->atm_dev->class_dev.kobj, "device"); 1334 sysfs_remove_link(&instance->atm_dev->class_dev.kobj, "device");
1335 atm_dev_deregister(instance->atm_dev); 1335 atm_dev_deregister(instance->atm_dev);
1336 instance->atm_dev = NULL;
1336 } 1337 }
1337 1338
1338 usbatm_put_instance(instance); /* taken in usbatm_usb_probe */ 1339 usbatm_put_instance(instance); /* taken in usbatm_usb_probe */
@@ -1348,7 +1349,7 @@ static int __init usbatm_usb_init(void)
1348{ 1349{
1349 dbg("%s: driver version %s", __func__, DRIVER_VERSION); 1350 dbg("%s: driver version %s", __func__, DRIVER_VERSION);
1350 1351
1351 if (sizeof(struct usbatm_control) > sizeof(((struct sk_buff *) 0)->cb)) { 1352 if (sizeof(struct usbatm_control) > FIELD_SIZEOF(struct sk_buff, cb)) {
1352 printk(KERN_ERR "%s unusable with this kernel!\n", usbatm_driver_name); 1353 printk(KERN_ERR "%s unusable with this kernel!\n", usbatm_driver_name);
1353 return -EIO; 1354 return -EIO;
1354 } 1355 }
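
The FIELD_SIZEOF() conversion is purely cosmetic: the macro, from
<linux/kernel.h>, expands to the same expression the old code spelled out by
hand, so no behaviour changes:

	#define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f))
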
diff --git a/drivers/usb/atm/usbatm.h b/drivers/usb/atm/usbatm.h
index f6f4508a9d42..0863f85fcc26 100644
--- a/drivers/usb/atm/usbatm.h
+++ b/drivers/usb/atm/usbatm.h
@@ -204,4 +204,19 @@ struct usbatm_data {
204 struct urb *urbs[0]; 204 struct urb *urbs[0];
205}; 205};
206 206
207static inline void *to_usbatm_driver_data(struct usb_interface *intf)
208{
209 struct usbatm_data *usbatm_instance;
210
211 if (intf == NULL)
212 return NULL;
213
214 usbatm_instance = usb_get_intfdata(intf);
215
216 if (usbatm_instance == NULL) /* set NULL before unbind() */
217 return NULL;
218
219 return usbatm_instance->driver_data; /* set NULL after unbind() */
220}
221
207#endif /* _USBATM_H_ */ 222#endif /* _USBATM_H_ */
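
This helper gives every sysfs callback a single NULL-safe path to the driver
data, which is cleared around unbind. A condensed sketch of the call pattern
the cxacru conversions above follow (names abbreviated for illustration):

	static ssize_t example_show(struct device *dev,
			struct device_attribute *attr, char *buf)
	{
		struct cxacru_data *instance =
			to_usbatm_driver_data(to_usb_interface(dev));

		if (instance == NULL)	/* raced with unbind */
			return -ENODEV;

		return snprintf(buf, PAGE_SIZE, "%u\n",
				instance->card_info[CXINF_LINE_STARTABLE]);
	}
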
diff --git a/drivers/usb/c67x00/c67x00-drv.c b/drivers/usb/c67x00/c67x00-drv.c
index 5633bc5c8bf2..029ee4a8a1f3 100644
--- a/drivers/usb/c67x00/c67x00-drv.c
+++ b/drivers/usb/c67x00/c67x00-drv.c
@@ -137,13 +137,13 @@ static int __devinit c67x00_drv_probe(struct platform_device *pdev)
137 if (!c67x00) 137 if (!c67x00)
138 return -ENOMEM; 138 return -ENOMEM;
139 139
140 if (!request_mem_region(res->start, res->end - res->start + 1, 140 if (!request_mem_region(res->start, resource_size(res),
141 pdev->name)) { 141 pdev->name)) {
142 dev_err(&pdev->dev, "Memory region busy\n"); 142 dev_err(&pdev->dev, "Memory region busy\n");
143 ret = -EBUSY; 143 ret = -EBUSY;
144 goto request_mem_failed; 144 goto request_mem_failed;
145 } 145 }
146 c67x00->hpi.base = ioremap(res->start, res->end - res->start + 1); 146 c67x00->hpi.base = ioremap(res->start, resource_size(res));
147 if (!c67x00->hpi.base) { 147 if (!c67x00->hpi.base) {
148 dev_err(&pdev->dev, "Unable to map HPI registers\n"); 148 dev_err(&pdev->dev, "Unable to map HPI registers\n");
149 ret = -EIO; 149 ret = -EIO;
@@ -182,7 +182,7 @@ static int __devinit c67x00_drv_probe(struct platform_device *pdev)
182 request_irq_failed: 182 request_irq_failed:
183 iounmap(c67x00->hpi.base); 183 iounmap(c67x00->hpi.base);
184 map_failed: 184 map_failed:
185 release_mem_region(res->start, res->end - res->start + 1); 185 release_mem_region(res->start, resource_size(res));
186 request_mem_failed: 186 request_mem_failed:
187 kfree(c67x00); 187 kfree(c67x00);
188 188
@@ -208,7 +208,7 @@ static int __devexit c67x00_drv_remove(struct platform_device *pdev)
208 208
209 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 209 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
210 if (res) 210 if (res)
211 release_mem_region(res->start, res->end - res->start + 1); 211 release_mem_region(res->start, resource_size(res));
212 212
213 kfree(c67x00); 213 kfree(c67x00);
214 214
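
resource_size(), from <linux/ioport.h>, computes exactly the value the driver
used to open-code, leaving less room for off-by-one mistakes:

	static inline resource_size_t resource_size(const struct resource *res)
	{
		return res->end - res->start + 1;
	}
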
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 34d4eb98829e..975d556b4787 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -170,6 +170,7 @@ static void acm_write_done(struct acm *acm, struct acm_wb *wb)
170{ 170{
171 wb->use = 0; 171 wb->use = 0;
172 acm->transmitting--; 172 acm->transmitting--;
173 usb_autopm_put_interface_async(acm->control);
173} 174}
174 175
175/* 176/*
@@ -211,9 +212,12 @@ static int acm_write_start(struct acm *acm, int wbn)
211 } 212 }
212 213
213 dbg("%s susp_count: %d", __func__, acm->susp_count); 214 dbg("%s susp_count: %d", __func__, acm->susp_count);
215 usb_autopm_get_interface_async(acm->control);
214 if (acm->susp_count) { 216 if (acm->susp_count) {
215 acm->delayed_wb = wb; 217 if (!acm->delayed_wb)
216 schedule_work(&acm->waker); 218 acm->delayed_wb = wb;
219 else
220 usb_autopm_put_interface_async(acm->control);
217 spin_unlock_irqrestore(&acm->write_lock, flags); 221 spin_unlock_irqrestore(&acm->write_lock, flags);
218 return 0; /* A white lie */ 222 return 0; /* A white lie */
219 } 223 }
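
With the asynchronous autopm calls the old waker work item becomes redundant:
the PM reference is taken directly on the write path and released either
immediately (when the delayed slot is already occupied) or in acm_write_done()
once the urb completes. The helpers are the non-blocking variants declared in
<linux/usb.h>, usable where sleeping is not allowed:

	extern int usb_autopm_get_interface_async(struct usb_interface *intf);
	extern void usb_autopm_put_interface_async(struct usb_interface *intf);
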
@@ -424,7 +428,6 @@ next_buffer:
424 throttled = acm->throttle; 428 throttled = acm->throttle;
425 spin_unlock_irqrestore(&acm->throttle_lock, flags); 429 spin_unlock_irqrestore(&acm->throttle_lock, flags);
426 if (!throttled) { 430 if (!throttled) {
427 tty_buffer_request_room(tty, buf->size);
428 tty_insert_flip_string(tty, buf->base, buf->size); 431 tty_insert_flip_string(tty, buf->base, buf->size);
429 tty_flip_buffer_push(tty); 432 tty_flip_buffer_push(tty);
430 } else { 433 } else {
@@ -534,23 +537,6 @@ static void acm_softint(struct work_struct *work)
534 tty_kref_put(tty); 537 tty_kref_put(tty);
535} 538}
536 539
537static void acm_waker(struct work_struct *waker)
538{
539 struct acm *acm = container_of(waker, struct acm, waker);
540 int rv;
541
542 rv = usb_autopm_get_interface(acm->control);
543 if (rv < 0) {
544 dev_err(&acm->dev->dev, "Autopm failure in %s\n", __func__);
545 return;
546 }
547 if (acm->delayed_wb) {
548 acm_start_wb(acm, acm->delayed_wb);
549 acm->delayed_wb = NULL;
550 }
551 usb_autopm_put_interface(acm->control);
552}
553
554/* 540/*
555 * TTY handlers 541 * TTY handlers
556 */ 542 */
@@ -566,7 +552,7 @@ static int acm_tty_open(struct tty_struct *tty, struct file *filp)
566 552
567 acm = acm_table[tty->index]; 553 acm = acm_table[tty->index];
568 if (!acm || !acm->dev) 554 if (!acm || !acm->dev)
569 goto err_out; 555 goto out;
570 else 556 else
571 rv = 0; 557 rv = 0;
572 558
@@ -582,8 +568,9 @@ static int acm_tty_open(struct tty_struct *tty, struct file *filp)
582 568
583 mutex_lock(&acm->mutex); 569 mutex_lock(&acm->mutex);
584 if (acm->port.count++) { 570 if (acm->port.count++) {
571 mutex_unlock(&acm->mutex);
585 usb_autopm_put_interface(acm->control); 572 usb_autopm_put_interface(acm->control);
586 goto done; 573 goto out;
587 } 574 }
588 575
589 acm->ctrlurb->dev = acm->dev; 576 acm->ctrlurb->dev = acm->dev;
@@ -612,18 +599,18 @@ static int acm_tty_open(struct tty_struct *tty, struct file *filp)
612 set_bit(ASYNCB_INITIALIZED, &acm->port.flags); 599 set_bit(ASYNCB_INITIALIZED, &acm->port.flags);
613 rv = tty_port_block_til_ready(&acm->port, tty, filp); 600 rv = tty_port_block_til_ready(&acm->port, tty, filp);
614 tasklet_schedule(&acm->urb_task); 601 tasklet_schedule(&acm->urb_task);
615done: 602
616 mutex_unlock(&acm->mutex); 603 mutex_unlock(&acm->mutex);
617err_out: 604out:
618 mutex_unlock(&open_mutex); 605 mutex_unlock(&open_mutex);
619 return rv; 606 return rv;
620 607
621full_bailout: 608full_bailout:
622 usb_kill_urb(acm->ctrlurb); 609 usb_kill_urb(acm->ctrlurb);
623bail_out: 610bail_out:
624 usb_autopm_put_interface(acm->control);
625 acm->port.count--; 611 acm->port.count--;
626 mutex_unlock(&acm->mutex); 612 mutex_unlock(&acm->mutex);
613 usb_autopm_put_interface(acm->control);
627early_bail: 614early_bail:
628 mutex_unlock(&open_mutex); 615 mutex_unlock(&open_mutex);
629 tty_port_tty_set(&acm->port, NULL); 616 tty_port_tty_set(&acm->port, NULL);
@@ -1023,7 +1010,7 @@ static int acm_probe(struct usb_interface *intf,
1023 case USB_CDC_CALL_MANAGEMENT_TYPE: 1010 case USB_CDC_CALL_MANAGEMENT_TYPE:
1024 call_management_function = buffer[3]; 1011 call_management_function = buffer[3];
1025 call_interface_num = buffer[4]; 1012 call_interface_num = buffer[4];
1026 if ((call_management_function & 3) != 3) 1013 if ((quirks & NOT_A_MODEM) == 0 && (call_management_function & 3) != 3)
1027 dev_err(&intf->dev, "This device cannot do calls on its own. It is not a modem.\n"); 1014 dev_err(&intf->dev, "This device cannot do calls on its own. It is not a modem.\n");
1028 break; 1015 break;
1029 default: 1016 default:
@@ -1178,7 +1165,6 @@ made_compressed_probe:
1178 acm->urb_task.func = acm_rx_tasklet; 1165 acm->urb_task.func = acm_rx_tasklet;
1179 acm->urb_task.data = (unsigned long) acm; 1166 acm->urb_task.data = (unsigned long) acm;
1180 INIT_WORK(&acm->work, acm_softint); 1167 INIT_WORK(&acm->work, acm_softint);
1181 INIT_WORK(&acm->waker, acm_waker);
1182 init_waitqueue_head(&acm->drain_wait); 1168 init_waitqueue_head(&acm->drain_wait);
1183 spin_lock_init(&acm->throttle_lock); 1169 spin_lock_init(&acm->throttle_lock);
1184 spin_lock_init(&acm->write_lock); 1170 spin_lock_init(&acm->write_lock);
@@ -1343,7 +1329,6 @@ static void stop_data_traffic(struct acm *acm)
1343 tasklet_enable(&acm->urb_task); 1329 tasklet_enable(&acm->urb_task);
1344 1330
1345 cancel_work_sync(&acm->work); 1331 cancel_work_sync(&acm->work);
1346 cancel_work_sync(&acm->waker);
1347} 1332}
1348 1333
1349static void acm_disconnect(struct usb_interface *intf) 1334static void acm_disconnect(struct usb_interface *intf)
@@ -1435,6 +1420,7 @@ static int acm_suspend(struct usb_interface *intf, pm_message_t message)
1435static int acm_resume(struct usb_interface *intf) 1420static int acm_resume(struct usb_interface *intf)
1436{ 1421{
1437 struct acm *acm = usb_get_intfdata(intf); 1422 struct acm *acm = usb_get_intfdata(intf);
1423 struct acm_wb *wb;
1438 int rv = 0; 1424 int rv = 0;
1439 int cnt; 1425 int cnt;
1440 1426
@@ -1449,6 +1435,21 @@ static int acm_resume(struct usb_interface *intf)
1449 mutex_lock(&acm->mutex); 1435 mutex_lock(&acm->mutex);
1450 if (acm->port.count) { 1436 if (acm->port.count) {
1451 rv = usb_submit_urb(acm->ctrlurb, GFP_NOIO); 1437 rv = usb_submit_urb(acm->ctrlurb, GFP_NOIO);
1438
1439 spin_lock_irq(&acm->write_lock);
1440 if (acm->delayed_wb) {
1441 wb = acm->delayed_wb;
1442 acm->delayed_wb = NULL;
1443 spin_unlock_irq(&acm->write_lock);
 1444 acm_start_wb(acm, wb);
1445 } else {
1446 spin_unlock_irq(&acm->write_lock);
1447 }
1448
 1449 /*
 1450 * error checking on the control urb is delayed because
 1451 * the write path must run regardless of its result
 1452 */
1452 if (rv < 0) 1453 if (rv < 0)
1453 goto err_out; 1454 goto err_out;
1454 1455
@@ -1460,6 +1461,23 @@ err_out:
1460 return rv; 1461 return rv;
1461} 1462}
1462 1463
1464static int acm_reset_resume(struct usb_interface *intf)
1465{
1466 struct acm *acm = usb_get_intfdata(intf);
1467 struct tty_struct *tty;
1468
1469 mutex_lock(&acm->mutex);
1470 if (acm->port.count) {
1471 tty = tty_port_tty_get(&acm->port);
1472 if (tty) {
1473 tty_hangup(tty);
1474 tty_kref_put(tty);
1475 }
1476 }
1477 mutex_unlock(&acm->mutex);
1478 return acm_resume(intf);
1479}
1480
1463#endif /* CONFIG_PM */ 1481#endif /* CONFIG_PM */
1464 1482
1465#define NOKIA_PCSUITE_ACM_INFO(x) \ 1483#define NOKIA_PCSUITE_ACM_INFO(x) \
@@ -1471,7 +1489,7 @@ err_out:
1471 * USB driver structure. 1489 * USB driver structure.
1472 */ 1490 */
1473 1491
1474static struct usb_device_id acm_ids[] = { 1492static const struct usb_device_id acm_ids[] = {
1475 /* quirky and broken devices */ 1493 /* quirky and broken devices */
1476 { USB_DEVICE(0x0870, 0x0001), /* Metricom GS Modem */ 1494 { USB_DEVICE(0x0870, 0x0001), /* Metricom GS Modem */
1477 .driver_info = NO_UNION_NORMAL, /* has no union descriptor */ 1495 .driver_info = NO_UNION_NORMAL, /* has no union descriptor */
@@ -1576,6 +1594,11 @@ static struct usb_device_id acm_ids[] = {
1576 1594
1577 /* NOTE: non-Nokia COMM/ACM/0xff is likely MSFT RNDIS... NOT a modem! */ 1595 /* NOTE: non-Nokia COMM/ACM/0xff is likely MSFT RNDIS... NOT a modem! */
1578 1596
1597 /* Support Lego NXT using pbLua firmware */
1598 { USB_DEVICE(0x0694, 0xff00),
1599 .driver_info = NOT_A_MODEM,
1600 },
1601
1579 /* control interfaces with various AT-command sets */ 1602 /* control interfaces with various AT-command sets */
1580 { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM, 1603 { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM,
1581 USB_CDC_ACM_PROTO_AT_V25TER) }, 1604 USB_CDC_ACM_PROTO_AT_V25TER) },
@@ -1602,6 +1625,7 @@ static struct usb_driver acm_driver = {
1602#ifdef CONFIG_PM 1625#ifdef CONFIG_PM
1603 .suspend = acm_suspend, 1626 .suspend = acm_suspend,
1604 .resume = acm_resume, 1627 .resume = acm_resume,
1628 .reset_resume = acm_reset_resume,
1605#endif 1629#endif
1606 .id_table = acm_ids, 1630 .id_table = acm_ids,
1607#ifdef CONFIG_PM 1631#ifdef CONFIG_PM
diff --git a/drivers/usb/class/cdc-acm.h b/drivers/usb/class/cdc-acm.h
index c4a0ee8ffccf..4a8e87ec6ce9 100644
--- a/drivers/usb/class/cdc-acm.h
+++ b/drivers/usb/class/cdc-acm.h
@@ -112,7 +112,6 @@ struct acm {
112 struct mutex mutex; 112 struct mutex mutex;
113 struct usb_cdc_line_coding line; /* bits, stop, parity */ 113 struct usb_cdc_line_coding line; /* bits, stop, parity */
114 struct work_struct work; /* work queue entry for line discipline waking up */ 114 struct work_struct work; /* work queue entry for line discipline waking up */
115 struct work_struct waker;
116 wait_queue_head_t drain_wait; /* close processing */ 115 wait_queue_head_t drain_wait; /* close processing */
117 struct tasklet_struct urb_task; /* rx processing */ 116 struct tasklet_struct urb_task; /* rx processing */
 118 spinlock_t throttle_lock; /* synchronize throttling and read callback */ 117 spinlock_t throttle_lock; /* synchronize throttling and read callback */
@@ -137,3 +136,4 @@ struct acm {
137#define NO_UNION_NORMAL 1 136#define NO_UNION_NORMAL 1
138#define SINGLE_RX_URB 2 137#define SINGLE_RX_URB 2
139#define NO_CAP_LINE 4 138#define NO_CAP_LINE 4
139#define NOT_A_MODEM 8
diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
index 3e564bfe17d1..18aafcb08fc8 100644
--- a/drivers/usb/class/cdc-wdm.c
+++ b/drivers/usb/class/cdc-wdm.c
@@ -31,7 +31,7 @@
31#define DRIVER_AUTHOR "Oliver Neukum" 31#define DRIVER_AUTHOR "Oliver Neukum"
32#define DRIVER_DESC "USB Abstract Control Model driver for USB WCM Device Management" 32#define DRIVER_DESC "USB Abstract Control Model driver for USB WCM Device Management"
33 33
34static struct usb_device_id wdm_ids[] = { 34static const struct usb_device_id wdm_ids[] = {
35 { 35 {
36 .match_flags = USB_DEVICE_ID_MATCH_INT_CLASS | 36 .match_flags = USB_DEVICE_ID_MATCH_INT_CLASS |
37 USB_DEVICE_ID_MATCH_INT_SUBCLASS, 37 USB_DEVICE_ID_MATCH_INT_SUBCLASS,
diff --git a/drivers/usb/class/usblp.c b/drivers/usb/class/usblp.c
index 9bc112ee7803..93b5f85d7ceb 100644
--- a/drivers/usb/class/usblp.c
+++ b/drivers/usb/class/usblp.c
@@ -163,7 +163,6 @@ struct usblp {
163 unsigned char used; /* True if open */ 163 unsigned char used; /* True if open */
164 unsigned char present; /* True if not disconnected */ 164 unsigned char present; /* True if not disconnected */
165 unsigned char bidir; /* interface is bidirectional */ 165 unsigned char bidir; /* interface is bidirectional */
166 unsigned char sleeping; /* interface is suspended */
167 unsigned char no_paper; /* Paper Out happened */ 166 unsigned char no_paper; /* Paper Out happened */
168 unsigned char *device_id_string; /* IEEE 1284 DEVICE ID string (ptr) */ 167 unsigned char *device_id_string; /* IEEE 1284 DEVICE ID string (ptr) */
169 /* first 2 bytes are (big-endian) length */ 168 /* first 2 bytes are (big-endian) length */
@@ -191,7 +190,6 @@ static void usblp_dump(struct usblp *usblp) {
191 dbg("quirks=%d", usblp->quirks); 190 dbg("quirks=%d", usblp->quirks);
192 dbg("used=%d", usblp->used); 191 dbg("used=%d", usblp->used);
193 dbg("bidir=%d", usblp->bidir); 192 dbg("bidir=%d", usblp->bidir);
194 dbg("sleeping=%d", usblp->sleeping);
195 dbg("device_id_string=\"%s\"", 193 dbg("device_id_string=\"%s\"",
196 usblp->device_id_string ? 194 usblp->device_id_string ?
197 usblp->device_id_string + 2 : 195 usblp->device_id_string + 2 :
@@ -376,7 +374,7 @@ static int usblp_check_status(struct usblp *usblp, int err)
376 374
377static int handle_bidir (struct usblp *usblp) 375static int handle_bidir (struct usblp *usblp)
378{ 376{
379 if (usblp->bidir && usblp->used && !usblp->sleeping) { 377 if (usblp->bidir && usblp->used) {
380 if (usblp_submit_read(usblp) < 0) 378 if (usblp_submit_read(usblp) < 0)
381 return -EIO; 379 return -EIO;
382 } 380 }
@@ -503,11 +501,6 @@ static long usblp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
503 goto done; 501 goto done;
504 } 502 }
505 503
506 if (usblp->sleeping) {
507 retval = -ENODEV;
508 goto done;
509 }
510
511 dbg("usblp_ioctl: cmd=0x%x (%c nr=%d len=%d dir=%d)", cmd, _IOC_TYPE(cmd), 504 dbg("usblp_ioctl: cmd=0x%x (%c nr=%d len=%d dir=%d)", cmd, _IOC_TYPE(cmd),
512 _IOC_NR(cmd), _IOC_SIZE(cmd), _IOC_DIR(cmd) ); 505 _IOC_NR(cmd), _IOC_SIZE(cmd), _IOC_DIR(cmd) );
513 506
@@ -914,8 +907,6 @@ static int usblp_wtest(struct usblp *usblp, int nonblock)
914 return 0; 907 return 0;
915 } 908 }
916 spin_unlock_irqrestore(&usblp->lock, flags); 909 spin_unlock_irqrestore(&usblp->lock, flags);
917 if (usblp->sleeping)
918 return -ENODEV;
919 if (nonblock) 910 if (nonblock)
920 return -EAGAIN; 911 return -EAGAIN;
921 return 1; 912 return 1;
@@ -968,8 +959,6 @@ static int usblp_rtest(struct usblp *usblp, int nonblock)
968 return 0; 959 return 0;
969 } 960 }
970 spin_unlock_irqrestore(&usblp->lock, flags); 961 spin_unlock_irqrestore(&usblp->lock, flags);
971 if (usblp->sleeping)
972 return -ENODEV;
973 if (nonblock) 962 if (nonblock)
974 return -EAGAIN; 963 return -EAGAIN;
975 return 1; 964 return 1;
@@ -1377,12 +1366,10 @@ static void usblp_disconnect(struct usb_interface *intf)
1377 mutex_unlock (&usblp_mutex); 1366 mutex_unlock (&usblp_mutex);
1378} 1367}
1379 1368
1380static int usblp_suspend (struct usb_interface *intf, pm_message_t message) 1369static int usblp_suspend(struct usb_interface *intf, pm_message_t message)
1381{ 1370{
1382 struct usblp *usblp = usb_get_intfdata (intf); 1371 struct usblp *usblp = usb_get_intfdata (intf);
1383 1372
1384 /* we take no more IO */
1385 usblp->sleeping = 1;
1386 usblp_unlink_urbs(usblp); 1373 usblp_unlink_urbs(usblp);
1387#if 0 /* XXX Do we want this? What if someone is reading, should we fail? */ 1374#if 0 /* XXX Do we want this? What if someone is reading, should we fail? */
1388 /* not strictly necessary, but just in case */ 1375 /* not strictly necessary, but just in case */
@@ -1393,18 +1380,17 @@ static int usblp_suspend (struct usb_interface *intf, pm_message_t message)
1393 return 0; 1380 return 0;
1394} 1381}
1395 1382
1396static int usblp_resume (struct usb_interface *intf) 1383static int usblp_resume(struct usb_interface *intf)
1397{ 1384{
1398 struct usblp *usblp = usb_get_intfdata (intf); 1385 struct usblp *usblp = usb_get_intfdata (intf);
1399 int r; 1386 int r;
1400 1387
1401 usblp->sleeping = 0;
1402 r = handle_bidir (usblp); 1388 r = handle_bidir (usblp);
1403 1389
1404 return r; 1390 return r;
1405} 1391}
1406 1392
1407static struct usb_device_id usblp_ids [] = { 1393static const struct usb_device_id usblp_ids[] = {
1408 { USB_DEVICE_INFO(7, 1, 1) }, 1394 { USB_DEVICE_INFO(7, 1, 1) },
1409 { USB_DEVICE_INFO(7, 1, 2) }, 1395 { USB_DEVICE_INFO(7, 1, 2) },
1410 { USB_DEVICE_INFO(7, 1, 3) }, 1396 { USB_DEVICE_INFO(7, 1, 3) },
diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c
index 7c5f4e32c920..8588c0937a89 100644
--- a/drivers/usb/class/usbtmc.c
+++ b/drivers/usb/class/usbtmc.c
@@ -48,7 +48,7 @@
48 */ 48 */
49#define USBTMC_MAX_READS_TO_CLEAR_BULK_IN 100 49#define USBTMC_MAX_READS_TO_CLEAR_BULK_IN 100
50 50
51static struct usb_device_id usbtmc_devices[] = { 51static const struct usb_device_id usbtmc_devices[] = {
52 { USB_INTERFACE_INFO(USB_CLASS_APP_SPEC, 3, 0), }, 52 { USB_INTERFACE_INFO(USB_CLASS_APP_SPEC, 3, 0), },
53 { USB_INTERFACE_INFO(USB_CLASS_APP_SPEC, 3, 1), }, 53 { USB_INTERFACE_INFO(USB_CLASS_APP_SPEC, 3, 1), },
54 { 0, } /* terminating entry */ 54 { 0, } /* terminating entry */
diff --git a/drivers/usb/core/Kconfig b/drivers/usb/core/Kconfig
index ad925946f869..97a819c23ef3 100644
--- a/drivers/usb/core/Kconfig
+++ b/drivers/usb/core/Kconfig
@@ -91,8 +91,8 @@ config USB_DYNAMIC_MINORS
91 If you are unsure about this, say N here. 91 If you are unsure about this, say N here.
92 92
93config USB_SUSPEND 93config USB_SUSPEND
94 bool "USB selective suspend/resume and wakeup" 94 bool "USB runtime power management (suspend/resume and wakeup)"
95 depends on USB && PM 95 depends on USB && PM_RUNTIME
96 help 96 help
97 If you say Y here, you can use driver calls or the sysfs 97 If you say Y here, you can use driver calls or the sysfs
98 "power/level" file to suspend or resume individual USB 98 "power/level" file to suspend or resume individual USB
diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
index 355dffcc23b0..c83c975152a6 100644
--- a/drivers/usb/core/devices.c
+++ b/drivers/usb/core/devices.c
@@ -118,6 +118,7 @@ static const char *format_endpt =
118 */ 118 */
119 119
120static DECLARE_WAIT_QUEUE_HEAD(deviceconndiscwq); 120static DECLARE_WAIT_QUEUE_HEAD(deviceconndiscwq);
121/* guarded by usbfs_mutex */
121static unsigned int conndiscevcnt; 122static unsigned int conndiscevcnt;
122 123
123/* this struct stores the poll state for <mountpoint>/devices pollers */ 124/* this struct stores the poll state for <mountpoint>/devices pollers */
@@ -156,7 +157,9 @@ static const struct class_info clas_info[] =
156 157
157void usbfs_conn_disc_event(void) 158void usbfs_conn_disc_event(void)
158{ 159{
160 mutex_lock(&usbfs_mutex);
159 conndiscevcnt++; 161 conndiscevcnt++;
162 mutex_unlock(&usbfs_mutex);
160 wake_up(&deviceconndiscwq); 163 wake_up(&deviceconndiscwq);
161} 164}
162 165
@@ -629,42 +632,29 @@ static ssize_t usb_device_read(struct file *file, char __user *buf,
629static unsigned int usb_device_poll(struct file *file, 632static unsigned int usb_device_poll(struct file *file,
630 struct poll_table_struct *wait) 633 struct poll_table_struct *wait)
631{ 634{
632 struct usb_device_status *st = file->private_data; 635 struct usb_device_status *st;
633 unsigned int mask = 0; 636 unsigned int mask = 0;
634 637
635 lock_kernel(); 638 mutex_lock(&usbfs_mutex);
639 st = file->private_data;
636 if (!st) { 640 if (!st) {
637 st = kmalloc(sizeof(struct usb_device_status), GFP_KERNEL); 641 st = kmalloc(sizeof(struct usb_device_status), GFP_KERNEL);
638
639 /* we may have dropped BKL -
640 * need to check for having lost the race */
641 if (file->private_data) {
642 kfree(st);
643 st = file->private_data;
644 goto lost_race;
645 }
646 /* we haven't lost - check for allocation failure now */
647 if (!st) { 642 if (!st) {
648 unlock_kernel(); 643 mutex_unlock(&usbfs_mutex);
649 return POLLIN; 644 return POLLIN;
650 } 645 }
651 646
652 /*
653 * need to prevent the module from being unloaded, since
654 * proc_unregister does not call the release method and
655 * we would have a memory leak
656 */
657 st->lastev = conndiscevcnt; 647 st->lastev = conndiscevcnt;
658 file->private_data = st; 648 file->private_data = st;
659 mask = POLLIN; 649 mask = POLLIN;
660 } 650 }
661lost_race: 651
662 if (file->f_mode & FMODE_READ) 652 if (file->f_mode & FMODE_READ)
663 poll_wait(file, &deviceconndiscwq, wait); 653 poll_wait(file, &deviceconndiscwq, wait);
664 if (st->lastev != conndiscevcnt) 654 if (st->lastev != conndiscevcnt)
665 mask |= POLLIN; 655 mask |= POLLIN;
666 st->lastev = conndiscevcnt; 656 st->lastev = conndiscevcnt;
667 unlock_kernel(); 657 mutex_unlock(&usbfs_mutex);
668 return mask; 658 return mask;
669} 659}
670 660
@@ -685,7 +675,7 @@ static loff_t usb_device_lseek(struct file *file, loff_t offset, int orig)
685{ 675{
686 loff_t ret; 676 loff_t ret;
687 677
688 lock_kernel(); 678 mutex_lock(&file->f_dentry->d_inode->i_mutex);
689 679
690 switch (orig) { 680 switch (orig) {
691 case 0: 681 case 0:
@@ -701,7 +691,7 @@ static loff_t usb_device_lseek(struct file *file, loff_t offset, int orig)
701 ret = -EINVAL; 691 ret = -EINVAL;
702 } 692 }
703 693
704 unlock_kernel(); 694 mutex_unlock(&file->f_dentry->d_inode->i_mutex);
705 return ret; 695 return ret;
706} 696}
707 697
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index a678186f218f..e909ff7b9094 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -122,7 +122,7 @@ static loff_t usbdev_lseek(struct file *file, loff_t offset, int orig)
122{ 122{
123 loff_t ret; 123 loff_t ret;
124 124
125 lock_kernel(); 125 mutex_lock(&file->f_dentry->d_inode->i_mutex);
126 126
127 switch (orig) { 127 switch (orig) {
128 case 0: 128 case 0:
@@ -138,7 +138,7 @@ static loff_t usbdev_lseek(struct file *file, loff_t offset, int orig)
138 ret = -EINVAL; 138 ret = -EINVAL;
139 } 139 }
140 140
141 unlock_kernel(); 141 mutex_unlock(&file->f_dentry->d_inode->i_mutex);
142 return ret; 142 return ret;
143} 143}
144 144
@@ -310,7 +310,8 @@ static struct async *async_getpending(struct dev_state *ps,
310 310
311static void snoop_urb(struct usb_device *udev, 311static void snoop_urb(struct usb_device *udev,
312 void __user *userurb, int pipe, unsigned length, 312 void __user *userurb, int pipe, unsigned length,
313 int timeout_or_status, enum snoop_when when) 313 int timeout_or_status, enum snoop_when when,
314 unsigned char *data, unsigned data_len)
314{ 315{
315 static const char *types[] = {"isoc", "int", "ctrl", "bulk"}; 316 static const char *types[] = {"isoc", "int", "ctrl", "bulk"};
316 static const char *dirs[] = {"out", "in"}; 317 static const char *dirs[] = {"out", "in"};
@@ -344,6 +345,11 @@ static void snoop_urb(struct usb_device *udev,
344 "status %d\n", 345 "status %d\n",
345 ep, t, d, length, timeout_or_status); 346 ep, t, d, length, timeout_or_status);
346 } 347 }
348
349 if (data && data_len > 0) {
350 print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_NONE, 32, 1,
351 data, data_len, 1);
352 }
347} 353}
348 354
349#define AS_CONTINUATION 1 355#define AS_CONTINUATION 1
@@ -410,7 +416,9 @@ static void async_completed(struct urb *urb)
410 } 416 }
411 snoop(&urb->dev->dev, "urb complete\n"); 417 snoop(&urb->dev->dev, "urb complete\n");
412 snoop_urb(urb->dev, as->userurb, urb->pipe, urb->actual_length, 418 snoop_urb(urb->dev, as->userurb, urb->pipe, urb->actual_length,
413 as->status, COMPLETE); 419 as->status, COMPLETE,
420 ((urb->transfer_flags & URB_DIR_MASK) == USB_DIR_OUT) ?
421 NULL : urb->transfer_buffer, urb->actual_length);
414 if (as->status < 0 && as->bulk_addr && as->status != -ECONNRESET && 422 if (as->status < 0 && as->bulk_addr && as->status != -ECONNRESET &&
415 as->status != -ENOENT) 423 as->status != -ENOENT)
416 cancel_bulk_urbs(ps, as->bulk_addr); 424 cancel_bulk_urbs(ps, as->bulk_addr);
@@ -653,20 +661,20 @@ static int usbdev_open(struct inode *inode, struct file *file)
653 const struct cred *cred = current_cred(); 661 const struct cred *cred = current_cred();
654 int ret; 662 int ret;
655 663
656 lock_kernel();
657 /* Protect against simultaneous removal or release */
658 mutex_lock(&usbfs_mutex);
659
660 ret = -ENOMEM; 664 ret = -ENOMEM;
661 ps = kmalloc(sizeof(struct dev_state), GFP_KERNEL); 665 ps = kmalloc(sizeof(struct dev_state), GFP_KERNEL);
662 if (!ps) 666 if (!ps)
663 goto out; 667 goto out_free_ps;
664 668
665 ret = -ENODEV; 669 ret = -ENODEV;
666 670
671 /* Protect against simultaneous removal or release */
672 mutex_lock(&usbfs_mutex);
673
667 /* usbdev device-node */ 674 /* usbdev device-node */
668 if (imajor(inode) == USB_DEVICE_MAJOR) 675 if (imajor(inode) == USB_DEVICE_MAJOR)
669 dev = usbdev_lookup_by_devt(inode->i_rdev); 676 dev = usbdev_lookup_by_devt(inode->i_rdev);
677
670#ifdef CONFIG_USB_DEVICEFS 678#ifdef CONFIG_USB_DEVICEFS
671 /* procfs file */ 679 /* procfs file */
672 if (!dev) { 680 if (!dev) {
@@ -678,13 +686,19 @@ static int usbdev_open(struct inode *inode, struct file *file)
678 dev = NULL; 686 dev = NULL;
679 } 687 }
680#endif 688#endif
681 if (!dev || dev->state == USB_STATE_NOTATTACHED) 689 mutex_unlock(&usbfs_mutex);
682 goto out; 690
691 if (!dev)
692 goto out_free_ps;
693
694 usb_lock_device(dev);
695 if (dev->state == USB_STATE_NOTATTACHED)
696 goto out_unlock_device;
697
683 ret = usb_autoresume_device(dev); 698 ret = usb_autoresume_device(dev);
684 if (ret) 699 if (ret)
685 goto out; 700 goto out_unlock_device;
686 701
687 ret = 0;
688 ps->dev = dev; 702 ps->dev = dev;
689 ps->file = file; 703 ps->file = file;
690 spin_lock_init(&ps->lock); 704 spin_lock_init(&ps->lock);
@@ -702,15 +716,16 @@ static int usbdev_open(struct inode *inode, struct file *file)
702 smp_wmb(); 716 smp_wmb();
703 list_add_tail(&ps->list, &dev->filelist); 717 list_add_tail(&ps->list, &dev->filelist);
704 file->private_data = ps; 718 file->private_data = ps;
719 usb_unlock_device(dev);
705 snoop(&dev->dev, "opened by process %d: %s\n", task_pid_nr(current), 720 snoop(&dev->dev, "opened by process %d: %s\n", task_pid_nr(current),
706 current->comm); 721 current->comm);
707 out: 722 return ret;
708 if (ret) { 723
709 kfree(ps); 724 out_unlock_device:
710 usb_put_dev(dev); 725 usb_unlock_device(dev);
711 } 726 usb_put_dev(dev);
712 mutex_unlock(&usbfs_mutex); 727 out_free_ps:
713 unlock_kernel(); 728 kfree(ps);
714 return ret; 729 return ret;
715} 730}
716 731
@@ -724,10 +739,7 @@ static int usbdev_release(struct inode *inode, struct file *file)
724 usb_lock_device(dev); 739 usb_lock_device(dev);
725 usb_hub_release_all_ports(dev, ps); 740 usb_hub_release_all_ports(dev, ps);
726 741
727 /* Protect against simultaneous open */
728 mutex_lock(&usbfs_mutex);
729 list_del_init(&ps->list); 742 list_del_init(&ps->list);
730 mutex_unlock(&usbfs_mutex);
731 743
732 for (ifnum = 0; ps->ifclaimed && ifnum < 8*sizeof(ps->ifclaimed); 744 for (ifnum = 0; ps->ifclaimed && ifnum < 8*sizeof(ps->ifclaimed);
733 ifnum++) { 745 ifnum++) {
@@ -770,6 +782,13 @@ static int proc_control(struct dev_state *ps, void __user *arg)
770 if (!tbuf) 782 if (!tbuf)
771 return -ENOMEM; 783 return -ENOMEM;
772 tmo = ctrl.timeout; 784 tmo = ctrl.timeout;
785 snoop(&dev->dev, "control urb: bRequestType=%02x "
786 "bRequest=%02x wValue=%04x "
787 "wIndex=%04x wLength=%04x\n",
788 ctrl.bRequestType, ctrl.bRequest,
789 __le16_to_cpup(&ctrl.wValue),
790 __le16_to_cpup(&ctrl.wIndex),
791 __le16_to_cpup(&ctrl.wLength));
773 if (ctrl.bRequestType & 0x80) { 792 if (ctrl.bRequestType & 0x80) {
774 if (ctrl.wLength && !access_ok(VERIFY_WRITE, ctrl.data, 793 if (ctrl.wLength && !access_ok(VERIFY_WRITE, ctrl.data,
775 ctrl.wLength)) { 794 ctrl.wLength)) {
@@ -777,15 +796,15 @@ static int proc_control(struct dev_state *ps, void __user *arg)
777 return -EINVAL; 796 return -EINVAL;
778 } 797 }
779 pipe = usb_rcvctrlpipe(dev, 0); 798 pipe = usb_rcvctrlpipe(dev, 0);
780 snoop_urb(dev, NULL, pipe, ctrl.wLength, tmo, SUBMIT); 799 snoop_urb(dev, NULL, pipe, ctrl.wLength, tmo, SUBMIT, NULL, 0);
781 800
782 usb_unlock_device(dev); 801 usb_unlock_device(dev);
783 i = usb_control_msg(dev, pipe, ctrl.bRequest, 802 i = usb_control_msg(dev, pipe, ctrl.bRequest,
784 ctrl.bRequestType, ctrl.wValue, ctrl.wIndex, 803 ctrl.bRequestType, ctrl.wValue, ctrl.wIndex,
785 tbuf, ctrl.wLength, tmo); 804 tbuf, ctrl.wLength, tmo);
786 usb_lock_device(dev); 805 usb_lock_device(dev);
787 snoop_urb(dev, NULL, pipe, max(i, 0), min(i, 0), COMPLETE); 806 snoop_urb(dev, NULL, pipe, max(i, 0), min(i, 0), COMPLETE,
788 807 tbuf, i);
789 if ((i > 0) && ctrl.wLength) { 808 if ((i > 0) && ctrl.wLength) {
790 if (copy_to_user(ctrl.data, tbuf, i)) { 809 if (copy_to_user(ctrl.data, tbuf, i)) {
791 free_page((unsigned long)tbuf); 810 free_page((unsigned long)tbuf);
@@ -800,14 +819,15 @@ static int proc_control(struct dev_state *ps, void __user *arg)
800 } 819 }
801 } 820 }
802 pipe = usb_sndctrlpipe(dev, 0); 821 pipe = usb_sndctrlpipe(dev, 0);
803 snoop_urb(dev, NULL, pipe, ctrl.wLength, tmo, SUBMIT); 822 snoop_urb(dev, NULL, pipe, ctrl.wLength, tmo, SUBMIT,
823 tbuf, ctrl.wLength);
804 824
805 usb_unlock_device(dev); 825 usb_unlock_device(dev);
806 i = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), ctrl.bRequest, 826 i = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), ctrl.bRequest,
807 ctrl.bRequestType, ctrl.wValue, ctrl.wIndex, 827 ctrl.bRequestType, ctrl.wValue, ctrl.wIndex,
808 tbuf, ctrl.wLength, tmo); 828 tbuf, ctrl.wLength, tmo);
809 usb_lock_device(dev); 829 usb_lock_device(dev);
810 snoop_urb(dev, NULL, pipe, max(i, 0), min(i, 0), COMPLETE); 830 snoop_urb(dev, NULL, pipe, max(i, 0), min(i, 0), COMPLETE, NULL, 0);
811 } 831 }
812 free_page((unsigned long)tbuf); 832 free_page((unsigned long)tbuf);
813 if (i < 0 && i != -EPIPE) { 833 if (i < 0 && i != -EPIPE) {
@@ -853,12 +873,12 @@ static int proc_bulk(struct dev_state *ps, void __user *arg)
853 kfree(tbuf); 873 kfree(tbuf);
854 return -EINVAL; 874 return -EINVAL;
855 } 875 }
856 snoop_urb(dev, NULL, pipe, len1, tmo, SUBMIT); 876 snoop_urb(dev, NULL, pipe, len1, tmo, SUBMIT, NULL, 0);
857 877
858 usb_unlock_device(dev); 878 usb_unlock_device(dev);
859 i = usb_bulk_msg(dev, pipe, tbuf, len1, &len2, tmo); 879 i = usb_bulk_msg(dev, pipe, tbuf, len1, &len2, tmo);
860 usb_lock_device(dev); 880 usb_lock_device(dev);
861 snoop_urb(dev, NULL, pipe, len2, i, COMPLETE); 881 snoop_urb(dev, NULL, pipe, len2, i, COMPLETE, tbuf, len2);
862 882
863 if (!i && len2) { 883 if (!i && len2) {
864 if (copy_to_user(bulk.data, tbuf, len2)) { 884 if (copy_to_user(bulk.data, tbuf, len2)) {
@@ -873,12 +893,12 @@ static int proc_bulk(struct dev_state *ps, void __user *arg)
873 return -EFAULT; 893 return -EFAULT;
874 } 894 }
875 } 895 }
876 snoop_urb(dev, NULL, pipe, len1, tmo, SUBMIT); 896 snoop_urb(dev, NULL, pipe, len1, tmo, SUBMIT, tbuf, len1);
877 897
878 usb_unlock_device(dev); 898 usb_unlock_device(dev);
879 i = usb_bulk_msg(dev, pipe, tbuf, len1, &len2, tmo); 899 i = usb_bulk_msg(dev, pipe, tbuf, len1, &len2, tmo);
880 usb_lock_device(dev); 900 usb_lock_device(dev);
881 snoop_urb(dev, NULL, pipe, len2, i, COMPLETE); 901 snoop_urb(dev, NULL, pipe, len2, i, COMPLETE, NULL, 0);
882 } 902 }
883 kfree(tbuf); 903 kfree(tbuf);
884 if (i < 0) 904 if (i < 0)
@@ -1097,6 +1117,13 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb,
1097 is_in = 0; 1117 is_in = 0;
1098 uurb->endpoint &= ~USB_DIR_IN; 1118 uurb->endpoint &= ~USB_DIR_IN;
1099 } 1119 }
1120 snoop(&ps->dev->dev, "control urb: bRequestType=%02x "
1121 "bRequest=%02x wValue=%04x "
1122 "wIndex=%04x wLength=%04x\n",
1123 dr->bRequestType, dr->bRequest,
1124 __le16_to_cpup(&dr->wValue),
1125 __le16_to_cpup(&dr->wIndex),
1126 __le16_to_cpup(&dr->wLength));
1100 break; 1127 break;
1101 1128
1102 case USBDEVFS_URB_TYPE_BULK: 1129 case USBDEVFS_URB_TYPE_BULK:
@@ -1104,13 +1131,25 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb,
1104 case USB_ENDPOINT_XFER_CONTROL: 1131 case USB_ENDPOINT_XFER_CONTROL:
1105 case USB_ENDPOINT_XFER_ISOC: 1132 case USB_ENDPOINT_XFER_ISOC:
1106 return -EINVAL; 1133 return -EINVAL;
1107 /* allow single-shot interrupt transfers, at bogus rates */ 1134 case USB_ENDPOINT_XFER_INT:
1135 /* allow single-shot interrupt transfers */
1136 uurb->type = USBDEVFS_URB_TYPE_INTERRUPT;
1137 goto interrupt_urb;
1108 } 1138 }
1109 uurb->number_of_packets = 0; 1139 uurb->number_of_packets = 0;
1110 if (uurb->buffer_length > MAX_USBFS_BUFFER_SIZE) 1140 if (uurb->buffer_length > MAX_USBFS_BUFFER_SIZE)
1111 return -EINVAL; 1141 return -EINVAL;
1112 break; 1142 break;
1113 1143
1144 case USBDEVFS_URB_TYPE_INTERRUPT:
1145 if (!usb_endpoint_xfer_int(&ep->desc))
1146 return -EINVAL;
1147 interrupt_urb:
1148 uurb->number_of_packets = 0;
1149 if (uurb->buffer_length > MAX_USBFS_BUFFER_SIZE)
1150 return -EINVAL;
1151 break;
1152
1114 case USBDEVFS_URB_TYPE_ISO: 1153 case USBDEVFS_URB_TYPE_ISO:
1115 /* arbitrary limit */ 1154 /* arbitrary limit */
1116 if (uurb->number_of_packets < 1 || 1155 if (uurb->number_of_packets < 1 ||
@@ -1143,14 +1182,6 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb,
1143 uurb->buffer_length = totlen; 1182 uurb->buffer_length = totlen;
1144 break; 1183 break;
1145 1184
1146 case USBDEVFS_URB_TYPE_INTERRUPT:
1147 uurb->number_of_packets = 0;
1148 if (!usb_endpoint_xfer_int(&ep->desc))
1149 return -EINVAL;
1150 if (uurb->buffer_length > MAX_USBFS_BUFFER_SIZE)
1151 return -EINVAL;
1152 break;
1153
1154 default: 1185 default:
1155 return -EINVAL; 1186 return -EINVAL;
1156 } 1187 }
@@ -1236,7 +1267,9 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb,
1236 } 1267 }
1237 } 1268 }
1238 snoop_urb(ps->dev, as->userurb, as->urb->pipe, 1269 snoop_urb(ps->dev, as->userurb, as->urb->pipe,
1239 as->urb->transfer_buffer_length, 0, SUBMIT); 1270 as->urb->transfer_buffer_length, 0, SUBMIT,
1271 is_in ? NULL : as->urb->transfer_buffer,
1272 uurb->buffer_length);
1240 async_newpending(as); 1273 async_newpending(as);
1241 1274
1242 if (usb_endpoint_xfer_bulk(&ep->desc)) { 1275 if (usb_endpoint_xfer_bulk(&ep->desc)) {
@@ -1274,7 +1307,7 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb,
1274 dev_printk(KERN_DEBUG, &ps->dev->dev, 1307 dev_printk(KERN_DEBUG, &ps->dev->dev,
1275 "usbfs: usb_submit_urb returned %d\n", ret); 1308 "usbfs: usb_submit_urb returned %d\n", ret);
1276 snoop_urb(ps->dev, as->userurb, as->urb->pipe, 1309 snoop_urb(ps->dev, as->userurb, as->urb->pipe,
1277 0, ret, COMPLETE); 1310 0, ret, COMPLETE, NULL, 0);
1278 async_removepending(as); 1311 async_removepending(as);
1279 free_async(as); 1312 free_async(as);
1280 return ret; 1313 return ret;
@@ -1628,7 +1661,10 @@ static int proc_ioctl(struct dev_state *ps, struct usbdevfs_ioctl *ctl)
1628 if (driver == NULL || driver->ioctl == NULL) { 1661 if (driver == NULL || driver->ioctl == NULL) {
1629 retval = -ENOTTY; 1662 retval = -ENOTTY;
1630 } else { 1663 } else {
 1664 /* preserve the driver-ioctl API guarantee that the BKL is held */
1665 lock_kernel();
1631 retval = driver->ioctl(intf, ctl->ioctl_code, buf); 1666 retval = driver->ioctl(intf, ctl->ioctl_code, buf);
1667 unlock_kernel();
1632 if (retval == -ENOIOCTLCMD) 1668 if (retval == -ENOIOCTLCMD)
1633 retval = -ENOTTY; 1669 retval = -ENOTTY;
1634 } 1670 }
@@ -1711,6 +1747,7 @@ static long usbdev_do_ioctl(struct file *file, unsigned int cmd,
1711 1747
1712 if (!(file->f_mode & FMODE_WRITE)) 1748 if (!(file->f_mode & FMODE_WRITE))
1713 return -EPERM; 1749 return -EPERM;
1750
1714 usb_lock_device(dev); 1751 usb_lock_device(dev);
1715 if (!connected(ps)) { 1752 if (!connected(ps)) {
1716 usb_unlock_device(dev); 1753 usb_unlock_device(dev);
@@ -1877,9 +1914,7 @@ static long usbdev_ioctl(struct file *file, unsigned int cmd,
1877{ 1914{
1878 int ret; 1915 int ret;
1879 1916
1880 lock_kernel();
1881 ret = usbdev_do_ioctl(file, cmd, (void __user *)arg); 1917 ret = usbdev_do_ioctl(file, cmd, (void __user *)arg);
1882 unlock_kernel();
1883 1918
1884 return ret; 1919 return ret;
1885} 1920}
@@ -1890,9 +1925,7 @@ static long usbdev_compat_ioctl(struct file *file, unsigned int cmd,
1890{ 1925{
1891 int ret; 1926 int ret;
1892 1927
1893 lock_kernel();
1894 ret = usbdev_do_ioctl(file, cmd, compat_ptr(arg)); 1928 ret = usbdev_do_ioctl(file, cmd, compat_ptr(arg));
1895 unlock_kernel();
1896 1929
1897 return ret; 1930 return ret;
1898} 1931}
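
[Note] The devio.c hunks above push the Big Kernel Lock out of the generic ioctl entry points and down to the one remaining user: the call into a legacy USB driver's ->ioctl method. A minimal sketch of the pattern, with hypothetical foo_* names standing in for the real dispatcher and driver (only the lock placement is the point):

/* Sketch: BKL pushed from the dispatcher into the legacy callsite.
 * foo_do_ioctl() and legacy_op() are illustrative, not from the patch.
 */
#include <linux/smp_lock.h>	/* lock_kernel(), unlock_kernel() */

struct foo_driver {
	int (*legacy_op)(unsigned int cmd, void *buf);
};

static long foo_do_ioctl(unsigned int cmd, void __user *arg);

static long foo_ioctl(unsigned int cmd, void __user *arg)
{
	/* new code paths now run without the BKL */
	return foo_do_ioctl(cmd, arg);
}

static int foo_call_legacy(struct foo_driver *drv, unsigned int cmd, void *buf)
{
	int ret;

	lock_kernel();		/* the legacy method still expects the BKL */
	ret = drv->legacy_op(cmd, buf);
	unlock_kernel();
	return ret;
}
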
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index f2f055eb6831..a7037bf81688 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -25,7 +25,7 @@
25#include <linux/device.h> 25#include <linux/device.h>
26#include <linux/usb.h> 26#include <linux/usb.h>
27#include <linux/usb/quirks.h> 27#include <linux/usb/quirks.h>
28#include <linux/workqueue.h> 28#include <linux/pm_runtime.h>
29#include "hcd.h" 29#include "hcd.h"
30#include "usb.h" 30#include "usb.h"
31 31
@@ -221,7 +221,7 @@ static int usb_probe_device(struct device *dev)
221{ 221{
222 struct usb_device_driver *udriver = to_usb_device_driver(dev->driver); 222 struct usb_device_driver *udriver = to_usb_device_driver(dev->driver);
223 struct usb_device *udev = to_usb_device(dev); 223 struct usb_device *udev = to_usb_device(dev);
224 int error = -ENODEV; 224 int error = 0;
225 225
226 dev_dbg(dev, "%s\n", __func__); 226 dev_dbg(dev, "%s\n", __func__);
227 227
@@ -230,18 +230,23 @@ static int usb_probe_device(struct device *dev)
230 /* The device should always appear to be in use 230 /* The device should always appear to be in use
231 * unless the driver supports autosuspend. 231 * unless the driver supports autosuspend.
232 */ 232 */
233 udev->pm_usage_cnt = !(udriver->supports_autosuspend); 233 if (!udriver->supports_autosuspend)
234 error = usb_autoresume_device(udev);
234 235
235 error = udriver->probe(udev); 236 if (!error)
237 error = udriver->probe(udev);
236 return error; 238 return error;
237} 239}
238 240
239/* called from driver core with dev locked */ 241/* called from driver core with dev locked */
240static int usb_unbind_device(struct device *dev) 242static int usb_unbind_device(struct device *dev)
241{ 243{
244 struct usb_device *udev = to_usb_device(dev);
242 struct usb_device_driver *udriver = to_usb_device_driver(dev->driver); 245 struct usb_device_driver *udriver = to_usb_device_driver(dev->driver);
243 246
244 udriver->disconnect(to_usb_device(dev)); 247 udriver->disconnect(udev);
248 if (!udriver->supports_autosuspend)
249 usb_autosuspend_device(udev);
245 return 0; 250 return 0;
246} 251}
247 252
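
[Note] The device-level probe/unbind pair now expresses "this driver cannot handle autosuspend" as a held usage-count reference rather than a raw pm_usage_cnt assignment: take the reference before probe, drop it after disconnect. Restated as a hedged sketch (bind/unbind are illustrative wrappers, not functions from the patch):

/* Sketch: keep a non-autosuspend-capable driver's device awake for
 * the whole time it is bound. Mirrors the pairing in the hunks above.
 */
static int bind(struct usb_device *udev, struct usb_device_driver *udriver)
{
	int error = 0;

	if (!udriver->supports_autosuspend)
		error = usb_autoresume_device(udev);	/* ++usage count */
	if (!error)
		error = udriver->probe(udev);
	return error;
}

static void unbind(struct usb_device *udev, struct usb_device_driver *udriver)
{
	udriver->disconnect(udev);
	if (!udriver->supports_autosuspend)
		usb_autosuspend_device(udev);		/* --usage count */
}
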
@@ -274,60 +279,62 @@ static int usb_probe_interface(struct device *dev)
274 intf->needs_binding = 0; 279 intf->needs_binding = 0;
275 280
276 if (usb_device_is_owned(udev)) 281 if (usb_device_is_owned(udev))
277 return -ENODEV; 282 return error;
278 283
279 if (udev->authorized == 0) { 284 if (udev->authorized == 0) {
280 dev_err(&intf->dev, "Device is not authorized for usage\n"); 285 dev_err(&intf->dev, "Device is not authorized for usage\n");
281 return -ENODEV; 286 return error;
282 } 287 }
283 288
284 id = usb_match_id(intf, driver->id_table); 289 id = usb_match_id(intf, driver->id_table);
285 if (!id) 290 if (!id)
286 id = usb_match_dynamic_id(intf, driver); 291 id = usb_match_dynamic_id(intf, driver);
287 if (id) { 292 if (!id)
288 dev_dbg(dev, "%s - got id\n", __func__); 293 return error;
289
290 error = usb_autoresume_device(udev);
291 if (error)
292 return error;
293 294
294 /* Interface "power state" doesn't correspond to any hardware 295 dev_dbg(dev, "%s - got id\n", __func__);
295 * state whatsoever. We use it to record when it's bound to
296 * a driver that may start I/O: it's not frozen/quiesced.
297 */
298 mark_active(intf);
299 intf->condition = USB_INTERFACE_BINDING;
300 296
301 /* The interface should always appear to be in use 297 error = usb_autoresume_device(udev);
302 * unless the driver supports autosuspend. 298 if (error)
303 */ 299 return error;
304 atomic_set(&intf->pm_usage_cnt, !driver->supports_autosuspend);
305
306 /* Carry out a deferred switch to altsetting 0 */
307 if (intf->needs_altsetting0) {
308 error = usb_set_interface(udev, intf->altsetting[0].
309 desc.bInterfaceNumber, 0);
310 if (error < 0)
311 goto err;
312 300
313 intf->needs_altsetting0 = 0; 301 intf->condition = USB_INTERFACE_BINDING;
314 }
315 302
316 error = driver->probe(intf, id); 303 /* Bound interfaces are initially active. They are
317 if (error) 304 * runtime-PM-enabled only if the driver has autosuspend support.
305 * They are sensitive to their children's power states.
306 */
307 pm_runtime_set_active(dev);
308 pm_suspend_ignore_children(dev, false);
309 if (driver->supports_autosuspend)
310 pm_runtime_enable(dev);
311
312 /* Carry out a deferred switch to altsetting 0 */
313 if (intf->needs_altsetting0) {
314 error = usb_set_interface(udev, intf->altsetting[0].
315 desc.bInterfaceNumber, 0);
316 if (error < 0)
318 goto err; 317 goto err;
319 318 intf->needs_altsetting0 = 0;
320 intf->condition = USB_INTERFACE_BOUND;
321 usb_autosuspend_device(udev);
322 } 319 }
323 320
321 error = driver->probe(intf, id);
322 if (error)
323 goto err;
324
325 intf->condition = USB_INTERFACE_BOUND;
326 usb_autosuspend_device(udev);
324 return error; 327 return error;
325 328
326err: 329 err:
327 mark_quiesced(intf);
328 intf->needs_remote_wakeup = 0; 330 intf->needs_remote_wakeup = 0;
329 intf->condition = USB_INTERFACE_UNBOUND; 331 intf->condition = USB_INTERFACE_UNBOUND;
330 usb_cancel_queued_reset(intf); 332 usb_cancel_queued_reset(intf);
333
334 /* Unbound interfaces are always runtime-PM-disabled and -suspended */
335 pm_runtime_disable(dev);
336 pm_runtime_set_suspended(dev);
337
331 usb_autosuspend_device(udev); 338 usb_autosuspend_device(udev);
332 return error; 339 return error;
333} 340}
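
[Note] usb_probe_interface() now delegates the old mark_active()/pm_usage_cnt bookkeeping to the runtime PM core. The three-call sequence is the standard way to hand an already-powered device over to runtime PM without triggering a spurious resume; a sketch, assuming dev is the interface being bound:

#include <linux/pm_runtime.h>

/* Sketch: sync the runtime PM core's view with reality, then opt in. */
static void runtime_pm_on_bind(struct device *dev, bool supports_autosuspend)
{
	pm_runtime_set_active(dev);		/* status := RPM_ACTIVE */
	pm_suspend_ignore_children(dev, false);	/* children pin the parent */
	if (supports_autosuspend)
		pm_runtime_enable(dev);		/* allow runtime callbacks */
}

The error path runs the inverse pair, pm_runtime_disable() followed by pm_runtime_set_suspended(), so an unbound interface always looks suspended to the core.
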
@@ -377,9 +384,17 @@ static int usb_unbind_interface(struct device *dev)
377 usb_set_intfdata(intf, NULL); 384 usb_set_intfdata(intf, NULL);
378 385
379 intf->condition = USB_INTERFACE_UNBOUND; 386 intf->condition = USB_INTERFACE_UNBOUND;
380 mark_quiesced(intf);
381 intf->needs_remote_wakeup = 0; 387 intf->needs_remote_wakeup = 0;
382 388
389 /* Unbound interfaces are always runtime-PM-disabled and -suspended */
390 pm_runtime_disable(dev);
391 pm_runtime_set_suspended(dev);
392
393 /* Undo any residual usb_autopm_get_interface_* calls */
394 for (r = atomic_read(&intf->pm_usage_cnt); r > 0; --r)
395 usb_autopm_put_interface_no_suspend(intf);
396 atomic_set(&intf->pm_usage_cnt, 0);
397
383 if (!error) 398 if (!error)
384 usb_autosuspend_device(udev); 399 usb_autosuspend_device(udev);
385 400
@@ -410,7 +425,6 @@ int usb_driver_claim_interface(struct usb_driver *driver,
410 struct usb_interface *iface, void *priv) 425 struct usb_interface *iface, void *priv)
411{ 426{
412 struct device *dev = &iface->dev; 427 struct device *dev = &iface->dev;
413 struct usb_device *udev = interface_to_usbdev(iface);
414 int retval = 0; 428 int retval = 0;
415 429
416 if (dev->driver) 430 if (dev->driver)
@@ -420,11 +434,16 @@ int usb_driver_claim_interface(struct usb_driver *driver,
420 usb_set_intfdata(iface, priv); 434 usb_set_intfdata(iface, priv);
421 iface->needs_binding = 0; 435 iface->needs_binding = 0;
422 436
423 usb_pm_lock(udev);
424 iface->condition = USB_INTERFACE_BOUND; 437 iface->condition = USB_INTERFACE_BOUND;
425 mark_active(iface); 438
426 atomic_set(&iface->pm_usage_cnt, !driver->supports_autosuspend); 439 /* Bound interfaces are initially active. They are
427 usb_pm_unlock(udev); 440 * runtime-PM-enabled only if the driver has autosuspend support.
441 * They are sensitive to their children's power states.
442 */
443 pm_runtime_set_active(dev);
444 pm_suspend_ignore_children(dev, false);
445 if (driver->supports_autosuspend)
446 pm_runtime_enable(dev);
428 447
429 /* if interface was already added, bind now; else let 448 /* if interface was already added, bind now; else let
430 * the future device_add() bind it, bypassing probe() 449 * the future device_add() bind it, bypassing probe()
@@ -691,9 +710,6 @@ static int usb_uevent(struct device *dev, struct kobj_uevent_env *env)
691{ 710{
692 struct usb_device *usb_dev; 711 struct usb_device *usb_dev;
693 712
694 /* driver is often null here; dev_dbg() would oops */
695 pr_debug("usb %s: uevent\n", dev_name(dev));
696
697 if (is_usb_device(dev)) { 713 if (is_usb_device(dev)) {
698 usb_dev = to_usb_device(dev); 714 usb_dev = to_usb_device(dev);
699 } else if (is_usb_interface(dev)) { 715 } else if (is_usb_interface(dev)) {
@@ -705,6 +721,7 @@ static int usb_uevent(struct device *dev, struct kobj_uevent_env *env)
705 } 721 }
706 722
707 if (usb_dev->devnum < 0) { 723 if (usb_dev->devnum < 0) {
724 /* driver is often null here; dev_dbg() would oops */
708 pr_debug("usb %s: already deleted?\n", dev_name(dev)); 725 pr_debug("usb %s: already deleted?\n", dev_name(dev));
709 return -ENODEV; 726 return -ENODEV;
710 } 727 }
@@ -983,7 +1000,6 @@ static void do_unbind_rebind(struct usb_device *udev, int action)
983 } 1000 }
984} 1001}
985 1002
986/* Caller has locked udev's pm_mutex */
987static int usb_suspend_device(struct usb_device *udev, pm_message_t msg) 1003static int usb_suspend_device(struct usb_device *udev, pm_message_t msg)
988{ 1004{
989 struct usb_device_driver *udriver; 1005 struct usb_device_driver *udriver;
@@ -1007,7 +1023,6 @@ static int usb_suspend_device(struct usb_device *udev, pm_message_t msg)
1007 return status; 1023 return status;
1008} 1024}
1009 1025
1010/* Caller has locked udev's pm_mutex */
1011static int usb_resume_device(struct usb_device *udev, pm_message_t msg) 1026static int usb_resume_device(struct usb_device *udev, pm_message_t msg)
1012{ 1027{
1013 struct usb_device_driver *udriver; 1028 struct usb_device_driver *udriver;
@@ -1041,27 +1056,20 @@ static int usb_resume_device(struct usb_device *udev, pm_message_t msg)
1041 return status; 1056 return status;
1042} 1057}
1043 1058
1044/* Caller has locked intf's usb_device's pm mutex */
1045static int usb_suspend_interface(struct usb_device *udev, 1059static int usb_suspend_interface(struct usb_device *udev,
1046 struct usb_interface *intf, pm_message_t msg) 1060 struct usb_interface *intf, pm_message_t msg)
1047{ 1061{
1048 struct usb_driver *driver; 1062 struct usb_driver *driver;
1049 int status = 0; 1063 int status = 0;
1050 1064
1051 /* with no hardware, USB interfaces only use FREEZE and ON states */ 1065 if (udev->state == USB_STATE_NOTATTACHED ||
1052 if (udev->state == USB_STATE_NOTATTACHED || !is_active(intf)) 1066 intf->condition == USB_INTERFACE_UNBOUND)
1053 goto done;
1054
1055 /* This can happen; see usb_driver_release_interface() */
1056 if (intf->condition == USB_INTERFACE_UNBOUND)
1057 goto done; 1067 goto done;
1058 driver = to_usb_driver(intf->dev.driver); 1068 driver = to_usb_driver(intf->dev.driver);
1059 1069
1060 if (driver->suspend) { 1070 if (driver->suspend) {
1061 status = driver->suspend(intf, msg); 1071 status = driver->suspend(intf, msg);
1062 if (status == 0) 1072 if (status && !(msg.event & PM_EVENT_AUTO))
1063 mark_quiesced(intf);
1064 else if (!(msg.event & PM_EVENT_AUTO))
1065 dev_err(&intf->dev, "%s error %d\n", 1073 dev_err(&intf->dev, "%s error %d\n",
1066 "suspend", status); 1074 "suspend", status);
1067 } else { 1075 } else {
@@ -1069,7 +1077,6 @@ static int usb_suspend_interface(struct usb_device *udev,
1069 intf->needs_binding = 1; 1077 intf->needs_binding = 1;
1070 dev_warn(&intf->dev, "no %s for driver %s?\n", 1078 dev_warn(&intf->dev, "no %s for driver %s?\n",
1071 "suspend", driver->name); 1079 "suspend", driver->name);
1072 mark_quiesced(intf);
1073 } 1080 }
1074 1081
1075 done: 1082 done:
@@ -1077,14 +1084,13 @@ static int usb_suspend_interface(struct usb_device *udev,
1077 return status; 1084 return status;
1078} 1085}
1079 1086
1080/* Caller has locked intf's usb_device's pm_mutex */
1081static int usb_resume_interface(struct usb_device *udev, 1087static int usb_resume_interface(struct usb_device *udev,
1082 struct usb_interface *intf, pm_message_t msg, int reset_resume) 1088 struct usb_interface *intf, pm_message_t msg, int reset_resume)
1083{ 1089{
1084 struct usb_driver *driver; 1090 struct usb_driver *driver;
1085 int status = 0; 1091 int status = 0;
1086 1092
1087 if (udev->state == USB_STATE_NOTATTACHED || is_active(intf)) 1093 if (udev->state == USB_STATE_NOTATTACHED)
1088 goto done; 1094 goto done;
1089 1095
1090 /* Don't let autoresume interfere with unbinding */ 1096 /* Don't let autoresume interfere with unbinding */
@@ -1135,90 +1141,11 @@ static int usb_resume_interface(struct usb_device *udev,
1135 1141
1136done: 1142done:
1137 dev_vdbg(&intf->dev, "%s: status %d\n", __func__, status); 1143 dev_vdbg(&intf->dev, "%s: status %d\n", __func__, status);
1138 if (status == 0 && intf->condition == USB_INTERFACE_BOUND)
1139 mark_active(intf);
1140 1144
1141 /* Later we will unbind the driver and/or reprobe, if necessary */ 1145 /* Later we will unbind the driver and/or reprobe, if necessary */
1142 return status; 1146 return status;
1143} 1147}
1144 1148
1145#ifdef CONFIG_USB_SUSPEND
1146
1147/* Internal routine to check whether we may autosuspend a device. */
1148static int autosuspend_check(struct usb_device *udev, int reschedule)
1149{
1150 int i;
1151 struct usb_interface *intf;
1152 unsigned long suspend_time, j;
1153
1154 /* For autosuspend, fail fast if anything is in use or autosuspend
1155 * is disabled. Also fail if any interfaces require remote wakeup
1156 * but it isn't available.
1157 */
1158 if (udev->pm_usage_cnt > 0)
1159 return -EBUSY;
1160 if (udev->autosuspend_delay < 0 || udev->autosuspend_disabled)
1161 return -EPERM;
1162
1163 suspend_time = udev->last_busy + udev->autosuspend_delay;
1164 if (udev->actconfig) {
1165 for (i = 0; i < udev->actconfig->desc.bNumInterfaces; i++) {
1166 intf = udev->actconfig->interface[i];
1167 if (!is_active(intf))
1168 continue;
1169 if (atomic_read(&intf->pm_usage_cnt) > 0)
1170 return -EBUSY;
1171 if (intf->needs_remote_wakeup &&
1172 !udev->do_remote_wakeup) {
1173 dev_dbg(&udev->dev, "remote wakeup needed "
1174 "for autosuspend\n");
1175 return -EOPNOTSUPP;
1176 }
1177
1178 /* Don't allow autosuspend if the device will need
1179 * a reset-resume and any of its interface drivers
1180 * doesn't include support.
1181 */
1182 if (udev->quirks & USB_QUIRK_RESET_RESUME) {
1183 struct usb_driver *driver;
1184
1185 driver = to_usb_driver(intf->dev.driver);
1186 if (!driver->reset_resume ||
1187 intf->needs_remote_wakeup)
1188 return -EOPNOTSUPP;
1189 }
1190 }
1191 }
1192
1193 /* If everything is okay but the device hasn't been idle for long
1194 * enough, queue a delayed autosuspend request. If the device
1195 * _has_ been idle for long enough and the reschedule flag is set,
1196 * likewise queue a delayed (1 second) autosuspend request.
1197 */
1198 j = jiffies;
1199 if (time_before(j, suspend_time))
1200 reschedule = 1;
1201 else
1202 suspend_time = j + HZ;
1203 if (reschedule) {
1204 if (!timer_pending(&udev->autosuspend.timer)) {
1205 queue_delayed_work(ksuspend_usb_wq, &udev->autosuspend,
1206 round_jiffies_up_relative(suspend_time - j));
1207 }
1208 return -EAGAIN;
1209 }
1210 return 0;
1211}
1212
1213#else
1214
1215static inline int autosuspend_check(struct usb_device *udev, int reschedule)
1216{
1217 return 0;
1218}
1219
1220#endif /* CONFIG_USB_SUSPEND */
1221
1222/** 1149/**
1223 * usb_suspend_both - suspend a USB device and its interfaces 1150 * usb_suspend_both - suspend a USB device and its interfaces
1224 * @udev: the usb_device to suspend 1151 * @udev: the usb_device to suspend
@@ -1230,27 +1157,12 @@ static inline int autosuspend_check(struct usb_device *udev, int reschedule)
1230 * all the interfaces which were suspended are resumed so that they remain 1157 * all the interfaces which were suspended are resumed so that they remain
1231 * in the same state as the device. 1158 * in the same state as the device.
1232 * 1159 *
1233 * If an autosuspend is in progress the routine checks first to make sure 1160 * Autosuspend requests originating from a child device or an interface
1234 * that neither the device itself or any of its active interfaces is in use 1161 * driver may be made without the protection of @udev's device lock, but
1235 * (pm_usage_cnt is greater than 0). If they are, the autosuspend fails. 1162 * all other suspend calls will hold the lock. Usbcore will ensure that
1236 * 1163 * method calls do not arrive during bind, unbind, or reset operations.
1237 * If the suspend succeeds, the routine recursively queues an autosuspend 1164 * However, drivers must be prepared to handle suspend calls arriving at
1238 * request for @udev's parent device, thereby propagating the change up 1165 * unpredictable times.
1239 * the device tree. If all of the parent's children are now suspended,
1240 * the parent will autosuspend in turn.
1241 *
1242 * The suspend method calls are subject to mutual exclusion under control
1243 * of @udev's pm_mutex. Many of these calls are also under the protection
1244 * of @udev's device lock (including all requests originating outside the
1245 * USB subsystem), but autosuspend requests generated by a child device or
1246 * interface driver may not be. Usbcore will ensure that the method calls
1247 * do not arrive during bind, unbind, or reset operations. However, drivers
1248 * must be prepared to handle suspend calls arriving at unpredictable times.
1249 * The only way to block such calls is to do an autoresume (preventing
1250 * autosuspends) while holding @udev's device lock (preventing outside
1251 * suspends).
1252 *
1253 * The caller must hold @udev->pm_mutex.
1254 * 1166 *
1255 * This routine can run only in process context. 1167 * This routine can run only in process context.
1256 */ 1168 */
@@ -1259,20 +1171,11 @@ static int usb_suspend_both(struct usb_device *udev, pm_message_t msg)
1259 int status = 0; 1171 int status = 0;
1260 int i = 0; 1172 int i = 0;
1261 struct usb_interface *intf; 1173 struct usb_interface *intf;
1262 struct usb_device *parent = udev->parent;
1263 1174
1264 if (udev->state == USB_STATE_NOTATTACHED || 1175 if (udev->state == USB_STATE_NOTATTACHED ||
1265 udev->state == USB_STATE_SUSPENDED) 1176 udev->state == USB_STATE_SUSPENDED)
1266 goto done; 1177 goto done;
1267 1178
1268 udev->do_remote_wakeup = device_may_wakeup(&udev->dev);
1269
1270 if (msg.event & PM_EVENT_AUTO) {
1271 status = autosuspend_check(udev, 0);
1272 if (status < 0)
1273 goto done;
1274 }
1275
1276 /* Suspend all the interfaces and then udev itself */ 1179 /* Suspend all the interfaces and then udev itself */
1277 if (udev->actconfig) { 1180 if (udev->actconfig) {
1278 for (; i < udev->actconfig->desc.bNumInterfaces; i++) { 1181 for (; i < udev->actconfig->desc.bNumInterfaces; i++) {
@@ -1287,35 +1190,21 @@ static int usb_suspend_both(struct usb_device *udev, pm_message_t msg)
1287 1190
1288 /* If the suspend failed, resume interfaces that did get suspended */ 1191 /* If the suspend failed, resume interfaces that did get suspended */
1289 if (status != 0) { 1192 if (status != 0) {
1290 pm_message_t msg2; 1193 msg.event ^= (PM_EVENT_SUSPEND | PM_EVENT_RESUME);
1291
1292 msg2.event = msg.event ^ (PM_EVENT_SUSPEND | PM_EVENT_RESUME);
1293 while (--i >= 0) { 1194 while (--i >= 0) {
1294 intf = udev->actconfig->interface[i]; 1195 intf = udev->actconfig->interface[i];
1295 usb_resume_interface(udev, intf, msg2, 0); 1196 usb_resume_interface(udev, intf, msg, 0);
1296 } 1197 }
1297 1198
1298 /* Try another autosuspend when the interfaces aren't busy */ 1199 /* If the suspend succeeded then prevent any more URB submissions
1299 if (msg.event & PM_EVENT_AUTO) 1200 * and flush any outstanding URBs.
1300 autosuspend_check(udev, status == -EBUSY);
1301
1302 /* If the suspend succeeded then prevent any more URB submissions,
1303 * flush any outstanding URBs, and propagate the suspend up the tree.
1304 */ 1201 */
1305 } else { 1202 } else {
1306 cancel_delayed_work(&udev->autosuspend);
1307 udev->can_submit = 0; 1203 udev->can_submit = 0;
1308 for (i = 0; i < 16; ++i) { 1204 for (i = 0; i < 16; ++i) {
1309 usb_hcd_flush_endpoint(udev, udev->ep_out[i]); 1205 usb_hcd_flush_endpoint(udev, udev->ep_out[i]);
1310 usb_hcd_flush_endpoint(udev, udev->ep_in[i]); 1206 usb_hcd_flush_endpoint(udev, udev->ep_in[i]);
1311 } 1207 }
1312
1313 /* If this is just a FREEZE or a PRETHAW, udev might
1314 * not really be suspended. Only true suspends get
1315 * propagated up the device tree.
1316 */
1317 if (parent && udev->state == USB_STATE_SUSPENDED)
1318 usb_autosuspend_device(parent);
1319 } 1208 }
1320 1209
1321 done: 1210 done:
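
[Note] The failure path above reuses msg in place: XOR-ing with (PM_EVENT_SUSPEND | PM_EVENT_RESUME) works because any suspend message has exactly one of those two bits set, so the XOR clears it and sets the other while leaving bits such as PM_EVENT_AUTO intact. A standalone check (the bit values are assumed from the linux/pm.h of this era):

#include <assert.h>
#include <stdio.h>

#define PM_EVENT_SUSPEND 0x0002	/* assumed values, per linux/pm.h */
#define PM_EVENT_RESUME  0x0010
#define PM_EVENT_AUTO    0x0400

int main(void)
{
	int ev = PM_EVENT_AUTO | PM_EVENT_SUSPEND;	/* PMSG_AUTO_SUSPEND */

	ev ^= (PM_EVENT_SUSPEND | PM_EVENT_RESUME);	/* flip the pair */
	assert(ev == (PM_EVENT_AUTO | PM_EVENT_RESUME));/* PMSG_AUTO_RESUME */
	printf("0x%04x\n", ev);
	return 0;
}
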
@@ -1332,23 +1221,12 @@ static int usb_suspend_both(struct usb_device *udev, pm_message_t msg)
1332 * the resume method for @udev and then calls the resume methods for all 1221 * the resume method for @udev and then calls the resume methods for all
1333 * the interface drivers in @udev. 1222 * the interface drivers in @udev.
1334 * 1223 *
1335 * Before starting the resume, the routine calls itself recursively for 1224 * Autoresume requests originating from a child device or an interface
1336 * the parent device of @udev, thereby propagating the change up the device 1225 * driver may be made without the protection of @udev's device lock, but
1337 * tree and assuring that @udev will be able to resume. If the parent is 1226 * all other resume calls will hold the lock. Usbcore will ensure that
1338 * unable to resume successfully, the routine fails. 1227 * method calls do not arrive during bind, unbind, or reset operations.
1339 * 1228 * However, drivers must be prepared to handle resume calls arriving at
1340 * The resume method calls are subject to mutual exclusion under control 1229 * unpredictable times.
1341 * of @udev's pm_mutex. Many of these calls are also under the protection
1342 * of @udev's device lock (including all requests originating outside the
1343 * USB subsystem), but autoresume requests generated by a child device or
1344 * interface driver may not be. Usbcore will ensure that the method calls
1345 * do not arrive during bind, unbind, or reset operations. However, drivers
1346 * must be prepared to handle resume calls arriving at unpredictable times.
1347 * The only way to block such calls is to do an autoresume (preventing
1348 * other autoresumes) while holding @udev's device lock (preventing outside
1349 * resumes).
1350 *
1351 * The caller must hold @udev->pm_mutex.
1352 * 1230 *
1353 * This routine can run only in process context. 1231 * This routine can run only in process context.
1354 */ 1232 */
@@ -1357,48 +1235,18 @@ static int usb_resume_both(struct usb_device *udev, pm_message_t msg)
1357 int status = 0; 1235 int status = 0;
1358 int i; 1236 int i;
1359 struct usb_interface *intf; 1237 struct usb_interface *intf;
1360 struct usb_device *parent = udev->parent;
1361 1238
1362 cancel_delayed_work(&udev->autosuspend);
1363 if (udev->state == USB_STATE_NOTATTACHED) { 1239 if (udev->state == USB_STATE_NOTATTACHED) {
1364 status = -ENODEV; 1240 status = -ENODEV;
1365 goto done; 1241 goto done;
1366 } 1242 }
1367 udev->can_submit = 1; 1243 udev->can_submit = 1;
1368 1244
1369 /* Propagate the resume up the tree, if necessary */ 1245 /* Resume the device */
1370 if (udev->state == USB_STATE_SUSPENDED) { 1246 if (udev->state == USB_STATE_SUSPENDED || udev->reset_resume)
1371 if (parent) {
1372 status = usb_autoresume_device(parent);
1373 if (status == 0) {
1374 status = usb_resume_device(udev, msg);
1375 if (status || udev->state ==
1376 USB_STATE_NOTATTACHED) {
1377 usb_autosuspend_device(parent);
1378
1379 /* It's possible usb_resume_device()
1380 * failed after the port was
1381 * unsuspended, causing udev to be
1382 * logically disconnected. We don't
1383 * want usb_disconnect() to autosuspend
1384 * the parent again, so tell it that
1385 * udev disconnected while still
1386 * suspended. */
1387 if (udev->state ==
1388 USB_STATE_NOTATTACHED)
1389 udev->discon_suspended = 1;
1390 }
1391 }
1392 } else {
1393
1394 /* We can't propagate beyond the USB subsystem,
1395 * so if a root hub's controller is suspended
1396 * then we're stuck. */
1397 status = usb_resume_device(udev, msg);
1398 }
1399 } else if (udev->reset_resume)
1400 status = usb_resume_device(udev, msg); 1247 status = usb_resume_device(udev, msg);
1401 1248
1249 /* Resume the interfaces */
1402 if (status == 0 && udev->actconfig) { 1250 if (status == 0 && udev->actconfig) {
1403 for (i = 0; i < udev->actconfig->desc.bNumInterfaces; i++) { 1251 for (i = 0; i < udev->actconfig->desc.bNumInterfaces; i++) {
1404 intf = udev->actconfig->interface[i]; 1252 intf = udev->actconfig->interface[i];
@@ -1414,55 +1262,94 @@ static int usb_resume_both(struct usb_device *udev, pm_message_t msg)
1414 return status; 1262 return status;
1415} 1263}
1416 1264
1417#ifdef CONFIG_USB_SUSPEND 1265/* The device lock is held by the PM core */
1266int usb_suspend(struct device *dev, pm_message_t msg)
1267{
1268 struct usb_device *udev = to_usb_device(dev);
1418 1269
1419/* Internal routine to adjust a device's usage counter and change 1270 do_unbind_rebind(udev, DO_UNBIND);
1420 * its autosuspend state. 1271 udev->do_remote_wakeup = device_may_wakeup(&udev->dev);
1421 */ 1272 return usb_suspend_both(udev, msg);
1422static int usb_autopm_do_device(struct usb_device *udev, int inc_usage_cnt) 1273}
1274
1275/* The device lock is held by the PM core */
1276int usb_resume(struct device *dev, pm_message_t msg)
1423{ 1277{
1424 int status = 0; 1278 struct usb_device *udev = to_usb_device(dev);
1279 int status;
1425 1280
1426 usb_pm_lock(udev); 1281 /* For PM complete calls, all we do is rebind interfaces */
1427 udev->pm_usage_cnt += inc_usage_cnt; 1282 if (msg.event == PM_EVENT_ON) {
1428 WARN_ON(udev->pm_usage_cnt < 0); 1283 if (udev->state != USB_STATE_NOTATTACHED)
1429 if (inc_usage_cnt) 1284 do_unbind_rebind(udev, DO_REBIND);
1430 udev->last_busy = jiffies; 1285 status = 0;
1431 if (inc_usage_cnt >= 0 && udev->pm_usage_cnt > 0) { 1286
1432 if (udev->state == USB_STATE_SUSPENDED) 1287 /* For all other calls, take the device back to full power and
1433 status = usb_resume_both(udev, PMSG_AUTO_RESUME); 1288 * tell the PM core in case it was autosuspended previously.
1434 if (status != 0) 1289 */
1435 udev->pm_usage_cnt -= inc_usage_cnt; 1290 } else {
1436 else if (inc_usage_cnt) 1291 status = usb_resume_both(udev, msg);
1292 if (status == 0) {
1293 pm_runtime_disable(dev);
1294 pm_runtime_set_active(dev);
1295 pm_runtime_enable(dev);
1437 udev->last_busy = jiffies; 1296 udev->last_busy = jiffies;
1438 } else if (inc_usage_cnt <= 0 && udev->pm_usage_cnt <= 0) { 1297 }
1439 status = usb_suspend_both(udev, PMSG_AUTO_SUSPEND);
1440 } 1298 }
1441 usb_pm_unlock(udev); 1299
1300 /* Avoid PM error messages for devices disconnected while suspended
1301 * as we'll display regular disconnect messages just a bit later.
1302 */
1303 if (status == -ENODEV)
1304 status = 0;
1442 return status; 1305 return status;
1443} 1306}
1444 1307
1445/* usb_autosuspend_work - callback routine to autosuspend a USB device */ 1308#endif /* CONFIG_PM */
1446void usb_autosuspend_work(struct work_struct *work) 1309
1447{ 1310#ifdef CONFIG_USB_SUSPEND
1448 struct usb_device *udev =
1449 container_of(work, struct usb_device, autosuspend.work);
1450 1311
1451 usb_autopm_do_device(udev, 0); 1312/**
1313 * usb_enable_autosuspend - allow a USB device to be autosuspended
1314 * @udev: the USB device which may be autosuspended
1315 *
1316 * This routine allows @udev to be autosuspended. An autosuspend won't
1317 * take place until the autosuspend_delay has elapsed and all the other
1318 * necessary conditions are satisfied.
1319 *
1320 * The caller must hold @udev's device lock.
1321 */
1322int usb_enable_autosuspend(struct usb_device *udev)
1323{
1324 if (udev->autosuspend_disabled) {
1325 udev->autosuspend_disabled = 0;
1326 usb_autosuspend_device(udev);
1327 }
1328 return 0;
1452} 1329}
1330EXPORT_SYMBOL_GPL(usb_enable_autosuspend);
1453 1331
1454/* usb_autoresume_work - callback routine to autoresume a USB device */ 1332/**
1455void usb_autoresume_work(struct work_struct *work) 1333 * usb_disable_autosuspend - prevent a USB device from being autosuspended
1334 * @udev: the USB device which may not be autosuspended
1335 *
1336 * This routine prevents @udev from being autosuspended and wakes it up
1337 * if it is already autosuspended.
1338 *
1339 * The caller must hold @udev's device lock.
1340 */
1341int usb_disable_autosuspend(struct usb_device *udev)
1456{ 1342{
1457 struct usb_device *udev = 1343 int rc = 0;
1458 container_of(work, struct usb_device, autoresume);
1459 1344
1460 /* Wake it up, let the drivers do their thing, and then put it 1345 if (!udev->autosuspend_disabled) {
1461 * back to sleep. 1346 rc = usb_autoresume_device(udev);
1462 */ 1347 if (rc == 0)
1463 if (usb_autopm_do_device(udev, 1) == 0) 1348 udev->autosuspend_disabled = 1;
1464 usb_autopm_do_device(udev, -1); 1349 }
1350 return rc;
1465} 1351}
1352EXPORT_SYMBOL_GPL(usb_disable_autosuspend);
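
[Note] Together usb_enable_autosuspend() and usb_disable_autosuspend() form a balanced switch: disabling takes a resume reference (waking the device if necessary), enabling drops it again. A hedged usage sketch for a caller holding the device lock, as the kerneldoc requires; foo_download_firmware() is hypothetical:

/* Sketch: pin the device at full power around an operation that
 * cannot tolerate autosuspend.
 */
static int foo_update(struct usb_device *udev)
{
	int rc;

	usb_lock_device(udev);
	rc = usb_disable_autosuspend(udev);	/* resumes if suspended */
	if (rc == 0) {
		rc = foo_download_firmware(udev);	/* hypothetical */
		usb_enable_autosuspend(udev);	/* may autosuspend again */
	}
	usb_unlock_device(udev);
	return rc;
}
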
1466 1353
1467/** 1354/**
1468 * usb_autosuspend_device - delayed autosuspend of a USB device and its interfaces 1355 * usb_autosuspend_device - delayed autosuspend of a USB device and its interfaces
@@ -1472,15 +1359,11 @@ void usb_autoresume_work(struct work_struct *work)
1472 * @udev and wants to allow it to autosuspend. Examples would be when 1359 * @udev and wants to allow it to autosuspend. Examples would be when
1473 * @udev's device file in usbfs is closed or after a configuration change. 1360 * @udev's device file in usbfs is closed or after a configuration change.
1474 * 1361 *
1475 * @udev's usage counter is decremented. If it or any of the usage counters 1362 * @udev's usage counter is decremented; if it drops to 0 and all the
1476 * for an active interface is greater than 0, no autosuspend request will be 1363 * interfaces are inactive then a delayed autosuspend will be attempted.
1477 * queued. (If an interface driver does not support autosuspend then its 1364 * The attempt may fail (see autosuspend_check()).
1478 * usage counter is permanently positive.) Furthermore, if an interface
1479 * driver requires remote-wakeup capability during autosuspend but remote
1480 * wakeup is disabled, the autosuspend will fail.
1481 * 1365 *
1482 * Often the caller will hold @udev's device lock, but this is not 1366 * The caller must hold @udev's device lock.
1483 * necessary.
1484 * 1367 *
1485 * This routine can run only in process context. 1368 * This routine can run only in process context.
1486 */ 1369 */
@@ -1488,9 +1371,11 @@ void usb_autosuspend_device(struct usb_device *udev)
1488{ 1371{
1489 int status; 1372 int status;
1490 1373
1491 status = usb_autopm_do_device(udev, -1); 1374 udev->last_busy = jiffies;
1492 dev_vdbg(&udev->dev, "%s: cnt %d\n", 1375 status = pm_runtime_put_sync(&udev->dev);
1493 __func__, udev->pm_usage_cnt); 1376 dev_vdbg(&udev->dev, "%s: cnt %d -> %d\n",
1377 __func__, atomic_read(&udev->dev.power.usage_count),
1378 status);
1494} 1379}
1495 1380
1496/** 1381/**
@@ -1500,17 +1385,22 @@ void usb_autosuspend_device(struct usb_device *udev)
1500 * This routine should be called when a core subsystem thinks @udev may 1385 * This routine should be called when a core subsystem thinks @udev may
1501 * be ready to autosuspend. 1386 * be ready to autosuspend.
1502 * 1387 *
1503 * @udev's usage counter is left unchanged. If it or any of the usage counters 1388 * @udev's usage counter is left unchanged. If it is 0 and all the interfaces
1504 * for an active interface is greater than 0, or autosuspend is not allowed 1389 * are inactive then an autosuspend will be attempted. The attempt may
1505 * for any other reason, no autosuspend request will be queued. 1390 * fail or be delayed.
1391 *
1392 * The caller must hold @udev's device lock.
1506 * 1393 *
1507 * This routine can run only in process context. 1394 * This routine can run only in process context.
1508 */ 1395 */
1509void usb_try_autosuspend_device(struct usb_device *udev) 1396void usb_try_autosuspend_device(struct usb_device *udev)
1510{ 1397{
1511 usb_autopm_do_device(udev, 0); 1398 int status;
1512 dev_vdbg(&udev->dev, "%s: cnt %d\n", 1399
1513 __func__, udev->pm_usage_cnt); 1400 status = pm_runtime_idle(&udev->dev);
1401 dev_vdbg(&udev->dev, "%s: cnt %d -> %d\n",
1402 __func__, atomic_read(&udev->dev.power.usage_count),
1403 status);
1514} 1404}
1515 1405
1516/** 1406/**
@@ -1519,16 +1409,15 @@ void usb_try_autosuspend_device(struct usb_device *udev)
1519 * 1409 *
1520 * This routine should be called when a core subsystem wants to use @udev 1410 * This routine should be called when a core subsystem wants to use @udev
1521 * and needs to guarantee that it is not suspended. No autosuspend will 1411 * and needs to guarantee that it is not suspended. No autosuspend will
1522 * occur until usb_autosuspend_device is called. (Note that this will not 1412 * occur until usb_autosuspend_device() is called. (Note that this will
1523 * prevent suspend events originating in the PM core.) Examples would be 1413 * not prevent suspend events originating in the PM core.) Examples would
1524 * when @udev's device file in usbfs is opened or when a remote-wakeup 1414 * be when @udev's device file in usbfs is opened or when a remote-wakeup
1525 * request is received. 1415 * request is received.
1526 * 1416 *
1527 * @udev's usage counter is incremented to prevent subsequent autosuspends. 1417 * @udev's usage counter is incremented to prevent subsequent autosuspends.
1528 * However if the autoresume fails then the usage counter is re-decremented. 1418 * However if the autoresume fails then the usage counter is re-decremented.
1529 * 1419 *
1530 * Often the caller will hold @udev's device lock, but this is not 1420 * The caller must hold @udev's device lock.
1531 * necessary (and attempting it might cause deadlock).
1532 * 1421 *
1533 * This routine can run only in process context. 1422 * This routine can run only in process context.
1534 */ 1423 */
@@ -1536,42 +1425,14 @@ int usb_autoresume_device(struct usb_device *udev)
1536{ 1425{
1537 int status; 1426 int status;
1538 1427
1539 status = usb_autopm_do_device(udev, 1); 1428 status = pm_runtime_get_sync(&udev->dev);
1540 dev_vdbg(&udev->dev, "%s: status %d cnt %d\n", 1429 if (status < 0)
1541 __func__, status, udev->pm_usage_cnt); 1430 pm_runtime_put_sync(&udev->dev);
1542 return status; 1431 dev_vdbg(&udev->dev, "%s: cnt %d -> %d\n",
1543} 1432 __func__, atomic_read(&udev->dev.power.usage_count),
1544 1433 status);
1545/* Internal routine to adjust an interface's usage counter and change 1434 if (status > 0)
1546 * its device's autosuspend state. 1435 status = 0;
1547 */
1548static int usb_autopm_do_interface(struct usb_interface *intf,
1549 int inc_usage_cnt)
1550{
1551 struct usb_device *udev = interface_to_usbdev(intf);
1552 int status = 0;
1553
1554 usb_pm_lock(udev);
1555 if (intf->condition == USB_INTERFACE_UNBOUND)
1556 status = -ENODEV;
1557 else {
1558 atomic_add(inc_usage_cnt, &intf->pm_usage_cnt);
1559 udev->last_busy = jiffies;
1560 if (inc_usage_cnt >= 0 &&
1561 atomic_read(&intf->pm_usage_cnt) > 0) {
1562 if (udev->state == USB_STATE_SUSPENDED)
1563 status = usb_resume_both(udev,
1564 PMSG_AUTO_RESUME);
1565 if (status != 0)
1566 atomic_sub(inc_usage_cnt, &intf->pm_usage_cnt);
1567 else
1568 udev->last_busy = jiffies;
1569 } else if (inc_usage_cnt <= 0 &&
1570 atomic_read(&intf->pm_usage_cnt) <= 0) {
1571 status = usb_suspend_both(udev, PMSG_AUTO_SUSPEND);
1572 }
1573 }
1574 usb_pm_unlock(udev);
1575 return status; 1436 return status;
1576} 1437}
1577 1438
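
[Note] usb_autoresume_device() above illustrates the pm_runtime_get_sync() error discipline: the call increments the usage count even when the resume fails, so a failed get must be answered with an explicit put. The idiom in isolation:

/* Sketch: balance the usage count when a synchronous resume fails. */
static int wake_device(struct device *dev)
{
	int status = pm_runtime_get_sync(dev);	/* ++count, then resume */

	if (status < 0) {
		pm_runtime_put_sync(dev);	/* undo the count */
		return status;
	}
	return 0;	/* get_sync() returns 1 if already active */
}
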
@@ -1585,34 +1446,25 @@ static int usb_autopm_do_interface(struct usb_interface *intf,
1585 * closed. 1446 * closed.
1586 * 1447 *
1587 * The routine decrements @intf's usage counter. When the counter reaches 1448 * The routine decrements @intf's usage counter. When the counter reaches
1588 * 0, a delayed autosuspend request for @intf's device is queued. When 1449 * 0, a delayed autosuspend request for @intf's device is attempted. The
1589 * the delay expires, if @intf->pm_usage_cnt is still <= 0 along with all 1450 * attempt may fail (see autosuspend_check()).
1590 * the other usage counters for the sibling interfaces and @intf's
1591 * usb_device, the device and all its interfaces will be autosuspended.
1592 *
1593 * Note that @intf->pm_usage_cnt is owned by the interface driver. The
1594 * core will not change its value other than the increment and decrement
1595 * in usb_autopm_get_interface and usb_autopm_put_interface. The driver
1596 * may use this simple counter-oriented discipline or may set the value
1597 * any way it likes.
1598 * 1451 *
1599 * If the driver has set @intf->needs_remote_wakeup then autosuspend will 1452 * If the driver has set @intf->needs_remote_wakeup then autosuspend will
1600 * take place only if the device's remote-wakeup facility is enabled. 1453 * take place only if the device's remote-wakeup facility is enabled.
1601 * 1454 *
1602 * Suspend method calls queued by this routine can arrive at any time
1603 * while @intf is resumed and its usage counter is equal to 0. They are
1604 * not protected by the usb_device's lock but only by its pm_mutex.
1605 * Drivers must provide their own synchronization.
1606 *
1607 * This routine can run only in process context. 1455 * This routine can run only in process context.
1608 */ 1456 */
1609void usb_autopm_put_interface(struct usb_interface *intf) 1457void usb_autopm_put_interface(struct usb_interface *intf)
1610{ 1458{
1611 int status; 1459 struct usb_device *udev = interface_to_usbdev(intf);
1460 int status;
1612 1461
1613 status = usb_autopm_do_interface(intf, -1); 1462 udev->last_busy = jiffies;
1614 dev_vdbg(&intf->dev, "%s: status %d cnt %d\n", 1463 atomic_dec(&intf->pm_usage_cnt);
1615 __func__, status, atomic_read(&intf->pm_usage_cnt)); 1464 status = pm_runtime_put_sync(&intf->dev);
1465 dev_vdbg(&intf->dev, "%s: cnt %d -> %d\n",
1466 __func__, atomic_read(&intf->dev.power.usage_count),
1467 status);
1616} 1468}
1617EXPORT_SYMBOL_GPL(usb_autopm_put_interface); 1469EXPORT_SYMBOL_GPL(usb_autopm_put_interface);
1618 1470
@@ -1620,11 +1472,11 @@ EXPORT_SYMBOL_GPL(usb_autopm_put_interface);
1620 * usb_autopm_put_interface_async - decrement a USB interface's PM-usage counter 1472 * usb_autopm_put_interface_async - decrement a USB interface's PM-usage counter
1621 * @intf: the usb_interface whose counter should be decremented 1473 * @intf: the usb_interface whose counter should be decremented
1622 * 1474 *
1623 * This routine does essentially the same thing as 1475 * This routine does much the same thing as usb_autopm_put_interface():
1624 * usb_autopm_put_interface(): it decrements @intf's usage counter and 1476 * It decrements @intf's usage counter and schedules a delayed
1625 * queues a delayed autosuspend request if the counter is <= 0. The 1477 * autosuspend request if the counter is <= 0. The difference is that it
1626 * difference is that it does not acquire the device's pm_mutex; 1478 * does not perform any synchronization; callers should hold a private
1627 * callers must handle all synchronization issues themselves. 1479 * lock and handle all synchronization issues themselves.
1628 * 1480 *
1629 * Typically a driver would call this routine during an URB's completion 1481 * Typically a driver would call this routine during an URB's completion
1630 * handler, if no more URBs were pending. 1482 * handler, if no more URBs were pending.
@@ -1634,28 +1486,58 @@ EXPORT_SYMBOL_GPL(usb_autopm_put_interface);
1634void usb_autopm_put_interface_async(struct usb_interface *intf) 1486void usb_autopm_put_interface_async(struct usb_interface *intf)
1635{ 1487{
1636 struct usb_device *udev = interface_to_usbdev(intf); 1488 struct usb_device *udev = interface_to_usbdev(intf);
1489 unsigned long last_busy;
1637 int status = 0; 1490 int status = 0;
1638 1491
1639 if (intf->condition == USB_INTERFACE_UNBOUND) { 1492 last_busy = udev->last_busy;
1640 status = -ENODEV; 1493 udev->last_busy = jiffies;
1641 } else { 1494 atomic_dec(&intf->pm_usage_cnt);
1642 udev->last_busy = jiffies; 1495 pm_runtime_put_noidle(&intf->dev);
1643 atomic_dec(&intf->pm_usage_cnt); 1496
1644 if (udev->autosuspend_disabled || udev->autosuspend_delay < 0) 1497 if (!udev->autosuspend_disabled) {
1645 status = -EPERM; 1498 /* Optimization: Don't schedule a delayed autosuspend if
1646 else if (atomic_read(&intf->pm_usage_cnt) <= 0 && 1499 * the timer is already running and the expiration time
1647 !timer_pending(&udev->autosuspend.timer)) { 1500 * wouldn't change.
1648 queue_delayed_work(ksuspend_usb_wq, &udev->autosuspend, 1501 *
1502 * We have to use the interface's timer. Attempts to
1503 * schedule a suspend for the device would fail because
1504 * the interface is still active.
1505 */
1506 if (intf->dev.power.timer_expires == 0 ||
1507 round_jiffies_up(last_busy) !=
1508 round_jiffies_up(jiffies)) {
1509 status = pm_schedule_suspend(&intf->dev,
1510 jiffies_to_msecs(
1649 round_jiffies_up_relative( 1511 round_jiffies_up_relative(
1650 udev->autosuspend_delay)); 1512 udev->autosuspend_delay)));
1651 } 1513 }
1652 } 1514 }
1653 dev_vdbg(&intf->dev, "%s: status %d cnt %d\n", 1515 dev_vdbg(&intf->dev, "%s: cnt %d -> %d\n",
1654 __func__, status, atomic_read(&intf->pm_usage_cnt)); 1516 __func__, atomic_read(&intf->dev.power.usage_count),
1517 status);
1655} 1518}
1656EXPORT_SYMBOL_GPL(usb_autopm_put_interface_async); 1519EXPORT_SYMBOL_GPL(usb_autopm_put_interface_async);
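
[Note] The optimization comment above relies on round_jiffies_up() being a pure bucketing function: two instants inside the same rounding interval map to the same deadline, so rearming the timer would change nothing. Reduced to a sketch (dev must be runtime-PM-enabled; delay is in jiffies):

/* Sketch: rearm the delayed suspend only if the rounded deadline moves. */
static void maybe_reschedule(struct device *dev, unsigned long last_busy,
			     unsigned long delay)
{
	if (dev->power.timer_expires == 0 ||		/* no timer pending */
	    round_jiffies_up(last_busy) != round_jiffies_up(jiffies))
		pm_schedule_suspend(dev, jiffies_to_msecs(
				round_jiffies_up_relative(delay)));
}
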
1657 1520
1658/** 1521/**
1522 * usb_autopm_put_interface_no_suspend - decrement a USB interface's PM-usage counter
1523 * @intf: the usb_interface whose counter should be decremented
1524 *
1525 * This routine decrements @intf's usage counter but does not carry out an
1526 * autosuspend.
1527 *
1528 * This routine can run in atomic context.
1529 */
1530void usb_autopm_put_interface_no_suspend(struct usb_interface *intf)
1531{
1532 struct usb_device *udev = interface_to_usbdev(intf);
1533
1534 udev->last_busy = jiffies;
1535 atomic_dec(&intf->pm_usage_cnt);
1536 pm_runtime_put_noidle(&intf->dev);
1537}
1538EXPORT_SYMBOL_GPL(usb_autopm_put_interface_no_suspend);
1539
1540/**
1659 * usb_autopm_get_interface - increment a USB interface's PM-usage counter 1541 * usb_autopm_get_interface - increment a USB interface's PM-usage counter
1660 * @intf: the usb_interface whose counter should be incremented 1542 * @intf: the usb_interface whose counter should be incremented
1661 * 1543 *
@@ -1667,25 +1549,8 @@ EXPORT_SYMBOL_GPL(usb_autopm_put_interface_async);
1667 * or @intf is unbound. A typical example would be a character-device 1549 * or @intf is unbound. A typical example would be a character-device
1668 * driver when its device file is opened. 1550 * driver when its device file is opened.
1669 * 1551 *
1670 * 1552 * @intf's usage counter is incremented to prevent subsequent autosuspends.
1671 * The routine increments @intf's usage counter. (However if the 1553 * However if the autoresume fails then the counter is re-decremented.
1672 * autoresume fails then the counter is re-decremented.) So long as the
1673 * counter is greater than 0, autosuspend will not be allowed for @intf
1674 * or its usb_device. When the driver is finished using @intf it should
1675 * call usb_autopm_put_interface() to decrement the usage counter and
1676 * queue a delayed autosuspend request (if the counter is <= 0).
1677 *
1678 *
1679 * Note that @intf->pm_usage_cnt is owned by the interface driver. The
1680 * core will not change its value other than the increment and decrement
1681 * in usb_autopm_get_interface and usb_autopm_put_interface. The driver
1682 * may use this simple counter-oriented discipline or may set the value
1683 * any way it likes.
1684 *
1685 * Resume method calls generated by this routine can arrive at any time
1686 * while @intf is suspended. They are not protected by the usb_device's
1687 * lock but only by its pm_mutex. Drivers must provide their own
1688 * synchronization.
1689 * 1554 *
1690 * This routine can run only in process context. 1555 * This routine can run only in process context.
1691 */ 1556 */
@@ -1693,9 +1558,16 @@ int usb_autopm_get_interface(struct usb_interface *intf)
1693{ 1558{
1694 int status; 1559 int status;
1695 1560
1696 status = usb_autopm_do_interface(intf, 1); 1561 status = pm_runtime_get_sync(&intf->dev);
1697 dev_vdbg(&intf->dev, "%s: status %d cnt %d\n", 1562 if (status < 0)
1698 __func__, status, atomic_read(&intf->pm_usage_cnt)); 1563 pm_runtime_put_sync(&intf->dev);
1564 else
1565 atomic_inc(&intf->pm_usage_cnt);
1566 dev_vdbg(&intf->dev, "%s: cnt %d -> %d\n",
1567 __func__, atomic_read(&intf->dev.power.usage_count),
1568 status);
1569 if (status > 0)
1570 status = 0;
1699 return status; 1571 return status;
1700} 1572}
1701EXPORT_SYMBOL_GPL(usb_autopm_get_interface); 1573EXPORT_SYMBOL_GPL(usb_autopm_get_interface);
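
[Note] For interface drivers the synchronous get/put pair remains the whole API; the classic use is bracketing an open device file, as the kerneldoc says. A sketch with hypothetical foo_* file operations (the private_data setup is assumed, not shown):

/* Sketch: hold the interface awake while its device file is open. */
static int foo_open(struct inode *inode, struct file *file)
{
	struct usb_interface *intf = file->private_data;	/* assumed */

	return usb_autopm_get_interface(intf);	/* resume + ++count */
}

static int foo_release(struct inode *inode, struct file *file)
{
	struct usb_interface *intf = file->private_data;

	usb_autopm_put_interface(intf);	/* --count, may autosuspend */
	return 0;
}
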
@@ -1705,149 +1577,207 @@ EXPORT_SYMBOL_GPL(usb_autopm_get_interface);
1705 * @intf: the usb_interface whose counter should be incremented 1577 * @intf: the usb_interface whose counter should be incremented
1706 * 1578 *
1707 * This routine does much the same thing as 1579 * This routine does much the same thing as
1708 * usb_autopm_get_interface(): it increments @intf's usage counter and 1580 * usb_autopm_get_interface(): It increments @intf's usage counter and
1709 * queues an autoresume request if the result is > 0. The differences 1581 * queues an autoresume request if the device is suspended. The
1710 * are that it does not acquire the device's pm_mutex (callers must 1582 * differences are that it does not perform any synchronization (callers
1711 * handle all synchronization issues themselves), and it does not 1583 * should hold a private lock and handle all synchronization issues
1712 * autoresume the device directly (it only queues a request). After a 1584 * themselves), and it does not autoresume the device directly (it only
1713 * successful call, the device will generally not yet be resumed. 1585 * queues a request). After a successful call, the device may not yet be
1586 * resumed.
1714 * 1587 *
1715 * This routine can run in atomic context. 1588 * This routine can run in atomic context.
1716 */ 1589 */
1717int usb_autopm_get_interface_async(struct usb_interface *intf) 1590int usb_autopm_get_interface_async(struct usb_interface *intf)
1718{ 1591{
1719 struct usb_device *udev = interface_to_usbdev(intf); 1592 int status = 0;
1720 int status = 0; 1593 enum rpm_status s;
1721 1594
1722 if (intf->condition == USB_INTERFACE_UNBOUND) 1595 /* Don't request a resume unless the interface is already suspending
1723 status = -ENODEV; 1596 * or suspended. Doing so would force a running suspend timer to be
1724 else { 1597 * cancelled.
1598 */
1599 pm_runtime_get_noresume(&intf->dev);
1600 s = ACCESS_ONCE(intf->dev.power.runtime_status);
1601 if (s == RPM_SUSPENDING || s == RPM_SUSPENDED)
1602 status = pm_request_resume(&intf->dev);
1603
1604 if (status < 0 && status != -EINPROGRESS)
1605 pm_runtime_put_noidle(&intf->dev);
1606 else
1725 atomic_inc(&intf->pm_usage_cnt); 1607 atomic_inc(&intf->pm_usage_cnt);
1726 if (atomic_read(&intf->pm_usage_cnt) > 0 && 1608 dev_vdbg(&intf->dev, "%s: cnt %d -> %d\n",
1727 udev->state == USB_STATE_SUSPENDED) 1609 __func__, atomic_read(&intf->dev.power.usage_count),
1728 queue_work(ksuspend_usb_wq, &udev->autoresume); 1610 status);
1729 } 1611 if (status > 0)
1730 dev_vdbg(&intf->dev, "%s: status %d cnt %d\n", 1612 status = 0;
1731 __func__, status, atomic_read(&intf->pm_usage_cnt));
1732 return status; 1613 return status;
1733} 1614}
1734EXPORT_SYMBOL_GPL(usb_autopm_get_interface_async); 1615EXPORT_SYMBOL_GPL(usb_autopm_get_interface_async);
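
[Note] The async get above is careful to take its reference with pm_runtime_get_noresume() and to request a resume only when the interface is actually suspending or suspended; an unconditional pm_request_resume() would cancel a still-pending suspend timer. The skeleton of that logic, mirroring the hunk:

/* Sketch: atomic-context get that leaves a pending suspend timer alone. */
static int async_get(struct device *dev)
{
	enum rpm_status s;
	int status = 0;

	pm_runtime_get_noresume(dev);	/* ++count, no state change */
	s = ACCESS_ONCE(dev->power.runtime_status);
	if (s == RPM_SUSPENDING || s == RPM_SUSPENDED)
		status = pm_request_resume(dev);	/* async resume */
	if (status < 0 && status != -EINPROGRESS) {
		pm_runtime_put_noidle(dev);	/* roll back on failure */
		return status;
	}
	return 0;
}
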
1735 1616
1736#else
1737
1738void usb_autosuspend_work(struct work_struct *work)
1739{}
1740
1741void usb_autoresume_work(struct work_struct *work)
1742{}
1743
1744#endif /* CONFIG_USB_SUSPEND */
1745
1746/** 1617/**
1747 * usb_external_suspend_device - external suspend of a USB device and its interfaces 1618 * usb_autopm_get_interface_no_resume - increment a USB interface's PM-usage counter
1748 * @udev: the usb_device to suspend 1619 * @intf: the usb_interface whose counter should be incremented
1749 * @msg: Power Management message describing this state transition
1750 * 1620 *
1751 * This routine handles external suspend requests: ones not generated 1621 * This routine increments @intf's usage counter but does not carry out an
1752 * internally by a USB driver (autosuspend) but rather coming from the user 1622 * autoresume.
1753 * (via sysfs) or the PM core (system sleep). The suspend will be carried
1754 * out regardless of @udev's usage counter or those of its interfaces,
1755 * and regardless of whether or not remote wakeup is enabled. Of course,
1756 * interface drivers still have the option of failing the suspend (if
1757 * there are unsuspended children, for example).
1758 * 1623 *
1759 * The caller must hold @udev's device lock. 1624 * This routine can run in atomic context.
1760 */ 1625 */
1761int usb_external_suspend_device(struct usb_device *udev, pm_message_t msg) 1626void usb_autopm_get_interface_no_resume(struct usb_interface *intf)
1762{ 1627{
1763 int status; 1628 struct usb_device *udev = interface_to_usbdev(intf);
1764 1629
1765 do_unbind_rebind(udev, DO_UNBIND); 1630 udev->last_busy = jiffies;
1766 usb_pm_lock(udev); 1631 atomic_inc(&intf->pm_usage_cnt);
1767 status = usb_suspend_both(udev, msg); 1632 pm_runtime_get_noresume(&intf->dev);
1768 usb_pm_unlock(udev);
1769 return status;
1770} 1633}
1634EXPORT_SYMBOL_GPL(usb_autopm_get_interface_no_resume);
1771 1635
1772/** 1636/* Internal routine to check whether we may autosuspend a device. */
1773 * usb_external_resume_device - external resume of a USB device and its interfaces 1637static int autosuspend_check(struct usb_device *udev)
1774 * @udev: the usb_device to resume
1775 * @msg: Power Management message describing this state transition
1776 *
1777 * This routine handles external resume requests: ones not generated
1778 * internally by a USB driver (autoresume) but rather coming from the user
1779 * (via sysfs), the PM core (system resume), or the device itself (remote
1780 * wakeup). @udev's usage counter is unaffected.
1781 *
1782 * The caller must hold @udev's device lock.
1783 */
1784int usb_external_resume_device(struct usb_device *udev, pm_message_t msg)
1785{ 1638{
1786 int status; 1639 int i;
1640 struct usb_interface *intf;
1641 unsigned long suspend_time, j;
1787 1642
1788 usb_pm_lock(udev); 1643 /* Fail if autosuspend is disabled, or any interfaces are in use, or
1789 status = usb_resume_both(udev, msg); 1644 * any interface drivers require remote wakeup but it isn't available.
1790 udev->last_busy = jiffies; 1645 */
1791 usb_pm_unlock(udev); 1646 udev->do_remote_wakeup = device_may_wakeup(&udev->dev);
1792 if (status == 0) 1647 if (udev->actconfig) {
1793 do_unbind_rebind(udev, DO_REBIND); 1648 for (i = 0; i < udev->actconfig->desc.bNumInterfaces; i++) {
1649 intf = udev->actconfig->interface[i];
1794 1650
1795 /* Now that the device is awake, we can start trying to autosuspend 1651 /* We don't need to check interfaces that are
1796 * it again. */ 1652 * disabled for runtime PM. Either they are unbound
1797 if (status == 0) 1653 * or else their drivers don't support autosuspend
1798 usb_try_autosuspend_device(udev); 1654 * and so they are permanently active.
1799 return status; 1655 */
1656 if (intf->dev.power.disable_depth)
1657 continue;
1658 if (atomic_read(&intf->dev.power.usage_count) > 0)
1659 return -EBUSY;
1660 if (intf->needs_remote_wakeup &&
1661 !udev->do_remote_wakeup) {
1662 dev_dbg(&udev->dev, "remote wakeup needed "
1663 "for autosuspend\n");
1664 return -EOPNOTSUPP;
1665 }
1666
1667 /* Don't allow autosuspend if the device will need
1668 * a reset-resume and any of its interface drivers
1669 * doesn't include support or needs remote wakeup.
1670 */
1671 if (udev->quirks & USB_QUIRK_RESET_RESUME) {
1672 struct usb_driver *driver;
1673
1674 driver = to_usb_driver(intf->dev.driver);
1675 if (!driver->reset_resume ||
1676 intf->needs_remote_wakeup)
1677 return -EOPNOTSUPP;
1678 }
1679 }
1680 }
1681
1682 /* If everything is okay but the device hasn't been idle for long
1683 * enough, queue a delayed autosuspend request.
1684 */
1685 j = ACCESS_ONCE(jiffies);
1686 suspend_time = udev->last_busy + udev->autosuspend_delay;
1687 if (time_before(j, suspend_time)) {
1688 pm_schedule_suspend(&udev->dev, jiffies_to_msecs(
1689 round_jiffies_up_relative(suspend_time - j)));
1690 return -EAGAIN;
1691 }
1692 return 0;
1800} 1693}
1801 1694
1802int usb_suspend(struct device *dev, pm_message_t msg) 1695static int usb_runtime_suspend(struct device *dev)
1803{ 1696{
1804 struct usb_device *udev; 1697 int status = 0;
1805
1806 udev = to_usb_device(dev);
1807 1698
1808 /* If udev is already suspended, we can skip this suspend and 1699 /* A USB device can be suspended if it passes the various autosuspend
1809 * we should also skip the upcoming system resume. High-speed 1700 * checks. Runtime suspend for a USB device means suspending all the
1810 * root hubs are an exception; they need to resume whenever the 1701 * interfaces and then the device itself.
1811 * system wakes up in order for USB-PERSIST port handover to work
1812 * properly.
1813 */ 1702 */
1814 if (udev->state == USB_STATE_SUSPENDED) { 1703 if (is_usb_device(dev)) {
1815 if (udev->parent || udev->speed != USB_SPEED_HIGH) 1704 struct usb_device *udev = to_usb_device(dev);
1816 udev->skip_sys_resume = 1; 1705
1817 return 0; 1706 if (autosuspend_check(udev) != 0)
1707 return -EAGAIN;
1708
1709 status = usb_suspend_both(udev, PMSG_AUTO_SUSPEND);
1710
1711 /* If an interface fails the suspend, adjust the last_busy
1712 * time so that we don't get another suspend attempt right
1713 * away.
1714 */
1715 if (status) {
1716 udev->last_busy = jiffies +
1717 (udev->autosuspend_delay == 0 ?
1718 HZ/2 : 0);
1719 }
1720
1721 /* Prevent the parent from suspending immediately after */
1722 else if (udev->parent) {
1723 udev->parent->last_busy = jiffies;
1724 }
1818 } 1725 }
1819 1726
1820 udev->skip_sys_resume = 0; 1727 /* Runtime suspend for a USB interface doesn't mean anything. */
1821 return usb_external_suspend_device(udev, msg); 1728 return status;
1822} 1729}
1823 1730
1824int usb_resume(struct device *dev, pm_message_t msg) 1731static int usb_runtime_resume(struct device *dev)
1825{ 1732{
1826 struct usb_device *udev; 1733 /* Runtime resume for a USB device means resuming both the device
1827 int status; 1734 * and all its interfaces.
1735 */
1736 if (is_usb_device(dev)) {
1737 struct usb_device *udev = to_usb_device(dev);
1738 int status;
1828 1739
1829 udev = to_usb_device(dev); 1740 status = usb_resume_both(udev, PMSG_AUTO_RESUME);
1741 udev->last_busy = jiffies;
1742 return status;
1743 }
1830 1744
1831 /* If udev->skip_sys_resume is set then udev was already suspended 1745 /* Runtime resume for a USB interface doesn't mean anything. */
1832 * when the system sleep started, so we don't want to resume it 1746 return 0;
1833 * during this system wakeup. 1747}
1834 */
1835 if (udev->skip_sys_resume)
1836 return 0;
1837 status = usb_external_resume_device(udev, msg);
1838 1748
1839 /* Avoid PM error messages for devices disconnected while suspended 1749static int usb_runtime_idle(struct device *dev)
1840 * as we'll display regular disconnect messages just a bit later. 1750{
1751 /* An idle USB device can be suspended if it passes the various
1752 * autosuspend checks. An idle interface can be suspended at
1753 * any time.
1841 */ 1754 */
1842 if (status == -ENODEV) 1755 if (is_usb_device(dev)) {
1843 return 0; 1756 struct usb_device *udev = to_usb_device(dev);
1844 return status; 1757
1758 if (autosuspend_check(udev) != 0)
1759 return 0;
1760 }
1761
1762 pm_runtime_suspend(dev);
1763 return 0;
1845} 1764}
1846 1765
1847#endif /* CONFIG_PM */ 1766static struct dev_pm_ops usb_bus_pm_ops = {
1767 .runtime_suspend = usb_runtime_suspend,
1768 .runtime_resume = usb_runtime_resume,
1769 .runtime_idle = usb_runtime_idle,
1770};
1771
1772#else
1773
1774#define usb_bus_pm_ops (*(struct dev_pm_ops *) NULL)
1775
1776#endif /* CONFIG_USB_SUSPEND */
1848 1777
1849struct bus_type usb_bus_type = { 1778struct bus_type usb_bus_type = {
1850 .name = "usb", 1779 .name = "usb",
1851 .match = usb_device_match, 1780 .match = usb_device_match,
1852 .uevent = usb_uevent, 1781 .uevent = usb_uevent,
1782 .pm = &usb_bus_pm_ops,
1853}; 1783};
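
[Note] The end state of driver.c is that usbcore participates in runtime PM as a bus: three callbacks behind one .pm pointer. The same shape works for any bus type; a generic sketch under that assumption (all foo_* names are illustrative):

#include <linux/device.h>
#include <linux/pm_runtime.h>

static int foo_runtime_suspend(struct device *dev) { return 0; }
static int foo_runtime_resume(struct device *dev)  { return 0; }

static int foo_runtime_idle(struct device *dev)
{
	pm_runtime_suspend(dev);	/* idle devices may suspend at once */
	return 0;
}

static struct dev_pm_ops foo_bus_pm_ops = {
	.runtime_suspend = foo_runtime_suspend,
	.runtime_resume	 = foo_runtime_resume,
	.runtime_idle	 = foo_runtime_idle,
};

struct bus_type foo_bus_type = {
	.name	= "foo",
	.pm	= &foo_bus_pm_ops,
};
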
diff --git a/drivers/usb/core/file.c b/drivers/usb/core/file.c
index bfc6c2eea647..c3536f151f02 100644
--- a/drivers/usb/core/file.c
+++ b/drivers/usb/core/file.c
@@ -34,7 +34,6 @@ static int usb_open(struct inode * inode, struct file * file)
34 int err = -ENODEV; 34 int err = -ENODEV;
35 const struct file_operations *old_fops, *new_fops = NULL; 35 const struct file_operations *old_fops, *new_fops = NULL;
36 36
37 lock_kernel();
38 down_read(&minor_rwsem); 37 down_read(&minor_rwsem);
39 c = usb_minors[minor]; 38 c = usb_minors[minor];
40 39
@@ -53,7 +52,6 @@ static int usb_open(struct inode * inode, struct file * file)
53 fops_put(old_fops); 52 fops_put(old_fops);
54 done: 53 done:
55 up_read(&minor_rwsem); 54 up_read(&minor_rwsem);
56 unlock_kernel();
57 return err; 55 return err;
58} 56}
59 57
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 80995ef0868c..2f8cedda8007 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -39,6 +39,7 @@
39#include <linux/platform_device.h> 39#include <linux/platform_device.h>
40#include <linux/workqueue.h> 40#include <linux/workqueue.h>
41#include <linux/mutex.h> 41#include <linux/mutex.h>
42#include <linux/pm_runtime.h>
42 43
43#include <linux/usb.h> 44#include <linux/usb.h>
44 45
@@ -141,7 +142,7 @@ static const u8 usb3_rh_dev_descriptor[18] = {
141 0x09, /* __u8 bMaxPacketSize0; 2^9 = 512 Bytes */ 142 0x09, /* __u8 bMaxPacketSize0; 2^9 = 512 Bytes */
142 143
143 0x6b, 0x1d, /* __le16 idVendor; Linux Foundation */ 144 0x6b, 0x1d, /* __le16 idVendor; Linux Foundation */
144 0x02, 0x00, /* __le16 idProduct; device 0x0002 */
145 0x03, 0x00, /* __le16 idProduct; device 0x0003 */
145 KERNEL_VER, KERNEL_REL, /* __le16 bcdDevice */ 146 KERNEL_VER, KERNEL_REL, /* __le16 bcdDevice */
146 147
147 0x03, /* __u8 iManufacturer; */ 148 0x03, /* __u8 iManufacturer; */
@@ -1670,11 +1671,16 @@ int usb_hcd_alloc_bandwidth(struct usb_device *udev,
1670 } 1671 }
1671 } 1672 }
1672 for (i = 0; i < num_intfs; ++i) { 1673 for (i = 0; i < num_intfs; ++i) {
1674 struct usb_host_interface *first_alt;
1675 int iface_num;
1676
1677 first_alt = &new_config->intf_cache[i]->altsetting[0];
1678 iface_num = first_alt->desc.bInterfaceNumber;
1673 /* Set up endpoints for alternate interface setting 0 */ 1679 /* Set up endpoints for alternate interface setting 0 */
1674 alt = usb_find_alt_setting(new_config, i, 0);
1680 alt = usb_find_alt_setting(new_config, iface_num, 0);
1675 if (!alt) 1681 if (!alt)
1676 /* No alt setting 0? Pick the first setting. */ 1682 /* No alt setting 0? Pick the first setting. */
1677 alt = &new_config->intf_cache[i]->altsetting[0];
1683 alt = first_alt;
1678 1684
1679 for (j = 0; j < alt->desc.bNumEndpoints; j++) { 1685 for (j = 0; j < alt->desc.bNumEndpoints; j++) {
1680 ret = hcd->driver->add_endpoint(hcd, udev, &alt->endpoint[j]); 1686 ret = hcd->driver->add_endpoint(hcd, udev, &alt->endpoint[j]);
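The iface_num fix matters because the intf_cache array is indexed densely while interface numbers may be sparse. A sketch of the distinction (a configuration declaring interfaces 0 and 2, with no interface 1, is a hypothetical example):

	/* interfaces 0 and 2 are cached at intf_cache[0] and intf_cache[1] */
	for (i = 0; i < num_intfs; ++i) {
		struct usb_host_interface *first_alt =
			&new_config->intf_cache[i]->altsetting[0];
		int iface_num = first_alt->desc.bInterfaceNumber;

		/* iface_num may differ from i; look up altsettings by it */
	}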
@@ -1853,6 +1859,10 @@ int hcd_bus_resume(struct usb_device *rhdev, pm_message_t msg)
1853 return status; 1859 return status;
1854} 1860}
1855 1861
1862#endif /* CONFIG_PM */
1863
1864#ifdef CONFIG_USB_SUSPEND
1865
1856/* Workqueue routine for root-hub remote wakeup */ 1866/* Workqueue routine for root-hub remote wakeup */
1857static void hcd_resume_work(struct work_struct *work) 1867static void hcd_resume_work(struct work_struct *work)
1858{ 1868{
@@ -1860,8 +1870,7 @@ static void hcd_resume_work(struct work_struct *work)
1860 struct usb_device *udev = hcd->self.root_hub; 1870 struct usb_device *udev = hcd->self.root_hub;
1861 1871
1862 usb_lock_device(udev); 1872 usb_lock_device(udev);
1863 usb_mark_last_busy(udev);
1864 usb_external_resume_device(udev, PMSG_REMOTE_RESUME);
1873 usb_remote_wakeup(udev);
1865 usb_unlock_device(udev); 1874 usb_unlock_device(udev);
1866} 1875}
1867 1876
@@ -1880,12 +1889,12 @@ void usb_hcd_resume_root_hub (struct usb_hcd *hcd)
1880 1889
1881 spin_lock_irqsave (&hcd_root_hub_lock, flags); 1890 spin_lock_irqsave (&hcd_root_hub_lock, flags);
1882 if (hcd->rh_registered) 1891 if (hcd->rh_registered)
1883 queue_work(ksuspend_usb_wq, &hcd->wakeup_work);
1892 queue_work(pm_wq, &hcd->wakeup_work);
1884 spin_unlock_irqrestore (&hcd_root_hub_lock, flags); 1893 spin_unlock_irqrestore (&hcd_root_hub_lock, flags);
1885} 1894}
1886EXPORT_SYMBOL_GPL(usb_hcd_resume_root_hub); 1895EXPORT_SYMBOL_GPL(usb_hcd_resume_root_hub);
1887 1896
1888#endif
1897#endif /* CONFIG_USB_SUSPEND */
1889 1898
1890/*-------------------------------------------------------------------------*/ 1899/*-------------------------------------------------------------------------*/
1891 1900
@@ -2030,7 +2039,7 @@ struct usb_hcd *usb_create_hcd (const struct hc_driver *driver,
2030 init_timer(&hcd->rh_timer); 2039 init_timer(&hcd->rh_timer);
2031 hcd->rh_timer.function = rh_timer_func; 2040 hcd->rh_timer.function = rh_timer_func;
2032 hcd->rh_timer.data = (unsigned long) hcd; 2041 hcd->rh_timer.data = (unsigned long) hcd;
2033#ifdef CONFIG_PM
2042#ifdef CONFIG_USB_SUSPEND
2034 INIT_WORK(&hcd->wakeup_work, hcd_resume_work); 2043 INIT_WORK(&hcd->wakeup_work, hcd_resume_work);
2035#endif 2044#endif
2036 mutex_init(&hcd->bandwidth_mutex); 2045 mutex_init(&hcd->bandwidth_mutex);
@@ -2230,7 +2239,7 @@ void usb_remove_hcd(struct usb_hcd *hcd)
2230 hcd->rh_registered = 0; 2239 hcd->rh_registered = 0;
2231 spin_unlock_irq (&hcd_root_hub_lock); 2240 spin_unlock_irq (&hcd_root_hub_lock);
2232 2241
2233#ifdef CONFIG_PM
2242#ifdef CONFIG_USB_SUSPEND
2234 cancel_work_sync(&hcd->wakeup_work); 2243 cancel_work_sync(&hcd->wakeup_work);
2235#endif 2244#endif
2236 2245
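Root-hub remote wakeup now borrows the PM core's shared workqueue instead of the private ksuspend_usbd thread removed later in this patch. The deferred pattern in isolation, assuming pm_wq as declared in <linux/pm_runtime.h> (the example_* names are illustrative):

	static struct work_struct wakeup_work;

	static void wakeup_fn(struct work_struct *work)
	{
		/* process context: safe to lock and resume the device */
	}

	static void example_setup(void)
	{
		INIT_WORK(&wakeup_work, wakeup_fn);
	}

	static void example_kick(void)	/* may be called in_interrupt() */
	{
		queue_work(pm_wq, &wakeup_work);
	}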
diff --git a/drivers/usb/core/hcd.h b/drivers/usb/core/hcd.h
index bbe2b924aae8..a3cdb09734ab 100644
--- a/drivers/usb/core/hcd.h
+++ b/drivers/usb/core/hcd.h
@@ -80,7 +80,7 @@ struct usb_hcd {
80 80
81 struct timer_list rh_timer; /* drives root-hub polling */ 81 struct timer_list rh_timer; /* drives root-hub polling */
82 struct urb *status_urb; /* the current status urb */ 82 struct urb *status_urb; /* the current status urb */
83#ifdef CONFIG_PM
83#ifdef CONFIG_USB_SUSPEND
84 struct work_struct wakeup_work; /* for remote wakeup */ 84 struct work_struct wakeup_work; /* for remote wakeup */
85#endif 85#endif
86 86
@@ -248,7 +248,7 @@ struct hc_driver {
248 /* xHCI specific functions */ 248 /* xHCI specific functions */
249 /* Called by usb_alloc_dev to alloc HC device structures */ 249 /* Called by usb_alloc_dev to alloc HC device structures */
250 int (*alloc_dev)(struct usb_hcd *, struct usb_device *); 250 int (*alloc_dev)(struct usb_hcd *, struct usb_device *);
251 /* Called by usb_release_dev to free HC device structures */
251 /* Called by usb_disconnect to free HC device structures */
252 void (*free_dev)(struct usb_hcd *, struct usb_device *); 252 void (*free_dev)(struct usb_hcd *, struct usb_device *);
253 253
254 /* Bandwidth computation functions */ 254 /* Bandwidth computation functions */
@@ -286,6 +286,7 @@ struct hc_driver {
286 */ 286 */
287 int (*update_hub_device)(struct usb_hcd *, struct usb_device *hdev, 287 int (*update_hub_device)(struct usb_hcd *, struct usb_device *hdev,
288 struct usb_tt *tt, gfp_t mem_flags); 288 struct usb_tt *tt, gfp_t mem_flags);
289 int (*reset_device)(struct usb_hcd *, struct usb_device *);
289}; 290};
290 291
291extern int usb_hcd_link_urb_to_ep(struct usb_hcd *hcd, struct urb *urb); 292extern int usb_hcd_link_urb_to_ep(struct usb_hcd *hcd, struct urb *urb);
@@ -463,16 +464,20 @@ extern int usb_find_interface_driver(struct usb_device *dev,
463#define usb_endpoint_out(ep_dir) (!((ep_dir) & USB_DIR_IN)) 464#define usb_endpoint_out(ep_dir) (!((ep_dir) & USB_DIR_IN))
464 465
465#ifdef CONFIG_PM 466#ifdef CONFIG_PM
466extern void usb_hcd_resume_root_hub(struct usb_hcd *hcd);
467extern void usb_root_hub_lost_power(struct usb_device *rhdev); 467extern void usb_root_hub_lost_power(struct usb_device *rhdev);
468extern int hcd_bus_suspend(struct usb_device *rhdev, pm_message_t msg); 468extern int hcd_bus_suspend(struct usb_device *rhdev, pm_message_t msg);
469extern int hcd_bus_resume(struct usb_device *rhdev, pm_message_t msg); 469extern int hcd_bus_resume(struct usb_device *rhdev, pm_message_t msg);
470#endif /* CONFIG_PM */
471
472#ifdef CONFIG_USB_SUSPEND
473extern void usb_hcd_resume_root_hub(struct usb_hcd *hcd);
470#else 474#else
471static inline void usb_hcd_resume_root_hub(struct usb_hcd *hcd) 475static inline void usb_hcd_resume_root_hub(struct usb_hcd *hcd)
472{ 476{
473 return; 477 return;
474} 478}
475#endif /* CONFIG_PM */
479#endif /* CONFIG_USB_SUSPEND */
480
476 481
477/* 482/*
478 * USB device fs stuff 483 * USB device fs stuff
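The new reset_device hook lets usbcore ask a host controller driver to discard per-device hardware state after a port reset. A hedged sketch of the provider side (my_hcd_reset_device and my_hc_driver are made-up names; the real user is the xHCI driver):

	static int my_hcd_reset_device(struct usb_hcd *hcd,
				       struct usb_device *udev)
	{
		/* drop slot/endpoint state kept for udev; 0 on success */
		return 0;
	}

	static const struct hc_driver my_hc_driver = {
		/* ... mandatory hc_driver fields omitted ... */
		.reset_device = my_hcd_reset_device,
	};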
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 20ecb4cec8de..0940ccd6f4f4 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -22,6 +22,7 @@
22#include <linux/kthread.h> 22#include <linux/kthread.h>
23#include <linux/mutex.h> 23#include <linux/mutex.h>
24#include <linux/freezer.h> 24#include <linux/freezer.h>
25#include <linux/pm_runtime.h>
25 26
26#include <asm/uaccess.h> 27#include <asm/uaccess.h>
27#include <asm/byteorder.h> 28#include <asm/byteorder.h>
@@ -71,7 +72,6 @@ struct usb_hub {
71 72
72 unsigned mA_per_port; /* current for each child */ 73 unsigned mA_per_port; /* current for each child */
73 74
74 unsigned init_done:1;
75 unsigned limited_power:1; 75 unsigned limited_power:1;
76 unsigned quiescing:1; 76 unsigned quiescing:1;
77 unsigned disconnected:1; 77 unsigned disconnected:1;
@@ -820,7 +820,6 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
820 } 820 }
821 init3: 821 init3:
822 hub->quiescing = 0; 822 hub->quiescing = 0;
823 hub->init_done = 1;
824 823
825 status = usb_submit_urb(hub->urb, GFP_NOIO); 824 status = usb_submit_urb(hub->urb, GFP_NOIO);
826 if (status < 0) 825 if (status < 0)
@@ -861,11 +860,6 @@ static void hub_quiesce(struct usb_hub *hub, enum hub_quiescing_type type)
861 int i; 860 int i;
862 861
863 cancel_delayed_work_sync(&hub->init_work); 862 cancel_delayed_work_sync(&hub->init_work);
864 if (!hub->init_done) {
865 hub->init_done = 1;
866 usb_autopm_put_interface_no_suspend(
867 to_usb_interface(hub->intfdev));
868 }
869 863
870 /* khubd and related activity won't re-trigger */ 864 /* khubd and related activity won't re-trigger */
871 hub->quiescing = 1; 865 hub->quiescing = 1;
@@ -1224,6 +1218,9 @@ static int hub_probe(struct usb_interface *intf, const struct usb_device_id *id)
1224 desc = intf->cur_altsetting; 1218 desc = intf->cur_altsetting;
1225 hdev = interface_to_usbdev(intf); 1219 hdev = interface_to_usbdev(intf);
1226 1220
1221 /* Hubs have proper suspend/resume support */
1222 usb_enable_autosuspend(hdev);
1223
1227 if (hdev->level == MAX_TOPO_LEVEL) { 1224 if (hdev->level == MAX_TOPO_LEVEL) {
1228 dev_err(&intf->dev, 1225 dev_err(&intf->dev,
1229 "Unsupported bus topology: hub nested too deep\n"); 1226 "Unsupported bus topology: hub nested too deep\n");
@@ -1402,10 +1399,8 @@ static void recursively_mark_NOTATTACHED(struct usb_device *udev)
1402 if (udev->children[i]) 1399 if (udev->children[i])
1403 recursively_mark_NOTATTACHED(udev->children[i]); 1400 recursively_mark_NOTATTACHED(udev->children[i]);
1404 } 1401 }
1405 if (udev->state == USB_STATE_SUSPENDED) {
1406 udev->discon_suspended = 1;
1407 udev->active_duration -= jiffies;
1408 }
1402 if (udev->state == USB_STATE_SUSPENDED)
1403 udev->active_duration -= jiffies;
1409 udev->state = USB_STATE_NOTATTACHED; 1404 udev->state = USB_STATE_NOTATTACHED;
1410} 1405}
1411 1406
@@ -1448,11 +1443,11 @@ void usb_set_device_state(struct usb_device *udev,
1448 || new_state == USB_STATE_SUSPENDED) 1443 || new_state == USB_STATE_SUSPENDED)
1449 ; /* No change to wakeup settings */ 1444 ; /* No change to wakeup settings */
1450 else if (new_state == USB_STATE_CONFIGURED) 1445 else if (new_state == USB_STATE_CONFIGURED)
1451 device_init_wakeup(&udev->dev,
1452 (udev->actconfig->desc.bmAttributes
1453 & USB_CONFIG_ATT_WAKEUP));
1446 device_set_wakeup_capable(&udev->dev,
1447 (udev->actconfig->desc.bmAttributes
1448 & USB_CONFIG_ATT_WAKEUP));
1454 else 1449 else
1455 device_init_wakeup(&udev->dev, 0);
1450 device_set_wakeup_capable(&udev->dev, 0);
1456 } 1451 }
1457 if (udev->state == USB_STATE_SUSPENDED && 1452 if (udev->state == USB_STATE_SUSPENDED &&
1458 new_state != USB_STATE_SUSPENDED) 1453 new_state != USB_STATE_SUSPENDED)
@@ -1529,31 +1524,15 @@ static void update_address(struct usb_device *udev, int devnum)
1529 udev->devnum = devnum; 1524 udev->devnum = devnum;
1530} 1525}
1531 1526
1532#ifdef CONFIG_USB_SUSPEND
1533
1534static void usb_stop_pm(struct usb_device *udev)
1535{
1536 /* Synchronize with the ksuspend thread to prevent any more
1537 * autosuspend requests from being submitted, and decrement
1538 * the parent's count of unsuspended children.
1539 */
1540 usb_pm_lock(udev);
1541 if (udev->parent && !udev->discon_suspended)
1542 usb_autosuspend_device(udev->parent);
1543 usb_pm_unlock(udev);
1544
1545 /* Stop any autosuspend or autoresume requests already submitted */
1546 cancel_delayed_work_sync(&udev->autosuspend);
1547 cancel_work_sync(&udev->autoresume);
1548}
1527static void hub_free_dev(struct usb_device *udev)
1528{
1529 struct usb_hcd *hcd = bus_to_hcd(udev->bus);
1530
1531 /* Root hubs aren't real devices, so don't free HCD resources */
1532 if (hcd->driver->free_dev && udev->parent)
1533 hcd->driver->free_dev(hcd, udev);
1534}
1549 1535
1550#else
1551
1552static inline void usb_stop_pm(struct usb_device *udev)
1553{ }
1554
1555#endif
1556
1557/** 1536/**
1558 * usb_disconnect - disconnect a device (usbcore-internal) 1537 * usb_disconnect - disconnect a device (usbcore-internal)
1559 * @pdev: pointer to device being disconnected 1538 * @pdev: pointer to device being disconnected
@@ -1622,7 +1601,7 @@ void usb_disconnect(struct usb_device **pdev)
1622 *pdev = NULL; 1601 *pdev = NULL;
1623 spin_unlock_irq(&device_state_lock); 1602 spin_unlock_irq(&device_state_lock);
1624 1603
1625 usb_stop_pm(udev);
1604 hub_free_dev(udev);
1626 1605
1627 put_device(&udev->dev); 1606 put_device(&udev->dev);
1628} 1607}
@@ -1799,9 +1778,18 @@ int usb_new_device(struct usb_device *udev)
1799{ 1778{
1800 int err; 1779 int err;
1801 1780
1802 /* Increment the parent's count of unsuspended children */
1803 if (udev->parent)
1804 usb_autoresume_device(udev->parent);
1781 if (udev->parent) {
1782 /* Initialize non-root-hub device wakeup to disabled;
1783 * device (un)configuration controls wakeup capable
1784 * sysfs power/wakeup controls wakeup enabled/disabled
1785 */
1786 device_init_wakeup(&udev->dev, 0);
1787 device_set_wakeup_enable(&udev->dev, 1);
1788 }
1789
1790 /* Tell the runtime-PM framework the device is active */
1791 pm_runtime_set_active(&udev->dev);
1792 pm_runtime_enable(&udev->dev);
1805 1793
1806 usb_detect_quirks(udev); 1794 usb_detect_quirks(udev);
1807 err = usb_enumerate_device(udev); /* Read descriptors */ 1795 err = usb_enumerate_device(udev); /* Read descriptors */
@@ -1833,7 +1821,8 @@ int usb_new_device(struct usb_device *udev)
1833 1821
1834fail: 1822fail:
1835 usb_set_device_state(udev, USB_STATE_NOTATTACHED); 1823 usb_set_device_state(udev, USB_STATE_NOTATTACHED);
1836 usb_stop_pm(udev);
1824 pm_runtime_disable(&udev->dev);
1825 pm_runtime_set_suspended(&udev->dev);
1837 return err; 1826 return err;
1838} 1827}
1839 1828
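The ordering here follows the runtime-PM contract: record the state first, then enable callbacks; tear down in the reverse order. Restated in isolation:

	/* bring-up: state before enable */
	pm_runtime_set_active(&udev->dev);
	pm_runtime_enable(&udev->dev);

	/* failure path: disable before changing the recorded state */
	pm_runtime_disable(&udev->dev);
	pm_runtime_set_suspended(&udev->dev);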
@@ -1982,7 +1971,7 @@ static int hub_port_wait_reset(struct usb_hub *hub, int port1,
1982 if (!(portstatus & USB_PORT_STAT_RESET) && 1971 if (!(portstatus & USB_PORT_STAT_RESET) &&
1983 (portstatus & USB_PORT_STAT_ENABLE)) { 1972 (portstatus & USB_PORT_STAT_ENABLE)) {
1984 if (hub_is_wusb(hub)) 1973 if (hub_is_wusb(hub))
1985 udev->speed = USB_SPEED_VARIABLE;
1974 udev->speed = USB_SPEED_WIRELESS;
1986 else if (portstatus & USB_PORT_STAT_HIGH_SPEED) 1975 else if (portstatus & USB_PORT_STAT_HIGH_SPEED)
1987 udev->speed = USB_SPEED_HIGH; 1976 udev->speed = USB_SPEED_HIGH;
1988 else if (portstatus & USB_PORT_STAT_LOW_SPEED) 1977 else if (portstatus & USB_PORT_STAT_LOW_SPEED)
@@ -2008,7 +1997,9 @@ static int hub_port_reset(struct usb_hub *hub, int port1,
2008 struct usb_device *udev, unsigned int delay) 1997 struct usb_device *udev, unsigned int delay)
2009{ 1998{
2010 int i, status; 1999 int i, status;
2000 struct usb_hcd *hcd;
2011 2001
2002 hcd = bus_to_hcd(udev->bus);
2012 /* Block EHCI CF initialization during the port reset. 2003 /* Block EHCI CF initialization during the port reset.
2013 * Some companion controllers don't like it when they mix. 2004 * Some companion controllers don't like it when they mix.
2014 */ 2005 */
@@ -2036,6 +2027,14 @@ static int hub_port_reset(struct usb_hub *hub, int port1,
2036 /* TRSTRCY = 10 ms; plus some extra */ 2027 /* TRSTRCY = 10 ms; plus some extra */
2037 msleep(10 + 40); 2028 msleep(10 + 40);
2038 update_address(udev, 0); 2029 update_address(udev, 0);
2030 if (hcd->driver->reset_device) {
2031 status = hcd->driver->reset_device(hcd, udev);
2032 if (status < 0) {
2033 dev_err(&udev->dev, "Cannot reset "
2034 "HCD device state\n");
2035 break;
2036 }
2037 }
2039 /* FALL THROUGH */ 2038 /* FALL THROUGH */
2040 case -ENOTCONN: 2039 case -ENOTCONN:
2041 case -ENODEV: 2040 case -ENODEV:
@@ -2381,14 +2380,17 @@ int usb_port_resume(struct usb_device *udev, pm_message_t msg)
2381} 2380}
2382 2381
2383/* caller has locked udev */ 2382/* caller has locked udev */
2384static int remote_wakeup(struct usb_device *udev)
2383int usb_remote_wakeup(struct usb_device *udev)
2385{ 2384{
2386 int status = 0; 2385 int status = 0;
2387 2386
2388 if (udev->state == USB_STATE_SUSPENDED) { 2387 if (udev->state == USB_STATE_SUSPENDED) {
2389 dev_dbg(&udev->dev, "usb %sresume\n", "wakeup-"); 2388 dev_dbg(&udev->dev, "usb %sresume\n", "wakeup-");
2390 usb_mark_last_busy(udev);
2391 status = usb_external_resume_device(udev, PMSG_REMOTE_RESUME);
2389 status = usb_autoresume_device(udev);
2390 if (status == 0) {
2391 /* Let the drivers do their thing, then... */
2392 usb_autosuspend_device(udev);
2393 }
2392 } 2394 }
2393 return status; 2395 return status;
2394} 2396}
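usb_remote_wakeup() is now a balanced usage-count operation rather than a direct resume: the autoresume takes a reference so interface drivers see the wakeup, and the matching autosuspend releases it so the device may idle again. The pairing rule, sketched:

	status = usb_autoresume_device(udev);	/* +1, device resumed */
	if (status == 0) {
		/* interface drivers have handled the wakeup */
		usb_autosuspend_device(udev);	/* -1, may re-suspend */
	}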
@@ -2425,11 +2427,6 @@ int usb_port_resume(struct usb_device *udev, pm_message_t msg)
2425 return status; 2427 return status;
2426} 2428}
2427 2429
2428static inline int remote_wakeup(struct usb_device *udev)
2429{
2430 return 0;
2431}
2432
2433#endif 2430#endif
2434 2431
2435static int hub_suspend(struct usb_interface *intf, pm_message_t msg) 2432static int hub_suspend(struct usb_interface *intf, pm_message_t msg)
@@ -2496,11 +2493,6 @@ EXPORT_SYMBOL_GPL(usb_root_hub_lost_power);
2496 2493
2497#else /* CONFIG_PM */ 2494#else /* CONFIG_PM */
2498 2495
2499static inline int remote_wakeup(struct usb_device *udev)
2500{
2501 return 0;
2502}
2503
2504#define hub_suspend NULL 2496#define hub_suspend NULL
2505#define hub_resume NULL 2497#define hub_resume NULL
2506#define hub_reset_resume NULL 2498#define hub_reset_resume NULL
@@ -2645,14 +2637,7 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1,
2645 2637
2646 mutex_lock(&usb_address0_mutex); 2638 mutex_lock(&usb_address0_mutex);
2647 2639
2648 if ((hcd->driver->flags & HCD_USB3) && udev->config) {
2649 /* FIXME this will need special handling by the xHCI driver. */
2650 dev_dbg(&udev->dev,
2651 "xHCI reset of configured device "
2652 "not supported yet.\n");
2653 retval = -EINVAL;
2654 goto fail;
2655 } else if (!udev->config && oldspeed == USB_SPEED_SUPER) {
2640 if (!udev->config && oldspeed == USB_SPEED_SUPER) {
2656 /* Don't reset USB 3.0 devices during an initial setup */ 2641 /* Don't reset USB 3.0 devices during an initial setup */
2657 usb_set_device_state(udev, USB_STATE_DEFAULT); 2642 usb_set_device_state(udev, USB_STATE_DEFAULT);
2658 } else { 2643 } else {
@@ -2678,7 +2663,7 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1,
2678 */ 2663 */
2679 switch (udev->speed) { 2664 switch (udev->speed) {
2680 case USB_SPEED_SUPER: 2665 case USB_SPEED_SUPER:
2681 case USB_SPEED_VARIABLE: /* fixed at 512 */
2666 case USB_SPEED_WIRELESS: /* fixed at 512 */
2682 udev->ep0.desc.wMaxPacketSize = cpu_to_le16(512); 2667 udev->ep0.desc.wMaxPacketSize = cpu_to_le16(512);
2683 break; 2668 break;
2684 case USB_SPEED_HIGH: /* fixed at 64 */ 2669 case USB_SPEED_HIGH: /* fixed at 64 */
@@ -2706,7 +2691,7 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1,
2706 case USB_SPEED_SUPER: 2691 case USB_SPEED_SUPER:
2707 speed = "super"; 2692 speed = "super";
2708 break; 2693 break;
2709 case USB_SPEED_VARIABLE:
2694 case USB_SPEED_WIRELESS:
2710 speed = "variable"; 2695 speed = "variable";
2711 type = "Wireless "; 2696 type = "Wireless ";
2712 break; 2697 break;
@@ -3006,7 +2991,7 @@ static void hub_port_connect_change(struct usb_hub *hub, int port1,
3006 /* For a suspended device, treat this as a 2991 /* For a suspended device, treat this as a
3007 * remote wakeup event. 2992 * remote wakeup event.
3008 */ 2993 */
3009 status = remote_wakeup(udev);
2994 status = usb_remote_wakeup(udev);
3010#endif 2995#endif
3011 2996
3012 } else { 2997 } else {
@@ -3192,6 +3177,7 @@ loop_disable:
3192loop: 3177loop:
3193 usb_ep0_reinit(udev); 3178 usb_ep0_reinit(udev);
3194 release_address(udev); 3179 release_address(udev);
3180 hub_free_dev(udev);
3195 usb_put_dev(udev); 3181 usb_put_dev(udev);
3196 if ((status == -ENOTCONN) || (status == -ENOTSUPP)) 3182 if ((status == -ENOTCONN) || (status == -ENOTSUPP))
3197 break; 3183 break;
@@ -3259,7 +3245,7 @@ static void hub_events(void)
3259 * disconnected while waiting for the lock to succeed. */ 3245 * disconnected while waiting for the lock to succeed. */
3260 usb_lock_device(hdev); 3246 usb_lock_device(hdev);
3261 if (unlikely(hub->disconnected)) 3247 if (unlikely(hub->disconnected))
3262 goto loop2;
3248 goto loop_disconnected;
3263 3249
3264 /* If the hub has died, clean up after it */ 3250 /* If the hub has died, clean up after it */
3265 if (hdev->state == USB_STATE_NOTATTACHED) { 3251 if (hdev->state == USB_STATE_NOTATTACHED) {
@@ -3352,7 +3338,7 @@ static void hub_events(void)
3352 msleep(10); 3338 msleep(10);
3353 3339
3354 usb_lock_device(udev); 3340 usb_lock_device(udev);
3355 ret = remote_wakeup(hdev->
3341 ret = usb_remote_wakeup(hdev->
3356 children[i-1]); 3342 children[i-1]);
3357 usb_unlock_device(udev); 3343 usb_unlock_device(udev);
3358 if (ret < 0) 3344 if (ret < 0)
@@ -3419,7 +3405,7 @@ static void hub_events(void)
3419 * kick_khubd() and allow autosuspend. 3405 * kick_khubd() and allow autosuspend.
3420 */ 3406 */
3421 usb_autopm_put_interface(intf); 3407 usb_autopm_put_interface(intf);
3422 loop2:
3408 loop_disconnected:
3423 usb_unlock_device(hdev); 3409 usb_unlock_device(hdev);
3424 kref_put(&hub->kref, hub_release); 3410 kref_put(&hub->kref, hub_release);
3425 3411
@@ -3446,7 +3432,7 @@ static int hub_thread(void *__unused)
3446 return 0; 3432 return 0;
3447} 3433}
3448 3434
3449static struct usb_device_id hub_id_table [] = {
3435static const struct usb_device_id hub_id_table[] = {
3450 { .match_flags = USB_DEVICE_ID_MATCH_DEV_CLASS, 3436 { .match_flags = USB_DEVICE_ID_MATCH_DEV_CLASS,
3451 .bDeviceClass = USB_CLASS_HUB}, 3437 .bDeviceClass = USB_CLASS_HUB},
3452 { .match_flags = USB_DEVICE_ID_MATCH_INT_CLASS, 3438 { .match_flags = USB_DEVICE_ID_MATCH_INT_CLASS,
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index df73574a9cc9..cd220277c6c3 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -1316,7 +1316,7 @@ int usb_set_interface(struct usb_device *dev, int interface, int alternate)
1316 1316
1317 alt = usb_altnum_to_altsetting(iface, alternate); 1317 alt = usb_altnum_to_altsetting(iface, alternate);
1318 if (!alt) { 1318 if (!alt) {
1319 dev_warn(&dev->dev, "selecting invalid altsetting %d",
1319 dev_warn(&dev->dev, "selecting invalid altsetting %d\n",
1320 alternate); 1320 alternate);
1321 return -EINVAL; 1321 return -EINVAL;
1322 } 1322 }
@@ -1471,7 +1471,7 @@ int usb_reset_configuration(struct usb_device *dev)
1471 /* If not, reinstate the old alternate settings */ 1471 /* If not, reinstate the old alternate settings */
1472 if (retval < 0) { 1472 if (retval < 0) {
1473reset_old_alts: 1473reset_old_alts:
1474 for (; i >= 0; i--) {
1474 for (i--; i >= 0; i--) {
1475 struct usb_interface *intf = config->interface[i]; 1475 struct usb_interface *intf = config->interface[i];
1476 struct usb_host_interface *alt; 1476 struct usb_host_interface *alt;
1477 1477
@@ -1843,7 +1843,6 @@ free_interfaces:
1843 intf->dev.dma_mask = dev->dev.dma_mask; 1843 intf->dev.dma_mask = dev->dev.dma_mask;
1844 INIT_WORK(&intf->reset_ws, __usb_queue_reset_device); 1844 INIT_WORK(&intf->reset_ws, __usb_queue_reset_device);
1845 device_initialize(&intf->dev); 1845 device_initialize(&intf->dev);
1846 mark_quiesced(intf);
1847 dev_set_name(&intf->dev, "%d-%s:%d.%d", 1846 dev_set_name(&intf->dev, "%d-%s:%d.%d",
1848 dev->bus->busnum, dev->devpath, 1847 dev->bus->busnum, dev->devpath,
1849 configuration, alt->desc.bInterfaceNumber); 1848 configuration, alt->desc.bInterfaceNumber);
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index ab93918d9207..f073c5cb4e7b 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -103,10 +103,19 @@ void usb_detect_quirks(struct usb_device *udev)
103 dev_dbg(&udev->dev, "USB quirks for this device: %x\n", 103 dev_dbg(&udev->dev, "USB quirks for this device: %x\n",
104 udev->quirks); 104 udev->quirks);
105 105
106 /* By default, disable autosuspend for all non-hubs */
107#ifdef CONFIG_USB_SUSPEND 106#ifdef CONFIG_USB_SUSPEND
108 if (udev->descriptor.bDeviceClass != USB_CLASS_HUB)
109 udev->autosuspend_disabled = 1;
107
108 /* By default, disable autosuspend for all devices. The hub driver
109 * will enable it for hubs.
110 */
111 usb_disable_autosuspend(udev);
112
113 /* Autosuspend can also be disabled if the initial autosuspend_delay
114 * is negative.
115 */
116 if (udev->autosuspend_delay < 0)
117 usb_autoresume_device(udev);
118
110#endif 119#endif
111 120
112 /* For the present, all devices default to USB-PERSIST enabled */ 121 /* For the present, all devices default to USB-PERSIST enabled */
@@ -120,6 +129,7 @@ void usb_detect_quirks(struct usb_device *udev)
120 * for all devices. It will affect things like hub resets 129 * for all devices. It will affect things like hub resets
121 * and EMF-related port disables. 130 * and EMF-related port disables.
122 */ 131 */
123 udev->persist_enabled = 1;
132 if (!(udev->quirks & USB_QUIRK_RESET_MORPHS))
133 udev->persist_enabled = 1;
124#endif /* CONFIG_PM */ 134#endif /* CONFIG_PM */
125} 135}
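USB_QUIRK_RESET_MORPHS marks devices that change identity across a reset, so USB-PERSIST is withheld from them here and userspace can set the flag through the new sysfs attribute below. A sketch of honoring the quirk before a reset (an illustrative caller, not code from the patch):

	if (udev->quirks & USB_QUIRK_RESET_MORPHS)
		return -EINVAL;	/* resetting would morph the device */
	return usb_reset_device(udev);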
diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
index 5f3908f6e2dc..43c002e3a9aa 100644
--- a/drivers/usb/core/sysfs.c
+++ b/drivers/usb/core/sysfs.c
@@ -115,7 +115,7 @@ show_speed(struct device *dev, struct device_attribute *attr, char *buf)
115 case USB_SPEED_HIGH: 115 case USB_SPEED_HIGH:
116 speed = "480"; 116 speed = "480";
117 break; 117 break;
118 case USB_SPEED_VARIABLE:
118 case USB_SPEED_WIRELESS:
119 speed = "480"; 119 speed = "480";
120 break; 120 break;
121 case USB_SPEED_SUPER: 121 case USB_SPEED_SUPER:
@@ -191,6 +191,36 @@ show_quirks(struct device *dev, struct device_attribute *attr, char *buf)
191static DEVICE_ATTR(quirks, S_IRUGO, show_quirks, NULL); 191static DEVICE_ATTR(quirks, S_IRUGO, show_quirks, NULL);
192 192
193static ssize_t 193static ssize_t
194show_avoid_reset_quirk(struct device *dev, struct device_attribute *attr, char *buf)
195{
196 struct usb_device *udev;
197
198 udev = to_usb_device(dev);
199 return sprintf(buf, "%d\n", !!(udev->quirks & USB_QUIRK_RESET_MORPHS));
200}
201
202static ssize_t
203set_avoid_reset_quirk(struct device *dev, struct device_attribute *attr,
204 const char *buf, size_t count)
205{
206 struct usb_device *udev = to_usb_device(dev);
207 int config;
208
209 if (sscanf(buf, "%d", &config) != 1 || config < 0 || config > 1)
210 return -EINVAL;
211 usb_lock_device(udev);
212 if (config)
213 udev->quirks |= USB_QUIRK_RESET_MORPHS;
214 else
215 udev->quirks &= ~USB_QUIRK_RESET_MORPHS;
216 usb_unlock_device(udev);
217 return count;
218}
219
220static DEVICE_ATTR(avoid_reset_quirk, S_IRUGO | S_IWUSR,
221 show_avoid_reset_quirk, set_avoid_reset_quirk);
222
223static ssize_t
194show_urbnum(struct device *dev, struct device_attribute *attr, char *buf) 224show_urbnum(struct device *dev, struct device_attribute *attr, char *buf)
195{ 225{
196 struct usb_device *udev; 226 struct usb_device *udev;
@@ -226,9 +256,10 @@ set_persist(struct device *dev, struct device_attribute *attr,
226 256
227 if (sscanf(buf, "%d", &value) != 1) 257 if (sscanf(buf, "%d", &value) != 1)
228 return -EINVAL; 258 return -EINVAL;
229 usb_pm_lock(udev);
259
260 usb_lock_device(udev);
230 udev->persist_enabled = !!value; 261 udev->persist_enabled = !!value;
231 usb_pm_unlock(udev);
262 usb_unlock_device(udev);
232 return count; 263 return count;
233} 264}
234 265
@@ -315,20 +346,34 @@ set_autosuspend(struct device *dev, struct device_attribute *attr,
315 const char *buf, size_t count) 346 const char *buf, size_t count)
316{ 347{
317 struct usb_device *udev = to_usb_device(dev); 348 struct usb_device *udev = to_usb_device(dev);
318 int value;
349 int value, old_delay;
350 int rc;
319 351
320 if (sscanf(buf, "%d", &value) != 1 || value >= INT_MAX/HZ || 352 if (sscanf(buf, "%d", &value) != 1 || value >= INT_MAX/HZ ||
321 value <= - INT_MAX/HZ) 353 value <= - INT_MAX/HZ)
322 return -EINVAL; 354 return -EINVAL;
323 value *= HZ; 355 value *= HZ;
324 356
357 usb_lock_device(udev);
358 old_delay = udev->autosuspend_delay;
325 udev->autosuspend_delay = value; 359 udev->autosuspend_delay = value;
326 if (value >= 0)
327 usb_try_autosuspend_device(udev);
328 else {
329 if (usb_autoresume_device(udev) == 0)
330 usb_autosuspend_device(udev);
331 }
360
361 if (old_delay < 0) { /* Autosuspend wasn't allowed */
362 if (value >= 0)
363 usb_autosuspend_device(udev);
364 } else { /* Autosuspend was allowed */
365 if (value < 0) {
366 rc = usb_autoresume_device(udev);
367 if (rc < 0) {
368 count = rc;
369 udev->autosuspend_delay = old_delay;
370 }
371 } else {
372 usb_try_autosuspend_device(udev);
373 }
374 }
375
376 usb_unlock_device(udev);
332 return count; 377 return count;
333} 378}
334 379
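The rewritten store method handles four delay transitions; a restatement of the logic above, for reference (my summary, not code from the patch):

	/*
	 * old < 0,  new >= 0 : autosuspend was pinned off; drop the
	 *                      extra reference (usb_autosuspend_device).
	 * old < 0,  new <  0 : still pinned off; nothing to do.
	 * old >= 0, new <  0 : pin it off via usb_autoresume_device();
	 *                      on failure restore the old delay.
	 * old >= 0, new >= 0 : just re-evaluate with
	 *                      usb_try_autosuspend_device().
	 */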
@@ -356,34 +401,25 @@ set_level(struct device *dev, struct device_attribute *attr,
356 struct usb_device *udev = to_usb_device(dev); 401 struct usb_device *udev = to_usb_device(dev);
357 int len = count; 402 int len = count;
358 char *cp; 403 char *cp;
359 int rc = 0;
404 int rc;
360 int old_autosuspend_disabled;
361 405
362 cp = memchr(buf, '\n', count); 406 cp = memchr(buf, '\n', count);
363 if (cp) 407 if (cp)
364 len = cp - buf; 408 len = cp - buf;
365 409
366 usb_lock_device(udev); 410 usb_lock_device(udev);
367 old_autosuspend_disabled = udev->autosuspend_disabled;
368 411
369 /* Setting the flags without calling usb_pm_lock is a subject to
370 * races, but who cares...
371 */
372 if (len == sizeof on_string - 1 &&
373 strncmp(buf, on_string, len) == 0) {
374 udev->autosuspend_disabled = 1;
375 rc = usb_external_resume_device(udev, PMSG_USER_RESUME);
376
377 } else if (len == sizeof auto_string - 1 &&
378 strncmp(buf, auto_string, len) == 0) {
379 udev->autosuspend_disabled = 0;
380 rc = usb_external_resume_device(udev, PMSG_USER_RESUME);
381
382 } else
383 rc = -EINVAL;
412 if (len == sizeof on_string - 1 &&
413 strncmp(buf, on_string, len) == 0)
414 rc = usb_disable_autosuspend(udev);
415
416 else if (len == sizeof auto_string - 1 &&
417 strncmp(buf, auto_string, len) == 0)
418 rc = usb_enable_autosuspend(udev);
419
420 else
421 rc = -EINVAL;
384 422
385 if (rc)
386 udev->autosuspend_disabled = old_autosuspend_disabled;
387 usb_unlock_device(udev); 423 usb_unlock_device(udev);
388 return (rc < 0 ? rc : count); 424 return (rc < 0 ? rc : count);
389} 425}
@@ -558,6 +594,7 @@ static struct attribute *dev_attrs[] = {
558 &dev_attr_version.attr, 594 &dev_attr_version.attr,
559 &dev_attr_maxchild.attr, 595 &dev_attr_maxchild.attr,
560 &dev_attr_quirks.attr, 596 &dev_attr_quirks.attr,
597 &dev_attr_avoid_reset_quirk.attr,
561 &dev_attr_authorized.attr, 598 &dev_attr_authorized.attr,
562 &dev_attr_remove.attr, 599 &dev_attr_remove.attr,
563 NULL, 600 NULL,
diff --git a/drivers/usb/core/urb.c b/drivers/usb/core/urb.c
index e7cae1334693..27080561a1c2 100644
--- a/drivers/usb/core/urb.c
+++ b/drivers/usb/core/urb.c
@@ -387,6 +387,13 @@ int usb_submit_urb(struct urb *urb, gfp_t mem_flags)
387 { 387 {
388 unsigned int orig_flags = urb->transfer_flags; 388 unsigned int orig_flags = urb->transfer_flags;
389 unsigned int allowed; 389 unsigned int allowed;
390 static int pipetypes[4] = {
391 PIPE_CONTROL, PIPE_ISOCHRONOUS, PIPE_BULK, PIPE_INTERRUPT
392 };
393
394 /* Check that the pipe's type matches the endpoint's type */
395 if (usb_pipetype(urb->pipe) != pipetypes[xfertype])
396 return -EPIPE; /* The most suitable error code :-) */
390 397
391 /* enforce simple/standard policy */ 398 /* enforce simple/standard policy */
392 allowed = (URB_NO_TRANSFER_DMA_MAP | URB_NO_SETUP_DMA_MAP | 399 allowed = (URB_NO_TRANSFER_DMA_MAP | URB_NO_SETUP_DMA_MAP |
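The new pipetypes check rejects URBs whose pipe disagrees with the endpoint descriptor's transfer type. Building the pipe with the matching helper keeps a submission valid; a sketch (udev, ep, buf, len, complete_fn and ctx are assumed to exist):

	struct urb *urb = usb_alloc_urb(0, GFP_KERNEL);
	unsigned int pipe = usb_sndbulkpipe(udev,
				ep->desc.bEndpointAddress);

	/* bulk pipe + bulk endpoint: passes the type check */
	usb_fill_bulk_urb(urb, udev, pipe, buf, len, complete_fn, ctx);
	ret = usb_submit_urb(urb, GFP_KERNEL);	/* -EPIPE on mismatch */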
@@ -430,7 +437,7 @@ int usb_submit_urb(struct urb *urb, gfp_t mem_flags)
430 case USB_ENDPOINT_XFER_INT: 437 case USB_ENDPOINT_XFER_INT:
431 /* too small? */ 438 /* too small? */
432 switch (dev->speed) { 439 switch (dev->speed) {
433 case USB_SPEED_VARIABLE:
440 case USB_SPEED_WIRELESS:
434 if (urb->interval < 6) 441 if (urb->interval < 6)
435 return -EINVAL; 442 return -EINVAL;
436 break; 443 break;
@@ -446,7 +453,7 @@ int usb_submit_urb(struct urb *urb, gfp_t mem_flags)
446 if (urb->interval > (1 << 15)) 453 if (urb->interval > (1 << 15))
447 return -EINVAL; 454 return -EINVAL;
448 max = 1 << 15; 455 max = 1 << 15;
449 case USB_SPEED_VARIABLE:
456 case USB_SPEED_WIRELESS:
450 if (urb->interval > 16) 457 if (urb->interval > 16)
451 return -EINVAL; 458 return -EINVAL;
452 break; 459 break;
@@ -473,7 +480,7 @@ int usb_submit_urb(struct urb *urb, gfp_t mem_flags)
473 default: 480 default:
474 return -EINVAL; 481 return -EINVAL;
475 } 482 }
476 if (dev->speed != USB_SPEED_VARIABLE) {
483 if (dev->speed != USB_SPEED_WIRELESS) {
477 /* Round down to a power of 2, no more than max */ 484 /* Round down to a power of 2, no more than max */
478 urb->interval = min(max, 1 << ilog2(urb->interval)); 485 urb->interval = min(max, 1 << ilog2(urb->interval));
479 } 486 }
diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
index 0daff0d968ba..1297e9b16a51 100644
--- a/drivers/usb/core/usb.c
+++ b/drivers/usb/core/usb.c
@@ -49,9 +49,6 @@ const char *usbcore_name = "usbcore";
49 49
50static int nousb; /* Disable USB when built into kernel image */ 50static int nousb; /* Disable USB when built into kernel image */
51 51
52/* Workqueue for autosuspend and for remote wakeup of root hubs */
53struct workqueue_struct *ksuspend_usb_wq;
54
55#ifdef CONFIG_USB_SUSPEND 52#ifdef CONFIG_USB_SUSPEND
56static int usb_autosuspend_delay = 2; /* Default delay value, 53static int usb_autosuspend_delay = 2; /* Default delay value,
57 * in seconds */ 54 * in seconds */
@@ -228,9 +225,6 @@ static void usb_release_dev(struct device *dev)
228 hcd = bus_to_hcd(udev->bus); 225 hcd = bus_to_hcd(udev->bus);
229 226
230 usb_destroy_configuration(udev); 227 usb_destroy_configuration(udev);
231 /* Root hubs aren't real devices, so don't free HCD resources */
232 if (hcd->driver->free_dev && udev->parent)
233 hcd->driver->free_dev(hcd, udev);
234 usb_put_hcd(hcd); 228 usb_put_hcd(hcd);
235 kfree(udev->product); 229 kfree(udev->product);
236 kfree(udev->manufacturer); 230 kfree(udev->manufacturer);
@@ -264,23 +258,6 @@ static int usb_dev_uevent(struct device *dev, struct kobj_uevent_env *env)
264 258
265#ifdef CONFIG_PM 259#ifdef CONFIG_PM
266 260
267static int ksuspend_usb_init(void)
268{
269 /* This workqueue is supposed to be both freezable and
270 * singlethreaded. Its job doesn't justify running on more
271 * than one CPU.
272 */
273 ksuspend_usb_wq = create_freezeable_workqueue("ksuspend_usbd");
274 if (!ksuspend_usb_wq)
275 return -ENOMEM;
276 return 0;
277}
278
279static void ksuspend_usb_cleanup(void)
280{
281 destroy_workqueue(ksuspend_usb_wq);
282}
283
284/* USB device Power-Management thunks. 261/* USB device Power-Management thunks.
285 * There's no need to distinguish here between quiescing a USB device 262 * There's no need to distinguish here between quiescing a USB device
286 * and powering it down; the generic_suspend() routine takes care of 263 * and powering it down; the generic_suspend() routine takes care of
@@ -296,7 +273,7 @@ static int usb_dev_prepare(struct device *dev)
296static void usb_dev_complete(struct device *dev) 273static void usb_dev_complete(struct device *dev)
297{ 274{
298 /* Currently used only for rebinding interfaces */ 275 /* Currently used only for rebinding interfaces */
299 usb_resume(dev, PMSG_RESUME); /* Message event is meaningless */
276 usb_resume(dev, PMSG_ON); /* FIXME: change to PMSG_COMPLETE */
300} 277}
301 278
302static int usb_dev_suspend(struct device *dev) 279static int usb_dev_suspend(struct device *dev)
@@ -342,9 +319,7 @@ static const struct dev_pm_ops usb_device_pm_ops = {
342 319
343#else 320#else
344 321
345#define ksuspend_usb_init() 0
346#define ksuspend_usb_cleanup() do {} while (0)
347#define usb_device_pm_ops (*(struct dev_pm_ops *)0)
322#define usb_device_pm_ops (*(struct dev_pm_ops *) NULL)
348 323
349#endif /* CONFIG_PM */ 324#endif /* CONFIG_PM */
350 325
@@ -472,9 +447,6 @@ struct usb_device *usb_alloc_dev(struct usb_device *parent,
472 INIT_LIST_HEAD(&dev->filelist); 447 INIT_LIST_HEAD(&dev->filelist);
473 448
474#ifdef CONFIG_PM 449#ifdef CONFIG_PM
475 mutex_init(&dev->pm_mutex);
476 INIT_DELAYED_WORK(&dev->autosuspend, usb_autosuspend_work);
477 INIT_WORK(&dev->autoresume, usb_autoresume_work);
478 dev->autosuspend_delay = usb_autosuspend_delay * HZ; 450 dev->autosuspend_delay = usb_autosuspend_delay * HZ;
479 dev->connect_time = jiffies; 451 dev->connect_time = jiffies;
480 dev->active_duration = -jiffies; 452 dev->active_duration = -jiffies;
@@ -1117,9 +1089,6 @@ static int __init usb_init(void)
1117 if (retval) 1089 if (retval)
1118 goto out; 1090 goto out;
1119 1091
1120 retval = ksuspend_usb_init();
1121 if (retval)
1122 goto out;
1123 retval = bus_register(&usb_bus_type); 1092 retval = bus_register(&usb_bus_type);
1124 if (retval) 1093 if (retval)
1125 goto bus_register_failed; 1094 goto bus_register_failed;
@@ -1159,7 +1128,7 @@ major_init_failed:
1159bus_notifier_failed: 1128bus_notifier_failed:
1160 bus_unregister(&usb_bus_type); 1129 bus_unregister(&usb_bus_type);
1161bus_register_failed: 1130bus_register_failed:
1162 ksuspend_usb_cleanup();
1131 usb_debugfs_cleanup();
1163out: 1132out:
1164 return retval; 1133 return retval;
1165} 1134}
@@ -1181,7 +1150,6 @@ static void __exit usb_exit(void)
1181 usb_hub_cleanup(); 1150 usb_hub_cleanup();
1182 bus_unregister_notifier(&usb_bus_type, &usb_bus_nb); 1151 bus_unregister_notifier(&usb_bus_type, &usb_bus_nb);
1183 bus_unregister(&usb_bus_type); 1152 bus_unregister(&usb_bus_type);
1184 ksuspend_usb_cleanup();
1185 usb_debugfs_cleanup(); 1153 usb_debugfs_cleanup();
1186} 1154}
1187 1155
diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h
index 4c36c7f512a0..cd882203ad34 100644
--- a/drivers/usb/core/usb.h
+++ b/drivers/usb/core/usb.h
@@ -55,24 +55,8 @@ extern void usb_major_cleanup(void);
55extern int usb_suspend(struct device *dev, pm_message_t msg); 55extern int usb_suspend(struct device *dev, pm_message_t msg);
56extern int usb_resume(struct device *dev, pm_message_t msg); 56extern int usb_resume(struct device *dev, pm_message_t msg);
57 57
58extern void usb_autosuspend_work(struct work_struct *work);
59extern void usb_autoresume_work(struct work_struct *work);
60extern int usb_port_suspend(struct usb_device *dev, pm_message_t msg); 58extern int usb_port_suspend(struct usb_device *dev, pm_message_t msg);
61extern int usb_port_resume(struct usb_device *dev, pm_message_t msg); 59extern int usb_port_resume(struct usb_device *dev, pm_message_t msg);
62extern int usb_external_suspend_device(struct usb_device *udev,
63 pm_message_t msg);
64extern int usb_external_resume_device(struct usb_device *udev,
65 pm_message_t msg);
66
67static inline void usb_pm_lock(struct usb_device *udev)
68{
69 mutex_lock_nested(&udev->pm_mutex, udev->level);
70}
71
72static inline void usb_pm_unlock(struct usb_device *udev)
73{
74 mutex_unlock(&udev->pm_mutex);
75}
76 60
77#else 61#else
78 62
@@ -86,9 +70,6 @@ static inline int usb_port_resume(struct usb_device *udev, pm_message_t msg)
86 return 0; 70 return 0;
87} 71}
88 72
89static inline void usb_pm_lock(struct usb_device *udev) {}
90static inline void usb_pm_unlock(struct usb_device *udev) {}
91
92#endif 73#endif
93 74
94#ifdef CONFIG_USB_SUSPEND 75#ifdef CONFIG_USB_SUSPEND
@@ -96,6 +77,7 @@ static inline void usb_pm_unlock(struct usb_device *udev) {}
96extern void usb_autosuspend_device(struct usb_device *udev); 77extern void usb_autosuspend_device(struct usb_device *udev);
97extern void usb_try_autosuspend_device(struct usb_device *udev); 78extern void usb_try_autosuspend_device(struct usb_device *udev);
98extern int usb_autoresume_device(struct usb_device *udev); 79extern int usb_autoresume_device(struct usb_device *udev);
80extern int usb_remote_wakeup(struct usb_device *dev);
99 81
100#else 82#else
101 83
@@ -106,9 +88,13 @@ static inline int usb_autoresume_device(struct usb_device *udev)
106 return 0; 88 return 0;
107} 89}
108 90
91static inline int usb_remote_wakeup(struct usb_device *udev)
92{
93 return 0;
94}
95
109#endif 96#endif
110 97
111extern struct workqueue_struct *ksuspend_usb_wq;
112extern struct bus_type usb_bus_type; 98extern struct bus_type usb_bus_type;
113extern struct device_type usb_device_type; 99extern struct device_type usb_device_type;
114extern struct device_type usb_if_device_type; 100extern struct device_type usb_if_device_type;
@@ -138,23 +124,6 @@ static inline int is_usb_device_driver(struct device_driver *drv)
138 for_devices; 124 for_devices;
139} 125}
140 126
141/* Interfaces and their "power state" are owned by usbcore */
142
143static inline void mark_active(struct usb_interface *f)
144{
145 f->is_active = 1;
146}
147
148static inline void mark_quiesced(struct usb_interface *f)
149{
150 f->is_active = 0;
151}
152
153static inline int is_active(const struct usb_interface *f)
154{
155 return f->is_active;
156}
157
158 127
159/* for labeling diagnostics */ 128/* for labeling diagnostics */
160extern const char *usbcore_name; 129extern const char *usbcore_name;
diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c
index 2958a1271b20..6e98a3697844 100644
--- a/drivers/usb/early/ehci-dbgp.c
+++ b/drivers/usb/early/ehci-dbgp.c
@@ -66,8 +66,6 @@ static struct ehci_dev ehci_dev;
66 66
67#define USB_DEBUG_DEVNUM 127 67#define USB_DEBUG_DEVNUM 127
68 68
69#define DBGP_DATA_TOGGLE 0x8800
70
71#ifdef DBGP_DEBUG 69#ifdef DBGP_DEBUG
72#define dbgp_printk printk 70#define dbgp_printk printk
73static void dbgp_ehci_status(char *str) 71static void dbgp_ehci_status(char *str)
@@ -88,11 +86,6 @@ static inline void dbgp_ehci_status(char *str) { }
88static inline void dbgp_printk(const char *fmt, ...) { } 86static inline void dbgp_printk(const char *fmt, ...) { }
89#endif 87#endif
90 88
91static inline u32 dbgp_pid_update(u32 x, u32 tok)
92{
93 return ((x ^ DBGP_DATA_TOGGLE) & 0xffff00) | (tok & 0xff);
94}
95
96static inline u32 dbgp_len_update(u32 x, u32 len) 89static inline u32 dbgp_len_update(u32 x, u32 len)
97{ 90{
98 return (x & ~0x0f) | (len & 0x0f); 91 return (x & ~0x0f) | (len & 0x0f);
@@ -136,6 +129,19 @@ static inline u32 dbgp_len_update(u32 x, u32 len)
136 129
137#define DBGP_MAX_PACKET 8 130#define DBGP_MAX_PACKET 8
138#define DBGP_TIMEOUT (250 * 1000) 131#define DBGP_TIMEOUT (250 * 1000)
132#define DBGP_LOOPS 1000
133
134static inline u32 dbgp_pid_write_update(u32 x, u32 tok)
135{
136 static int data0 = USB_PID_DATA1;
137 data0 ^= USB_PID_DATA_TOGGLE;
138 return (x & 0xffff0000) | (data0 << 8) | (tok & 0xff);
139}
140
141static inline u32 dbgp_pid_read_update(u32 x, u32 tok)
142{
143 return (x & 0xffff0000) | (USB_PID_DATA0 << 8) | (tok & 0xff);
144}
139 145
140static int dbgp_wait_until_complete(void) 146static int dbgp_wait_until_complete(void)
141{ 147{
@@ -180,7 +186,7 @@ static int dbgp_wait_until_done(unsigned ctrl)
180{ 186{
181 u32 pids, lpid; 187 u32 pids, lpid;
182 int ret; 188 int ret;
183 int loop = 3;
189 int loop = DBGP_LOOPS;
184 190
185retry: 191retry:
186 writel(ctrl | DBGP_GO, &ehci_debug->control); 192 writel(ctrl | DBGP_GO, &ehci_debug->control);
@@ -197,6 +203,8 @@ retry:
197 */ 203 */
198 if (ret == -DBGP_TIMEOUT && !dbgp_not_safe) 204 if (ret == -DBGP_TIMEOUT && !dbgp_not_safe)
199 dbgp_not_safe = 1; 205 dbgp_not_safe = 1;
206 if (ret == -DBGP_ERR_BAD && --loop > 0)
207 goto retry;
200 return ret; 208 return ret;
201 } 209 }
202 210
@@ -245,12 +253,20 @@ static inline void dbgp_get_data(void *buf, int size)
245 bytes[i] = (hi >> (8*(i - 4))) & 0xff; 253 bytes[i] = (hi >> (8*(i - 4))) & 0xff;
246} 254}
247 255
248static int dbgp_out(u32 addr, const char *bytes, int size)
256static int dbgp_bulk_write(unsigned devnum, unsigned endpoint,
257 const char *bytes, int size)
249{ 258{
259 int ret;
260 u32 addr;
250 u32 pids, ctrl; 261 u32 pids, ctrl;
251 262
263 if (size > DBGP_MAX_PACKET)
264 return -1;
265
266 addr = DBGP_EPADDR(devnum, endpoint);
267
252 pids = readl(&ehci_debug->pids); 268 pids = readl(&ehci_debug->pids);
253 pids = dbgp_pid_update(pids, USB_PID_OUT);
269 pids = dbgp_pid_write_update(pids, USB_PID_OUT);
254 270
255 ctrl = readl(&ehci_debug->control); 271 ctrl = readl(&ehci_debug->control);
256 ctrl = dbgp_len_update(ctrl, size); 272 ctrl = dbgp_len_update(ctrl, size);
@@ -260,34 +276,7 @@ static int dbgp_out(u32 addr, const char *bytes, int size)
260 dbgp_set_data(bytes, size); 276 dbgp_set_data(bytes, size);
261 writel(addr, &ehci_debug->address); 277 writel(addr, &ehci_debug->address);
262 writel(pids, &ehci_debug->pids); 278 writel(pids, &ehci_debug->pids);
263 return dbgp_wait_until_done(ctrl);
279 ret = dbgp_wait_until_done(ctrl);
264}
265
266static int dbgp_bulk_write(unsigned devnum, unsigned endpoint,
267 const char *bytes, int size)
268{
269 int ret;
270 int loops = 5;
271 u32 addr;
272 if (size > DBGP_MAX_PACKET)
273 return -1;
274
275 addr = DBGP_EPADDR(devnum, endpoint);
276try_again:
277 if (loops--) {
278 ret = dbgp_out(addr, bytes, size);
279 if (ret == -DBGP_ERR_BAD) {
280 int try_loops = 3;
281 do {
282 /* Emit a dummy packet to re-sync communication
283 * with the debug device */
284 if (dbgp_out(addr, "12345678", 8) >= 0) {
285 udelay(2);
286 goto try_again;
287 }
288 } while (try_loops--);
289 }
290 }
291 280
292 return ret; 281 return ret;
293} 282}
@@ -304,7 +293,7 @@ static int dbgp_bulk_read(unsigned devnum, unsigned endpoint, void *data,
304 addr = DBGP_EPADDR(devnum, endpoint); 293 addr = DBGP_EPADDR(devnum, endpoint);
305 294
306 pids = readl(&ehci_debug->pids); 295 pids = readl(&ehci_debug->pids);
307 pids = dbgp_pid_update(pids, USB_PID_IN);
296 pids = dbgp_pid_read_update(pids, USB_PID_IN);
308 297
309 ctrl = readl(&ehci_debug->control); 298 ctrl = readl(&ehci_debug->control);
310 ctrl = dbgp_len_update(ctrl, size); 299 ctrl = dbgp_len_update(ctrl, size);
@@ -362,7 +351,6 @@ static int dbgp_control_msg(unsigned devnum, int requesttype,
362 return dbgp_bulk_read(devnum, 0, data, size); 351 return dbgp_bulk_read(devnum, 0, data, size);
363} 352}
364 353
365
366/* Find a PCI capability */ 354/* Find a PCI capability */
367static u32 __init find_cap(u32 num, u32 slot, u32 func, int cap) 355static u32 __init find_cap(u32 num, u32 slot, u32 func, int cap)
368{ 356{
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index ee411206c699..7460cd797f45 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -812,6 +812,16 @@ config USB_CDC_COMPOSITE
812 Say "y" to link the driver statically, or "m" to build a 812 Say "y" to link the driver statically, or "m" to build a
813 dynamically linked module. 813 dynamically linked module.
814 814
815config USB_G_NOKIA
816 tristate "Nokia composite gadget"
817 depends on PHONET
818 help
819 The Nokia composite gadget provides support for ACM, OBEX
820 and Phonet in a single composite gadget driver.
821
822 It's only really useful for N900 hardware. If you're building
823 a kernel for N900, say Y or M here. If unsure, say N.
824
815config USB_G_MULTI 825config USB_G_MULTI
816 tristate "Multifunction Composite Gadget (EXPERIMENTAL)" 826 tristate "Multifunction Composite Gadget (EXPERIMENTAL)"
817 depends on BLOCK && NET 827 depends on BLOCK && NET
diff --git a/drivers/usb/gadget/Makefile b/drivers/usb/gadget/Makefile
index 2e2c047262b7..43b51da8d727 100644
--- a/drivers/usb/gadget/Makefile
+++ b/drivers/usb/gadget/Makefile
@@ -43,6 +43,7 @@ g_mass_storage-objs := mass_storage.o
43g_printer-objs := printer.o 43g_printer-objs := printer.o
44g_cdc-objs := cdc2.o 44g_cdc-objs := cdc2.o
45g_multi-objs := multi.o 45g_multi-objs := multi.o
46g_nokia-objs := nokia.o
46 47
47obj-$(CONFIG_USB_ZERO) += g_zero.o 48obj-$(CONFIG_USB_ZERO) += g_zero.o
48obj-$(CONFIG_USB_AUDIO) += g_audio.o 49obj-$(CONFIG_USB_AUDIO) += g_audio.o
@@ -55,4 +56,5 @@ obj-$(CONFIG_USB_G_PRINTER) += g_printer.o
55obj-$(CONFIG_USB_MIDI_GADGET) += g_midi.o 56obj-$(CONFIG_USB_MIDI_GADGET) += g_midi.o
56obj-$(CONFIG_USB_CDC_COMPOSITE) += g_cdc.o 57obj-$(CONFIG_USB_CDC_COMPOSITE) += g_cdc.o
57obj-$(CONFIG_USB_G_MULTI) += g_multi.o 58obj-$(CONFIG_USB_G_MULTI) += g_multi.o
59obj-$(CONFIG_USB_G_NOKIA) += g_nokia.o
58 60
diff --git a/drivers/usb/gadget/at91_udc.c b/drivers/usb/gadget/at91_udc.c
index 043e04db2a05..12ac9cd32a07 100644
--- a/drivers/usb/gadget/at91_udc.c
+++ b/drivers/usb/gadget/at91_udc.c
@@ -1656,9 +1656,7 @@ static int __init at91udc_probe(struct platform_device *pdev)
1656 if (!res) 1656 if (!res)
1657 return -ENXIO; 1657 return -ENXIO;
1658 1658
1659 if (!request_mem_region(res->start,
1660 res->end - res->start + 1,
1661 driver_name)) {
1659 if (!request_mem_region(res->start, resource_size(res), driver_name)) {
1662 DBG("someone's using UDC memory\n"); 1660 DBG("someone's using UDC memory\n");
1663 return -EBUSY; 1661 return -EBUSY;
1664 } 1662 }
@@ -1699,7 +1697,7 @@ static int __init at91udc_probe(struct platform_device *pdev)
1699 udc->ep[3].maxpacket = 64; 1697 udc->ep[3].maxpacket = 64;
1700 } 1698 }
1701 1699
1702 udc->udp_baseaddr = ioremap(res->start, res->end - res->start + 1);
1700 udc->udp_baseaddr = ioremap(res->start, resource_size(res));
1703 if (!udc->udp_baseaddr) { 1701 if (!udc->udp_baseaddr) {
1704 retval = -ENOMEM; 1702 retval = -ENOMEM;
1705 goto fail0a; 1703 goto fail0a;
@@ -1781,7 +1779,7 @@ fail0a:
1781 if (cpu_is_at91rm9200()) 1779 if (cpu_is_at91rm9200())
1782 gpio_free(udc->board.pullup_pin); 1780 gpio_free(udc->board.pullup_pin);
1783fail0: 1781fail0:
1784 release_mem_region(res->start, res->end - res->start + 1);
1782 release_mem_region(res->start, resource_size(res));
1785 DBG("%s probe failed, %d\n", driver_name, retval); 1783 DBG("%s probe failed, %d\n", driver_name, retval);
1786 return retval; 1784 return retval;
1787} 1785}
@@ -1813,7 +1811,7 @@ static int __exit at91udc_remove(struct platform_device *pdev)
1813 gpio_free(udc->board.pullup_pin); 1811 gpio_free(udc->board.pullup_pin);
1814 1812
1815 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1813 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1816 release_mem_region(res->start, res->end - res->start + 1);
1814 release_mem_region(res->start, resource_size(res));
1817 1815
1818 clk_put(udc->iclk); 1816 clk_put(udc->iclk);
1819 clk_put(udc->fclk); 1817 clk_put(udc->fclk);
diff --git a/drivers/usb/gadget/atmel_usba_udc.c b/drivers/usb/gadget/atmel_usba_udc.c
index 4e970cf0e29a..f79bdfe4bed9 100644
--- a/drivers/usb/gadget/atmel_usba_udc.c
+++ b/drivers/usb/gadget/atmel_usba_udc.c
@@ -320,7 +320,7 @@ static inline void usba_cleanup_debugfs(struct usba_udc *udc)
320static int vbus_is_present(struct usba_udc *udc) 320static int vbus_is_present(struct usba_udc *udc)
321{ 321{
322 if (gpio_is_valid(udc->vbus_pin)) 322 if (gpio_is_valid(udc->vbus_pin))
323 return gpio_get_value(udc->vbus_pin);
323 return gpio_get_value(udc->vbus_pin) ^ udc->vbus_pin_inverted;
324 324
325 /* No Vbus detection: Assume always present */ 325 /* No Vbus detection: Assume always present */
326 return 1; 326 return 1;
@@ -1763,7 +1763,7 @@ static irqreturn_t usba_vbus_irq(int irq, void *devid)
1763 if (!udc->driver) 1763 if (!udc->driver)
1764 goto out; 1764 goto out;
1765 1765
1766 vbus = gpio_get_value(udc->vbus_pin);
1766 vbus = vbus_is_present(udc);
1767 if (vbus != udc->vbus_prev) { 1767 if (vbus != udc->vbus_prev) {
1768 if (vbus) { 1768 if (vbus) {
1769 toggle_bias(1); 1769 toggle_bias(1);
@@ -1914,14 +1914,14 @@ static int __init usba_udc_probe(struct platform_device *pdev)
1914 udc->vbus_pin = -ENODEV; 1914 udc->vbus_pin = -ENODEV;
1915 1915
1916 ret = -ENOMEM; 1916 ret = -ENOMEM;
1917 udc->regs = ioremap(regs->start, regs->end - regs->start + 1);
1917 udc->regs = ioremap(regs->start, resource_size(regs));
1918 if (!udc->regs) { 1918 if (!udc->regs) {
1919 dev_err(&pdev->dev, "Unable to map I/O memory, aborting.\n"); 1919 dev_err(&pdev->dev, "Unable to map I/O memory, aborting.\n");
1920 goto err_map_regs; 1920 goto err_map_regs;
1921 } 1921 }
1922 dev_info(&pdev->dev, "MMIO registers at 0x%08lx mapped at %p\n", 1922 dev_info(&pdev->dev, "MMIO registers at 0x%08lx mapped at %p\n",
1923 (unsigned long)regs->start, udc->regs); 1923 (unsigned long)regs->start, udc->regs);
1924 udc->fifo = ioremap(fifo->start, fifo->end - fifo->start + 1);
1924 udc->fifo = ioremap(fifo->start, resource_size(fifo));
1925 if (!udc->fifo) { 1925 if (!udc->fifo) {
1926 dev_err(&pdev->dev, "Unable to map FIFO, aborting.\n"); 1926 dev_err(&pdev->dev, "Unable to map FIFO, aborting.\n");
1927 goto err_map_fifo; 1927 goto err_map_fifo;
@@ -2000,6 +2000,7 @@ static int __init usba_udc_probe(struct platform_device *pdev)
2000 if (gpio_is_valid(pdata->vbus_pin)) { 2000 if (gpio_is_valid(pdata->vbus_pin)) {
2001 if (!gpio_request(pdata->vbus_pin, "atmel_usba_udc")) { 2001 if (!gpio_request(pdata->vbus_pin, "atmel_usba_udc")) {
2002 udc->vbus_pin = pdata->vbus_pin; 2002 udc->vbus_pin = pdata->vbus_pin;
2003 udc->vbus_pin_inverted = pdata->vbus_pin_inverted;
2003 2004
2004 ret = request_irq(gpio_to_irq(udc->vbus_pin), 2005 ret = request_irq(gpio_to_irq(udc->vbus_pin),
2005 usba_vbus_irq, 0, 2006 usba_vbus_irq, 0,
diff --git a/drivers/usb/gadget/atmel_usba_udc.h b/drivers/usb/gadget/atmel_usba_udc.h
index f7baea307f0d..88a2e07a11a8 100644
--- a/drivers/usb/gadget/atmel_usba_udc.h
+++ b/drivers/usb/gadget/atmel_usba_udc.h
@@ -323,6 +323,7 @@ struct usba_udc {
323 struct platform_device *pdev; 323 struct platform_device *pdev;
324 int irq; 324 int irq;
325 int vbus_pin; 325 int vbus_pin;
326 int vbus_pin_inverted;
326 struct clk *pclk; 327 struct clk *pclk;
327 struct clk *hclk; 328 struct clk *hclk;
328 329
diff --git a/drivers/usb/gadget/epautoconf.c b/drivers/usb/gadget/epautoconf.c
index cd0914ec898e..65a5f94cbc04 100644
--- a/drivers/usb/gadget/epautoconf.c
+++ b/drivers/usb/gadget/epautoconf.c
@@ -265,16 +265,24 @@ struct usb_ep * __init usb_ep_autoconfig (
265 return ep; 265 return ep;
266 } 266 }
267 267
268 } else if (gadget_is_sh (gadget) && USB_ENDPOINT_XFER_INT == type) {
269 /* single buffering is enough; maybe 8 byte fifo is too */
270 ep = find_ep (gadget, "ep3in-bulk");
271 if (ep && ep_matches (gadget, ep, desc))
272 return ep;
273
274 } else if (gadget_is_mq11xx (gadget) && USB_ENDPOINT_XFER_INT == type) {
275 ep = find_ep (gadget, "ep1-bulk");
268#ifdef CONFIG_BLACKFIN
269 } else if (gadget_is_musbhsfc(gadget) || gadget_is_musbhdrc(gadget)) {
270 if ((USB_ENDPOINT_XFER_BULK == type) ||
271 (USB_ENDPOINT_XFER_ISOC == type)) {
272 if (USB_DIR_IN & desc->bEndpointAddress)
273 ep = find_ep (gadget, "ep5in");
274 else
275 ep = find_ep (gadget, "ep6out");
276 } else if (USB_ENDPOINT_XFER_INT == type) {
277 if (USB_DIR_IN & desc->bEndpointAddress)
278 ep = find_ep(gadget, "ep1in");
279 else
280 ep = find_ep(gadget, "ep2out");
281 } else
282 ep = NULL;
276 if (ep && ep_matches (gadget, ep, desc)) 283 if (ep && ep_matches (gadget, ep, desc))
277 return ep; 284 return ep;
285#endif
278 } 286 }
279 287
280 /* Second, look at endpoints until an unclaimed one looks usable */ 288 /* Second, look at endpoints until an unclaimed one looks usable */
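Function drivers never hard-code endpoint names themselves; they hand usb_ep_autoconfig() a descriptor template and let it choose, which is why controller-specific knowledge such as the Blackfin block lives here. Caller-side sketch (descriptor name is illustrative):

	static struct usb_endpoint_descriptor bulk_in_desc = {
		.bLength		= USB_DT_ENDPOINT_SIZE,
		.bDescriptorType	= USB_DT_ENDPOINT,
		.bEndpointAddress	= USB_DIR_IN,
		.bmAttributes		= USB_ENDPOINT_XFER_BULK,
	};

	struct usb_ep *ep = usb_ep_autoconfig(gadget, &bulk_in_desc);
	if (!ep)
		return -ENODEV;	/* no usable endpoint on this UDC */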
diff --git a/drivers/usb/gadget/ether.c b/drivers/usb/gadget/ether.c
index 141372b6e7a1..400f80372d93 100644
--- a/drivers/usb/gadget/ether.c
+++ b/drivers/usb/gadget/ether.c
@@ -259,7 +259,7 @@ static struct usb_configuration rndis_config_driver = {
259 259
260/*-------------------------------------------------------------------------*/ 260/*-------------------------------------------------------------------------*/
261 261
262#ifdef USB_ETH_EEM 262#ifdef CONFIG_USB_ETH_EEM
263static int use_eem = 1; 263static int use_eem = 1;
264#else 264#else
265static int use_eem; 265static int use_eem;
diff --git a/drivers/usb/gadget/f_acm.c b/drivers/usb/gadget/f_acm.c
index d10353d46b86..e49c7325dce2 100644
--- a/drivers/usb/gadget/f_acm.c
+++ b/drivers/usb/gadget/f_acm.c
@@ -702,14 +702,6 @@ acm_unbind(struct usb_configuration *c, struct usb_function *f)
702/* Some controllers can't support CDC ACM ... */ 702/* Some controllers can't support CDC ACM ... */
703static inline bool can_support_cdc(struct usb_configuration *c) 703static inline bool can_support_cdc(struct usb_configuration *c)
704{ 704{
705 /* SH3 doesn't support multiple interfaces */
706 if (gadget_is_sh(c->cdev->gadget))
707 return false;
708
709 /* sa1100 doesn't have a third interrupt endpoint */
710 if (gadget_is_sa1100(c->cdev->gadget))
711 return false;
712
713 /* everything else is *probably* fine ... */ 705 /* everything else is *probably* fine ... */
714 return true; 706 return true;
715} 707}
diff --git a/drivers/usb/gadget/f_ecm.c b/drivers/usb/gadget/f_ecm.c
index ecf5bdd0ae06..2fff530efc19 100644
--- a/drivers/usb/gadget/f_ecm.c
+++ b/drivers/usb/gadget/f_ecm.c
@@ -497,12 +497,9 @@ static int ecm_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
497 struct net_device *net; 497 struct net_device *net;
498 498
499 /* Enable zlps by default for ECM conformance; 499 /* Enable zlps by default for ECM conformance;
500 * override for musb_hdrc (avoids txdma ovhead) 500 * override for musb_hdrc (avoids txdma ovhead).
501 * and sa1100 (can't).
502 */ 501 */
503 ecm->port.is_zlp_ok = !( 502 ecm->port.is_zlp_ok = !(gadget_is_musbhdrc(cdev->gadget)
504 gadget_is_sa1100(cdev->gadget)
505 || gadget_is_musbhdrc(cdev->gadget)
506 ); 503 );
507 ecm->port.cdc_filter = DEFAULT_FILTER; 504 ecm->port.cdc_filter = DEFAULT_FILTER;
508 DBG(cdev, "activate ecm\n"); 505 DBG(cdev, "activate ecm\n");
diff --git a/drivers/usb/gadget/f_mass_storage.c b/drivers/usb/gadget/f_mass_storage.c
index a37640eba434..5a3cdd08f1d0 100644
--- a/drivers/usb/gadget/f_mass_storage.c
+++ b/drivers/usb/gadget/f_mass_storage.c
@@ -368,7 +368,7 @@ struct fsg_common {
368 struct task_struct *thread_task; 368 struct task_struct *thread_task;
369 369
370 /* Callback function to call when thread exits. */ 370 /* Callback function to call when thread exits. */
371 void (*thread_exits)(struct fsg_common *common); 371 int (*thread_exits)(struct fsg_common *common);
372 /* Gadget's private data. */ 372 /* Gadget's private data. */
373 void *private_data; 373 void *private_data;
374 374
@@ -392,8 +392,12 @@ struct fsg_config {
392 const char *lun_name_format; 392 const char *lun_name_format;
393 const char *thread_name; 393 const char *thread_name;
394 394
 395 /* Callback function to call when thread exits. */ 395 /* Callback function to call when thread exits. If no
 396 void (*thread_exits)(struct fsg_common *common); 396 * callback is set, or if it returns a value lower than
 397 * zero, MSF will force-eject all LUNs it operates on
 398 * (including those marked as non-removable or with the
 399 * prevent_medium_removal flag set). */
400 int (*thread_exits)(struct fsg_common *common);
397 /* Gadget's private data. */ 401 /* Gadget's private data. */
398 void *private_data; 402 void *private_data;
399 403
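
A hedged sketch of the new thread_exits contract described in the comment above (my_thread_exits and setup_config are illustrative names; mass_storage.c further down in this series supplies a real instance): return 0 to leave the LUNs alone; return a negative value, or register no callback at all, to trigger the forced eject.

static int my_thread_exits(struct fsg_common *common)
{
	/* gadget-specific cleanup ... */
	return 0;	/* 0: keep backing files open; < 0: force-eject */
}

static void setup_config(struct fsg_config *cfg)
{
	cfg->thread_exits = my_thread_exits;	/* NULL also forces eject */
}
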
@@ -614,7 +618,12 @@ static int fsg_setup(struct usb_function *f,
614 return -EDOM; 618 return -EDOM;
615 VDBG(fsg, "get max LUN\n"); 619 VDBG(fsg, "get max LUN\n");
616 *(u8 *) req->buf = fsg->common->nluns - 1; 620 *(u8 *) req->buf = fsg->common->nluns - 1;
617 return 1; 621
622 /* Respond with data/status */
623 req->length = min((u16)1, w_length);
624 fsg->common->ep0req_name =
625 ctrl->bRequestType & USB_DIR_IN ? "ep0-in" : "ep0-out";
626 return ep0_queue(fsg->common);
618 } 627 }
619 628
620 VDBG(fsg, 629 VDBG(fsg,
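
The rewritten Get-Max-LUN branch above now answers through the shared ep0 request rather than returning a magic length for the caller to queue. A hedged sketch of the general rule it applies to control-IN replies (reply, ep0 and req stand in for the driver's real objects):

/* Never send more than the host asked for: clamp to wLength. */
u16 w_length = le16_to_cpu(ctrl->wLength);

req->length = min_t(u16, sizeof(reply), w_length);
memcpy(req->buf, &reply, req->length);
return usb_ep_queue(ep0, req, GFP_ATOMIC);
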
@@ -1041,7 +1050,7 @@ static void invalidate_sub(struct fsg_lun *curlun)
1041 unsigned long rc; 1050 unsigned long rc;
1042 1051
1043 rc = invalidate_mapping_pages(inode->i_mapping, 0, -1); 1052 rc = invalidate_mapping_pages(inode->i_mapping, 0, -1);
1044 VLDBG(curlun, "invalidate_inode_pages -> %ld\n", rc); 1053 VLDBG(curlun, "invalidate_mapping_pages -> %ld\n", rc);
1045} 1054}
1046 1055
1047static int do_verify(struct fsg_common *common) 1056static int do_verify(struct fsg_common *common)
@@ -2524,14 +2533,6 @@ static void handle_exception(struct fsg_common *common)
2524 2533
2525 case FSG_STATE_CONFIG_CHANGE: 2534 case FSG_STATE_CONFIG_CHANGE:
2526 rc = do_set_config(common, new_config); 2535 rc = do_set_config(common, new_config);
2527 if (common->ep0_req_tag != exception_req_tag)
2528 break;
2529 if (rc != 0) { /* STALL on errors */
2530 DBG(common, "ep0 set halt\n");
2531 usb_ep_set_halt(common->ep0);
2532 } else { /* Complete the status stage */
2533 ep0_queue(common);
2534 }
2535 break; 2536 break;
2536 2537
2537 case FSG_STATE_EXIT: 2538 case FSG_STATE_EXIT:
@@ -2615,8 +2616,20 @@ static int fsg_main_thread(void *common_)
2615 common->thread_task = NULL; 2616 common->thread_task = NULL;
2616 spin_unlock_irq(&common->lock); 2617 spin_unlock_irq(&common->lock);
2617 2618
2618 if (common->thread_exits) 2619 if (!common->thread_exits || common->thread_exits(common) < 0) {
2619 common->thread_exits(common); 2620 struct fsg_lun *curlun = common->luns;
2621 unsigned i = common->nluns;
2622
2623 down_write(&common->filesem);
2624 for (; i--; ++curlun) {
2625 if (!fsg_lun_is_open(curlun))
2626 continue;
2627
2628 fsg_lun_close(curlun);
2629 curlun->unit_attention_data = SS_MEDIUM_NOT_PRESENT;
2630 }
2631 up_write(&common->filesem);
2632 }
2620 2633
2621 /* Let the unbind and cleanup routines know the thread has exited */ 2634 /* Let the unbind and cleanup routines know the thread has exited */
2622 complete_and_exit(&common->thread_notifier, 0); 2635 complete_and_exit(&common->thread_notifier, 0);
@@ -2763,10 +2776,7 @@ static struct fsg_common *fsg_common_init(struct fsg_common *common,
2763 if (cfg->release != 0xffff) { 2776 if (cfg->release != 0xffff) {
2764 i = cfg->release; 2777 i = cfg->release;
2765 } else { 2778 } else {
2766 /* The sa1100 controller is not supported */ 2779 i = usb_gadget_controller_number(gadget);
2767 i = gadget_is_sa1100(gadget)
2768 ? -1
2769 : usb_gadget_controller_number(gadget);
2770 if (i >= 0) { 2780 if (i >= 0) {
2771 i = 0x0300 + i; 2781 i = 0x0300 + i;
2772 } else { 2782 } else {
@@ -2791,8 +2801,7 @@ static struct fsg_common *fsg_common_init(struct fsg_common *common,
2791 * disable stalls. 2801 * disable stalls.
2792 */ 2802 */
2793 common->can_stall = cfg->can_stall && 2803 common->can_stall = cfg->can_stall &&
2794 !(gadget_is_sh(common->gadget) || 2804 !(gadget_is_at91(common->gadget));
2795 gadget_is_at91(common->gadget));
2796 2805
2797 2806
2798 spin_lock_init(&common->lock); 2807 spin_lock_init(&common->lock);
@@ -2852,7 +2861,6 @@ error_release:
2852 /* Call fsg_common_release() directly, ref might be not 2861 /* Call fsg_common_release() directly, ref might be not
2853 * initialised */ 2862 * initialised */
2854 fsg_common_release(&common->ref); 2863 fsg_common_release(&common->ref);
2855 complete(&common->thread_notifier);
2856 return ERR_PTR(rc); 2864 return ERR_PTR(rc);
2857} 2865}
2858 2866
diff --git a/drivers/usb/gadget/f_rndis.c b/drivers/usb/gadget/f_rndis.c
index 95dae4c1ea40..a30e60c7f129 100644
--- a/drivers/usb/gadget/f_rndis.c
+++ b/drivers/usb/gadget/f_rndis.c
@@ -769,10 +769,6 @@ rndis_unbind(struct usb_configuration *c, struct usb_function *f)
769/* Some controllers can't support RNDIS ... */ 769/* Some controllers can't support RNDIS ... */
770static inline bool can_support_rndis(struct usb_configuration *c) 770static inline bool can_support_rndis(struct usb_configuration *c)
771{ 771{
772 /* only two endpoints on sa1100 */
773 if (gadget_is_sa1100(c->cdev->gadget))
774 return false;
775
776 /* everything else is *presumably* fine */ 772 /* everything else is *presumably* fine */
777 return true; 773 return true;
778} 774}
diff --git a/drivers/usb/gadget/file_storage.c b/drivers/usb/gadget/file_storage.c
index 29dfb0277ffb..b49d86e3e45b 100644
--- a/drivers/usb/gadget/file_storage.c
+++ b/drivers/usb/gadget/file_storage.c
@@ -1448,7 +1448,7 @@ static void invalidate_sub(struct fsg_lun *curlun)
1448 unsigned long rc; 1448 unsigned long rc;
1449 1449
1450 rc = invalidate_mapping_pages(inode->i_mapping, 0, -1); 1450 rc = invalidate_mapping_pages(inode->i_mapping, 0, -1);
1451 VLDBG(curlun, "invalidate_inode_pages -> %ld\n", rc); 1451 VLDBG(curlun, "invalidate_mapping_pages -> %ld\n", rc);
1452} 1452}
1453 1453
1454static int do_verify(struct fsg_dev *fsg) 1454static int do_verify(struct fsg_dev *fsg)
@@ -3208,15 +3208,11 @@ static int __init check_parameters(struct fsg_dev *fsg)
3208 * halt bulk endpoints correctly. If one of them is present, 3208 * halt bulk endpoints correctly. If one of them is present,
3209 * disable stalls. 3209 * disable stalls.
3210 */ 3210 */
3211 if (gadget_is_sh(fsg->gadget) || gadget_is_at91(fsg->gadget)) 3211 if (gadget_is_at91(fsg->gadget))
3212 mod_data.can_stall = 0; 3212 mod_data.can_stall = 0;
3213 3213
3214 if (mod_data.release == 0xffff) { // Parameter wasn't set 3214 if (mod_data.release == 0xffff) { // Parameter wasn't set
3215 /* The sa1100 controller is not supported */ 3215 gcnum = usb_gadget_controller_number(fsg->gadget);
3216 if (gadget_is_sa1100(fsg->gadget))
3217 gcnum = -1;
3218 else
3219 gcnum = usb_gadget_controller_number(fsg->gadget);
3220 if (gcnum >= 0) 3216 if (gcnum >= 0)
3221 mod_data.release = 0x0300 + gcnum; 3217 mod_data.release = 0x0300 + gcnum;
3222 else { 3218 else {
diff --git a/drivers/usb/gadget/fsl_qe_udc.c b/drivers/usb/gadget/fsl_qe_udc.c
index 7881f12413c4..3537d51073b2 100644
--- a/drivers/usb/gadget/fsl_qe_udc.c
+++ b/drivers/usb/gadget/fsl_qe_udc.c
@@ -2749,7 +2749,7 @@ static int __devexit qe_udc_remove(struct of_device *ofdev)
2749} 2749}
2750 2750
2751/*-------------------------------------------------------------------------*/ 2751/*-------------------------------------------------------------------------*/
2752static struct of_device_id __devinitdata qe_udc_match[] = { 2752static const struct of_device_id qe_udc_match[] __devinitconst = {
2753 { 2753 {
2754 .compatible = "fsl,mpc8323-qe-usb", 2754 .compatible = "fsl,mpc8323-qe-usb",
2755 .data = (void *)PORT_QE, 2755 .data = (void *)PORT_QE,
diff --git a/drivers/usb/gadget/gadget_chips.h b/drivers/usb/gadget/gadget_chips.h
index f2d270b202f2..1edbc12fff18 100644
--- a/drivers/usb/gadget/gadget_chips.h
+++ b/drivers/usb/gadget/gadget_chips.h
@@ -45,46 +45,18 @@
45#define gadget_is_goku(g) 0 45#define gadget_is_goku(g) 0
46#endif 46#endif
47 47
48/* SH3 UDC -- not yet ported 2.4 --> 2.6 */
49#ifdef CONFIG_USB_GADGET_SUPERH
50#define gadget_is_sh(g) !strcmp("sh_udc", (g)->name)
51#else
52#define gadget_is_sh(g) 0
53#endif
54
55/* not yet stable on 2.6 (would help "original Zaurus") */
56#ifdef CONFIG_USB_GADGET_SA1100
57#define gadget_is_sa1100(g) !strcmp("sa1100_udc", (g)->name)
58#else
59#define gadget_is_sa1100(g) 0
60#endif
61
62#ifdef CONFIG_USB_GADGET_LH7A40X 48#ifdef CONFIG_USB_GADGET_LH7A40X
63#define gadget_is_lh7a40x(g) !strcmp("lh7a40x_udc", (g)->name) 49#define gadget_is_lh7a40x(g) !strcmp("lh7a40x_udc", (g)->name)
64#else 50#else
65#define gadget_is_lh7a40x(g) 0 51#define gadget_is_lh7a40x(g) 0
66#endif 52#endif
67 53
68/* handhelds.org tree (?) */
69#ifdef CONFIG_USB_GADGET_MQ11XX
70#define gadget_is_mq11xx(g) !strcmp("mq11xx_udc", (g)->name)
71#else
72#define gadget_is_mq11xx(g) 0
73#endif
74
75#ifdef CONFIG_USB_GADGET_OMAP 54#ifdef CONFIG_USB_GADGET_OMAP
76#define gadget_is_omap(g) !strcmp("omap_udc", (g)->name) 55#define gadget_is_omap(g) !strcmp("omap_udc", (g)->name)
77#else 56#else
78#define gadget_is_omap(g) 0 57#define gadget_is_omap(g) 0
79#endif 58#endif
80 59
81/* not yet ported 2.4 --> 2.6 */
82#ifdef CONFIG_USB_GADGET_N9604
83#define gadget_is_n9604(g) !strcmp("n9604_udc", (g)->name)
84#else
85#define gadget_is_n9604(g) 0
86#endif
87
88/* various unstable versions available */ 60/* various unstable versions available */
89#ifdef CONFIG_USB_GADGET_PXA27X 61#ifdef CONFIG_USB_GADGET_PXA27X
90#define gadget_is_pxa27x(g) !strcmp("pxa27x_udc", (g)->name) 62#define gadget_is_pxa27x(g) !strcmp("pxa27x_udc", (g)->name)
@@ -122,14 +94,6 @@
122#define gadget_is_fsl_usb2(g) 0 94#define gadget_is_fsl_usb2(g) 0
123#endif 95#endif
124 96
125/* Mentor high speed function controller */
126/* from Montavista kernel (?) */
127#ifdef CONFIG_USB_GADGET_MUSBHSFC
128#define gadget_is_musbhsfc(g) !strcmp("musbhsfc_udc", (g)->name)
129#else
130#define gadget_is_musbhsfc(g) 0
131#endif
132
133/* Mentor high speed "dual role" controller, in peripheral role */ 97/* Mentor high speed "dual role" controller, in peripheral role */
134#ifdef CONFIG_USB_GADGET_MUSB_HDRC 98#ifdef CONFIG_USB_GADGET_MUSB_HDRC
135#define gadget_is_musbhdrc(g) !strcmp("musb_hdrc", (g)->name) 99#define gadget_is_musbhdrc(g) !strcmp("musb_hdrc", (g)->name)
@@ -143,13 +107,6 @@
143#define gadget_is_langwell(g) 0 107#define gadget_is_langwell(g) 0
144#endif 108#endif
145 109
146/* from Montavista kernel (?) */
147#ifdef CONFIG_USB_GADGET_MPC8272
148#define gadget_is_mpc8272(g) !strcmp("mpc8272_udc", (g)->name)
149#else
150#define gadget_is_mpc8272(g) 0
151#endif
152
153#ifdef CONFIG_USB_GADGET_M66592 110#ifdef CONFIG_USB_GADGET_M66592
154#define gadget_is_m66592(g) !strcmp("m66592_udc", (g)->name) 111#define gadget_is_m66592(g) !strcmp("m66592_udc", (g)->name)
155#else 112#else
@@ -203,20 +160,12 @@ static inline int usb_gadget_controller_number(struct usb_gadget *gadget)
203 return 0x02; 160 return 0x02;
204 else if (gadget_is_pxa(gadget)) 161 else if (gadget_is_pxa(gadget))
205 return 0x03; 162 return 0x03;
206 else if (gadget_is_sh(gadget))
207 return 0x04;
208 else if (gadget_is_sa1100(gadget))
209 return 0x05;
210 else if (gadget_is_goku(gadget)) 163 else if (gadget_is_goku(gadget))
211 return 0x06; 164 return 0x06;
212 else if (gadget_is_mq11xx(gadget))
213 return 0x07;
214 else if (gadget_is_omap(gadget)) 165 else if (gadget_is_omap(gadget))
215 return 0x08; 166 return 0x08;
216 else if (gadget_is_lh7a40x(gadget)) 167 else if (gadget_is_lh7a40x(gadget))
217 return 0x09; 168 return 0x09;
218 else if (gadget_is_n9604(gadget))
219 return 0x10;
220 else if (gadget_is_pxa27x(gadget)) 169 else if (gadget_is_pxa27x(gadget))
221 return 0x11; 170 return 0x11;
222 else if (gadget_is_s3c2410(gadget)) 171 else if (gadget_is_s3c2410(gadget))
@@ -225,12 +174,8 @@ static inline int usb_gadget_controller_number(struct usb_gadget *gadget)
225 return 0x13; 174 return 0x13;
226 else if (gadget_is_imx(gadget)) 175 else if (gadget_is_imx(gadget))
227 return 0x14; 176 return 0x14;
228 else if (gadget_is_musbhsfc(gadget))
229 return 0x15;
230 else if (gadget_is_musbhdrc(gadget)) 177 else if (gadget_is_musbhdrc(gadget))
231 return 0x16; 178 return 0x16;
232 else if (gadget_is_mpc8272(gadget))
233 return 0x17;
234 else if (gadget_is_atmel_usba(gadget)) 179 else if (gadget_is_atmel_usba(gadget))
235 return 0x18; 180 return 0x18;
236 else if (gadget_is_fsl_usb2(gadget)) 181 else if (gadget_is_fsl_usb2(gadget))
@@ -265,10 +210,6 @@ static inline bool gadget_supports_altsettings(struct usb_gadget *gadget)
265 if (gadget_is_pxa27x(gadget)) 210 if (gadget_is_pxa27x(gadget))
266 return false; 211 return false;
267 212
268 /* SH3 hardware just doesn't do altsettings */
269 if (gadget_is_sh(gadget))
270 return false;
271
272 /* Everything else is *presumably* fine ... */ 213 /* Everything else is *presumably* fine ... */
273 return true; 214 return true;
274} 215}
diff --git a/drivers/usb/gadget/gmidi.c b/drivers/usb/gadget/gmidi.c
index 5f6a2e0a9357..04f6224b7e06 100644
--- a/drivers/usb/gadget/gmidi.c
+++ b/drivers/usb/gadget/gmidi.c
@@ -618,11 +618,6 @@ gmidi_set_config(struct gmidi_device *dev, unsigned number, gfp_t gfp_flags)
618 } 618 }
619#endif 619#endif
620 620
621 if (gadget_is_sa1100(gadget) && dev->config) {
622 /* tx fifo is full, but we can't clear it...*/
623 ERROR(dev, "can't change configurations\n");
624 return -ESPIPE;
625 }
626 gmidi_reset_config(dev); 621 gmidi_reset_config(dev);
627 622
628 switch (number) { 623 switch (number) {
diff --git a/drivers/usb/gadget/goku_udc.c b/drivers/usb/gadget/goku_udc.c
index 112bb40a427c..e8edc640381e 100644
--- a/drivers/usb/gadget/goku_udc.c
+++ b/drivers/usb/gadget/goku_udc.c
@@ -1859,7 +1859,7 @@ done:
1859 1859
1860/*-------------------------------------------------------------------------*/ 1860/*-------------------------------------------------------------------------*/
1861 1861
1862static struct pci_device_id pci_ids [] = { { 1862static const struct pci_device_id pci_ids[] = { {
1863 .class = ((PCI_CLASS_SERIAL_USB << 8) | 0xfe), 1863 .class = ((PCI_CLASS_SERIAL_USB << 8) | 0xfe),
1864 .class_mask = ~0, 1864 .class_mask = ~0,
1865 .vendor = 0x102f, /* Toshiba */ 1865 .vendor = 0x102f, /* Toshiba */
diff --git a/drivers/usb/gadget/inode.c b/drivers/usb/gadget/inode.c
index bf0f6520c6df..de8a83803505 100644
--- a/drivers/usb/gadget/inode.c
+++ b/drivers/usb/gadget/inode.c
@@ -194,7 +194,7 @@ enum ep_state {
194}; 194};
195 195
196struct ep_data { 196struct ep_data {
197 struct semaphore lock; 197 struct mutex lock;
198 enum ep_state state; 198 enum ep_state state;
199 atomic_t count; 199 atomic_t count;
200 struct dev_data *dev; 200 struct dev_data *dev;
@@ -298,10 +298,10 @@ get_ready_ep (unsigned f_flags, struct ep_data *epdata)
298 int val; 298 int val;
299 299
300 if (f_flags & O_NONBLOCK) { 300 if (f_flags & O_NONBLOCK) {
301 if (down_trylock (&epdata->lock) != 0) 301 if (!mutex_trylock(&epdata->lock))
302 goto nonblock; 302 goto nonblock;
303 if (epdata->state != STATE_EP_ENABLED) { 303 if (epdata->state != STATE_EP_ENABLED) {
304 up (&epdata->lock); 304 mutex_unlock(&epdata->lock);
305nonblock: 305nonblock:
306 val = -EAGAIN; 306 val = -EAGAIN;
307 } else 307 } else
@@ -309,7 +309,8 @@ nonblock:
309 return val; 309 return val;
310 } 310 }
311 311
312 if ((val = down_interruptible (&epdata->lock)) < 0) 312 val = mutex_lock_interruptible(&epdata->lock);
313 if (val < 0)
313 return val; 314 return val;
314 315
315 switch (epdata->state) { 316 switch (epdata->state) {
@@ -323,7 +324,7 @@ nonblock:
323 // FALLTHROUGH 324 // FALLTHROUGH
324 case STATE_EP_UNBOUND: /* clean disconnect */ 325 case STATE_EP_UNBOUND: /* clean disconnect */
325 val = -ENODEV; 326 val = -ENODEV;
326 up (&epdata->lock); 327 mutex_unlock(&epdata->lock);
327 } 328 }
328 return val; 329 return val;
329} 330}
@@ -393,7 +394,7 @@ ep_read (struct file *fd, char __user *buf, size_t len, loff_t *ptr)
393 if (likely (data->ep != NULL)) 394 if (likely (data->ep != NULL))
394 usb_ep_set_halt (data->ep); 395 usb_ep_set_halt (data->ep);
395 spin_unlock_irq (&data->dev->lock); 396 spin_unlock_irq (&data->dev->lock);
396 up (&data->lock); 397 mutex_unlock(&data->lock);
397 return -EBADMSG; 398 return -EBADMSG;
398 } 399 }
399 400
@@ -411,7 +412,7 @@ ep_read (struct file *fd, char __user *buf, size_t len, loff_t *ptr)
411 value = -EFAULT; 412 value = -EFAULT;
412 413
413free1: 414free1:
414 up (&data->lock); 415 mutex_unlock(&data->lock);
415 kfree (kbuf); 416 kfree (kbuf);
416 return value; 417 return value;
417} 418}
@@ -436,7 +437,7 @@ ep_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
436 if (likely (data->ep != NULL)) 437 if (likely (data->ep != NULL))
437 usb_ep_set_halt (data->ep); 438 usb_ep_set_halt (data->ep);
438 spin_unlock_irq (&data->dev->lock); 439 spin_unlock_irq (&data->dev->lock);
439 up (&data->lock); 440 mutex_unlock(&data->lock);
440 return -EBADMSG; 441 return -EBADMSG;
441 } 442 }
442 443
@@ -455,7 +456,7 @@ ep_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
455 VDEBUG (data->dev, "%s write %zu IN, status %d\n", 456 VDEBUG (data->dev, "%s write %zu IN, status %d\n",
456 data->name, len, (int) value); 457 data->name, len, (int) value);
457free1: 458free1:
458 up (&data->lock); 459 mutex_unlock(&data->lock);
459 kfree (kbuf); 460 kfree (kbuf);
460 return value; 461 return value;
461} 462}
@@ -466,7 +467,8 @@ ep_release (struct inode *inode, struct file *fd)
466 struct ep_data *data = fd->private_data; 467 struct ep_data *data = fd->private_data;
467 int value; 468 int value;
468 469
469 if ((value = down_interruptible(&data->lock)) < 0) 470 value = mutex_lock_interruptible(&data->lock);
471 if (value < 0)
470 return value; 472 return value;
471 473
472 /* clean up if this can be reopened */ 474 /* clean up if this can be reopened */
@@ -476,7 +478,7 @@ ep_release (struct inode *inode, struct file *fd)
476 data->hs_desc.bDescriptorType = 0; 478 data->hs_desc.bDescriptorType = 0;
477 usb_ep_disable(data->ep); 479 usb_ep_disable(data->ep);
478 } 480 }
479 up (&data->lock); 481 mutex_unlock(&data->lock);
480 put_ep (data); 482 put_ep (data);
481 return 0; 483 return 0;
482} 484}
@@ -507,7 +509,7 @@ static long ep_ioctl(struct file *fd, unsigned code, unsigned long value)
507 } else 509 } else
508 status = -ENODEV; 510 status = -ENODEV;
509 spin_unlock_irq (&data->dev->lock); 511 spin_unlock_irq (&data->dev->lock);
510 up (&data->lock); 512 mutex_unlock(&data->lock);
511 return status; 513 return status;
512} 514}
513 515
@@ -673,7 +675,7 @@ fail:
673 value = -ENODEV; 675 value = -ENODEV;
674 spin_unlock_irq(&epdata->dev->lock); 676 spin_unlock_irq(&epdata->dev->lock);
675 677
676 up(&epdata->lock); 678 mutex_unlock(&epdata->lock);
677 679
678 if (unlikely(value)) { 680 if (unlikely(value)) {
679 kfree(priv); 681 kfree(priv);
@@ -765,7 +767,8 @@ ep_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
765 u32 tag; 767 u32 tag;
766 int value, length = len; 768 int value, length = len;
767 769
768 if ((value = down_interruptible (&data->lock)) < 0) 770 value = mutex_lock_interruptible(&data->lock);
771 if (value < 0)
769 return value; 772 return value;
770 773
771 if (data->state != STATE_EP_READY) { 774 if (data->state != STATE_EP_READY) {
@@ -854,7 +857,7 @@ fail:
854 data->desc.bDescriptorType = 0; 857 data->desc.bDescriptorType = 0;
855 data->hs_desc.bDescriptorType = 0; 858 data->hs_desc.bDescriptorType = 0;
856 } 859 }
857 up (&data->lock); 860 mutex_unlock(&data->lock);
858 return value; 861 return value;
859fail0: 862fail0:
860 value = -EINVAL; 863 value = -EINVAL;
@@ -870,7 +873,7 @@ ep_open (struct inode *inode, struct file *fd)
870 struct ep_data *data = inode->i_private; 873 struct ep_data *data = inode->i_private;
871 int value = -EBUSY; 874 int value = -EBUSY;
872 875
873 if (down_interruptible (&data->lock) != 0) 876 if (mutex_lock_interruptible(&data->lock) != 0)
874 return -EINTR; 877 return -EINTR;
875 spin_lock_irq (&data->dev->lock); 878 spin_lock_irq (&data->dev->lock);
876 if (data->dev->state == STATE_DEV_UNBOUND) 879 if (data->dev->state == STATE_DEV_UNBOUND)
@@ -885,7 +888,7 @@ ep_open (struct inode *inode, struct file *fd)
885 DBG (data->dev, "%s state %d\n", 888 DBG (data->dev, "%s state %d\n",
886 data->name, data->state); 889 data->name, data->state);
887 spin_unlock_irq (&data->dev->lock); 890 spin_unlock_irq (&data->dev->lock);
888 up (&data->lock); 891 mutex_unlock(&data->lock);
889 return value; 892 return value;
890} 893}
891 894
@@ -1631,7 +1634,7 @@ static int activate_ep_files (struct dev_data *dev)
1631 if (!data) 1634 if (!data)
1632 goto enomem0; 1635 goto enomem0;
1633 data->state = STATE_EP_DISABLED; 1636 data->state = STATE_EP_DISABLED;
1634 init_MUTEX (&data->lock); 1637 mutex_init(&data->lock);
1635 init_waitqueue_head (&data->wait); 1638 init_waitqueue_head (&data->wait);
1636 1639
1637 strncpy (data->name, ep->name, sizeof (data->name) - 1); 1640 strncpy (data->name, ep->name, sizeof (data->name) - 1);
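
The semaphore-to-mutex conversion above is mechanical except for one trap: the trylock helpers have opposite return conventions. down_trylock() returns 0 on success, while mutex_trylock() returns 1 on success, which is why the patched test is negated. A minimal sketch (try_enter_nonblock is an illustrative name):

#include <linux/mutex.h>

static DEFINE_MUTEX(lock);

static int try_enter_nonblock(void)
{
	if (!mutex_trylock(&lock))	/* was: if (down_trylock(&sem) != 0) */
		return -EAGAIN;
	/* ... critical section ... */
	mutex_unlock(&lock);
	return 0;
}
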
diff --git a/drivers/usb/gadget/mass_storage.c b/drivers/usb/gadget/mass_storage.c
index 19619fbf20ac..705cc1f76327 100644
--- a/drivers/usb/gadget/mass_storage.c
+++ b/drivers/usb/gadget/mass_storage.c
@@ -135,6 +135,12 @@ FSG_MODULE_PARAMETERS(/* no prefix */, mod_data);
135static unsigned long msg_registered = 0; 135static unsigned long msg_registered = 0;
136static void msg_cleanup(void); 136static void msg_cleanup(void);
137 137
138static int msg_thread_exits(struct fsg_common *common)
139{
140 msg_cleanup();
141 return 0;
142}
143
138static int __init msg_do_config(struct usb_configuration *c) 144static int __init msg_do_config(struct usb_configuration *c)
139{ 145{
140 struct fsg_common *common; 146 struct fsg_common *common;
@@ -147,7 +153,7 @@ static int __init msg_do_config(struct usb_configuration *c)
147 } 153 }
148 154
149 fsg_config_from_params(&config, &mod_data); 155 fsg_config_from_params(&config, &mod_data);
150 config.thread_exits = (void(*)(struct fsg_common*))&msg_cleanup; 156 config.thread_exits = msg_thread_exits;
151 common = fsg_common_init(0, c->cdev, &config); 157 common = fsg_common_init(0, c->cdev, &config);
152 if (IS_ERR(common)) 158 if (IS_ERR(common))
153 return PTR_ERR(common); 159 return PTR_ERR(common);
diff --git a/drivers/usb/gadget/nokia.c b/drivers/usb/gadget/nokia.c
new file mode 100644
index 000000000000..7d6b66a85724
--- /dev/null
+++ b/drivers/usb/gadget/nokia.c
@@ -0,0 +1,259 @@
1/*
2 * nokia.c -- Nokia Composite Gadget Driver
3 *
4 * Copyright (C) 2008-2010 Nokia Corporation
5 * Contact: Felipe Balbi <felipe.balbi@nokia.com>
6 *
7 * This gadget driver borrows from serial.c which is:
8 *
9 * Copyright (C) 2003 Al Borchers (alborchers@steinerpoint.com)
10 * Copyright (C) 2008 by David Brownell
11 * Copyright (C) 2008 by Nokia Corporation
12 *
13 * This software is distributed under the terms of the GNU General
14 * Public License ("GPL") as published by the Free Software Foundation,
15 * version 2 of that License.
16 */
17
18#include <linux/kernel.h>
19#include <linux/utsname.h>
20#include <linux/device.h>
21
22#include "u_serial.h"
23#include "u_ether.h"
24#include "u_phonet.h"
25#include "gadget_chips.h"
26
27/* Defines */
28
29#define NOKIA_VERSION_NUM 0x0211
30#define NOKIA_LONG_NAME "N900 (PC-Suite Mode)"
31
32/*-------------------------------------------------------------------------*/
33
34/*
35 * Kbuild is not very cooperative with respect to linking separately
36 * compiled library objects into one module. So for now we won't use
37 * separate compilation ... ensuring init/exit sections work to shrink
38 * the runtime footprint, and giving us at least some parts of what
39 * a "gcc --combine ... part1.c part2.c part3.c ... " build would.
40 */
41#include "composite.c"
42#include "usbstring.c"
43#include "config.c"
44#include "epautoconf.c"
45
46#include "u_serial.c"
47#include "f_acm.c"
48#include "f_ecm.c"
49#include "f_obex.c"
50#include "f_serial.c"
51#include "f_phonet.c"
52#include "u_ether.c"
53
54/*-------------------------------------------------------------------------*/
55
56#define NOKIA_VENDOR_ID 0x0421 /* Nokia */
57#define NOKIA_PRODUCT_ID 0x01c8 /* Nokia Gadget */
58
59/* string IDs are assigned dynamically */
60
61#define STRING_MANUFACTURER_IDX 0
62#define STRING_PRODUCT_IDX 1
63#define STRING_DESCRIPTION_IDX 2
64
65static char manufacturer_nokia[] = "Nokia";
66static const char product_nokia[] = NOKIA_LONG_NAME;
67static const char description_nokia[] = "PC-Suite Configuration";
68
69static struct usb_string strings_dev[] = {
70 [STRING_MANUFACTURER_IDX].s = manufacturer_nokia,
71 [STRING_PRODUCT_IDX].s = NOKIA_LONG_NAME,
72 [STRING_DESCRIPTION_IDX].s = description_nokia,
73 { } /* end of list */
74};
75
76static struct usb_gadget_strings stringtab_dev = {
77 .language = 0x0409, /* en-us */
78 .strings = strings_dev,
79};
80
81static struct usb_gadget_strings *dev_strings[] = {
82 &stringtab_dev,
83 NULL,
84};
85
86static struct usb_device_descriptor device_desc = {
87 .bLength = USB_DT_DEVICE_SIZE,
88 .bDescriptorType = USB_DT_DEVICE,
89 .bcdUSB = __constant_cpu_to_le16(0x0200),
90 .bDeviceClass = USB_CLASS_COMM,
91 .idVendor = __constant_cpu_to_le16(NOKIA_VENDOR_ID),
92 .idProduct = __constant_cpu_to_le16(NOKIA_PRODUCT_ID),
93 /* .iManufacturer = DYNAMIC */
94 /* .iProduct = DYNAMIC */
95 .bNumConfigurations = 1,
96};
97
98/*-------------------------------------------------------------------------*/
99
100/* Module */
101MODULE_DESCRIPTION("Nokia composite gadget driver for N900");
102MODULE_AUTHOR("Felipe Balbi");
103MODULE_LICENSE("GPL");
104
105/*-------------------------------------------------------------------------*/
106
107static u8 hostaddr[ETH_ALEN];
108
109static int __init nokia_bind_config(struct usb_configuration *c)
110{
111 int status = 0;
112
113 status = phonet_bind_config(c);
114 if (status)
115 printk(KERN_DEBUG "could not bind phonet config\n");
116
117 status = obex_bind_config(c, 0);
118 if (status)
119 printk(KERN_DEBUG "could not bind obex config %d\n", 0);
120
121 status = obex_bind_config(c, 1);
122 if (status)
 123 printk(KERN_DEBUG "could not bind obex config %d\n", 1);
124
125 status = acm_bind_config(c, 2);
126 if (status)
127 printk(KERN_DEBUG "could not bind acm config\n");
128
129 status = ecm_bind_config(c, hostaddr);
130 if (status)
131 printk(KERN_DEBUG "could not bind ecm config\n");
132
133 return status;
134}
135
136static struct usb_configuration nokia_config_500ma_driver = {
137 .label = "Bus Powered",
138 .bind = nokia_bind_config,
139 .bConfigurationValue = 1,
140 /* .iConfiguration = DYNAMIC */
141 .bmAttributes = USB_CONFIG_ATT_ONE,
142 .bMaxPower = 250, /* 500mA */
143};
144
145static struct usb_configuration nokia_config_100ma_driver = {
146 .label = "Self Powered",
147 .bind = nokia_bind_config,
148 .bConfigurationValue = 2,
149 /* .iConfiguration = DYNAMIC */
150 .bmAttributes = USB_CONFIG_ATT_ONE | USB_CONFIG_ATT_SELFPOWER,
151 .bMaxPower = 50, /* 100 mA */
152};
153
154static int __init nokia_bind(struct usb_composite_dev *cdev)
155{
156 int gcnum;
157 struct usb_gadget *gadget = cdev->gadget;
158 int status;
159
160 status = gphonet_setup(cdev->gadget);
161 if (status < 0)
162 goto err_phonet;
163
164 status = gserial_setup(cdev->gadget, 3);
165 if (status < 0)
166 goto err_serial;
167
168 status = gether_setup(cdev->gadget, hostaddr);
169 if (status < 0)
170 goto err_ether;
171
172 status = usb_string_id(cdev);
173 if (status < 0)
174 goto err_usb;
175 strings_dev[STRING_MANUFACTURER_IDX].id = status;
176
177 device_desc.iManufacturer = status;
178
179 status = usb_string_id(cdev);
180 if (status < 0)
181 goto err_usb;
182 strings_dev[STRING_PRODUCT_IDX].id = status;
183
184 device_desc.iProduct = status;
185
186 /* config description */
187 status = usb_string_id(cdev);
188 if (status < 0)
189 goto err_usb;
190 strings_dev[STRING_DESCRIPTION_IDX].id = status;
191
192 nokia_config_500ma_driver.iConfiguration = status;
193 nokia_config_100ma_driver.iConfiguration = status;
194
195 /* set up other descriptors */
196 gcnum = usb_gadget_controller_number(gadget);
197 if (gcnum >= 0)
198 device_desc.bcdDevice = cpu_to_le16(NOKIA_VERSION_NUM);
199 else {
 200 /* this should only work with hw that supports altsettings
 201 * and several endpoints; anything else must fail.
 202 */
203 pr_err("nokia_bind: controller '%s' not recognized\n",
204 gadget->name);
205 goto err_usb;
206 }
207
 208 /* finally register the configuration */
209 status = usb_add_config(cdev, &nokia_config_500ma_driver);
210 if (status < 0)
211 goto err_usb;
212
213 status = usb_add_config(cdev, &nokia_config_100ma_driver);
214 if (status < 0)
215 goto err_usb;
216
217 dev_info(&gadget->dev, "%s\n", NOKIA_LONG_NAME);
218
219 return 0;
220
221err_usb:
222 gether_cleanup();
223err_ether:
224 gserial_cleanup();
225err_serial:
226 gphonet_cleanup();
227err_phonet:
228 return status;
229}
230
231static int __exit nokia_unbind(struct usb_composite_dev *cdev)
232{
233 gphonet_cleanup();
234 gserial_cleanup();
235 gether_cleanup();
236
237 return 0;
238}
239
240static struct usb_composite_driver nokia_driver = {
241 .name = "g_nokia",
242 .dev = &device_desc,
243 .strings = dev_strings,
244 .bind = nokia_bind,
245 .unbind = __exit_p(nokia_unbind),
246};
247
248static int __init nokia_init(void)
249{
250 return usb_composite_register(&nokia_driver);
251}
252module_init(nokia_init);
253
254static void __exit nokia_cleanup(void)
255{
256 usb_composite_unregister(&nokia_driver);
257}
258module_exit(nokia_cleanup);
259
diff --git a/drivers/usb/gadget/printer.c b/drivers/usb/gadget/printer.c
index 2d867fd22413..6b8bf8c781c4 100644
--- a/drivers/usb/gadget/printer.c
+++ b/drivers/usb/gadget/printer.c
@@ -949,12 +949,6 @@ printer_set_config(struct printer_dev *dev, unsigned number)
949 int result = 0; 949 int result = 0;
950 struct usb_gadget *gadget = dev->gadget; 950 struct usb_gadget *gadget = dev->gadget;
951 951
952 if (gadget_is_sa1100(gadget) && dev->config) {
953 /* tx fifo is full, but we can't clear it...*/
954 INFO(dev, "can't change configurations\n");
955 return -ESPIPE;
956 }
957
958 switch (number) { 952 switch (number) {
959 case DEV_CONFIG_VALUE: 953 case DEV_CONFIG_VALUE:
960 result = 0; 954 result = 0;
@@ -1033,12 +1027,6 @@ set_interface(struct printer_dev *dev, unsigned number)
1033{ 1027{
1034 int result = 0; 1028 int result = 0;
1035 1029
1036 if (gadget_is_sa1100(dev->gadget) && dev->interface < 0) {
1037 /* tx fifo is full, but we can't clear it...*/
1038 INFO(dev, "can't change interfaces\n");
1039 return -ESPIPE;
1040 }
1041
1042 /* Free the current interface */ 1030 /* Free the current interface */
1043 switch (dev->interface) { 1031 switch (dev->interface) {
1044 case PRINTER_INTERFACE: 1032 case PRINTER_INTERFACE:
@@ -1392,12 +1380,6 @@ printer_bind(struct usb_gadget *gadget)
1392 goto fail; 1380 goto fail;
1393 } 1381 }
1394 1382
1395 if (gadget_is_sa1100(gadget)) {
1396 /* hardware can't write zero length packets. */
 1397 ERROR(dev, "SA1100 controller is unsupported by this driver\n");
1398 goto fail;
1399 }
1400
1401 gcnum = usb_gadget_controller_number(gadget); 1383 gcnum = usb_gadget_controller_number(gadget);
1402 if (gcnum >= 0) { 1384 if (gcnum >= 0) {
1403 device_desc.bcdDevice = cpu_to_le16(0x0200 + gcnum); 1385 device_desc.bcdDevice = cpu_to_le16(0x0200 + gcnum);
diff --git a/drivers/usb/gadget/pxa27x_udc.c b/drivers/usb/gadget/pxa27x_udc.c
index adda1208a1ec..05b892c3d686 100644
--- a/drivers/usb/gadget/pxa27x_udc.c
+++ b/drivers/usb/gadget/pxa27x_udc.c
@@ -742,13 +742,17 @@ static void ep_del_request(struct pxa_ep *ep, struct pxa27x_request *req)
742 * @ep: pxa physical endpoint 742 * @ep: pxa physical endpoint
743 * @req: pxa request 743 * @req: pxa request
744 * @status: usb request status sent to gadget API 744 * @status: usb request status sent to gadget API
745 * @pflags: flags of previous spinlock_irq_save() or NULL if no lock held
745 * 746 *
746 * Context: ep->lock held 747 * Context: ep->lock held if flags not NULL, else ep->lock released
747 * 748 *
748 * Retire a pxa27x usb request. Endpoint must be locked. 749 * Retire a pxa27x usb request. Endpoint must be locked.
749 */ 750 */
750static void req_done(struct pxa_ep *ep, struct pxa27x_request *req, int status) 751static void req_done(struct pxa_ep *ep, struct pxa27x_request *req, int status,
752 unsigned long *pflags)
751{ 753{
754 unsigned long flags;
755
752 ep_del_request(ep, req); 756 ep_del_request(ep, req);
753 if (likely(req->req.status == -EINPROGRESS)) 757 if (likely(req->req.status == -EINPROGRESS))
754 req->req.status = status; 758 req->req.status = status;
@@ -760,38 +764,48 @@ static void req_done(struct pxa_ep *ep, struct pxa27x_request *req, int status)
760 &req->req, status, 764 &req->req, status,
761 req->req.actual, req->req.length); 765 req->req.actual, req->req.length);
762 766
767 if (pflags)
768 spin_unlock_irqrestore(&ep->lock, *pflags);
769 local_irq_save(flags);
763 req->req.complete(&req->udc_usb_ep->usb_ep, &req->req); 770 req->req.complete(&req->udc_usb_ep->usb_ep, &req->req);
771 local_irq_restore(flags);
772 if (pflags)
773 spin_lock_irqsave(&ep->lock, *pflags);
764} 774}
765 775
766/** 776/**
767 * ep_end_out_req - Ends endpoint OUT request 777 * ep_end_out_req - Ends endpoint OUT request
768 * @ep: physical endpoint 778 * @ep: physical endpoint
769 * @req: pxa request 779 * @req: pxa request
780 * @pflags: flags of previous spinlock_irq_save() or NULL if no lock held
770 * 781 *
771 * Context: ep->lock held 782 * Context: ep->lock held or released (see req_done())
772 * 783 *
773 * Ends endpoint OUT request (completes usb request). 784 * Ends endpoint OUT request (completes usb request).
774 */ 785 */
775static void ep_end_out_req(struct pxa_ep *ep, struct pxa27x_request *req) 786static void ep_end_out_req(struct pxa_ep *ep, struct pxa27x_request *req,
787 unsigned long *pflags)
776{ 788{
777 inc_ep_stats_reqs(ep, !USB_DIR_IN); 789 inc_ep_stats_reqs(ep, !USB_DIR_IN);
778 req_done(ep, req, 0); 790 req_done(ep, req, 0, pflags);
779} 791}
780 792
781/** 793/**
782 * ep0_end_out_req - Ends control endpoint OUT request (ends data stage) 794 * ep0_end_out_req - Ends control endpoint OUT request (ends data stage)
783 * @ep: physical endpoint 795 * @ep: physical endpoint
784 * @req: pxa request 796 * @req: pxa request
797 * @pflags: flags of previous spinlock_irq_save() or NULL if no lock held
785 * 798 *
786 * Context: ep->lock held 799 * Context: ep->lock held or released (see req_done())
787 * 800 *
788 * Ends control endpoint OUT request (completes usb request), and puts 801 * Ends control endpoint OUT request (completes usb request), and puts
789 * control endpoint into idle state 802 * control endpoint into idle state
790 */ 803 */
791static void ep0_end_out_req(struct pxa_ep *ep, struct pxa27x_request *req) 804static void ep0_end_out_req(struct pxa_ep *ep, struct pxa27x_request *req,
805 unsigned long *pflags)
792{ 806{
793 set_ep0state(ep->dev, OUT_STATUS_STAGE); 807 set_ep0state(ep->dev, OUT_STATUS_STAGE);
794 ep_end_out_req(ep, req); 808 ep_end_out_req(ep, req, pflags);
795 ep0_idle(ep->dev); 809 ep0_idle(ep->dev);
796} 810}
797 811
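
The reworked req_done() implements one rule, sketched in isolation below: never call the gadget's completion callback while holding the endpoint lock, since the callback is allowed to requeue and would otherwise deadlock in pxa_ep_queue(). This condenses the code above; complete_one is an illustrative name:

static void complete_one(struct pxa_ep *ep, struct pxa27x_request *req,
			 unsigned long *pflags)
{
	unsigned long flags;

	if (pflags)
		spin_unlock_irqrestore(&ep->lock, *pflags);
	local_irq_save(flags);
	req->req.complete(&req->udc_usb_ep->usb_ep, &req->req); /* may requeue */
	local_irq_restore(flags);
	if (pflags)
		spin_lock_irqsave(&ep->lock, *pflags);
}
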
@@ -799,31 +813,35 @@ static void ep0_end_out_req(struct pxa_ep *ep, struct pxa27x_request *req)
799 * ep_end_in_req - Ends endpoint IN request 813 * ep_end_in_req - Ends endpoint IN request
800 * @ep: physical endpoint 814 * @ep: physical endpoint
801 * @req: pxa request 815 * @req: pxa request
816 * @pflags: flags of previous spinlock_irq_save() or NULL if no lock held
802 * 817 *
803 * Context: ep->lock held 818 * Context: ep->lock held or released (see req_done())
804 * 819 *
805 * Ends endpoint IN request (completes usb request). 820 * Ends endpoint IN request (completes usb request).
806 */ 821 */
807static void ep_end_in_req(struct pxa_ep *ep, struct pxa27x_request *req) 822static void ep_end_in_req(struct pxa_ep *ep, struct pxa27x_request *req,
823 unsigned long *pflags)
808{ 824{
809 inc_ep_stats_reqs(ep, USB_DIR_IN); 825 inc_ep_stats_reqs(ep, USB_DIR_IN);
810 req_done(ep, req, 0); 826 req_done(ep, req, 0, pflags);
811} 827}
812 828
813/** 829/**
814 * ep0_end_in_req - Ends control endpoint IN request (ends data stage) 830 * ep0_end_in_req - Ends control endpoint IN request (ends data stage)
815 * @ep: physical endpoint 831 * @ep: physical endpoint
816 * @req: pxa request 832 * @req: pxa request
833 * @pflags: flags of previous spinlock_irq_save() or NULL if no lock held
817 * 834 *
818 * Context: ep->lock held 835 * Context: ep->lock held or released (see req_done())
819 * 836 *
820 * Ends control endpoint IN request (completes usb request), and puts 837 * Ends control endpoint IN request (completes usb request), and puts
821 * control endpoint into status state 838 * control endpoint into status state
822 */ 839 */
823static void ep0_end_in_req(struct pxa_ep *ep, struct pxa27x_request *req) 840static void ep0_end_in_req(struct pxa_ep *ep, struct pxa27x_request *req,
841 unsigned long *pflags)
824{ 842{
825 set_ep0state(ep->dev, IN_STATUS_STAGE); 843 set_ep0state(ep->dev, IN_STATUS_STAGE);
826 ep_end_in_req(ep, req); 844 ep_end_in_req(ep, req, pflags);
827} 845}
828 846
829/** 847/**
@@ -831,19 +849,22 @@ static void ep0_end_in_req(struct pxa_ep *ep, struct pxa27x_request *req)
831 * @ep: pxa endpoint 849 * @ep: pxa endpoint
832 * @status: usb request status 850 * @status: usb request status
833 * 851 *
834 * Context: ep->lock held 852 * Context: ep->lock released
835 * 853 *
836 * Dequeues all requests on an endpoint. As a side effect, interrupts will be 854 * Dequeues all requests on an endpoint. As a side effect, interrupts will be
 837 * disabled on that endpoint (because there are no more requests). 855 * disabled on that endpoint (because there are no more requests).
838 */ 856 */
839static void nuke(struct pxa_ep *ep, int status) 857static void nuke(struct pxa_ep *ep, int status)
840{ 858{
841 struct pxa27x_request *req; 859 struct pxa27x_request *req;
860 unsigned long flags;
842 861
862 spin_lock_irqsave(&ep->lock, flags);
843 while (!list_empty(&ep->queue)) { 863 while (!list_empty(&ep->queue)) {
844 req = list_entry(ep->queue.next, struct pxa27x_request, queue); 864 req = list_entry(ep->queue.next, struct pxa27x_request, queue);
845 req_done(ep, req, status); 865 req_done(ep, req, status, &flags);
846 } 866 }
867 spin_unlock_irqrestore(&ep->lock, flags);
847} 868}
848 869
849/** 870/**
@@ -1123,6 +1144,7 @@ static int pxa_ep_queue(struct usb_ep *_ep, struct usb_request *_req,
1123 int rc = 0; 1144 int rc = 0;
1124 int is_first_req; 1145 int is_first_req;
1125 unsigned length; 1146 unsigned length;
1147 int recursion_detected;
1126 1148
1127 req = container_of(_req, struct pxa27x_request, req); 1149 req = container_of(_req, struct pxa27x_request, req);
1128 udc_usb_ep = container_of(_ep, struct udc_usb_ep, usb_ep); 1150 udc_usb_ep = container_of(_ep, struct udc_usb_ep, usb_ep);
@@ -1152,6 +1174,7 @@ static int pxa_ep_queue(struct usb_ep *_ep, struct usb_request *_req,
1152 return -EMSGSIZE; 1174 return -EMSGSIZE;
1153 1175
1154 spin_lock_irqsave(&ep->lock, flags); 1176 spin_lock_irqsave(&ep->lock, flags);
1177 recursion_detected = ep->in_handle_ep;
1155 1178
1156 is_first_req = list_empty(&ep->queue); 1179 is_first_req = list_empty(&ep->queue);
1157 ep_dbg(ep, "queue req %p(first=%s), len %d buf %p\n", 1180 ep_dbg(ep, "queue req %p(first=%s), len %d buf %p\n",
@@ -1161,12 +1184,12 @@ static int pxa_ep_queue(struct usb_ep *_ep, struct usb_request *_req,
1161 if (!ep->enabled) { 1184 if (!ep->enabled) {
1162 _req->status = -ESHUTDOWN; 1185 _req->status = -ESHUTDOWN;
1163 rc = -ESHUTDOWN; 1186 rc = -ESHUTDOWN;
1164 goto out; 1187 goto out_locked;
1165 } 1188 }
1166 1189
1167 if (req->in_use) { 1190 if (req->in_use) {
1168 ep_err(ep, "refusing to queue req %p (already queued)\n", req); 1191 ep_err(ep, "refusing to queue req %p (already queued)\n", req);
1169 goto out; 1192 goto out_locked;
1170 } 1193 }
1171 1194
1172 length = _req->length; 1195 length = _req->length;
@@ -1174,12 +1197,13 @@ static int pxa_ep_queue(struct usb_ep *_ep, struct usb_request *_req,
1174 _req->actual = 0; 1197 _req->actual = 0;
1175 1198
1176 ep_add_request(ep, req); 1199 ep_add_request(ep, req);
1200 spin_unlock_irqrestore(&ep->lock, flags);
1177 1201
1178 if (is_ep0(ep)) { 1202 if (is_ep0(ep)) {
1179 switch (dev->ep0state) { 1203 switch (dev->ep0state) {
1180 case WAIT_ACK_SET_CONF_INTERF: 1204 case WAIT_ACK_SET_CONF_INTERF:
1181 if (length == 0) { 1205 if (length == 0) {
1182 ep_end_in_req(ep, req); 1206 ep_end_in_req(ep, req, NULL);
1183 } else { 1207 } else {
1184 ep_err(ep, "got a request of %d bytes while" 1208 ep_err(ep, "got a request of %d bytes while"
1185 "in state WAIT_ACK_SET_CONF_INTERF\n", 1209 "in state WAIT_ACK_SET_CONF_INTERF\n",
@@ -1192,12 +1216,12 @@ static int pxa_ep_queue(struct usb_ep *_ep, struct usb_request *_req,
1192 case IN_DATA_STAGE: 1216 case IN_DATA_STAGE:
1193 if (!ep_is_full(ep)) 1217 if (!ep_is_full(ep))
1194 if (write_ep0_fifo(ep, req)) 1218 if (write_ep0_fifo(ep, req))
1195 ep0_end_in_req(ep, req); 1219 ep0_end_in_req(ep, req, NULL);
1196 break; 1220 break;
1197 case OUT_DATA_STAGE: 1221 case OUT_DATA_STAGE:
1198 if ((length == 0) || !epout_has_pkt(ep)) 1222 if ((length == 0) || !epout_has_pkt(ep))
1199 if (read_ep0_fifo(ep, req)) 1223 if (read_ep0_fifo(ep, req))
1200 ep0_end_out_req(ep, req); 1224 ep0_end_out_req(ep, req, NULL);
1201 break; 1225 break;
1202 default: 1226 default:
1203 ep_err(ep, "odd state %s to send me a request\n", 1227 ep_err(ep, "odd state %s to send me a request\n",
@@ -1207,12 +1231,15 @@ static int pxa_ep_queue(struct usb_ep *_ep, struct usb_request *_req,
1207 break; 1231 break;
1208 } 1232 }
1209 } else { 1233 } else {
1210 handle_ep(ep); 1234 if (!recursion_detected)
1235 handle_ep(ep);
1211 } 1236 }
1212 1237
1213out: 1238out:
1214 spin_unlock_irqrestore(&ep->lock, flags);
1215 return rc; 1239 return rc;
1240out_locked:
1241 spin_unlock_irqrestore(&ep->lock, flags);
1242 goto out;
1216} 1243}
1217 1244
1218/** 1245/**
@@ -1242,13 +1269,14 @@ static int pxa_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
1242 /* make sure it's actually queued on this endpoint */ 1269 /* make sure it's actually queued on this endpoint */
1243 list_for_each_entry(req, &ep->queue, queue) { 1270 list_for_each_entry(req, &ep->queue, queue) {
1244 if (&req->req == _req) { 1271 if (&req->req == _req) {
1245 req_done(ep, req, -ECONNRESET);
1246 rc = 0; 1272 rc = 0;
1247 break; 1273 break;
1248 } 1274 }
1249 } 1275 }
1250 1276
1251 spin_unlock_irqrestore(&ep->lock, flags); 1277 spin_unlock_irqrestore(&ep->lock, flags);
1278 if (!rc)
1279 req_done(ep, req, -ECONNRESET, NULL);
1252 return rc; 1280 return rc;
1253} 1281}
1254 1282
@@ -1445,7 +1473,6 @@ static int pxa_ep_disable(struct usb_ep *_ep)
1445{ 1473{
1446 struct pxa_ep *ep; 1474 struct pxa_ep *ep;
1447 struct udc_usb_ep *udc_usb_ep; 1475 struct udc_usb_ep *udc_usb_ep;
1448 unsigned long flags;
1449 1476
1450 if (!_ep) 1477 if (!_ep)
1451 return -EINVAL; 1478 return -EINVAL;
@@ -1455,10 +1482,8 @@ static int pxa_ep_disable(struct usb_ep *_ep)
1455 if (!ep || is_ep0(ep) || !list_empty(&ep->queue)) 1482 if (!ep || is_ep0(ep) || !list_empty(&ep->queue))
1456 return -EINVAL; 1483 return -EINVAL;
1457 1484
1458 spin_lock_irqsave(&ep->lock, flags);
1459 ep->enabled = 0; 1485 ep->enabled = 0;
1460 nuke(ep, -ESHUTDOWN); 1486 nuke(ep, -ESHUTDOWN);
1461 spin_unlock_irqrestore(&ep->lock, flags);
1462 1487
1463 pxa_ep_fifo_flush(_ep); 1488 pxa_ep_fifo_flush(_ep);
1464 udc_usb_ep->pxa_ep = NULL; 1489 udc_usb_ep->pxa_ep = NULL;
@@ -1907,8 +1932,10 @@ static void handle_ep0_ctrl_req(struct pxa_udc *udc,
1907 } u; 1932 } u;
1908 int i; 1933 int i;
1909 int have_extrabytes = 0; 1934 int have_extrabytes = 0;
1935 unsigned long flags;
1910 1936
1911 nuke(ep, -EPROTO); 1937 nuke(ep, -EPROTO);
1938 spin_lock_irqsave(&ep->lock, flags);
1912 1939
1913 /* 1940 /*
1914 * In the PXA320 manual, in the section about Back-to-Back setup 1941 * In the PXA320 manual, in the section about Back-to-Back setup
@@ -1947,10 +1974,13 @@ static void handle_ep0_ctrl_req(struct pxa_udc *udc,
1947 /* Tell UDC to enter Data Stage */ 1974 /* Tell UDC to enter Data Stage */
1948 ep_write_UDCCSR(ep, UDCCSR0_SA | UDCCSR0_OPC); 1975 ep_write_UDCCSR(ep, UDCCSR0_SA | UDCCSR0_OPC);
1949 1976
1977 spin_unlock_irqrestore(&ep->lock, flags);
1950 i = udc->driver->setup(&udc->gadget, &u.r); 1978 i = udc->driver->setup(&udc->gadget, &u.r);
1979 spin_lock_irqsave(&ep->lock, flags);
1951 if (i < 0) 1980 if (i < 0)
1952 goto stall; 1981 goto stall;
1953out: 1982out:
1983 spin_unlock_irqrestore(&ep->lock, flags);
1954 return; 1984 return;
1955stall: 1985stall:
1956 ep_dbg(ep, "protocol STALL, udccsr0=%03x err %d\n", 1986 ep_dbg(ep, "protocol STALL, udccsr0=%03x err %d\n",
@@ -2055,13 +2085,13 @@ static void handle_ep0(struct pxa_udc *udc, int fifo_irq, int opc_irq)
2055 if (req && !ep_is_full(ep)) 2085 if (req && !ep_is_full(ep))
2056 completed = write_ep0_fifo(ep, req); 2086 completed = write_ep0_fifo(ep, req);
2057 if (completed) 2087 if (completed)
2058 ep0_end_in_req(ep, req); 2088 ep0_end_in_req(ep, req, NULL);
2059 break; 2089 break;
2060 case OUT_DATA_STAGE: /* SET_DESCRIPTOR */ 2090 case OUT_DATA_STAGE: /* SET_DESCRIPTOR */
2061 if (epout_has_pkt(ep) && req) 2091 if (epout_has_pkt(ep) && req)
2062 completed = read_ep0_fifo(ep, req); 2092 completed = read_ep0_fifo(ep, req);
2063 if (completed) 2093 if (completed)
2064 ep0_end_out_req(ep, req); 2094 ep0_end_out_req(ep, req, NULL);
2065 break; 2095 break;
2066 case STALL: 2096 case STALL:
2067 ep_write_UDCCSR(ep, UDCCSR0_FST); 2097 ep_write_UDCCSR(ep, UDCCSR0_FST);
@@ -2091,7 +2121,7 @@ static void handle_ep0(struct pxa_udc *udc, int fifo_irq, int opc_irq)
2091 * Tries to transfer all pending request data into the endpoint and/or 2121 * Tries to transfer all pending request data into the endpoint and/or
2092 * transfer all pending data in the endpoint into usb requests. 2122 * transfer all pending data in the endpoint into usb requests.
2093 * 2123 *
2094 * Is always called when in_interrupt() or with ep->lock held. 2124 * Is always called when in_interrupt() and with ep->lock released.
2095 */ 2125 */
2096static void handle_ep(struct pxa_ep *ep) 2126static void handle_ep(struct pxa_ep *ep)
2097{ 2127{
@@ -2100,10 +2130,17 @@ static void handle_ep(struct pxa_ep *ep)
2100 u32 udccsr; 2130 u32 udccsr;
2101 int is_in = ep->dir_in; 2131 int is_in = ep->dir_in;
2102 int loop = 0; 2132 int loop = 0;
2133 unsigned long flags;
2134
2135 spin_lock_irqsave(&ep->lock, flags);
2136 if (ep->in_handle_ep)
2137 goto recursion_detected;
2138 ep->in_handle_ep = 1;
2103 2139
2104 do { 2140 do {
2105 completed = 0; 2141 completed = 0;
2106 udccsr = udc_ep_readl(ep, UDCCSR); 2142 udccsr = udc_ep_readl(ep, UDCCSR);
2143
2107 if (likely(!list_empty(&ep->queue))) 2144 if (likely(!list_empty(&ep->queue)))
2108 req = list_entry(ep->queue.next, 2145 req = list_entry(ep->queue.next,
2109 struct pxa27x_request, queue); 2146 struct pxa27x_request, queue);
@@ -2122,15 +2159,22 @@ static void handle_ep(struct pxa_ep *ep)
2122 if (unlikely(is_in)) { 2159 if (unlikely(is_in)) {
2123 if (likely(!ep_is_full(ep))) 2160 if (likely(!ep_is_full(ep)))
2124 completed = write_fifo(ep, req); 2161 completed = write_fifo(ep, req);
2125 if (completed)
2126 ep_end_in_req(ep, req);
2127 } else { 2162 } else {
2128 if (likely(epout_has_pkt(ep))) 2163 if (likely(epout_has_pkt(ep)))
2129 completed = read_fifo(ep, req); 2164 completed = read_fifo(ep, req);
2130 if (completed) 2165 }
2131 ep_end_out_req(ep, req); 2166
2167 if (completed) {
2168 if (is_in)
2169 ep_end_in_req(ep, req, &flags);
2170 else
2171 ep_end_out_req(ep, req, &flags);
2132 } 2172 }
2133 } while (completed); 2173 } while (completed);
2174
2175 ep->in_handle_ep = 0;
2176recursion_detected:
2177 spin_unlock_irqrestore(&ep->lock, flags);
2134} 2178}
2135 2179
2136/** 2180/**
@@ -2218,9 +2262,13 @@ static void irq_handle_data(int irq, struct pxa_udc *udc)
2218 continue; 2262 continue;
2219 2263
2220 udc_writel(udc, UDCISR0, UDCISR_INT(i, UDCISR_INT_MASK)); 2264 udc_writel(udc, UDCISR0, UDCISR_INT(i, UDCISR_INT_MASK));
2221 ep = &udc->pxa_ep[i]; 2265
2222 ep->stats.irqs++; 2266 WARN_ON(i >= ARRAY_SIZE(udc->pxa_ep));
2223 handle_ep(ep); 2267 if (i < ARRAY_SIZE(udc->pxa_ep)) {
2268 ep = &udc->pxa_ep[i];
2269 ep->stats.irqs++;
2270 handle_ep(ep);
2271 }
2224 } 2272 }
2225 2273
2226 for (i = 16; udcisr1 != 0 && i < 24; udcisr1 >>= 2, i++) { 2274 for (i = 16; udcisr1 != 0 && i < 24; udcisr1 >>= 2, i++) {
@@ -2228,9 +2276,12 @@ static void irq_handle_data(int irq, struct pxa_udc *udc)
2228 if (!(udcisr1 & UDCISR_INT_MASK)) 2276 if (!(udcisr1 & UDCISR_INT_MASK))
2229 continue; 2277 continue;
2230 2278
2231 ep = &udc->pxa_ep[i]; 2279 WARN_ON(i >= ARRAY_SIZE(udc->pxa_ep));
2232 ep->stats.irqs++; 2280 if (i < ARRAY_SIZE(udc->pxa_ep)) {
2233 handle_ep(ep); 2281 ep = &udc->pxa_ep[i];
2282 ep->stats.irqs++;
2283 handle_ep(ep);
2284 }
2234 } 2285 }
2235 2286
2236} 2287}
@@ -2439,7 +2490,7 @@ static int __init pxa_udc_probe(struct platform_device *pdev)
2439 } 2490 }
2440 2491
2441 retval = -ENOMEM; 2492 retval = -ENOMEM;
2442 udc->regs = ioremap(regs->start, regs->end - regs->start + 1); 2493 udc->regs = ioremap(regs->start, resource_size(regs));
2443 if (!udc->regs) { 2494 if (!udc->regs) {
2444 dev_err(&pdev->dev, "Unable to map UDC I/O memory\n"); 2495 dev_err(&pdev->dev, "Unable to map UDC I/O memory\n");
2445 goto err_map; 2496 goto err_map;
diff --git a/drivers/usb/gadget/pxa27x_udc.h b/drivers/usb/gadget/pxa27x_udc.h
index e25225e26586..ff61e4866e8a 100644
--- a/drivers/usb/gadget/pxa27x_udc.h
+++ b/drivers/usb/gadget/pxa27x_udc.h
@@ -318,6 +318,11 @@ struct udc_usb_ep {
318 * @queue: requests queue 318 * @queue: requests queue
319 * @lock: lock to pxa_ep data (queues and stats) 319 * @lock: lock to pxa_ep data (queues and stats)
320 * @enabled: true when endpoint enabled (not stopped by gadget layer) 320 * @enabled: true when endpoint enabled (not stopped by gadget layer)
 321 * @in_handle_ep: flag set while handle_ep() is running on this endpoint
 322 * Prevents deadlocks and infinite recursion of the form:
 323 * irq->handle_ep()->req_done()->req.complete()->pxa_ep_queue()->handle_ep()
 324 * or
 325 * pxa_ep_queue()->handle_ep()->req_done()->req.complete()->pxa_ep_queue()
321 * @idx: endpoint index (1 => epA, 2 => epB, ..., 24 => epX) 326 * @idx: endpoint index (1 => epA, 2 => epB, ..., 24 => epX)
322 * @name: endpoint name (for trace/debug purpose) 327 * @name: endpoint name (for trace/debug purpose)
323 * @dir_in: 1 if IN endpoint, 0 if OUT endpoint 328 * @dir_in: 1 if IN endpoint, 0 if OUT endpoint
@@ -346,6 +351,7 @@ struct pxa_ep {
346 spinlock_t lock; /* Protects this structure */ 351 spinlock_t lock; /* Protects this structure */
347 /* (queues, stats) */ 352 /* (queues, stats) */
348 unsigned enabled:1; 353 unsigned enabled:1;
354 unsigned in_handle_ep:1;
349 355
350 unsigned idx:5; 356 unsigned idx:5;
351 char *name; 357 char *name;
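
Combined with the pxa_ep_queue() change, the guard documented above works as in this condensed sketch of handle_ep(): a one-bit, lock-protected "already running" flag, so a completion callback that requeues can never re-enter the drain loop on the same endpoint:

	spin_lock_irqsave(&ep->lock, flags);
	if (ep->in_handle_ep)
		goto recursion_detected;	/* outer invocation will drain */
	ep->in_handle_ep = 1;

	/* ... transfer data and complete requests; req_done() drops and
	 * retakes ep->lock around each completion callback ... */

	ep->in_handle_ep = 0;
recursion_detected:
	spin_unlock_irqrestore(&ep->lock, flags);
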
diff --git a/drivers/usb/gadget/s3c-hsotg.c b/drivers/usb/gadget/s3c-hsotg.c
index 5fc80a104150..7e5bf593d386 100644
--- a/drivers/usb/gadget/s3c-hsotg.c
+++ b/drivers/usb/gadget/s3c-hsotg.c
@@ -317,7 +317,8 @@ static void s3c_hsotg_init_fifo(struct s3c_hsotg *hsotg)
317 * 317 *
318 * Allocate a new USB request structure appropriate for the specified endpoint 318 * Allocate a new USB request structure appropriate for the specified endpoint
319 */ 319 */
320struct usb_request *s3c_hsotg_ep_alloc_request(struct usb_ep *ep, gfp_t flags) 320static struct usb_request *s3c_hsotg_ep_alloc_request(struct usb_ep *ep,
321 gfp_t flags)
321{ 322{
322 struct s3c_hsotg_req *req; 323 struct s3c_hsotg_req *req;
323 324
@@ -373,7 +374,7 @@ static void s3c_hsotg_unmap_dma(struct s3c_hsotg *hsotg,
373 req->dma = DMA_ADDR_INVALID; 374 req->dma = DMA_ADDR_INVALID;
374 hs_req->mapped = 0; 375 hs_req->mapped = 0;
375 } else { 376 } else {
376 dma_sync_single(hsotg->dev, req->dma, req->length, dir); 377 dma_sync_single_for_cpu(hsotg->dev, req->dma, req->length, dir);
377 } 378 }
378} 379}
379 380
@@ -755,7 +756,7 @@ static int s3c_hsotg_map_dma(struct s3c_hsotg *hsotg,
755 hs_req->mapped = 1; 756 hs_req->mapped = 1;
756 req->dma = dma; 757 req->dma = dma;
757 } else { 758 } else {
758 dma_sync_single(hsotg->dev, req->dma, req->length, dir); 759 dma_sync_single_for_cpu(hsotg->dev, req->dma, req->length, dir);
759 hs_req->mapped = 0; 760 hs_req->mapped = 0;
760 } 761 }
761 762
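
dma_sync_single() left the ownership handover implicit; the _for_cpu/_for_device pair makes it explicit. For a buffer that bounces between CPU and device, the calls come as a pair, sketched here (dev, dma_handle and size stand in for the driver's real values):

/* before the CPU reads a buffer the device has written into: */
dma_sync_single_for_cpu(dev, dma_handle, size, DMA_FROM_DEVICE);
/* ... CPU examines the data ... */
/* before handing the buffer back for the next transfer: */
dma_sync_single_for_device(dev, dma_handle, size, DMA_FROM_DEVICE);
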
@@ -1460,7 +1461,7 @@ static u32 s3c_hsotg_read_frameno(struct s3c_hsotg *hsotg)
1460 * as the actual data should be sent to the memory directly and we turn 1461 * as the actual data should be sent to the memory directly and we turn
1461 * on the completion interrupts to get notifications of transfer completion. 1462 * on the completion interrupts to get notifications of transfer completion.
1462 */ 1463 */
1463void s3c_hsotg_handle_rx(struct s3c_hsotg *hsotg) 1464static void s3c_hsotg_handle_rx(struct s3c_hsotg *hsotg)
1464{ 1465{
1465 u32 grxstsr = readl(hsotg->regs + S3C_GRXSTSP); 1466 u32 grxstsr = readl(hsotg->regs + S3C_GRXSTSP);
1466 u32 epnum, status, size; 1467 u32 epnum, status, size;
@@ -3094,7 +3095,7 @@ static void s3c_hsotg_gate(struct platform_device *pdev, bool on)
3094 local_irq_restore(flags); 3095 local_irq_restore(flags);
3095} 3096}
3096 3097
3097struct s3c_hsotg_plat s3c_hsotg_default_pdata; 3098static struct s3c_hsotg_plat s3c_hsotg_default_pdata;
3098 3099
3099static int __devinit s3c_hsotg_probe(struct platform_device *pdev) 3100static int __devinit s3c_hsotg_probe(struct platform_device *pdev)
3100{ 3101{
diff --git a/drivers/usb/gadget/u_ether.c b/drivers/usb/gadget/u_ether.c
index 2fc02bd95848..84ca195c2d10 100644
--- a/drivers/usb/gadget/u_ether.c
+++ b/drivers/usb/gadget/u_ether.c
@@ -746,6 +746,10 @@ static const struct net_device_ops eth_netdev_ops = {
746 .ndo_validate_addr = eth_validate_addr, 746 .ndo_validate_addr = eth_validate_addr,
747}; 747};
748 748
749static struct device_type gadget_type = {
750 .name = "gadget",
751};
752
749/** 753/**
750 * gether_setup - initialize one ethernet-over-usb link 754 * gether_setup - initialize one ethernet-over-usb link
751 * @g: gadget to associated with these links 755 * @g: gadget to associated with these links
@@ -808,6 +812,7 @@ int __init gether_setup(struct usb_gadget *g, u8 ethaddr[ETH_ALEN])
808 812
809 dev->gadget = g; 813 dev->gadget = g;
810 SET_NETDEV_DEV(net, &g->dev); 814 SET_NETDEV_DEV(net, &g->dev);
815 SET_NETDEV_DEVTYPE(net, &gadget_type);
811 816
812 status = register_netdev(net); 817 status = register_netdev(net);
813 if (status < 0) { 818 if (status < 0) {
diff --git a/drivers/usb/gadget/u_ether.h b/drivers/usb/gadget/u_ether.h
index fd55f450bc0e..3c8c0c9f9d72 100644
--- a/drivers/usb/gadget/u_ether.h
+++ b/drivers/usb/gadget/u_ether.h
@@ -93,13 +93,6 @@ static inline bool can_support_ecm(struct usb_gadget *gadget)
93 if (!gadget_supports_altsettings(gadget)) 93 if (!gadget_supports_altsettings(gadget))
94 return false; 94 return false;
95 95
96 /* SA1100 can do ECM, *without* status endpoint ... but we'll
97 * only use it in non-ECM mode for backwards compatibility
98 * (and since we currently require a status endpoint)
99 */
100 if (gadget_is_sa1100(gadget))
101 return false;
102
103 /* Everything else is *presumably* fine ... but this is a bit 96 /* Everything else is *presumably* fine ... but this is a bit
104 * chancy, so be **CERTAIN** there are no hardware issues with 97 * chancy, so be **CERTAIN** there are no hardware issues with
105 * your controller. Add it above if it can't handle CDC. 98 * your controller. Add it above if it can't handle CDC.
diff --git a/drivers/usb/gadget/zero.c b/drivers/usb/gadget/zero.c
index 2d772401b7ad..fac81ee193dd 100644
--- a/drivers/usb/gadget/zero.c
+++ b/drivers/usb/gadget/zero.c
@@ -297,12 +297,10 @@ static int __init zero_bind(struct usb_composite_dev *cdev)
297 */ 297 */
298 if (loopdefault) { 298 if (loopdefault) {
299 loopback_add(cdev, autoresume != 0); 299 loopback_add(cdev, autoresume != 0);
300 if (!gadget_is_sh(gadget)) 300 sourcesink_add(cdev, autoresume != 0);
301 sourcesink_add(cdev, autoresume != 0);
302 } else { 301 } else {
303 sourcesink_add(cdev, autoresume != 0); 302 sourcesink_add(cdev, autoresume != 0);
304 if (!gadget_is_sh(gadget)) 303 loopback_add(cdev, autoresume != 0);
305 loopback_add(cdev, autoresume != 0);
306 } 304 }
307 305
308 gcnum = usb_gadget_controller_number(gadget); 306 gcnum = usb_gadget_controller_number(gadget);
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index 2678a1624fcc..8d3df0397de3 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -399,3 +399,14 @@ config USB_HWA_HCD
399 399
400	To compile this driver as a module, choose M here: the module 400	To compile this driver as a module, choose M here: the module
401 will be called "hwa-hc". 401 will be called "hwa-hc".
402
403config USB_IMX21_HCD
404 tristate "iMX21 HCD support"
405 depends on USB && ARM && MACH_MX21
406 help
407 This driver enables support for the on-chip USB host in the
408 iMX21 processor.
409
410 To compile this driver as a module, choose M here: the
411 module will be called "imx21-hcd".
412
diff --git a/drivers/usb/host/Makefile b/drivers/usb/host/Makefile
index f58b2494c44a..4e0c67f1f51b 100644
--- a/drivers/usb/host/Makefile
+++ b/drivers/usb/host/Makefile
@@ -32,3 +32,5 @@ obj-$(CONFIG_USB_U132_HCD) += u132-hcd.o
32obj-$(CONFIG_USB_R8A66597_HCD) += r8a66597-hcd.o 32obj-$(CONFIG_USB_R8A66597_HCD) += r8a66597-hcd.o
33obj-$(CONFIG_USB_ISP1760_HCD) += isp1760.o 33obj-$(CONFIG_USB_ISP1760_HCD) += isp1760.o
34obj-$(CONFIG_USB_HWA_HCD) += hwa-hc.o 34obj-$(CONFIG_USB_HWA_HCD) += hwa-hc.o
35obj-$(CONFIG_USB_IMX21_HCD) += imx21-hcd.o
36
diff --git a/drivers/usb/host/ehci-atmel.c b/drivers/usb/host/ehci-atmel.c
index 87c1b7c34c0e..51bd0edf544f 100644
--- a/drivers/usb/host/ehci-atmel.c
+++ b/drivers/usb/host/ehci-atmel.c
@@ -149,7 +149,7 @@ static int __init ehci_atmel_drv_probe(struct platform_device *pdev)
149 goto fail_request_resource; 149 goto fail_request_resource;
150 } 150 }
151 hcd->rsrc_start = res->start; 151 hcd->rsrc_start = res->start;
152 hcd->rsrc_len = res->end - res->start + 1; 152 hcd->rsrc_len = resource_size(res);
153 153
154 if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, 154 if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len,
155 driver->description)) { 155 driver->description)) {
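The resource_size() conversions here (and in the ehci-au1xxx and ehci-orion hunks below) are more than cosmetic: struct resource bounds are inclusive, so the length is end - start + 1, and the helper keeps that off-by-one arithmetic in one place. Its definition in <linux/ioport.h> is simply:

	static inline resource_size_t resource_size(const struct resource *res)
	{
		return res->end - res->start + 1;
	}
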
diff --git a/drivers/usb/host/ehci-au1xxx.c b/drivers/usb/host/ehci-au1xxx.c
index dbfb482a94e3..e3a74e75e822 100644
--- a/drivers/usb/host/ehci-au1xxx.c
+++ b/drivers/usb/host/ehci-au1xxx.c
@@ -121,6 +121,7 @@ static int ehci_hcd_au1xxx_drv_probe(struct platform_device *pdev)
121{ 121{
122 struct usb_hcd *hcd; 122 struct usb_hcd *hcd;
123 struct ehci_hcd *ehci; 123 struct ehci_hcd *ehci;
124 struct resource *res;
124 int ret; 125 int ret;
125 126
126 if (usb_disabled()) 127 if (usb_disabled())
@@ -144,8 +145,9 @@ static int ehci_hcd_au1xxx_drv_probe(struct platform_device *pdev)
144 if (!hcd) 145 if (!hcd)
145 return -ENOMEM; 146 return -ENOMEM;
146 147
147 hcd->rsrc_start = pdev->resource[0].start; 148 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
148 hcd->rsrc_len = pdev->resource[0].end - pdev->resource[0].start + 1; 149 hcd->rsrc_start = res->start;
150 hcd->rsrc_len = resource_size(res);
149 151
150 if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) { 152 if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
151 pr_debug("request_mem_region failed"); 153 pr_debug("request_mem_region failed");
diff --git a/drivers/usb/host/ehci-fsl.c b/drivers/usb/host/ehci-fsl.c
index 991174937db3..0e26aa13f158 100644
--- a/drivers/usb/host/ehci-fsl.c
+++ b/drivers/usb/host/ehci-fsl.c
@@ -1,5 +1,6 @@
1/* 1/*
2 * Copyright (c) 2005 MontaVista Software 2 * Copyright 2005-2009 MontaVista Software, Inc.
3 * Copyright 2008 Freescale Semiconductor, Inc.
3 * 4 *
4 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License as published by the 6 * under the terms of the GNU General Public License as published by the
@@ -17,17 +18,20 @@
17 * 18 *
18 * Ported to 834x by Randy Vinson <rvinson@mvista.com> using code provided 19 * Ported to 834x by Randy Vinson <rvinson@mvista.com> using code provided
19 * by Hunter Wu. 20 * by Hunter Wu.
21 * Power Management support by Dave Liu <daveliu@freescale.com>,
22 * Jerry Huang <Chang-Ming.Huang@freescale.com> and
23 * Anton Vorontsov <avorontsov@ru.mvista.com>.
20 */ 24 */
21 25
26#include <linux/kernel.h>
27#include <linux/types.h>
28#include <linux/delay.h>
29#include <linux/pm.h>
22#include <linux/platform_device.h> 30#include <linux/platform_device.h>
23#include <linux/fsl_devices.h> 31#include <linux/fsl_devices.h>
24 32
25#include "ehci-fsl.h" 33#include "ehci-fsl.h"
26 34
27/* FIXME: Power Management is un-ported so temporarily disable it */
28#undef CONFIG_PM
29
30
31/* configure so an HC device and id are always provided */ 35/* configure so an HC device and id are always provided */
32/* always called with process context; sleeping is OK */ 36/* always called with process context; sleeping is OK */
33 37
@@ -40,8 +44,8 @@
40 * Allocates basic resources for this USB host controller. 44 * Allocates basic resources for this USB host controller.
41 * 45 *
42 */ 46 */
43int usb_hcd_fsl_probe(const struct hc_driver *driver, 47static int usb_hcd_fsl_probe(const struct hc_driver *driver,
44 struct platform_device *pdev) 48 struct platform_device *pdev)
45{ 49{
46 struct fsl_usb2_platform_data *pdata; 50 struct fsl_usb2_platform_data *pdata;
47 struct usb_hcd *hcd; 51 struct usb_hcd *hcd;
@@ -147,7 +151,8 @@ int usb_hcd_fsl_probe(const struct hc_driver *driver,
147 * Reverses the effect of usb_hcd_fsl_probe(). 151 * Reverses the effect of usb_hcd_fsl_probe().
148 * 152 *
149 */ 153 */
150void usb_hcd_fsl_remove(struct usb_hcd *hcd, struct platform_device *pdev) 154static void usb_hcd_fsl_remove(struct usb_hcd *hcd,
155 struct platform_device *pdev)
151{ 156{
152 usb_remove_hcd(hcd); 157 usb_remove_hcd(hcd);
153 iounmap(hcd->regs); 158 iounmap(hcd->regs);
@@ -284,10 +289,81 @@ static int ehci_fsl_setup(struct usb_hcd *hcd)
284 return retval; 289 return retval;
285} 290}
286 291
292struct ehci_fsl {
293 struct ehci_hcd ehci;
294
295#ifdef CONFIG_PM
296 /* Saved USB PHY settings, need to restore after deep sleep. */
297 u32 usb_ctrl;
298#endif
299};
300
301#ifdef CONFIG_PM
302
303static struct ehci_fsl *hcd_to_ehci_fsl(struct usb_hcd *hcd)
304{
305 struct ehci_hcd *ehci = hcd_to_ehci(hcd);
306
307 return container_of(ehci, struct ehci_fsl, ehci);
308}
309
310static int ehci_fsl_drv_suspend(struct device *dev)
311{
312 struct usb_hcd *hcd = dev_get_drvdata(dev);
313 struct ehci_fsl *ehci_fsl = hcd_to_ehci_fsl(hcd);
314 void __iomem *non_ehci = hcd->regs;
315
316 if (!fsl_deep_sleep())
317 return 0;
318
319 ehci_fsl->usb_ctrl = in_be32(non_ehci + FSL_SOC_USB_CTRL);
320 return 0;
321}
322
323static int ehci_fsl_drv_resume(struct device *dev)
324{
325 struct usb_hcd *hcd = dev_get_drvdata(dev);
326 struct ehci_fsl *ehci_fsl = hcd_to_ehci_fsl(hcd);
327 struct ehci_hcd *ehci = hcd_to_ehci(hcd);
328 void __iomem *non_ehci = hcd->regs;
329
330 if (!fsl_deep_sleep())
331 return 0;
332
333 usb_root_hub_lost_power(hcd->self.root_hub);
334
335 /* Restore USB PHY settings and enable the controller. */
336 out_be32(non_ehci + FSL_SOC_USB_CTRL, ehci_fsl->usb_ctrl);
337
338 ehci_reset(ehci);
339 ehci_fsl_reinit(ehci);
340
341 return 0;
342}
343
344static int ehci_fsl_drv_restore(struct device *dev)
345{
346 struct usb_hcd *hcd = dev_get_drvdata(dev);
347
348 usb_root_hub_lost_power(hcd->self.root_hub);
349 return 0;
350}
351
352static struct dev_pm_ops ehci_fsl_pm_ops = {
353 .suspend = ehci_fsl_drv_suspend,
354 .resume = ehci_fsl_drv_resume,
355 .restore = ehci_fsl_drv_restore,
356};
357
358#define EHCI_FSL_PM_OPS (&ehci_fsl_pm_ops)
359#else
360#define EHCI_FSL_PM_OPS NULL
361#endif /* CONFIG_PM */
362
287static const struct hc_driver ehci_fsl_hc_driver = { 363static const struct hc_driver ehci_fsl_hc_driver = {
288 .description = hcd_name, 364 .description = hcd_name,
289 .product_desc = "Freescale On-Chip EHCI Host Controller", 365 .product_desc = "Freescale On-Chip EHCI Host Controller",
290 .hcd_priv_size = sizeof(struct ehci_hcd), 366 .hcd_priv_size = sizeof(struct ehci_fsl),
291 367
292 /* 368 /*
293 * generic hardware linkage 369 * generic hardware linkage
@@ -354,6 +430,7 @@ static struct platform_driver ehci_fsl_driver = {
354 .remove = ehci_fsl_drv_remove, 430 .remove = ehci_fsl_drv_remove,
355 .shutdown = usb_hcd_platform_shutdown, 431 .shutdown = usb_hcd_platform_shutdown,
356 .driver = { 432 .driver = {
357 .name = "fsl-ehci", 433 .name = "fsl-ehci",
434 .pm = EHCI_FSL_PM_OPS,
358 }, 435 },
359}; 436};
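Note the hcd_priv_size change that accompanies the new PM code: the USB core allocates hcd_priv_size bytes behind hcd->hcd_priv, and hcd_to_ehci() simply casts that pointer, so struct ehci_hcd must remain the first member of the wrapper for hcd_to_ehci_fsl()'s container_of() to be valid. A minimal sketch of the pattern, with hypothetical names:

	struct my_priv {
		struct ehci_hcd ehci;	/* must come first: hcd->hcd_priv
					 * points at this member */
		u32 saved_reg;		/* extra per-controller state */
	};

	/* set hc_driver.hcd_priv_size = sizeof(struct my_priv), then: */
	static struct my_priv *hcd_to_my_priv(struct usb_hcd *hcd)
	{
		return container_of(hcd_to_ehci(hcd), struct my_priv, ehci);
	}
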
diff --git a/drivers/usb/host/ehci-mxc.c b/drivers/usb/host/ehci-mxc.c
index 35c56f40bdbb..23cd917088b4 100644
--- a/drivers/usb/host/ehci-mxc.c
+++ b/drivers/usb/host/ehci-mxc.c
@@ -162,6 +162,17 @@ static int ehci_mxc_drv_probe(struct platform_device *pdev)
162 goto err_ioremap; 162 goto err_ioremap;
163 } 163 }
164 164
165 /* call platform specific init function */
166 if (pdata->init) {
167 ret = pdata->init(pdev);
168 if (ret) {
169 dev_err(dev, "platform init failed\n");
170 goto err_init;
171 }
172 /* platforms need some time to settle changed IO settings */
173 mdelay(10);
174 }
175
165 /* enable clocks */ 176 /* enable clocks */
166 priv->usbclk = clk_get(dev, "usb"); 177 priv->usbclk = clk_get(dev, "usb");
167 if (IS_ERR(priv->usbclk)) { 178 if (IS_ERR(priv->usbclk)) {
@@ -192,18 +203,6 @@ static int ehci_mxc_drv_probe(struct platform_device *pdev)
192 if (ret < 0) 203 if (ret < 0)
193 goto err_init; 204 goto err_init;
194 205
195 /* call platform specific init function */
196 if (pdata->init) {
197 ret = pdata->init(pdev);
198 if (ret) {
199 dev_err(dev, "platform init failed\n");
200 goto err_init;
201 }
202 }
203
204 /* most platforms need some time to settle changed IO settings */
205 mdelay(10);
206
207 /* Initialize the transceiver */ 206 /* Initialize the transceiver */
208 if (pdata->otg) { 207 if (pdata->otg) {
209 pdata->otg->io_priv = hcd->regs + ULPI_VIEWPORT_OFFSET; 208 pdata->otg->io_priv = hcd->regs + ULPI_VIEWPORT_OFFSET;
diff --git a/drivers/usb/host/ehci-omap.c b/drivers/usb/host/ehci-omap.c
index 74d07f4e8b7d..f0282d6bb7aa 100644
--- a/drivers/usb/host/ehci-omap.c
+++ b/drivers/usb/host/ehci-omap.c
@@ -26,10 +26,9 @@
26 * along with this program; if not, write to the Free Software 26 * along with this program; if not, write to the Free Software
27 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 27 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
28 * 28 *
29 * TODO (last updated Feb 23rd, 2009): 29 * TODO (last updated Feb 12, 2010):
30 * - add kernel-doc 30 * - add kernel-doc
31 * - enable AUTOIDLE 31 * - enable AUTOIDLE
32 * - move DPLL5 programming to clock fw
33 * - add suspend/resume 32 * - add suspend/resume
34 * - move workarounds to board-files 33 * - move workarounds to board-files
35 */ 34 */
@@ -37,6 +36,7 @@
37#include <linux/platform_device.h> 36#include <linux/platform_device.h>
38#include <linux/clk.h> 37#include <linux/clk.h>
39#include <linux/gpio.h> 38#include <linux/gpio.h>
39#include <linux/regulator/consumer.h>
40#include <plat/usb.h> 40#include <plat/usb.h>
41 41
42/* 42/*
@@ -178,6 +178,11 @@ struct ehci_hcd_omap {
178 void __iomem *uhh_base; 178 void __iomem *uhh_base;
179 void __iomem *tll_base; 179 void __iomem *tll_base;
180 void __iomem *ehci_base; 180 void __iomem *ehci_base;
181
182 /* Regulators for USB PHYs.
 183 * Each PHY can have a separate regulator.
184 */
185 struct regulator *regulator[OMAP3_HS_USB_PORTS];
181}; 186};
182 187
183/*-------------------------------------------------------------------------*/ 188/*-------------------------------------------------------------------------*/
@@ -546,6 +551,8 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev)
546 551
547 int irq = platform_get_irq(pdev, 0); 552 int irq = platform_get_irq(pdev, 0);
548 int ret = -ENODEV; 553 int ret = -ENODEV;
554 int i;
555 char supply[7];
549 556
550 if (!pdata) { 557 if (!pdata) {
551 dev_dbg(&pdev->dev, "missing platform_data\n"); 558 dev_dbg(&pdev->dev, "missing platform_data\n");
@@ -613,6 +620,21 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev)
613 goto err_tll_ioremap; 620 goto err_tll_ioremap;
614 } 621 }
615 622
623 /* get ehci regulator and enable */
624 for (i = 0 ; i < OMAP3_HS_USB_PORTS ; i++) {
625 if (omap->port_mode[i] != EHCI_HCD_OMAP_MODE_PHY) {
626 omap->regulator[i] = NULL;
627 continue;
628 }
629 snprintf(supply, sizeof(supply), "hsusb%d", i);
630 omap->regulator[i] = regulator_get(omap->dev, supply);
631 if (IS_ERR(omap->regulator[i]))
632 dev_dbg(&pdev->dev,
633 "failed to get ehci port%d regulator\n", i);
634 else
635 regulator_enable(omap->regulator[i]);
636 }
637
616 ret = omap_start_ehc(omap, hcd); 638 ret = omap_start_ehc(omap, hcd);
617 if (ret) { 639 if (ret) {
618 dev_dbg(&pdev->dev, "failed to start ehci\n"); 640 dev_dbg(&pdev->dev, "failed to start ehci\n");
@@ -622,13 +644,12 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev)
622 omap->ehci->regs = hcd->regs 644 omap->ehci->regs = hcd->regs
623 + HC_LENGTH(readl(&omap->ehci->caps->hc_capbase)); 645 + HC_LENGTH(readl(&omap->ehci->caps->hc_capbase));
624 646
647 dbg_hcs_params(omap->ehci, "reset");
648 dbg_hcc_params(omap->ehci, "reset");
649
625 /* cache this readonly data; minimize chip reads */ 650 /* cache this readonly data; minimize chip reads */
626 omap->ehci->hcs_params = readl(&omap->ehci->caps->hcs_params); 651 omap->ehci->hcs_params = readl(&omap->ehci->caps->hcs_params);
627 652
628 /* SET 1 micro-frame Interrupt interval */
629 writel(readl(&omap->ehci->regs->command) | (1 << 16),
630 &omap->ehci->regs->command);
631
632 ret = usb_add_hcd(hcd, irq, IRQF_DISABLED | IRQF_SHARED); 653 ret = usb_add_hcd(hcd, irq, IRQF_DISABLED | IRQF_SHARED);
633 if (ret) { 654 if (ret) {
634 dev_dbg(&pdev->dev, "failed to add hcd with err %d\n", ret); 655 dev_dbg(&pdev->dev, "failed to add hcd with err %d\n", ret);
@@ -641,6 +662,12 @@ err_add_hcd:
641 omap_stop_ehc(omap, hcd); 662 omap_stop_ehc(omap, hcd);
642 663
643err_start: 664err_start:
665 for (i = 0 ; i < OMAP3_HS_USB_PORTS ; i++) {
666 if (omap->regulator[i]) {
667 regulator_disable(omap->regulator[i]);
668 regulator_put(omap->regulator[i]);
669 }
670 }
644 iounmap(omap->tll_base); 671 iounmap(omap->tll_base);
645 672
646err_tll_ioremap: 673err_tll_ioremap:
@@ -674,13 +701,21 @@ static int ehci_hcd_omap_remove(struct platform_device *pdev)
674{ 701{
675 struct ehci_hcd_omap *omap = platform_get_drvdata(pdev); 702 struct ehci_hcd_omap *omap = platform_get_drvdata(pdev);
676 struct usb_hcd *hcd = ehci_to_hcd(omap->ehci); 703 struct usb_hcd *hcd = ehci_to_hcd(omap->ehci);
704 int i;
677 705
678 usb_remove_hcd(hcd); 706 usb_remove_hcd(hcd);
679 omap_stop_ehc(omap, hcd); 707 omap_stop_ehc(omap, hcd);
680 iounmap(hcd->regs); 708 iounmap(hcd->regs);
709 for (i = 0 ; i < OMAP3_HS_USB_PORTS ; i++) {
710 if (omap->regulator[i]) {
711 regulator_disable(omap->regulator[i]);
712 regulator_put(omap->regulator[i]);
713 }
714 }
681 iounmap(omap->tll_base); 715 iounmap(omap->tll_base);
682 iounmap(omap->uhh_base); 716 iounmap(omap->uhh_base);
683 usb_put_hcd(hcd); 717 usb_put_hcd(hcd);
718 kfree(omap);
684 719
685 return 0; 720 return 0;
686} 721}
diff --git a/drivers/usb/host/ehci-orion.c b/drivers/usb/host/ehci-orion.c
index 1d283e1b2b8d..0f87dc72820a 100644
--- a/drivers/usb/host/ehci-orion.c
+++ b/drivers/usb/host/ehci-orion.c
@@ -222,14 +222,14 @@ static int __devinit ehci_orion_drv_probe(struct platform_device *pdev)
222 goto err1; 222 goto err1;
223 } 223 }
224 224
225 if (!request_mem_region(res->start, res->end - res->start + 1, 225 if (!request_mem_region(res->start, resource_size(res),
226 ehci_orion_hc_driver.description)) { 226 ehci_orion_hc_driver.description)) {
227 dev_dbg(&pdev->dev, "controller already in use\n"); 227 dev_dbg(&pdev->dev, "controller already in use\n");
228 err = -EBUSY; 228 err = -EBUSY;
229 goto err1; 229 goto err1;
230 } 230 }
231 231
232 regs = ioremap(res->start, res->end - res->start + 1); 232 regs = ioremap(res->start, resource_size(res));
233 if (regs == NULL) { 233 if (regs == NULL) {
234 dev_dbg(&pdev->dev, "error mapping memory\n"); 234 dev_dbg(&pdev->dev, "error mapping memory\n");
235 err = -EFAULT; 235 err = -EFAULT;
@@ -244,7 +244,7 @@ static int __devinit ehci_orion_drv_probe(struct platform_device *pdev)
244 } 244 }
245 245
246 hcd->rsrc_start = res->start; 246 hcd->rsrc_start = res->start;
247 hcd->rsrc_len = res->end - res->start + 1; 247 hcd->rsrc_len = resource_size(res);
248 hcd->regs = regs; 248 hcd->regs = regs;
249 249
250 ehci = hcd_to_ehci(hcd); 250 ehci = hcd_to_ehci(hcd);
@@ -287,7 +287,7 @@ err4:
287err3: 287err3:
288 iounmap(regs); 288 iounmap(regs);
289err2: 289err2:
290 release_mem_region(res->start, res->end - res->start + 1); 290 release_mem_region(res->start, resource_size(res));
291err1: 291err1:
292 dev_err(&pdev->dev, "init %s fail, %d\n", 292 dev_err(&pdev->dev, "init %s fail, %d\n",
293 dev_name(&pdev->dev), err); 293 dev_name(&pdev->dev), err);
diff --git a/drivers/usb/host/ehci-ppc-of.c b/drivers/usb/host/ehci-ppc-of.c
index 36f96da129f5..8df33b8a634c 100644
--- a/drivers/usb/host/ehci-ppc-of.c
+++ b/drivers/usb/host/ehci-ppc-of.c
@@ -134,21 +134,21 @@ ehci_hcd_ppc_of_probe(struct of_device *op, const struct of_device_id *match)
134 hcd->rsrc_len = res.end - res.start + 1; 134 hcd->rsrc_len = res.end - res.start + 1;
135 135
136 if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) { 136 if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
137 printk(KERN_ERR __FILE__ ": request_mem_region failed\n"); 137 printk(KERN_ERR "%s: request_mem_region failed\n", __FILE__);
138 rv = -EBUSY; 138 rv = -EBUSY;
139 goto err_rmr; 139 goto err_rmr;
140 } 140 }
141 141
142 irq = irq_of_parse_and_map(dn, 0); 142 irq = irq_of_parse_and_map(dn, 0);
143 if (irq == NO_IRQ) { 143 if (irq == NO_IRQ) {
144 printk(KERN_ERR __FILE__ ": irq_of_parse_and_map failed\n"); 144 printk(KERN_ERR "%s: irq_of_parse_and_map failed\n", __FILE__);
145 rv = -EBUSY; 145 rv = -EBUSY;
146 goto err_irq; 146 goto err_irq;
147 } 147 }
148 148
149 hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len); 149 hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
150 if (!hcd->regs) { 150 if (!hcd->regs) {
151 printk(KERN_ERR __FILE__ ": ioremap failed\n"); 151 printk(KERN_ERR "%s: ioremap failed\n", __FILE__);
152 rv = -ENOMEM; 152 rv = -ENOMEM;
153 goto err_ioremap; 153 goto err_ioremap;
154 } 154 }
@@ -161,9 +161,9 @@ ehci_hcd_ppc_of_probe(struct of_device *op, const struct of_device_id *match)
161 ehci->ohci_hcctrl_reg = ioremap(res.start + 161 ehci->ohci_hcctrl_reg = ioremap(res.start +
162 OHCI_HCCTRL_OFFSET, OHCI_HCCTRL_LEN); 162 OHCI_HCCTRL_OFFSET, OHCI_HCCTRL_LEN);
163 else 163 else
164 pr_debug(__FILE__ ": no ohci offset in fdt\n"); 164 pr_debug("%s: no ohci offset in fdt\n", __FILE__);
165 if (!ehci->ohci_hcctrl_reg) { 165 if (!ehci->ohci_hcctrl_reg) {
166 pr_debug(__FILE__ ": ioremap for ohci hcctrl failed\n"); 166 pr_debug("%s: ioremap for ohci hcctrl failed\n", __FILE__);
167 } else { 167 } else {
168 ehci->has_amcc_usb23 = 1; 168 ehci->has_amcc_usb23 = 1;
169 } 169 }
@@ -241,7 +241,7 @@ static int ehci_hcd_ppc_of_remove(struct of_device *op)
241 else 241 else
242 release_mem_region(res.start, 0x4); 242 release_mem_region(res.start, 0x4);
243 else 243 else
244 pr_debug(__FILE__ ": no ohci offset in fdt\n"); 244 pr_debug("%s: no ohci offset in fdt\n", __FILE__);
245 of_node_put(np); 245 of_node_put(np);
246 } 246 }
247 247
@@ -264,7 +264,7 @@ static int ehci_hcd_ppc_of_shutdown(struct of_device *op)
264} 264}
265 265
266 266
267static struct of_device_id ehci_hcd_ppc_of_match[] = { 267static const struct of_device_id ehci_hcd_ppc_of_match[] = {
268 { 268 {
269 .compatible = "usb-ehci", 269 .compatible = "usb-ehci",
270 }, 270 },
diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c
index 1e391e624c8a..39340ae00ac4 100644
--- a/drivers/usb/host/ehci-sched.c
+++ b/drivers/usb/host/ehci-sched.c
@@ -510,6 +510,8 @@ static int disable_periodic (struct ehci_hcd *ehci)
510 ehci_writel(ehci, cmd, &ehci->regs->command); 510 ehci_writel(ehci, cmd, &ehci->regs->command);
511 /* posted write ... */ 511 /* posted write ... */
512 512
513 free_cached_itd_list(ehci);
514
513 ehci->next_uframe = -1; 515 ehci->next_uframe = -1;
514 return 0; 516 return 0;
515} 517}
@@ -2322,9 +2324,13 @@ restart:
2322 * No need to check for activity unless the 2324 * No need to check for activity unless the
2323 * frame is current. 2325 * frame is current.
2324 */ 2326 */
2325 if (frame == clock_frame && live && 2327 if (((frame == clock_frame) ||
2326 (q.sitd->hw_results & 2328 (((frame + 1) % ehci->periodic_size)
2327 SITD_ACTIVE(ehci))) { 2329 == clock_frame))
2330 && live
2331 && (q.sitd->hw_results &
2332 SITD_ACTIVE(ehci))) {
2333
2328 incomplete = true; 2334 incomplete = true;
2329 q_p = &q.sitd->sitd_next; 2335 q_p = &q.sitd->sitd_next;
2330 hw_p = &q.sitd->hw_next; 2336 hw_p = &q.sitd->hw_next;
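The widened test means a siTD is still treated as potentially live when its frame is either the current frame or the one immediately before it, modulo the periodic schedule size, so a split transaction begun in the just-elapsed frame is not reclaimed while the hardware may still own it. A hedged restatement of the condition:

	/* nonzero if an active siTD in 'frame' must be left alone */
	static int sitd_frame_live(unsigned frame, unsigned clock_frame,
				   unsigned periodic_size)
	{
		return frame == clock_frame ||
		       ((frame + 1) % periodic_size) == clock_frame;
	}

	/* e.g. periodic_size == 1024, clock_frame == 0: both frame 0
	 * and frame 1023 ((1023 + 1) % 1024 == 0) count as live */
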
diff --git a/drivers/usb/host/ehci-xilinx-of.c b/drivers/usb/host/ehci-xilinx-of.c
index a5861531ad3e..f603bb2c0a8e 100644
--- a/drivers/usb/host/ehci-xilinx-of.c
+++ b/drivers/usb/host/ehci-xilinx-of.c
@@ -177,21 +177,21 @@ ehci_hcd_xilinx_of_probe(struct of_device *op, const struct of_device_id *match)
177 hcd->rsrc_len = res.end - res.start + 1; 177 hcd->rsrc_len = res.end - res.start + 1;
178 178
179 if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) { 179 if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
180 printk(KERN_ERR __FILE__ ": request_mem_region failed\n"); 180 printk(KERN_ERR "%s: request_mem_region failed\n", __FILE__);
181 rv = -EBUSY; 181 rv = -EBUSY;
182 goto err_rmr; 182 goto err_rmr;
183 } 183 }
184 184
185 irq = irq_of_parse_and_map(dn, 0); 185 irq = irq_of_parse_and_map(dn, 0);
186 if (irq == NO_IRQ) { 186 if (irq == NO_IRQ) {
187 printk(KERN_ERR __FILE__ ": irq_of_parse_and_map failed\n"); 187 printk(KERN_ERR "%s: irq_of_parse_and_map failed\n", __FILE__);
188 rv = -EBUSY; 188 rv = -EBUSY;
189 goto err_irq; 189 goto err_irq;
190 } 190 }
191 191
192 hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len); 192 hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
193 if (!hcd->regs) { 193 if (!hcd->regs) {
194 printk(KERN_ERR __FILE__ ": ioremap failed\n"); 194 printk(KERN_ERR "%s: ioremap failed\n", __FILE__);
195 rv = -ENOMEM; 195 rv = -ENOMEM;
196 goto err_ioremap; 196 goto err_ioremap;
197 } 197 }
@@ -281,7 +281,7 @@ static int ehci_hcd_xilinx_of_shutdown(struct of_device *op)
281} 281}
282 282
283 283
284static struct of_device_id ehci_hcd_xilinx_of_match[] = { 284static const struct of_device_id ehci_hcd_xilinx_of_match[] = {
285 {.compatible = "xlnx,xps-usb-host-1.00.a",}, 285 {.compatible = "xlnx,xps-usb-host-1.00.a",},
286 {}, 286 {},
287}; 287};
diff --git a/drivers/usb/host/fhci-hcd.c b/drivers/usb/host/fhci-hcd.c
index 78e7c3cfcb72..5dcfb3de9945 100644
--- a/drivers/usb/host/fhci-hcd.c
+++ b/drivers/usb/host/fhci-hcd.c
@@ -433,7 +433,7 @@ static int fhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
433 return -ENOMEM; 433 return -ENOMEM;
434 434
435 /* allocate the private part of the URB */ 435 /* allocate the private part of the URB */
436 urb_priv->tds = kzalloc(size * sizeof(struct td), mem_flags); 436 urb_priv->tds = kcalloc(size, sizeof(*urb_priv->tds), mem_flags);
437 if (!urb_priv->tds) { 437 if (!urb_priv->tds) {
438 kfree(urb_priv); 438 kfree(urb_priv);
439 return -ENOMEM; 439 return -ENOMEM;
@@ -805,7 +805,7 @@ static int __devexit of_fhci_remove(struct of_device *ofdev)
805 return fhci_remove(&ofdev->dev); 805 return fhci_remove(&ofdev->dev);
806} 806}
807 807
808static struct of_device_id of_fhci_match[] = { 808static const struct of_device_id of_fhci_match[] = {
809 { .compatible = "fsl,mpc8323-qe-usb", }, 809 { .compatible = "fsl,mpc8323-qe-usb", },
810 {}, 810 {},
811}; 811};
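The kcalloc() conversion above is a hardening fix as well as a style one: kcalloc(n, size, flags) returns NULL when n * size would overflow, whereas the open-coded kzalloc(size * sizeof(...)) multiplication wraps silently. Roughly (a sketch of the overflow guard, not kcalloc()'s exact body):

	static void *kcalloc_sketch(size_t n, size_t size, gfp_t flags)
	{
		if (size != 0 && n > ULONG_MAX / size)
			return NULL;	/* n * size would overflow */
		return kzalloc(n * size, flags);
	}
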
diff --git a/drivers/usb/host/imx21-dbg.c b/drivers/usb/host/imx21-dbg.c
new file mode 100644
index 000000000000..512f647448ca
--- /dev/null
+++ b/drivers/usb/host/imx21-dbg.c
@@ -0,0 +1,527 @@
1/*
2 * Copyright (c) 2009 by Martin Fuzzey
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License as published by the
6 * Free Software Foundation; either version 2 of the License, or (at your
7 * option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
11 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software Foundation,
16 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
17 */
18
19/* this file is part of imx21-hcd.c */
20
21#ifndef DEBUG
22
23static inline void create_debug_files(struct imx21 *imx21) { }
24static inline void remove_debug_files(struct imx21 *imx21) { }
25static inline void debug_urb_submitted(struct imx21 *imx21, struct urb *urb) {}
26static inline void debug_urb_completed(struct imx21 *imx21, struct urb *urb,
27 int status) {}
28static inline void debug_urb_unlinked(struct imx21 *imx21, struct urb *urb) {}
29static inline void debug_urb_queued_for_etd(struct imx21 *imx21,
30 struct urb *urb) {}
31static inline void debug_urb_queued_for_dmem(struct imx21 *imx21,
32 struct urb *urb) {}
33static inline void debug_etd_allocated(struct imx21 *imx21) {}
34static inline void debug_etd_freed(struct imx21 *imx21) {}
35static inline void debug_dmem_allocated(struct imx21 *imx21, int size) {}
36static inline void debug_dmem_freed(struct imx21 *imx21, int size) {}
37static inline void debug_isoc_submitted(struct imx21 *imx21,
38 int frame, struct td *td) {}
39static inline void debug_isoc_completed(struct imx21 *imx21,
40 int frame, struct td *td, int cc, int len) {}
41
42#else
43
44#include <linux/debugfs.h>
45#include <linux/seq_file.h>
46
47static const char *dir_labels[] = {
48 "TD 0",
49 "OUT",
50 "IN",
51 "TD 1"
52};
53
54static const char *speed_labels[] = {
55 "Full",
56 "Low"
57};
58
59static const char *format_labels[] = {
60 "Control",
61 "ISO",
62 "Bulk",
63 "Interrupt"
64};
65
66static inline struct debug_stats *stats_for_urb(struct imx21 *imx21,
67 struct urb *urb)
68{
69 return usb_pipeisoc(urb->pipe) ?
70 &imx21->isoc_stats : &imx21->nonisoc_stats;
71}
72
73static void debug_urb_submitted(struct imx21 *imx21, struct urb *urb)
74{
75 stats_for_urb(imx21, urb)->submitted++;
76}
77
78static void debug_urb_completed(struct imx21 *imx21, struct urb *urb, int st)
79{
80 if (st)
81 stats_for_urb(imx21, urb)->completed_failed++;
82 else
83 stats_for_urb(imx21, urb)->completed_ok++;
84}
85
86static void debug_urb_unlinked(struct imx21 *imx21, struct urb *urb)
87{
88 stats_for_urb(imx21, urb)->unlinked++;
89}
90
91static void debug_urb_queued_for_etd(struct imx21 *imx21, struct urb *urb)
92{
93 stats_for_urb(imx21, urb)->queue_etd++;
94}
95
96static void debug_urb_queued_for_dmem(struct imx21 *imx21, struct urb *urb)
97{
98 stats_for_urb(imx21, urb)->queue_dmem++;
99}
100
101static inline void debug_etd_allocated(struct imx21 *imx21)
102{
103 imx21->etd_usage.maximum = max(
104 ++(imx21->etd_usage.value),
105 imx21->etd_usage.maximum);
106}
107
108static inline void debug_etd_freed(struct imx21 *imx21)
109{
110 imx21->etd_usage.value--;
111}
112
113static inline void debug_dmem_allocated(struct imx21 *imx21, int size)
114{
115 imx21->dmem_usage.value += size;
116 imx21->dmem_usage.maximum = max(
117 imx21->dmem_usage.value,
118 imx21->dmem_usage.maximum);
119}
120
121static inline void debug_dmem_freed(struct imx21 *imx21, int size)
122{
123 imx21->dmem_usage.value -= size;
124}
125
126
127static void debug_isoc_submitted(struct imx21 *imx21,
128 int frame, struct td *td)
129{
130 struct debug_isoc_trace *trace = &imx21->isoc_trace[
131 imx21->isoc_trace_index++];
132
133 imx21->isoc_trace_index %= ARRAY_SIZE(imx21->isoc_trace);
134 trace->schedule_frame = td->frame;
135 trace->submit_frame = frame;
136 trace->request_len = td->len;
137 trace->td = td;
138}
139
140static inline void debug_isoc_completed(struct imx21 *imx21,
141 int frame, struct td *td, int cc, int len)
142{
143 struct debug_isoc_trace *trace, *trace_failed;
144 int i;
145 int found = 0;
146
147 trace = imx21->isoc_trace;
148 for (i = 0; i < ARRAY_SIZE(imx21->isoc_trace); i++, trace++) {
149 if (trace->td == td) {
150 trace->done_frame = frame;
151 trace->done_len = len;
152 trace->cc = cc;
153 trace->td = NULL;
154 found = 1;
155 break;
156 }
157 }
158
159 if (found && cc) {
160 trace_failed = &imx21->isoc_trace_failed[
161 imx21->isoc_trace_index_failed++];
162
163 imx21->isoc_trace_index_failed %= ARRAY_SIZE(
164 imx21->isoc_trace_failed);
165 *trace_failed = *trace;
166 }
167}
168
169
170static char *format_ep(struct usb_host_endpoint *ep, char *buf, int bufsize)
171{
172 if (ep)
173 snprintf(buf, bufsize, "ep_%02x (type:%02X kaddr:%p)",
174 ep->desc.bEndpointAddress,
175 usb_endpoint_type(&ep->desc),
176 ep);
177 else
178 snprintf(buf, bufsize, "none");
179 return buf;
180}
181
182static char *format_etd_dword0(u32 value, char *buf, int bufsize)
183{
184 snprintf(buf, bufsize,
185 "addr=%d ep=%d dir=%s speed=%s format=%s halted=%d",
186 value & 0x7F,
187 (value >> DW0_ENDPNT) & 0x0F,
188 dir_labels[(value >> DW0_DIRECT) & 0x03],
189 speed_labels[(value >> DW0_SPEED) & 0x01],
190 format_labels[(value >> DW0_FORMAT) & 0x03],
191 (value >> DW0_HALTED) & 0x01);
192 return buf;
193}
194
195static int debug_status_show(struct seq_file *s, void *v)
196{
197 struct imx21 *imx21 = s->private;
198 int etds_allocated = 0;
199 int etds_sw_busy = 0;
200 int etds_hw_busy = 0;
201 int dmem_blocks = 0;
202 int queued_for_etd = 0;
203 int queued_for_dmem = 0;
204 unsigned int dmem_bytes = 0;
205 int i;
206 struct etd_priv *etd;
207 u32 etd_enable_mask;
208 unsigned long flags;
209 struct imx21_dmem_area *dmem;
210 struct ep_priv *ep_priv;
211
212 spin_lock_irqsave(&imx21->lock, flags);
213
214 etd_enable_mask = readl(imx21->regs + USBH_ETDENSET);
215 for (i = 0, etd = imx21->etd; i < USB_NUM_ETD; i++, etd++) {
216 if (etd->alloc)
217 etds_allocated++;
218 if (etd->urb)
219 etds_sw_busy++;
220 if (etd_enable_mask & (1<<i))
221 etds_hw_busy++;
222 }
223
224 list_for_each_entry(dmem, &imx21->dmem_list, list) {
225 dmem_bytes += dmem->size;
226 dmem_blocks++;
227 }
228
229 list_for_each_entry(ep_priv, &imx21->queue_for_etd, queue)
230 queued_for_etd++;
231
232 list_for_each_entry(etd, &imx21->queue_for_dmem, queue)
233 queued_for_dmem++;
234
235 spin_unlock_irqrestore(&imx21->lock, flags);
236
237 seq_printf(s,
238 "Frame: %d\n"
239 "ETDs allocated: %d/%d (max=%d)\n"
240 "ETDs in use sw: %d\n"
241 "ETDs in use hw: %d\n"
242 "DMEM alocated: %d/%d (max=%d)\n"
243 "DMEM blocks: %d\n"
244 "Queued waiting for ETD: %d\n"
245 "Queued waiting for DMEM: %d\n",
246 readl(imx21->regs + USBH_FRMNUB) & 0xFFFF,
247 etds_allocated, USB_NUM_ETD, imx21->etd_usage.maximum,
248 etds_sw_busy,
249 etds_hw_busy,
250 dmem_bytes, DMEM_SIZE, imx21->dmem_usage.maximum,
251 dmem_blocks,
252 queued_for_etd,
253 queued_for_dmem);
254
255 return 0;
256}
257
258static int debug_dmem_show(struct seq_file *s, void *v)
259{
260 struct imx21 *imx21 = s->private;
261 struct imx21_dmem_area *dmem;
262 unsigned long flags;
263 char ep_text[40];
264
265 spin_lock_irqsave(&imx21->lock, flags);
266
267 list_for_each_entry(dmem, &imx21->dmem_list, list)
268 seq_printf(s,
269 "%04X: size=0x%X "
270 "ep=%s\n",
271 dmem->offset, dmem->size,
272 format_ep(dmem->ep, ep_text, sizeof(ep_text)));
273
274 spin_unlock_irqrestore(&imx21->lock, flags);
275
276 return 0;
277}
278
279static int debug_etd_show(struct seq_file *s, void *v)
280{
281 struct imx21 *imx21 = s->private;
282 struct etd_priv *etd;
283 char buf[60];
284 u32 dword;
285 int i, j;
286 unsigned long flags;
287
288 spin_lock_irqsave(&imx21->lock, flags);
289
290 for (i = 0, etd = imx21->etd; i < USB_NUM_ETD; i++, etd++) {
291 int state = -1;
292 struct urb_priv *urb_priv;
293 if (etd->urb) {
294 urb_priv = etd->urb->hcpriv;
295 if (urb_priv)
296 state = urb_priv->state;
297 }
298
299 seq_printf(s,
300 "etd_num: %d\n"
301 "ep: %s\n"
302 "alloc: %d\n"
303 "len: %d\n"
304 "busy sw: %d\n"
305 "busy hw: %d\n"
306 "urb state: %d\n"
307 "current urb: %p\n",
308
309 i,
310 format_ep(etd->ep, buf, sizeof(buf)),
311 etd->alloc,
312 etd->len,
313 etd->urb != NULL,
314 (readl(imx21->regs + USBH_ETDENSET) & (1 << i)) > 0,
315 state,
316 etd->urb);
317
318 for (j = 0; j < 4; j++) {
319 dword = etd_readl(imx21, i, j);
320 switch (j) {
321 case 0:
322 format_etd_dword0(dword, buf, sizeof(buf));
323 break;
324 case 2:
325 snprintf(buf, sizeof(buf),
326 "cc=0X%02X", dword >> DW2_COMPCODE);
327 break;
328 default:
329 *buf = 0;
330 break;
331 }
332 seq_printf(s,
333 "dword %d: submitted=%08X cur=%08X [%s]\n",
334 j,
335 etd->submitted_dwords[j],
336 dword,
337 buf);
338 }
339 seq_printf(s, "\n");
340 }
341
342 spin_unlock_irqrestore(&imx21->lock, flags);
343
344 return 0;
345}
346
347static void debug_statistics_show_one(struct seq_file *s,
348 const char *name, struct debug_stats *stats)
349{
350 seq_printf(s, "%s:\n"
351 "submitted URBs: %lu\n"
352 "completed OK: %lu\n"
353 "completed failed: %lu\n"
354 "unlinked: %lu\n"
355 "queued for ETD: %lu\n"
356 "queued for DMEM: %lu\n\n",
357 name,
358 stats->submitted,
359 stats->completed_ok,
360 stats->completed_failed,
361 stats->unlinked,
362 stats->queue_etd,
363 stats->queue_dmem);
364}
365
366static int debug_statistics_show(struct seq_file *s, void *v)
367{
368 struct imx21 *imx21 = s->private;
369 unsigned long flags;
370
371 spin_lock_irqsave(&imx21->lock, flags);
372
373 debug_statistics_show_one(s, "nonisoc", &imx21->nonisoc_stats);
374 debug_statistics_show_one(s, "isoc", &imx21->isoc_stats);
375 seq_printf(s, "unblock kludge triggers: %lu\n", imx21->debug_unblocks);
376 spin_unlock_irqrestore(&imx21->lock, flags);
377
378 return 0;
379}
380
381static void debug_isoc_show_one(struct seq_file *s,
382 const char *name, int index, struct debug_isoc_trace *trace)
383{
384 seq_printf(s, "%s %d:\n"
385 "cc=0X%02X\n"
386 "scheduled frame %d (%d)\n"
387 "submittted frame %d (%d)\n"
388 "completed frame %d (%d)\n"
389 "requested length=%d\n"
390 "completed length=%d\n\n",
391 name, index,
392 trace->cc,
393 trace->schedule_frame, trace->schedule_frame & 0xFFFF,
394 trace->submit_frame, trace->submit_frame & 0xFFFF,
395 trace->done_frame, trace->done_frame & 0xFFFF,
396 trace->request_len,
397 trace->done_len);
398}
399
400static int debug_isoc_show(struct seq_file *s, void *v)
401{
402 struct imx21 *imx21 = s->private;
403 struct debug_isoc_trace *trace;
404 unsigned long flags;
405 int i;
406
407 spin_lock_irqsave(&imx21->lock, flags);
408
409 trace = imx21->isoc_trace_failed;
410 for (i = 0; i < ARRAY_SIZE(imx21->isoc_trace_failed); i++, trace++)
411 debug_isoc_show_one(s, "isoc failed", i, trace);
412
413 trace = imx21->isoc_trace;
414 for (i = 0; i < ARRAY_SIZE(imx21->isoc_trace); i++, trace++)
415 debug_isoc_show_one(s, "isoc", i, trace);
416
417 spin_unlock_irqrestore(&imx21->lock, flags);
418
419 return 0;
420}
421
422static int debug_status_open(struct inode *inode, struct file *file)
423{
424 return single_open(file, debug_status_show, inode->i_private);
425}
426
427static int debug_dmem_open(struct inode *inode, struct file *file)
428{
429 return single_open(file, debug_dmem_show, inode->i_private);
430}
431
432static int debug_etd_open(struct inode *inode, struct file *file)
433{
434 return single_open(file, debug_etd_show, inode->i_private);
435}
436
437static int debug_statistics_open(struct inode *inode, struct file *file)
438{
439 return single_open(file, debug_statistics_show, inode->i_private);
440}
441
442static int debug_isoc_open(struct inode *inode, struct file *file)
443{
444 return single_open(file, debug_isoc_show, inode->i_private);
445}
446
447static const struct file_operations debug_status_fops = {
448 .open = debug_status_open,
449 .read = seq_read,
450 .llseek = seq_lseek,
451 .release = single_release,
452};
453
454static const struct file_operations debug_dmem_fops = {
455 .open = debug_dmem_open,
456 .read = seq_read,
457 .llseek = seq_lseek,
458 .release = single_release,
459};
460
461static const struct file_operations debug_etd_fops = {
462 .open = debug_etd_open,
463 .read = seq_read,
464 .llseek = seq_lseek,
465 .release = single_release,
466};
467
468static const struct file_operations debug_statistics_fops = {
469 .open = debug_statistics_open,
470 .read = seq_read,
471 .llseek = seq_lseek,
472 .release = single_release,
473};
474
475static const struct file_operations debug_isoc_fops = {
476 .open = debug_isoc_open,
477 .read = seq_read,
478 .llseek = seq_lseek,
479 .release = single_release,
480};
481
482static void create_debug_files(struct imx21 *imx21)
483{
484 imx21->debug_root = debugfs_create_dir(dev_name(imx21->dev), NULL);
485 if (!imx21->debug_root)
486 goto failed_create_rootdir;
487
488 if (!debugfs_create_file("status", S_IRUGO,
489 imx21->debug_root, imx21, &debug_status_fops))
490 goto failed_create;
491
492 if (!debugfs_create_file("dmem", S_IRUGO,
493 imx21->debug_root, imx21, &debug_dmem_fops))
494 goto failed_create;
495
496 if (!debugfs_create_file("etd", S_IRUGO,
497 imx21->debug_root, imx21, &debug_etd_fops))
498 goto failed_create;
499
500 if (!debugfs_create_file("statistics", S_IRUGO,
501 imx21->debug_root, imx21, &debug_statistics_fops))
502 goto failed_create;
503
504 if (!debugfs_create_file("isoc", S_IRUGO,
505 imx21->debug_root, imx21, &debug_isoc_fops))
506 goto failed_create;
507
508 return;
509
510failed_create:
511 debugfs_remove_recursive(imx21->debug_root);
512
513failed_create_rootdir:
514 imx21->debug_root = NULL;
515}
516
517
518static void remove_debug_files(struct imx21 *imx21)
519{
520 if (imx21->debug_root) {
521 debugfs_remove_recursive(imx21->debug_root);
522 imx21->debug_root = NULL;
523 }
524}
525
526#endif
527
diff --git a/drivers/usb/host/imx21-hcd.c b/drivers/usb/host/imx21-hcd.c
new file mode 100644
index 000000000000..213e270e1c29
--- /dev/null
+++ b/drivers/usb/host/imx21-hcd.c
@@ -0,0 +1,1789 @@
1/*
2 * USB Host Controller Driver for IMX21
3 *
4 * Copyright (C) 2006 Loping Dog Embedded Systems
5 * Copyright (C) 2009 Martin Fuzzey
6 * Originally written by Jay Monkman <jtm@lopingdog.com>
7 * Ported to 2.6.30, debugged and enhanced by Martin Fuzzey
8 *
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License as published by the
11 * Free Software Foundation; either version 2 of the License, or (at your
12 * option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
16 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 * for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software Foundation,
21 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
22 */
23
24
25 /*
26 * The i.MX21 USB hardware contains
27 * * 32 transfer descriptors (called ETDs)
 28 * * 4KB of Data memory
29 *
 30 * The data memory is shared between the host and function controllers
 31 * (but this driver only supports the host controller)
32 *
33 * So setting up a transfer involves:
 34 * * Allocating an ETD
 35 * * Filling in the ETD with the appropriate information
 36 * * Allocating data memory (and putting the offset in the ETD)
 37 * * Activating the ETD
 38 * * Getting an interrupt when done.
39 *
40 * An ETD is assigned to each active endpoint.
41 *
42 * Low resource (ETD and Data memory) situations are handled differently for
 43 * isochronous and non-isochronous transactions:
 44 *
 45 * Non-ISOC transfers are queued if either ETDs or Data memory is unavailable
46 *
47 * ISOC transfers use 2 ETDs per endpoint to achieve double buffering.
48 * They allocate both ETDs and Data memory during URB submission
49 * (and fail if unavailable).
50 */
51
52#include <linux/clk.h>
53#include <linux/io.h>
54#include <linux/kernel.h>
55#include <linux/list.h>
56#include <linux/platform_device.h>
57#include <linux/usb.h>
58
59#include "../core/hcd.h"
60#include "imx21-hcd.h"
61
62#ifdef DEBUG
63#define DEBUG_LOG_FRAME(imx21, etd, event) \
64 (etd)->event##_frame = readl((imx21)->regs + USBH_FRMNUB)
65#else
66#define DEBUG_LOG_FRAME(imx21, etd, event) do { } while (0)
67#endif
68
69static const char hcd_name[] = "imx21-hcd";
70
71static inline struct imx21 *hcd_to_imx21(struct usb_hcd *hcd)
72{
73 return (struct imx21 *)hcd->hcd_priv;
74}
75
76
77/* =========================================== */
78/* Hardware access helpers */
79/* =========================================== */
80
81static inline void set_register_bits(struct imx21 *imx21, u32 offset, u32 mask)
82{
83 void __iomem *reg = imx21->regs + offset;
84 writel(readl(reg) | mask, reg);
85}
86
87static inline void clear_register_bits(struct imx21 *imx21,
88 u32 offset, u32 mask)
89{
90 void __iomem *reg = imx21->regs + offset;
91 writel(readl(reg) & ~mask, reg);
92}
93
94static inline void clear_toggle_bit(struct imx21 *imx21, u32 offset, u32 mask)
95{
96 void __iomem *reg = imx21->regs + offset;
97
98 if (readl(reg) & mask)
99 writel(mask, reg);
100}
101
102static inline void set_toggle_bit(struct imx21 *imx21, u32 offset, u32 mask)
103{
104 void __iomem *reg = imx21->regs + offset;
105
106 if (!(readl(reg) & mask))
107 writel(mask, reg);
108}
109
110static void etd_writel(struct imx21 *imx21, int etd_num, int dword, u32 value)
111{
112 writel(value, imx21->regs + USB_ETD_DWORD(etd_num, dword));
113}
114
115static u32 etd_readl(struct imx21 *imx21, int etd_num, int dword)
116{
117 return readl(imx21->regs + USB_ETD_DWORD(etd_num, dword));
118}
119
120static inline int wrap_frame(int counter)
121{
122 return counter & 0xFFFF;
123}
124
125static inline int frame_after(int frame, int after)
126{
 127	 /* handle wrapping like jiffies time_after() */
128 return (s16)((s16)after - (s16)frame) < 0;
129}
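	/* Worked example of the wrap-safe test above, mirroring jiffies'
	 * time_after(): frame_after(0x0001, 0xFFFF) evaluates
	 * (s16)0xFFFF - (s16)0x0001 == -1 - 1 == -2 < 0, i.e. true,
	 * so the wrapped frame 0x0001 is correctly treated as coming
	 * after 0xFFFF even though it is numerically smaller. */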
130
131static int imx21_hc_get_frame(struct usb_hcd *hcd)
132{
133 struct imx21 *imx21 = hcd_to_imx21(hcd);
134
135 return wrap_frame(readl(imx21->regs + USBH_FRMNUB));
136}
137
138
139#include "imx21-dbg.c"
140
141/* =========================================== */
142/* ETD management */
143/* =========================================== */
144
145static int alloc_etd(struct imx21 *imx21)
146{
147 int i;
148 struct etd_priv *etd = imx21->etd;
149
150 for (i = 0; i < USB_NUM_ETD; i++, etd++) {
151 if (etd->alloc == 0) {
152 memset(etd, 0, sizeof(imx21->etd[0]));
153 etd->alloc = 1;
154 debug_etd_allocated(imx21);
155 return i;
156 }
157 }
158 return -1;
159}
160
161static void disactivate_etd(struct imx21 *imx21, int num)
162{
163 int etd_mask = (1 << num);
164 struct etd_priv *etd = &imx21->etd[num];
165
166 writel(etd_mask, imx21->regs + USBH_ETDENCLR);
167 clear_register_bits(imx21, USBH_ETDDONEEN, etd_mask);
168 writel(etd_mask, imx21->regs + USB_ETDDMACHANLCLR);
169 clear_toggle_bit(imx21, USBH_ETDDONESTAT, etd_mask);
170
171 etd->active_count = 0;
172
173 DEBUG_LOG_FRAME(imx21, etd, disactivated);
174}
175
176static void reset_etd(struct imx21 *imx21, int num)
177{
178 struct etd_priv *etd = imx21->etd + num;
179 int i;
180
181 disactivate_etd(imx21, num);
182
183 for (i = 0; i < 4; i++)
184 etd_writel(imx21, num, i, 0);
185 etd->urb = NULL;
186 etd->ep = NULL;
 187	 etd->td = NULL;
188}
189
190static void free_etd(struct imx21 *imx21, int num)
191{
192 if (num < 0)
193 return;
194
195 if (num >= USB_NUM_ETD) {
196 dev_err(imx21->dev, "BAD etd=%d!\n", num);
197 return;
198 }
199 if (imx21->etd[num].alloc == 0) {
200 dev_err(imx21->dev, "ETD %d already free!\n", num);
201 return;
202 }
203
204 debug_etd_freed(imx21);
205 reset_etd(imx21, num);
206 memset(&imx21->etd[num], 0, sizeof(imx21->etd[0]));
207}
208
209
210static void setup_etd_dword0(struct imx21 *imx21,
211 int etd_num, struct urb *urb, u8 dir, u16 maxpacket)
212{
213 etd_writel(imx21, etd_num, 0,
214 ((u32) usb_pipedevice(urb->pipe)) << DW0_ADDRESS |
215 ((u32) usb_pipeendpoint(urb->pipe) << DW0_ENDPNT) |
216 ((u32) dir << DW0_DIRECT) |
217 ((u32) ((urb->dev->speed == USB_SPEED_LOW) ?
218 1 : 0) << DW0_SPEED) |
219 ((u32) fmt_urb_to_etd[usb_pipetype(urb->pipe)] << DW0_FORMAT) |
220 ((u32) maxpacket << DW0_MAXPKTSIZ));
221}
222
223static void activate_etd(struct imx21 *imx21,
224 int etd_num, dma_addr_t dma, u8 dir)
225{
226 u32 etd_mask = 1 << etd_num;
227 struct etd_priv *etd = &imx21->etd[etd_num];
228
229 clear_toggle_bit(imx21, USBH_ETDDONESTAT, etd_mask);
230 set_register_bits(imx21, USBH_ETDDONEEN, etd_mask);
231 clear_toggle_bit(imx21, USBH_XFILLSTAT, etd_mask);
232 clear_toggle_bit(imx21, USBH_YFILLSTAT, etd_mask);
233
234 if (dma) {
235 set_register_bits(imx21, USB_ETDDMACHANLCLR, etd_mask);
236 clear_toggle_bit(imx21, USBH_XBUFSTAT, etd_mask);
237 clear_toggle_bit(imx21, USBH_YBUFSTAT, etd_mask);
238 writel(dma, imx21->regs + USB_ETDSMSA(etd_num));
239 set_register_bits(imx21, USB_ETDDMAEN, etd_mask);
240 } else {
241 if (dir != TD_DIR_IN) {
242 /* need to set for ZLP */
243 set_toggle_bit(imx21, USBH_XFILLSTAT, etd_mask);
244 set_toggle_bit(imx21, USBH_YFILLSTAT, etd_mask);
245 }
246 }
247
248 DEBUG_LOG_FRAME(imx21, etd, activated);
249
250#ifdef DEBUG
251 if (!etd->active_count) {
252 int i;
253 etd->activated_frame = readl(imx21->regs + USBH_FRMNUB);
254 etd->disactivated_frame = -1;
255 etd->last_int_frame = -1;
256 etd->last_req_frame = -1;
257
258 for (i = 0; i < 4; i++)
259 etd->submitted_dwords[i] = etd_readl(imx21, etd_num, i);
260 }
261#endif
262
263 etd->active_count = 1;
264 writel(etd_mask, imx21->regs + USBH_ETDENSET);
265}
266
267/* =========================================== */
268/* Data memory management */
269/* =========================================== */
270
271static int alloc_dmem(struct imx21 *imx21, unsigned int size,
272 struct usb_host_endpoint *ep)
273{
274 unsigned int offset = 0;
275 struct imx21_dmem_area *area;
276 struct imx21_dmem_area *tmp;
277
278 size += (~size + 1) & 0x3; /* Round to 4 byte multiple */
279
280 if (size > DMEM_SIZE) {
281 dev_err(imx21->dev, "size=%d > DMEM_SIZE(%d)\n",
282 size, DMEM_SIZE);
283 return -EINVAL;
284 }
285
286 list_for_each_entry(tmp, &imx21->dmem_list, list) {
287 if ((size + offset) < offset)
288 goto fail;
289 if ((size + offset) <= tmp->offset)
290 break;
291 offset = tmp->size + tmp->offset;
292 if ((offset + size) > DMEM_SIZE)
293 goto fail;
294 }
295
296 area = kmalloc(sizeof(struct imx21_dmem_area), GFP_ATOMIC);
297 if (area == NULL)
298 return -ENOMEM;
299
300 area->ep = ep;
301 area->offset = offset;
302 area->size = size;
303 list_add_tail(&area->list, &tmp->list);
304 debug_dmem_allocated(imx21, size);
305 return offset;
306
307fail:
308 return -ENOMEM;
309}
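	/* alloc_dmem() above is a first-fit allocator over a list kept
	 * sorted by offset. The opening "size += (~size + 1) & 0x3"
	 * rounds the request up to the next 4-byte multiple, because
	 * (~size + 1) is -size in two's complement:
	 *   size = 5: (-5) & 3 == 3, so size becomes 8
	 *   size = 8: (-8) & 3 == 0, so size stays 8
	 * i.e. the common ALIGN(size, 4) idiom. */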
310
311/* Memory now available for a queued ETD - activate it */
312static void activate_queued_etd(struct imx21 *imx21,
313 struct etd_priv *etd, u32 dmem_offset)
314{
315 struct urb_priv *urb_priv = etd->urb->hcpriv;
316 int etd_num = etd - &imx21->etd[0];
317 u32 maxpacket = etd_readl(imx21, etd_num, 1) >> DW1_YBUFSRTAD;
318 u8 dir = (etd_readl(imx21, etd_num, 2) >> DW2_DIRPID) & 0x03;
319
320 dev_dbg(imx21->dev, "activating queued ETD %d now DMEM available\n",
321 etd_num);
322 etd_writel(imx21, etd_num, 1,
323 ((dmem_offset + maxpacket) << DW1_YBUFSRTAD) | dmem_offset);
324
325 urb_priv->active = 1;
326 activate_etd(imx21, etd_num, etd->dma_handle, dir);
327}
328
329static void free_dmem(struct imx21 *imx21, int offset)
330{
331 struct imx21_dmem_area *area;
332 struct etd_priv *etd, *tmp;
333 int found = 0;
334
335 list_for_each_entry(area, &imx21->dmem_list, list) {
336 if (area->offset == offset) {
337 debug_dmem_freed(imx21, area->size);
338 list_del(&area->list);
339 kfree(area);
340 found = 1;
341 break;
342 }
343 }
344
345 if (!found) {
346 dev_err(imx21->dev,
347 "Trying to free unallocated DMEM %d\n", offset);
348 return;
349 }
350
351 /* Try again to allocate memory for anything we've queued */
352 list_for_each_entry_safe(etd, tmp, &imx21->queue_for_dmem, queue) {
353 offset = alloc_dmem(imx21, etd->dmem_size, etd->ep);
354 if (offset >= 0) {
355 list_del(&etd->queue);
356 activate_queued_etd(imx21, etd, (u32)offset);
357 }
358 }
359}
360
361static void free_epdmem(struct imx21 *imx21, struct usb_host_endpoint *ep)
362{
363 struct imx21_dmem_area *area, *tmp;
364
365 list_for_each_entry_safe(area, tmp, &imx21->dmem_list, list) {
366 if (area->ep == ep) {
367 dev_err(imx21->dev,
368 "Active DMEM %d for disabled ep=%p\n",
369 area->offset, ep);
370 list_del(&area->list);
371 kfree(area);
372 }
373 }
374}
375
376
377/* =========================================== */
378/* End handling */
379/* =========================================== */
380static void schedule_nonisoc_etd(struct imx21 *imx21, struct urb *urb);
381
 382/* Endpoint now idle - release its ETD(s) or assign them to a queued request */
383static void ep_idle(struct imx21 *imx21, struct ep_priv *ep_priv)
384{
385 int etd_num;
386 int i;
387
388 for (i = 0; i < NUM_ISO_ETDS; i++) {
389 etd_num = ep_priv->etd[i];
390 if (etd_num < 0)
391 continue;
392
393 ep_priv->etd[i] = -1;
394 if (list_empty(&imx21->queue_for_etd)) {
395 free_etd(imx21, etd_num);
396 continue;
397 }
398
399 dev_dbg(imx21->dev,
400 "assigning idle etd %d for queued request\n", etd_num);
401 ep_priv = list_first_entry(&imx21->queue_for_etd,
402 struct ep_priv, queue);
403 list_del(&ep_priv->queue);
404 reset_etd(imx21, etd_num);
405 ep_priv->waiting_etd = 0;
406 ep_priv->etd[i] = etd_num;
407
408 if (list_empty(&ep_priv->ep->urb_list)) {
409 dev_err(imx21->dev, "No urb for queued ep!\n");
410 continue;
411 }
412 schedule_nonisoc_etd(imx21, list_first_entry(
413 &ep_priv->ep->urb_list, struct urb, urb_list));
414 }
415}
416
417static void urb_done(struct usb_hcd *hcd, struct urb *urb, int status)
418__releases(imx21->lock)
419__acquires(imx21->lock)
420{
421 struct imx21 *imx21 = hcd_to_imx21(hcd);
422 struct ep_priv *ep_priv = urb->ep->hcpriv;
423 struct urb_priv *urb_priv = urb->hcpriv;
424
425 debug_urb_completed(imx21, urb, status);
426 dev_vdbg(imx21->dev, "urb %p done %d\n", urb, status);
427
428 kfree(urb_priv->isoc_td);
429 kfree(urb->hcpriv);
430 urb->hcpriv = NULL;
431 usb_hcd_unlink_urb_from_ep(hcd, urb);
432 spin_unlock(&imx21->lock);
433 usb_hcd_giveback_urb(hcd, urb, status);
434 spin_lock(&imx21->lock);
435 if (list_empty(&ep_priv->ep->urb_list))
436 ep_idle(imx21, ep_priv);
437}
438
439/* =========================================== */
440/* ISOC Handling ... */
441/* =========================================== */
442
443static void schedule_isoc_etds(struct usb_hcd *hcd,
444 struct usb_host_endpoint *ep)
445{
446 struct imx21 *imx21 = hcd_to_imx21(hcd);
447 struct ep_priv *ep_priv = ep->hcpriv;
448 struct etd_priv *etd;
449 struct urb_priv *urb_priv;
450 struct td *td;
451 int etd_num;
452 int i;
453 int cur_frame;
454 u8 dir;
455
456 for (i = 0; i < NUM_ISO_ETDS; i++) {
457too_late:
458 if (list_empty(&ep_priv->td_list))
459 break;
460
461 etd_num = ep_priv->etd[i];
462 if (etd_num < 0)
463 break;
464
465 etd = &imx21->etd[etd_num];
466 if (etd->urb)
467 continue;
468
469 td = list_entry(ep_priv->td_list.next, struct td, list);
470 list_del(&td->list);
471 urb_priv = td->urb->hcpriv;
472
473 cur_frame = imx21_hc_get_frame(hcd);
474 if (frame_after(cur_frame, td->frame)) {
475 dev_dbg(imx21->dev, "isoc too late frame %d > %d\n",
476 cur_frame, td->frame);
477 urb_priv->isoc_status = -EXDEV;
478 td->urb->iso_frame_desc[
479 td->isoc_index].actual_length = 0;
480 td->urb->iso_frame_desc[td->isoc_index].status = -EXDEV;
481 if (--urb_priv->isoc_remaining == 0)
482 urb_done(hcd, td->urb, urb_priv->isoc_status);
483 goto too_late;
484 }
485
486 urb_priv->active = 1;
487 etd->td = td;
488 etd->ep = td->ep;
489 etd->urb = td->urb;
490 etd->len = td->len;
491
492 debug_isoc_submitted(imx21, cur_frame, td);
493
494 dir = usb_pipeout(td->urb->pipe) ? TD_DIR_OUT : TD_DIR_IN;
495 setup_etd_dword0(imx21, etd_num, td->urb, dir, etd->dmem_size);
496 etd_writel(imx21, etd_num, 1, etd->dmem_offset);
497 etd_writel(imx21, etd_num, 2,
498 (TD_NOTACCESSED << DW2_COMPCODE) |
499 ((td->frame & 0xFFFF) << DW2_STARTFRM));
500 etd_writel(imx21, etd_num, 3,
501 (TD_NOTACCESSED << DW3_COMPCODE0) |
502 (td->len << DW3_PKTLEN0));
503
504 activate_etd(imx21, etd_num, td->data, dir);
505 }
506}
507
508static void isoc_etd_done(struct usb_hcd *hcd, struct urb *urb, int etd_num)
509{
510 struct imx21 *imx21 = hcd_to_imx21(hcd);
511 int etd_mask = 1 << etd_num;
512 struct urb_priv *urb_priv = urb->hcpriv;
513 struct etd_priv *etd = imx21->etd + etd_num;
514 struct td *td = etd->td;
515 struct usb_host_endpoint *ep = etd->ep;
516 int isoc_index = td->isoc_index;
517 unsigned int pipe = urb->pipe;
518 int dir_in = usb_pipein(pipe);
519 int cc;
520 int bytes_xfrd;
521
522 disactivate_etd(imx21, etd_num);
523
524 cc = (etd_readl(imx21, etd_num, 3) >> DW3_COMPCODE0) & 0xf;
525 bytes_xfrd = etd_readl(imx21, etd_num, 3) & 0x3ff;
526
527 /* Input doesn't always fill the buffer, don't generate an error
528 * when this happens.
529 */
530 if (dir_in && (cc == TD_DATAUNDERRUN))
531 cc = TD_CC_NOERROR;
532
533 if (cc == TD_NOTACCESSED)
534 bytes_xfrd = 0;
535
536 debug_isoc_completed(imx21,
537 imx21_hc_get_frame(hcd), td, cc, bytes_xfrd);
538 if (cc) {
539 urb_priv->isoc_status = -EXDEV;
540 dev_dbg(imx21->dev,
541 "bad iso cc=0x%X frame=%d sched frame=%d "
542 "cnt=%d len=%d urb=%p etd=%d index=%d\n",
543 cc, imx21_hc_get_frame(hcd), td->frame,
544 bytes_xfrd, td->len, urb, etd_num, isoc_index);
545 }
546
547 if (dir_in)
548 clear_toggle_bit(imx21, USBH_XFILLSTAT, etd_mask);
549
550 urb->actual_length += bytes_xfrd;
551 urb->iso_frame_desc[isoc_index].actual_length = bytes_xfrd;
552 urb->iso_frame_desc[isoc_index].status = cc_to_error[cc];
553
554 etd->td = NULL;
555 etd->urb = NULL;
556 etd->ep = NULL;
557
558 if (--urb_priv->isoc_remaining == 0)
559 urb_done(hcd, urb, urb_priv->isoc_status);
560
561 schedule_isoc_etds(hcd, ep);
562}
563
564static struct ep_priv *alloc_isoc_ep(
565 struct imx21 *imx21, struct usb_host_endpoint *ep)
566{
567 struct ep_priv *ep_priv;
568 int i;
569
570 ep_priv = kzalloc(sizeof(struct ep_priv), GFP_ATOMIC);
571 if (ep_priv == NULL)
572 return NULL;
573
574 /* Allocate the ETDs */
575 for (i = 0; i < NUM_ISO_ETDS; i++) {
576 ep_priv->etd[i] = alloc_etd(imx21);
577 if (ep_priv->etd[i] < 0) {
578 int j;
579 dev_err(imx21->dev, "isoc: Couldn't allocate etd\n");
580 for (j = 0; j < i; j++)
581 free_etd(imx21, ep_priv->etd[j]);
582 goto alloc_etd_failed;
583 }
584 imx21->etd[ep_priv->etd[i]].ep = ep;
585 }
586
587 INIT_LIST_HEAD(&ep_priv->td_list);
588 ep_priv->ep = ep;
589 ep->hcpriv = ep_priv;
590 return ep_priv;
591
592alloc_etd_failed:
593 kfree(ep_priv);
594 return NULL;
595}
596
597static int imx21_hc_urb_enqueue_isoc(struct usb_hcd *hcd,
598 struct usb_host_endpoint *ep,
599 struct urb *urb, gfp_t mem_flags)
600{
601 struct imx21 *imx21 = hcd_to_imx21(hcd);
602 struct urb_priv *urb_priv;
603 unsigned long flags;
604 struct ep_priv *ep_priv;
605 struct td *td = NULL;
606 int i;
607 int ret;
608 int cur_frame;
609 u16 maxpacket;
610
611 urb_priv = kzalloc(sizeof(struct urb_priv), mem_flags);
612 if (urb_priv == NULL)
613 return -ENOMEM;
614
615 urb_priv->isoc_td = kzalloc(
616 sizeof(struct td) * urb->number_of_packets, mem_flags);
617 if (urb_priv->isoc_td == NULL) {
618 ret = -ENOMEM;
619 goto alloc_td_failed;
620 }
621
622 spin_lock_irqsave(&imx21->lock, flags);
623
624 if (ep->hcpriv == NULL) {
625 ep_priv = alloc_isoc_ep(imx21, ep);
626 if (ep_priv == NULL) {
627 ret = -ENOMEM;
628 goto alloc_ep_failed;
629 }
630 } else {
631 ep_priv = ep->hcpriv;
632 }
633
634 ret = usb_hcd_link_urb_to_ep(hcd, urb);
635 if (ret)
636 goto link_failed;
637
638 urb->status = -EINPROGRESS;
639 urb->actual_length = 0;
640 urb->error_count = 0;
641 urb->hcpriv = urb_priv;
642 urb_priv->ep = ep;
643
644 /* allocate data memory for largest packets if not already done */
645 maxpacket = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe));
646 for (i = 0; i < NUM_ISO_ETDS; i++) {
647 struct etd_priv *etd = &imx21->etd[ep_priv->etd[i]];
648
649 if (etd->dmem_size > 0 && etd->dmem_size < maxpacket) {
650 /* not sure if this can really occur... */
651 dev_err(imx21->dev, "increasing isoc buffer %d->%d\n",
652 etd->dmem_size, maxpacket);
653 ret = -EMSGSIZE;
654 goto alloc_dmem_failed;
655 }
656
657 if (etd->dmem_size == 0) {
658 etd->dmem_offset = alloc_dmem(imx21, maxpacket, ep);
659 if (etd->dmem_offset < 0) {
660 dev_dbg(imx21->dev, "failed alloc isoc dmem\n");
661 ret = -EAGAIN;
662 goto alloc_dmem_failed;
663 }
664 etd->dmem_size = maxpacket;
665 }
666 }
667
668 /* calculate frame */
669 cur_frame = imx21_hc_get_frame(hcd);
670 if (urb->transfer_flags & URB_ISO_ASAP) {
671 if (list_empty(&ep_priv->td_list))
672 urb->start_frame = cur_frame + 5;
673 else
674 urb->start_frame = list_entry(
675 ep_priv->td_list.prev,
676 struct td, list)->frame + urb->interval;
677 }
678 urb->start_frame = wrap_frame(urb->start_frame);
679 if (frame_after(cur_frame, urb->start_frame)) {
680 dev_dbg(imx21->dev,
681 "enqueue: adjusting iso start %d (cur=%d) asap=%d\n",
682 urb->start_frame, cur_frame,
683 (urb->transfer_flags & URB_ISO_ASAP) != 0);
684 urb->start_frame = wrap_frame(cur_frame + 1);
685 }
686
687 /* set up transfers */
688 td = urb_priv->isoc_td;
689 for (i = 0; i < urb->number_of_packets; i++, td++) {
690 td->ep = ep;
691 td->urb = urb;
692 td->len = urb->iso_frame_desc[i].length;
693 td->isoc_index = i;
694 td->frame = wrap_frame(urb->start_frame + urb->interval * i);
695 td->data = urb->transfer_dma + urb->iso_frame_desc[i].offset;
696 list_add_tail(&td->list, &ep_priv->td_list);
697 }
698
699 urb_priv->isoc_remaining = urb->number_of_packets;
700 dev_vdbg(imx21->dev, "setup %d packets for iso frame %d->%d\n",
701 urb->number_of_packets, urb->start_frame, td->frame);
702
703 debug_urb_submitted(imx21, urb);
704 schedule_isoc_etds(hcd, ep);
705
706 spin_unlock_irqrestore(&imx21->lock, flags);
707 return 0;
708
709alloc_dmem_failed:
710 usb_hcd_unlink_urb_from_ep(hcd, urb);
711
712link_failed:
713alloc_ep_failed:
714 spin_unlock_irqrestore(&imx21->lock, flags);
715 kfree(urb_priv->isoc_td);
716
717alloc_td_failed:
718 kfree(urb_priv);
719 return ret;
720}
721
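/*
 * Editor's note -- a worked example of the URB_ISO_ASAP scheduling in
 * imx21_hc_urb_enqueue_isoc() above (numbers are illustrative).  Suppose
 * cur_frame = 100 and interval = 2.  With an empty td_list the URB starts
 * at frame 105 (cur_frame + 5, leaving setup latency); if TDs are already
 * queued and the last one is scheduled for frame 104, the new URB starts
 * at wrap_frame(104 + 2) = 106.  Packet i then lands on
 * wrap_frame(start_frame + interval * i), and a start frame already in
 * the past is bumped to wrap_frame(cur_frame + 1).
 */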
722static void dequeue_isoc_urb(struct imx21 *imx21,
723 struct urb *urb, struct ep_priv *ep_priv)
724{
725 struct urb_priv *urb_priv = urb->hcpriv;
726 struct td *td, *tmp;
727 int i;
728
729 if (urb_priv->active) {
730 for (i = 0; i < NUM_ISO_ETDS; i++) {
731 int etd_num = ep_priv->etd[i];
732 if (etd_num != -1 && imx21->etd[etd_num].urb == urb) {
733 struct etd_priv *etd = imx21->etd + etd_num;
734
735 reset_etd(imx21, etd_num);
736 if (etd->dmem_size)
737 free_dmem(imx21, etd->dmem_offset);
738 etd->dmem_size = 0;
739 }
740 }
741 }
742
743 list_for_each_entry_safe(td, tmp, &ep_priv->td_list, list) {
744 if (td->urb == urb) {
745 dev_vdbg(imx21->dev, "removing td %p\n", td);
746 list_del(&td->list);
747 }
748 }
749}
750
751/* =========================================== */
752/* NON ISOC Handling ... */
753/* =========================================== */
754
755static void schedule_nonisoc_etd(struct imx21 *imx21, struct urb *urb)
756{
757 unsigned int pipe = urb->pipe;
758 struct urb_priv *urb_priv = urb->hcpriv;
759 struct ep_priv *ep_priv = urb_priv->ep->hcpriv;
760 int state = urb_priv->state;
761 int etd_num = ep_priv->etd[0];
762 struct etd_priv *etd;
763 int dmem_offset;
764 u32 count;
765 u16 etd_buf_size;
766 u16 maxpacket;
767 u8 dir;
768 u8 bufround;
769 u8 datatoggle;
770 u8 interval = 0;
771 u8 relpolpos = 0;
772
773 if (etd_num < 0) {
774 dev_err(imx21->dev, "No valid ETD\n");
775 return;
776 }
777 if (readl(imx21->regs + USBH_ETDENSET) & (1 << etd_num))
778 dev_err(imx21->dev, "submitting to active ETD %d\n", etd_num);
779
780 etd = &imx21->etd[etd_num];
781 maxpacket = usb_maxpacket(urb->dev, pipe, usb_pipeout(pipe));
782 if (!maxpacket)
783 maxpacket = 8;
784
785 if (usb_pipecontrol(pipe) && (state != US_CTRL_DATA)) {
786 if (state == US_CTRL_SETUP) {
787 dir = TD_DIR_SETUP;
788 etd->dma_handle = urb->setup_dma;
789 bufround = 0;
790 count = 8;
791 datatoggle = TD_TOGGLE_DATA0;
792 } else { /* US_CTRL_ACK */
793 dir = usb_pipeout(pipe) ? TD_DIR_IN : TD_DIR_OUT;
794 etd->dma_handle = urb->transfer_dma;
795 bufround = 0;
796 count = 0;
797 datatoggle = TD_TOGGLE_DATA1;
798 }
799 } else {
800 dir = usb_pipeout(pipe) ? TD_DIR_OUT : TD_DIR_IN;
801 bufround = (dir == TD_DIR_IN) ? 1 : 0;
802 etd->dma_handle = urb->transfer_dma;
803 if (usb_pipebulk(pipe) && (state == US_BULK0))
804 count = 0;
805 else
806 count = urb->transfer_buffer_length;
807
808 if (usb_pipecontrol(pipe)) {
809 datatoggle = TD_TOGGLE_DATA1;
810 } else {
811 if (usb_gettoggle(
812 urb->dev,
813 usb_pipeendpoint(urb->pipe),
814 usb_pipeout(urb->pipe)))
815 datatoggle = TD_TOGGLE_DATA1;
816 else
817 datatoggle = TD_TOGGLE_DATA0;
818 }
819 }
820
821 etd->urb = urb;
822 etd->ep = urb_priv->ep;
823 etd->len = count;
824
825 if (usb_pipeint(pipe)) {
826 interval = urb->interval;
827 relpolpos = (readl(imx21->regs + USBH_FRMNUB) + 1) & 0xff;
828 }
829
830 /* Write ETD to device memory */
831 setup_etd_dword0(imx21, etd_num, urb, dir, maxpacket);
832
833 etd_writel(imx21, etd_num, 2,
834 (u32) interval << DW2_POLINTERV |
835 ((u32) relpolpos << DW2_RELPOLPOS) |
836 ((u32) dir << DW2_DIRPID) |
837 ((u32) bufround << DW2_BUFROUND) |
838 ((u32) datatoggle << DW2_DATATOG) |
839 ((u32) TD_NOTACCESSED << DW2_COMPCODE));
840
841 /* DMA will always transfer the buffer size even if TOBYCNT in DWORD3
842 * is smaller. Make sure we don't overrun the buffer!
843 */
844 if (count && count < maxpacket)
845 etd_buf_size = count;
846 else
847 etd_buf_size = maxpacket;
848
849 etd_writel(imx21, etd_num, 3,
850 ((u32) (etd_buf_size - 1) << DW3_BUFSIZE) | (u32) count);
851
852 if (!count)
853 etd->dma_handle = 0;
854
855 /* allocate x and y buffer space at once */
856 etd->dmem_size = (count > maxpacket) ? maxpacket * 2 : maxpacket;
857 dmem_offset = alloc_dmem(imx21, etd->dmem_size, urb_priv->ep);
858 if (dmem_offset < 0) {
859 /* Setup everything we can in HW and update when we get DMEM */
860 etd_writel(imx21, etd_num, 1, (u32)maxpacket << 16);
861
862 dev_dbg(imx21->dev, "Queuing etd %d for DMEM\n", etd_num);
863 debug_urb_queued_for_dmem(imx21, urb);
864 list_add_tail(&etd->queue, &imx21->queue_for_dmem);
865 return;
866 }
867
868 etd_writel(imx21, etd_num, 1,
869 (((u32) dmem_offset + (u32) maxpacket) << DW1_YBUFSRTAD) |
870 (u32) dmem_offset);
871
872 urb_priv->active = 1;
873
874 /* enable the ETD to kick off transfer */
875 dev_vdbg(imx21->dev, "Activating etd %d for %d bytes %s\n",
876 etd_num, count, dir != TD_DIR_IN ? "out" : "in");
877 activate_etd(imx21, etd_num, etd->dma_handle, dir);
878
879}
880
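/*
 * Editor's note -- a worked example of the double-buffer sizing in
 * schedule_nonisoc_etd() above (values illustrative).  For a bulk
 * transfer with count = 1000 and maxpacket = 64, dmem_size = 128: both
 * the X and Y buffers are allocated so the controller can fill one while
 * DMA drains the other.  A 20-byte transfer fits a single buffer
 * (dmem_size = 64), and the Y buffer start written to ETD DWORD1 is
 * simply dmem_offset + maxpacket.
 */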
881static void nonisoc_etd_done(struct usb_hcd *hcd, struct urb *urb, int etd_num)
882{
883 struct imx21 *imx21 = hcd_to_imx21(hcd);
884 struct etd_priv *etd = &imx21->etd[etd_num];
885 u32 etd_mask = 1 << etd_num;
886 struct urb_priv *urb_priv = urb->hcpriv;
887 int dir;
888 u16 xbufaddr;
889 int cc;
890 u32 bytes_xfrd;
891 int etd_done;
892
893 disactivate_etd(imx21, etd_num);
894
895 dir = (etd_readl(imx21, etd_num, 0) >> DW0_DIRECT) & 0x3;
896 xbufaddr = etd_readl(imx21, etd_num, 1) & 0xffff;
897 cc = (etd_readl(imx21, etd_num, 2) >> DW2_COMPCODE) & 0xf;
898 bytes_xfrd = etd->len - (etd_readl(imx21, etd_num, 3) & 0x1fffff);
899
900 /* save toggle carry */
901 usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
902 usb_pipeout(urb->pipe),
903 (etd_readl(imx21, etd_num, 0) >> DW0_TOGCRY) & 0x1);
904
905 if (dir == TD_DIR_IN) {
906 clear_toggle_bit(imx21, USBH_XFILLSTAT, etd_mask);
907 clear_toggle_bit(imx21, USBH_YFILLSTAT, etd_mask);
908 }
909 free_dmem(imx21, xbufaddr);
910
911 urb->error_count = 0;
912 if (!(urb->transfer_flags & URB_SHORT_NOT_OK)
913 && (cc == TD_DATAUNDERRUN))
914 cc = TD_CC_NOERROR;
915
916 if (cc != 0)
917 dev_vdbg(imx21->dev, "cc is 0x%x\n", cc);
918
919 etd_done = (cc_to_error[cc] != 0); /* stop if error */
920
921 switch (usb_pipetype(urb->pipe)) {
922 case PIPE_CONTROL:
923 switch (urb_priv->state) {
924 case US_CTRL_SETUP:
925 if (urb->transfer_buffer_length > 0)
926 urb_priv->state = US_CTRL_DATA;
927 else
928 urb_priv->state = US_CTRL_ACK;
929 break;
930 case US_CTRL_DATA:
931 urb->actual_length += bytes_xfrd;
932 urb_priv->state = US_CTRL_ACK;
933 break;
934 case US_CTRL_ACK:
935 etd_done = 1;
936 break;
937 default:
938 dev_err(imx21->dev,
939 "Invalid pipe state %d\n", urb_priv->state);
940 etd_done = 1;
941 break;
942 }
943 break;
944
945 case PIPE_BULK:
946 urb->actual_length += bytes_xfrd;
947 if ((urb_priv->state == US_BULK)
948 && (urb->transfer_flags & URB_ZERO_PACKET)
949 && urb->transfer_buffer_length > 0
950 && ((urb->transfer_buffer_length %
951 usb_maxpacket(urb->dev, urb->pipe,
952 usb_pipeout(urb->pipe))) == 0)) {
953 /* need a 0-packet */
954 urb_priv->state = US_BULK0;
955 } else {
956 etd_done = 1;
957 }
958 break;
959
960 case PIPE_INTERRUPT:
961 urb->actual_length += bytes_xfrd;
962 etd_done = 1;
963 break;
964 }
965
966 if (!etd_done) {
967 dev_vdbg(imx21->dev, "next state=%d\n", urb_priv->state);
968 schedule_nonisoc_etd(imx21, urb);
969 } else {
970 struct usb_host_endpoint *ep = urb->ep;
971
972 urb_done(hcd, urb, cc_to_error[cc]);
973 etd->urb = NULL;
974
975 if (!list_empty(&ep->urb_list)) {
976 urb = list_first_entry(&ep->urb_list,
977 struct urb, urb_list);
978 dev_vdbg(imx21->dev, "next URB %p\n", urb);
979 schedule_nonisoc_etd(imx21, urb);
980 }
981 }
982}
983
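/*
 * Editor's note -- the switch in nonisoc_etd_done() above walks the usual
 * three-stage control transfer state machine (states from imx21-hcd.h):
 *
 *   US_CTRL_SETUP --(SETUP stage done)--> US_CTRL_DATA  (if a data stage)
 *                                    \--> US_CTRL_ACK   (if none)
 *   US_CTRL_DATA  --(data stage done)---> US_CTRL_ACK
 *   US_CTRL_ACK   --(status stage done)-> URB complete
 *
 * Bulk URBs move US_BULK -> US_BULK0 only when URB_ZERO_PACKET demands a
 * terminating zero-length packet for an exact multiple of maxpacket.
 */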
984static struct ep_priv *alloc_ep(void)
985{
986 int i;
987 struct ep_priv *ep_priv;
988
989 ep_priv = kzalloc(sizeof(struct ep_priv), GFP_ATOMIC);
990 if (!ep_priv)
991 return NULL;
992
993 for (i = 0; i < NUM_ISO_ETDS; ++i)
994 ep_priv->etd[i] = -1;
995
996 return ep_priv;
997}
998
999static int imx21_hc_urb_enqueue(struct usb_hcd *hcd,
1000 struct urb *urb, gfp_t mem_flags)
1001{
1002 struct imx21 *imx21 = hcd_to_imx21(hcd);
1003 struct usb_host_endpoint *ep = urb->ep;
1004 struct urb_priv *urb_priv;
1005 struct ep_priv *ep_priv;
1006 struct etd_priv *etd;
1007 int ret;
1008 unsigned long flags;
1009 int new_ep = 0;
1010
1011 dev_vdbg(imx21->dev,
1012 "enqueue urb=%p ep=%p len=%d "
1013 "buffer=%p dma=%08X setupBuf=%p setupDma=%08X\n",
1014 urb, ep,
1015 urb->transfer_buffer_length,
1016 urb->transfer_buffer, urb->transfer_dma,
1017 urb->setup_packet, urb->setup_dma);
1018
1019 if (usb_pipeisoc(urb->pipe))
1020 return imx21_hc_urb_enqueue_isoc(hcd, ep, urb, mem_flags);
1021
1022 urb_priv = kzalloc(sizeof(struct urb_priv), mem_flags);
1023 if (!urb_priv)
1024 return -ENOMEM;
1025
1026 spin_lock_irqsave(&imx21->lock, flags);
1027
1028 ep_priv = ep->hcpriv;
1029 if (ep_priv == NULL) {
1030 ep_priv = alloc_ep();
1031 if (!ep_priv) {
1032 ret = -ENOMEM;
1033 goto failed_alloc_ep;
1034 }
1035 ep->hcpriv = ep_priv;
1036 ep_priv->ep = ep;
1037 new_ep = 1;
1038 }
1039
1040 ret = usb_hcd_link_urb_to_ep(hcd, urb);
1041 if (ret)
1042 goto failed_link;
1043
1044 urb->status = -EINPROGRESS;
1045 urb->actual_length = 0;
1046 urb->error_count = 0;
1047 urb->hcpriv = urb_priv;
1048 urb_priv->ep = ep;
1049
1050 switch (usb_pipetype(urb->pipe)) {
1051 case PIPE_CONTROL:
1052 urb_priv->state = US_CTRL_SETUP;
1053 break;
1054 case PIPE_BULK:
1055 urb_priv->state = US_BULK;
1056 break;
1057 }
1058
1059 debug_urb_submitted(imx21, urb);
1060 if (ep_priv->etd[0] < 0) {
1061 if (ep_priv->waiting_etd) {
1062 dev_dbg(imx21->dev,
1063 "no ETD available already queued %p\n",
1064 ep_priv);
1065 debug_urb_queued_for_etd(imx21, urb);
1066 goto out;
1067 }
1068 ep_priv->etd[0] = alloc_etd(imx21);
1069 if (ep_priv->etd[0] < 0) {
1070 dev_dbg(imx21->dev,
1071 "no ETD available queueing %p\n", ep_priv);
1072 debug_urb_queued_for_etd(imx21, urb);
1073 list_add_tail(&ep_priv->queue, &imx21->queue_for_etd);
1074 ep_priv->waiting_etd = 1;
1075 goto out;
1076 }
1077 }
1078
1079 /* Schedule if no URB already active for this endpoint */
1080 etd = &imx21->etd[ep_priv->etd[0]];
1081 if (etd->urb == NULL) {
1082 DEBUG_LOG_FRAME(imx21, etd, last_req);
1083 schedule_nonisoc_etd(imx21, urb);
1084 }
1085
1086out:
1087 spin_unlock_irqrestore(&imx21->lock, flags);
1088 return 0;
1089
1090failed_link:
1091failed_alloc_ep:
1092 spin_unlock_irqrestore(&imx21->lock, flags);
1093 kfree(urb_priv);
1094 return ret;
1095}
1096
1097static int imx21_hc_urb_dequeue(struct usb_hcd *hcd, struct urb *urb,
1098 int status)
1099{
1100 struct imx21 *imx21 = hcd_to_imx21(hcd);
1101 unsigned long flags;
1102 struct usb_host_endpoint *ep;
1103 struct ep_priv *ep_priv;
1104 struct urb_priv *urb_priv = urb->hcpriv;
1105 int ret = -EINVAL;
1106
1107 dev_vdbg(imx21->dev, "dequeue urb=%p iso=%d status=%d\n",
1108 urb, usb_pipeisoc(urb->pipe), status);
1109
1110 spin_lock_irqsave(&imx21->lock, flags);
1111
1112 ret = usb_hcd_check_unlink_urb(hcd, urb, status);
1113 if (ret)
1114 goto fail;
1115 ep = urb_priv->ep;
1116 ep_priv = ep->hcpriv;
1117
1118 debug_urb_unlinked(imx21, urb);
1119
1120 if (usb_pipeisoc(urb->pipe)) {
1121 dequeue_isoc_urb(imx21, urb, ep_priv);
1122 schedule_isoc_etds(hcd, ep);
1123 } else if (urb_priv->active) {
1124 int etd_num = ep_priv->etd[0];
1125 if (etd_num != -1) {
1126 disactivate_etd(imx21, etd_num);
1127 free_dmem(imx21, etd_readl(imx21, etd_num, 1) & 0xffff);
1128 imx21->etd[etd_num].urb = NULL;
1129 }
1130 }
1131
1132 urb_done(hcd, urb, status);
1133
1134 spin_unlock_irqrestore(&imx21->lock, flags);
1135 return 0;
1136
1137fail:
1138 spin_unlock_irqrestore(&imx21->lock, flags);
1139 return ret;
1140}
1141
1142/* =========================================== */
1143/* Interrupt dispatch */
1144/* =========================================== */
1145
1146static void process_etds(struct usb_hcd *hcd, struct imx21 *imx21, int sof)
1147{
1148 int etd_num;
1149 int enable_sof_int = 0;
1150 unsigned long flags;
1151
1152 spin_lock_irqsave(&imx21->lock, flags);
1153
1154 for (etd_num = 0; etd_num < USB_NUM_ETD; etd_num++) {
1155 u32 etd_mask = 1 << etd_num;
1156 u32 enabled = readl(imx21->regs + USBH_ETDENSET) & etd_mask;
1157 u32 done = readl(imx21->regs + USBH_ETDDONESTAT) & etd_mask;
1158 struct etd_priv *etd = &imx21->etd[etd_num];
1159
1160
1161 if (done) {
1162 DEBUG_LOG_FRAME(imx21, etd, last_int);
1163 } else {
1164/*
1165 * Kludge warning!
1166 *
1167 * When multiple transfers are using the bus we sometimes get into a state
1168 * where a transfer has completed (the CC field of the ETD is != 0x0F),
1169 * the ETD has self-disabled, but the ETDDONESTAT flag is not set
1170 * (and hence no interrupt occurs).
1171 * This causes the transfer in question to hang.
1172 * The kludge below checks for this condition at each SOF and processes any
1173 * blocked ETDs (after an arbitrary 10-frame wait).
1174 *
1175 * With a single active transfer the usbtest test suite will run for days
1176 * without the kludge.
1177 * With other bus activity (e.g. mass storage) even just test1 will hang
1178 * without the kludge.
1179 */
1180 u32 dword0;
1181 int cc;
1182
1183 if (etd->active_count && !enabled) /* suspicious... */
1184 enable_sof_int = 1;
1185
1186 if (!sof || enabled || !etd->active_count)
1187 continue;
1188
1189 cc = etd_readl(imx21, etd_num, 2) >> DW2_COMPCODE;
1190 if (cc == TD_NOTACCESSED)
1191 continue;
1192
1193 if (++etd->active_count < 10)
1194 continue;
1195
1196 dword0 = etd_readl(imx21, etd_num, 0);
1197 dev_dbg(imx21->dev,
1198 "unblock ETD %d dev=0x%X ep=0x%X cc=0x%02X!\n",
1199 etd_num, dword0 & 0x7F,
1200 (dword0 >> DW0_ENDPNT) & 0x0F,
1201 cc);
1202
1203#ifdef DEBUG
1204 dev_dbg(imx21->dev,
1205 "frame: act=%d disact=%d"
1206 " int=%d req=%d cur=%d\n",
1207 etd->activated_frame,
1208 etd->disactivated_frame,
1209 etd->last_int_frame,
1210 etd->last_req_frame,
1211 readl(imx21->regs + USBH_FRMNUB));
1212 imx21->debug_unblocks++;
1213#endif
1214 etd->active_count = 0;
1215/* End of kludge */
1216 }
1217
1218 if (etd->ep == NULL || etd->urb == NULL) {
1219 dev_dbg(imx21->dev,
1220 "Interrupt for unexpected etd %d"
1221 " ep=%p urb=%p\n",
1222 etd_num, etd->ep, etd->urb);
1223 disactivate_etd(imx21, etd_num);
1224 continue;
1225 }
1226
1227 if (usb_pipeisoc(etd->urb->pipe))
1228 isoc_etd_done(hcd, etd->urb, etd_num);
1229 else
1230 nonisoc_etd_done(hcd, etd->urb, etd_num);
1231 }
1232
1233 /* only enable SOF interrupt if it may be needed for the kludge */
1234 if (enable_sof_int)
1235 set_register_bits(imx21, USBH_SYSIEN, USBH_SYSIEN_SOFINT);
1236 else
1237 clear_register_bits(imx21, USBH_SYSIEN, USBH_SYSIEN_SOFINT);
1238
1239
1240 spin_unlock_irqrestore(&imx21->lock, flags);
1241}
1242
1243static irqreturn_t imx21_irq(struct usb_hcd *hcd)
1244{
1245 struct imx21 *imx21 = hcd_to_imx21(hcd);
1246 u32 ints = readl(imx21->regs + USBH_SYSISR);
1247
1248 if (ints & USBH_SYSIEN_HERRINT)
1249 dev_dbg(imx21->dev, "Scheduling error\n");
1250
1251 if (ints & USBH_SYSIEN_SORINT)
1252 dev_dbg(imx21->dev, "Scheduling overrun\n");
1253
1254 if (ints & (USBH_SYSISR_DONEINT | USBH_SYSISR_SOFINT))
1255 process_etds(hcd, imx21, ints & USBH_SYSISR_SOFINT);
1256
1257 writel(ints, imx21->regs + USBH_SYSISR);
1258 return IRQ_HANDLED;
1259}
1260
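/*
 * Editor's note -- imx21_irq() above reads USBH_SYSISR once and writes the
 * same value back, which (assuming write-one-to-clear semantics for this
 * register) acknowledges exactly the interrupts that were observed, so
 * events raised after the read are not lost.
 */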
1261static void imx21_hc_endpoint_disable(struct usb_hcd *hcd,
1262 struct usb_host_endpoint *ep)
1263{
1264 struct imx21 *imx21 = hcd_to_imx21(hcd);
1265 unsigned long flags;
1266 struct ep_priv *ep_priv;
1267 int i;
1268
1269 if (ep == NULL)
1270 return;
1271
1272 spin_lock_irqsave(&imx21->lock, flags);
1273 ep_priv = ep->hcpriv;
1274 dev_vdbg(imx21->dev, "disable ep=%p, ep->hcpriv=%p\n", ep, ep_priv);
1275
1276 if (!list_empty(&ep->urb_list))
1277 dev_dbg(imx21->dev, "ep's URB list is not empty\n");
1278
1279 if (ep_priv != NULL) {
1280 for (i = 0; i < NUM_ISO_ETDS; i++) {
1281 if (ep_priv->etd[i] > -1)
1282 dev_dbg(imx21->dev, "free etd %d for disable\n",
1283 ep_priv->etd[i]);
1284
1285 free_etd(imx21, ep_priv->etd[i]);
1286 }
1287 kfree(ep_priv);
1288 ep->hcpriv = NULL;
1289 }
1290
1291 for (i = 0; i < USB_NUM_ETD; i++) {
1292 if (imx21->etd[i].alloc && imx21->etd[i].ep == ep) {
1293 dev_err(imx21->dev,
1294 "Active etd %d for disabled ep=%p!\n", i, ep);
1295 free_etd(imx21, i);
1296 }
1297 }
1298 free_epdmem(imx21, ep);
1299 spin_unlock_irqrestore(&imx21->lock, flags);
1300}
1301
1302/* =========================================== */
1303/* Hub handling */
1304/* =========================================== */
1305
1306static int get_hub_descriptor(struct usb_hcd *hcd,
1307 struct usb_hub_descriptor *desc)
1308{
1309 struct imx21 *imx21 = hcd_to_imx21(hcd);
1310 desc->bDescriptorType = 0x29; /* HUB descriptor */
1311 desc->bHubContrCurrent = 0;
1312
1313 desc->bNbrPorts = readl(imx21->regs + USBH_ROOTHUBA)
1314 & USBH_ROOTHUBA_NDNSTMPRT_MASK;
1315 desc->bDescLength = 9;
1316 desc->bPwrOn2PwrGood = 0;
1317 desc->wHubCharacteristics = (__force __u16) cpu_to_le16(
1318 0x0002 | /* No power switching */
1319 0x0010 | /* No over current protection */
1320 0);
1321
1322 desc->bitmap[0] = 1 << 1;
1323 desc->bitmap[1] = ~0;
1324 return 0;
1325}
1326
1327static int imx21_hc_hub_status_data(struct usb_hcd *hcd, char *buf)
1328{
1329 struct imx21 *imx21 = hcd_to_imx21(hcd);
1330 int ports;
1331 int changed = 0;
1332 int i;
1333 unsigned long flags;
1334
1335 spin_lock_irqsave(&imx21->lock, flags);
1336 ports = readl(imx21->regs + USBH_ROOTHUBA)
1337 & USBH_ROOTHUBA_NDNSTMPRT_MASK;
1338 if (ports > 7) {
1339 ports = 7;
1340 dev_err(imx21->dev, "ports %d > 7\n", ports);
1341 }
1342 for (i = 0; i < ports; i++) {
1343 if (readl(imx21->regs + USBH_PORTSTAT(i)) &
1344 (USBH_PORTSTAT_CONNECTSC |
1345 USBH_PORTSTAT_PRTENBLSC |
1346 USBH_PORTSTAT_PRTSTATSC |
1347 USBH_PORTSTAT_OVRCURIC |
1348 USBH_PORTSTAT_PRTRSTSC)) {
1349
1350 changed = 1;
1351 buf[0] |= 1 << (i + 1);
1352 }
1353 }
1354 spin_unlock_irqrestore(&imx21->lock, flags);
1355
1356 if (changed)
1357 dev_info(imx21->dev, "Hub status changed\n");
1358 return changed;
1359}
1360
1361static int imx21_hc_hub_control(struct usb_hcd *hcd,
1362 u16 typeReq,
1363 u16 wValue, u16 wIndex, char *buf, u16 wLength)
1364{
1365 struct imx21 *imx21 = hcd_to_imx21(hcd);
1366 int rc = 0;
1367 u32 status_write = 0;
1368
1369 switch (typeReq) {
1370 case ClearHubFeature:
1371 dev_dbg(imx21->dev, "ClearHubFeature\n");
1372 switch (wValue) {
1373 case C_HUB_OVER_CURRENT:
1374 dev_dbg(imx21->dev, " OVER_CURRENT\n");
1375 break;
1376 case C_HUB_LOCAL_POWER:
1377 dev_dbg(imx21->dev, " LOCAL_POWER\n");
1378 break;
1379 default:
1380 dev_dbg(imx21->dev, " unknown\n");
1381 rc = -EINVAL;
1382 break;
1383 }
1384 break;
1385
1386 case ClearPortFeature:
1387 dev_dbg(imx21->dev, "ClearPortFeature\n");
1388 switch (wValue) {
1389 case USB_PORT_FEAT_ENABLE:
1390 dev_dbg(imx21->dev, " ENABLE\n");
1391 status_write = USBH_PORTSTAT_CURCONST;
1392 break;
1393 case USB_PORT_FEAT_SUSPEND:
1394 dev_dbg(imx21->dev, " SUSPEND\n");
1395 status_write = USBH_PORTSTAT_PRTOVRCURI;
1396 break;
1397 case USB_PORT_FEAT_POWER:
1398 dev_dbg(imx21->dev, " POWER\n");
1399 status_write = USBH_PORTSTAT_LSDEVCON;
1400 break;
1401 case USB_PORT_FEAT_C_ENABLE:
1402 dev_dbg(imx21->dev, " C_ENABLE\n");
1403 status_write = USBH_PORTSTAT_PRTENBLSC;
1404 break;
1405 case USB_PORT_FEAT_C_SUSPEND:
1406 dev_dbg(imx21->dev, " C_SUSPEND\n");
1407 status_write = USBH_PORTSTAT_PRTSTATSC;
1408 break;
1409 case USB_PORT_FEAT_C_CONNECTION:
1410 dev_dbg(imx21->dev, " C_CONNECTION\n");
1411 status_write = USBH_PORTSTAT_CONNECTSC;
1412 break;
1413 case USB_PORT_FEAT_C_OVER_CURRENT:
1414 dev_dbg(imx21->dev, " C_OVER_CURRENT\n");
1415 status_write = USBH_PORTSTAT_OVRCURIC;
1416 break;
1417 case USB_PORT_FEAT_C_RESET:
1418 dev_dbg(imx21->dev, " C_RESET\n");
1419 status_write = USBH_PORTSTAT_PRTRSTSC;
1420 break;
1421 default:
1422 dev_dbg(imx21->dev, " unknown\n");
1423 rc = -EINVAL;
1424 break;
1425 }
1426
1427 break;
1428
1429 case GetHubDescriptor:
1430 dev_dbg(imx21->dev, "GetHubDescriptor\n");
1431 rc = get_hub_descriptor(hcd, (void *)buf);
1432 break;
1433
1434 case GetHubStatus:
1435 dev_dbg(imx21->dev, " GetHubStatus\n");
1436 *(__le32 *) buf = 0;
1437 break;
1438
1439 case GetPortStatus:
1440 dev_dbg(imx21->dev, "GetPortStatus: port: %d, 0x%x\n",
1441 wIndex, USBH_PORTSTAT(wIndex - 1));
1442 *(__le32 *) buf = readl(imx21->regs +
1443 USBH_PORTSTAT(wIndex - 1));
1444 break;
1445
1446 case SetHubFeature:
1447 dev_dbg(imx21->dev, "SetHubFeature\n");
1448 switch (wValue) {
1449 case C_HUB_OVER_CURRENT:
1450 dev_dbg(imx21->dev, " OVER_CURRENT\n");
1451 break;
1452
1453 case C_HUB_LOCAL_POWER:
1454 dev_dbg(imx21->dev, " LOCAL_POWER\n");
1455 break;
1456 default:
1457 dev_dbg(imx21->dev, " unknown\n");
1458 rc = -EINVAL;
1459 break;
1460 }
1461
1462 break;
1463
1464 case SetPortFeature:
1465 dev_dbg(imx21->dev, "SetPortFeature\n");
1466 switch (wValue) {
1467 case USB_PORT_FEAT_SUSPEND:
1468 dev_dbg(imx21->dev, " SUSPEND\n");
1469 status_write = USBH_PORTSTAT_PRTSUSPST;
1470 break;
1471 case USB_PORT_FEAT_POWER:
1472 dev_dbg(imx21->dev, " POWER\n");
1473 status_write = USBH_PORTSTAT_PRTPWRST;
1474 break;
1475 case USB_PORT_FEAT_RESET:
1476 dev_dbg(imx21->dev, " RESET\n");
1477 status_write = USBH_PORTSTAT_PRTRSTST;
1478 break;
1479 default:
1480 dev_dbg(imx21->dev, " unknown\n");
1481 rc = -EINVAL;
1482 break;
1483 }
1484 break;
1485
1486 default:
1487 dev_dbg(imx21->dev, " unknown\n");
1488 rc = -EINVAL;
1489 break;
1490 }
1491
1492 if (status_write)
1493 writel(status_write, imx21->regs + USBH_PORTSTAT(wIndex - 1));
1494 return rc;
1495}
1496
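/*
 * Editor's note -- in imx21_hc_hub_control() above, both Set and Clear
 * port-feature requests funnel into a single write to USBH_PORTSTAT: the
 * *SC change bits are assumed write-one-to-clear, while status bits such
 * as PRTRSTST are set by writing 1.  For example, ClearPortFeature
 * (USB_PORT_FEAT_C_CONNECTION) writes USBH_PORTSTAT_CONNECTSC to clear
 * the connect status change.
 */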
1497/* =========================================== */
1498/* Host controller management */
1499/* =========================================== */
1500
1501static int imx21_hc_reset(struct usb_hcd *hcd)
1502{
1503 struct imx21 *imx21 = hcd_to_imx21(hcd);
1504 unsigned long timeout;
1505 unsigned long flags;
1506
1507 spin_lock_irqsave(&imx21->lock, flags);
1508
1509 /* Reset the host controller modules */
1510 writel(USBOTG_RST_RSTCTRL | USBOTG_RST_RSTRH |
1511 USBOTG_RST_RSTHSIE | USBOTG_RST_RSTHC,
1512 imx21->regs + USBOTG_RST_CTRL);
1513
1514 /* Wait for reset to finish */
1515 timeout = jiffies + HZ;
1516 while (readl(imx21->regs + USBOTG_RST_CTRL) != 0) {
1517 if (time_after(jiffies, timeout)) {
1518 spin_unlock_irqrestore(&imx21->lock, flags);
1519 dev_err(imx21->dev, "timeout waiting for reset\n");
1520 return -ETIMEDOUT;
1521 }
1522 spin_unlock_irq(&imx21->lock);
1523 schedule_timeout(1);
1524 spin_lock_irq(&imx21->lock);
1525 }
1526 spin_unlock_irqrestore(&imx21->lock, flags);
1527 return 0;
1528}
1529
1530static int __devinit imx21_hc_start(struct usb_hcd *hcd)
1531{
1532 struct imx21 *imx21 = hcd_to_imx21(hcd);
1533 unsigned long flags;
1534 int i, j;
1535 u32 hw_mode = USBOTG_HWMODE_CRECFG_HOST;
1536 u32 usb_control = 0;
1537
1538 hw_mode |= ((imx21->pdata->host_xcvr << USBOTG_HWMODE_HOSTXCVR_SHIFT) &
1539 USBOTG_HWMODE_HOSTXCVR_MASK);
1540 hw_mode |= ((imx21->pdata->otg_xcvr << USBOTG_HWMODE_OTGXCVR_SHIFT) &
1541 USBOTG_HWMODE_OTGXCVR_MASK);
1542
1543 if (imx21->pdata->host1_txenoe)
1544 usb_control |= USBCTRL_HOST1_TXEN_OE;
1545
1546 if (!imx21->pdata->host1_xcverless)
1547 usb_control |= USBCTRL_HOST1_BYP_TLL;
1548
1549 if (imx21->pdata->otg_ext_xcvr)
1550 usb_control |= USBCTRL_OTC_RCV_RXDP;
1551
1552
1553 spin_lock_irqsave(&imx21->lock, flags);
1554
1555 writel((USBOTG_CLK_CTRL_HST | USBOTG_CLK_CTRL_MAIN),
1556 imx21->regs + USBOTG_CLK_CTRL);
1557 writel(hw_mode, imx21->regs + USBOTG_HWMODE);
1558 writel(usb_control, imx21->regs + USBCTRL);
1559 writel(USB_MISCCONTROL_SKPRTRY | USB_MISCCONTROL_ARBMODE,
1560 imx21->regs + USB_MISCCONTROL);
1561
1562 /* Clear the ETDs */
1563 for (i = 0; i < USB_NUM_ETD; i++)
1564 for (j = 0; j < 4; j++)
1565 etd_writel(imx21, i, j, 0);
1566
1567 /* Take the HC out of reset */
1568 writel(USBH_HOST_CTRL_HCUSBSTE_OPERATIONAL | USBH_HOST_CTRL_CTLBLKSR_1,
1569 imx21->regs + USBH_HOST_CTRL);
1570
1571 /* Enable ports */
1572 if (imx21->pdata->enable_otg_host)
1573 writel(USBH_PORTSTAT_PRTPWRST | USBH_PORTSTAT_PRTENABST,
1574 imx21->regs + USBH_PORTSTAT(0));
1575
1576 if (imx21->pdata->enable_host1)
1577 writel(USBH_PORTSTAT_PRTPWRST | USBH_PORTSTAT_PRTENABST,
1578 imx21->regs + USBH_PORTSTAT(1));
1579
1580 if (imx21->pdata->enable_host2)
1581 writel(USBH_PORTSTAT_PRTPWRST | USBH_PORTSTAT_PRTENABST,
1582 imx21->regs + USBH_PORTSTAT(2));
1583
1584
1585 hcd->state = HC_STATE_RUNNING;
1586
1587 /* Enable host controller interrupts */
1588 set_register_bits(imx21, USBH_SYSIEN,
1589 USBH_SYSIEN_HERRINT |
1590 USBH_SYSIEN_DONEINT | USBH_SYSIEN_SORINT);
1591 set_register_bits(imx21, USBOTG_CINT_STEN, USBOTG_HCINT);
1592
1593 spin_unlock_irqrestore(&imx21->lock, flags);
1594
1595 return 0;
1596}
1597
1598static void imx21_hc_stop(struct usb_hcd *hcd)
1599{
1600 struct imx21 *imx21 = hcd_to_imx21(hcd);
1601 unsigned long flags;
1602
1603 spin_lock_irqsave(&imx21->lock, flags);
1604
1605 writel(0, imx21->regs + USBH_SYSIEN);
1606 clear_register_bits(imx21, USBOTG_CINT_STEN, USBOTG_HCINT);
1607 clear_register_bits(imx21, USBOTG_CLK_CTRL,
1608 USBOTG_CLK_CTRL_HST | USBOTG_CLK_CTRL_MAIN);
1609 spin_unlock_irqrestore(&imx21->lock, flags);
1610}
1611
1612/* =========================================== */
1613/* Driver glue */
1614/* =========================================== */
1615
1616static struct hc_driver imx21_hc_driver = {
1617 .description = hcd_name,
1618 .product_desc = "IMX21 USB Host Controller",
1619 .hcd_priv_size = sizeof(struct imx21),
1620
1621 .flags = HCD_USB11,
1622 .irq = imx21_irq,
1623
1624 .reset = imx21_hc_reset,
1625 .start = imx21_hc_start,
1626 .stop = imx21_hc_stop,
1627
1628 /* I/O requests */
1629 .urb_enqueue = imx21_hc_urb_enqueue,
1630 .urb_dequeue = imx21_hc_urb_dequeue,
1631 .endpoint_disable = imx21_hc_endpoint_disable,
1632
1633 /* scheduling support */
1634 .get_frame_number = imx21_hc_get_frame,
1635
1636 /* Root hub support */
1637 .hub_status_data = imx21_hc_hub_status_data,
1638 .hub_control = imx21_hc_hub_control,
1639
1640};
1641
1642static struct mx21_usbh_platform_data default_pdata = {
1643 .host_xcvr = MX21_USBXCVR_TXDIF_RXDIF,
1644 .otg_xcvr = MX21_USBXCVR_TXDIF_RXDIF,
1645 .enable_host1 = 1,
1646 .enable_host2 = 1,
1647 .enable_otg_host = 1,
1648
1649};
1650
1651static int imx21_remove(struct platform_device *pdev)
1652{
1653 struct usb_hcd *hcd = platform_get_drvdata(pdev);
1654 struct imx21 *imx21 = hcd_to_imx21(hcd);
1655 struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1656
1657 remove_debug_files(imx21);
1658 usb_remove_hcd(hcd);
1659
1660 if (res != NULL) {
1661 clk_disable(imx21->clk);
1662 clk_put(imx21->clk);
1663 iounmap(imx21->regs);
1664 release_mem_region(res->start, resource_size(res));
1665 }
1666
1667 kfree(hcd);
1668 return 0;
1669}
1670
1671
1672static int imx21_probe(struct platform_device *pdev)
1673{
1674 struct usb_hcd *hcd;
1675 struct imx21 *imx21;
1676 struct resource *res;
1677 int ret;
1678 int irq;
1679
1680 printk(KERN_INFO "%s\n", imx21_hc_driver.product_desc);
1681
1682 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1683 if (!res)
1684 return -ENODEV;
1685 irq = platform_get_irq(pdev, 0);
1686 if (irq < 0)
1687 return -ENXIO;
1688
1689 hcd = usb_create_hcd(&imx21_hc_driver,
1690 &pdev->dev, dev_name(&pdev->dev));
1691 if (hcd == NULL) {
1692 dev_err(&pdev->dev, "Cannot create hcd (%s)\n",
1693 dev_name(&pdev->dev));
1694 return -ENOMEM;
1695 }
1696
1697 imx21 = hcd_to_imx21(hcd);
1698 imx21->dev = &pdev->dev;
1699 imx21->pdata = pdev->dev.platform_data;
1700 if (!imx21->pdata)
1701 imx21->pdata = &default_pdata;
1702
1703 spin_lock_init(&imx21->lock);
1704 INIT_LIST_HEAD(&imx21->dmem_list);
1705 INIT_LIST_HEAD(&imx21->queue_for_etd);
1706 INIT_LIST_HEAD(&imx21->queue_for_dmem);
1707 create_debug_files(imx21);
1708
1709 res = request_mem_region(res->start, resource_size(res), hcd_name);
1710 if (!res) {
1711 ret = -EBUSY;
1712 goto failed_request_mem;
1713 }
1714
1715 imx21->regs = ioremap(res->start, resource_size(res));
1716 if (imx21->regs == NULL) {
1717 dev_err(imx21->dev, "Cannot map registers\n");
1718 ret = -ENOMEM;
1719 goto failed_ioremap;
1720 }
1721
1722 /* Enable the clock source */
1723 imx21->clk = clk_get(imx21->dev, NULL);
1724 if (IS_ERR(imx21->clk)) {
1725 dev_err(imx21->dev, "no clock found\n");
1726 ret = PTR_ERR(imx21->clk);
1727 goto failed_clock_get;
1728 }
1729
1730 ret = clk_set_rate(imx21->clk, clk_round_rate(imx21->clk, 48000000));
1731 if (ret)
1732 goto failed_clock_set;
1733 ret = clk_enable(imx21->clk);
1734 if (ret)
1735 goto failed_clock_enable;
1736
1737 dev_info(imx21->dev, "Hardware HC revision: 0x%02X\n",
1738 (readl(imx21->regs + USBOTG_HWMODE) >> 16) & 0xFF);
1739
1740 ret = usb_add_hcd(hcd, irq, IRQF_DISABLED);
1741 if (ret != 0) {
1742 dev_err(imx21->dev, "usb_add_hcd() returned %d\n", ret);
1743 goto failed_add_hcd;
1744 }
1745
1746 return 0;
1747
1748failed_add_hcd:
1749 clk_disable(imx21->clk);
1750failed_clock_enable:
1751failed_clock_set:
1752 clk_put(imx21->clk);
1753failed_clock_get:
1754 iounmap(imx21->regs);
1755failed_ioremap:
1756 release_mem_region(res->start, res->end - res->start);
1757failed_request_mem:
1758 remove_debug_files(imx21);
1759 usb_put_hcd(hcd);
1760 return ret;
1761}
1762
1763static struct platform_driver imx21_hcd_driver = {
1764 .driver = {
1765 .name = (char *)hcd_name,
1766 },
1767 .probe = imx21_probe,
1768 .remove = imx21_remove,
1769 .suspend = NULL,
1770 .resume = NULL,
1771};
1772
1773static int __init imx21_hcd_init(void)
1774{
1775 return platform_driver_register(&imx21_hcd_driver);
1776}
1777
1778static void __exit imx21_hcd_cleanup(void)
1779{
1780 platform_driver_unregister(&imx21_hcd_driver);
1781}
1782
1783module_init(imx21_hcd_init);
1784module_exit(imx21_hcd_cleanup);
1785
1786MODULE_DESCRIPTION("i.MX21 USB Host controller");
1787MODULE_AUTHOR("Martin Fuzzey");
1788MODULE_LICENSE("GPL");
1789MODULE_ALIAS("platform:imx21-hcd");
diff --git a/drivers/usb/host/imx21-hcd.h b/drivers/usb/host/imx21-hcd.h
new file mode 100644
index 000000000000..1b0d913780a5
--- /dev/null
+++ b/drivers/usb/host/imx21-hcd.h
@@ -0,0 +1,436 @@
1/*
2 * Macros and prototypes for i.MX21
3 *
4 * Copyright (C) 2006 Loping Dog Embedded Systems
5 * Copyright (C) 2009 Martin Fuzzey
6 * Originally written by Jay Monkman <jtm@lopingdog.com>
7 * Ported to 2.6.30, debugged and enhanced by Martin Fuzzey
8 *
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License as published by the
11 * Free Software Foundation; either version 2 of the License, or (at your
12 * option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
16 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 * for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software Foundation,
21 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
22 */
23
24#ifndef __LINUX_IMX21_HCD_H__
25#define __LINUX_IMX21_HCD_H__
26
27#include <mach/mx21-usbhost.h>
28
29#define NUM_ISO_ETDS 2
30#define USB_NUM_ETD 32
31#define DMEM_SIZE 4096
32
33/* Register definitions */
34#define USBOTG_HWMODE 0x00
35#define USBOTG_HWMODE_ANASDBEN (1 << 14)
36#define USBOTG_HWMODE_OTGXCVR_SHIFT 6
37#define USBOTG_HWMODE_OTGXCVR_MASK (3 << 6)
38#define USBOTG_HWMODE_OTGXCVR_TD_RD (0 << 6)
39#define USBOTG_HWMODE_OTGXCVR_TS_RD (2 << 6)
40#define USBOTG_HWMODE_OTGXCVR_TD_RS (1 << 6)
41#define USBOTG_HWMODE_OTGXCVR_TS_RS (3 << 6)
42#define USBOTG_HWMODE_HOSTXCVR_SHIFT 4
43#define USBOTG_HWMODE_HOSTXCVR_MASK (3 << 4)
44#define USBOTG_HWMODE_HOSTXCVR_TD_RD (0 << 4)
45#define USBOTG_HWMODE_HOSTXCVR_TS_RD (2 << 4)
46#define USBOTG_HWMODE_HOSTXCVR_TD_RS (1 << 4)
47#define USBOTG_HWMODE_HOSTXCVR_TS_RS (3 << 4)
48#define USBOTG_HWMODE_CRECFG_MASK (3 << 0)
49#define USBOTG_HWMODE_CRECFG_HOST (1 << 0)
50#define USBOTG_HWMODE_CRECFG_FUNC (2 << 0)
51#define USBOTG_HWMODE_CRECFG_HNP (3 << 0)
52
53#define USBOTG_CINT_STAT 0x04
54#define USBOTG_CINT_STEN 0x08
55#define USBOTG_ASHNPINT (1 << 5)
56#define USBOTG_ASFCINT (1 << 4)
57#define USBOTG_ASHCINT (1 << 3)
58#define USBOTG_SHNPINT (1 << 2)
59#define USBOTG_FCINT (1 << 1)
60#define USBOTG_HCINT (1 << 0)
61
62#define USBOTG_CLK_CTRL 0x0c
63#define USBOTG_CLK_CTRL_FUNC (1 << 2)
64#define USBOTG_CLK_CTRL_HST (1 << 1)
65#define USBOTG_CLK_CTRL_MAIN (1 << 0)
66
67#define USBOTG_RST_CTRL 0x10
68#define USBOTG_RST_RSTI2C (1 << 15)
69#define USBOTG_RST_RSTCTRL (1 << 5)
70#define USBOTG_RST_RSTFC (1 << 4)
71#define USBOTG_RST_RSTFSKE (1 << 3)
72#define USBOTG_RST_RSTRH (1 << 2)
73#define USBOTG_RST_RSTHSIE (1 << 1)
74#define USBOTG_RST_RSTHC (1 << 0)
75
76#define USBOTG_FRM_INTVL 0x14
77#define USBOTG_FRM_REMAIN 0x18
78#define USBOTG_HNP_CSR 0x1c
79#define USBOTG_HNP_ISR 0x2c
80#define USBOTG_HNP_IEN 0x30
81
82#define USBOTG_I2C_TXCVR_REG(x) (0x100 + (x))
83#define USBOTG_I2C_XCVR_DEVAD 0x118
84#define USBOTG_I2C_SEQ_OP_REG 0x119
85#define USBOTG_I2C_SEQ_RD_STARTAD 0x11a
86#define USBOTG_I2C_OP_CTRL_REG 0x11b
87#define USBOTG_I2C_SCLK_TO_SCK_HPER 0x11e
88#define USBOTG_I2C_MASTER_INT_REG 0x11f
89
90#define USBH_HOST_CTRL 0x80
91#define USBH_HOST_CTRL_HCRESET (1 << 31)
92#define USBH_HOST_CTRL_SCHDOVR(x) ((x) << 16)
93#define USBH_HOST_CTRL_RMTWUEN (1 << 4)
94#define USBH_HOST_CTRL_HCUSBSTE_RESET (0 << 2)
95#define USBH_HOST_CTRL_HCUSBSTE_RESUME (1 << 2)
96#define USBH_HOST_CTRL_HCUSBSTE_OPERATIONAL (2 << 2)
97#define USBH_HOST_CTRL_HCUSBSTE_SUSPEND (3 << 2)
98#define USBH_HOST_CTRL_CTLBLKSR_1 (0 << 0)
99#define USBH_HOST_CTRL_CTLBLKSR_2 (1 << 0)
100#define USBH_HOST_CTRL_CTLBLKSR_3 (2 << 0)
101#define USBH_HOST_CTRL_CTLBLKSR_4 (3 << 0)
102
103#define USBH_SYSISR 0x88
104#define USBH_SYSISR_PSCINT (1 << 6)
105#define USBH_SYSISR_FMOFINT (1 << 5)
106#define USBH_SYSISR_HERRINT (1 << 4)
107#define USBH_SYSISR_RESDETINT (1 << 3)
108#define USBH_SYSISR_SOFINT (1 << 2)
109#define USBH_SYSISR_DONEINT (1 << 1)
110#define USBH_SYSISR_SORINT (1 << 0)
111
112#define USBH_SYSIEN 0x8c
113#define USBH_SYSIEN_PSCINT (1 << 6)
114#define USBH_SYSIEN_FMOFINT (1 << 5)
115#define USBH_SYSIEN_HERRINT (1 << 4)
116#define USBH_SYSIEN_RESDETINT (1 << 3)
117#define USBH_SYSIEN_SOFINT (1 << 2)
118#define USBH_SYSIEN_DONEINT (1 << 1)
119#define USBH_SYSIEN_SORINT (1 << 0)
120
121#define USBH_XBUFSTAT 0x98
122#define USBH_YBUFSTAT 0x9c
123#define USBH_XYINTEN 0xa0
124#define USBH_XFILLSTAT 0xa8
125#define USBH_YFILLSTAT 0xac
126#define USBH_ETDENSET 0xc0
127#define USBH_ETDENCLR 0xc4
128#define USBH_IMMEDINT 0xcc
129#define USBH_ETDDONESTAT 0xd0
130#define USBH_ETDDONEEN 0xd4
131#define USBH_FRMNUB 0xe0
132#define USBH_LSTHRESH 0xe4
133
134#define USBH_ROOTHUBA 0xe8
135#define USBH_ROOTHUBA_PWRTOGOOD_MASK (0xff)
136#define USBH_ROOTHUBA_PWRTOGOOD_SHIFT (24)
137#define USBH_ROOTHUBA_NOOVRCURP (1 << 12)
138#define USBH_ROOTHUBA_OVRCURPM (1 << 11)
139#define USBH_ROOTHUBA_DEVTYPE (1 << 10)
140#define USBH_ROOTHUBA_PWRSWTMD (1 << 9)
141#define USBH_ROOTHUBA_NOPWRSWT (1 << 8)
142#define USBH_ROOTHUBA_NDNSTMPRT_MASK (0xff)
143
144#define USBH_ROOTHUBB 0xec
145#define USBH_ROOTHUBB_PRTPWRCM(x) (1 << ((x) + 16))
146#define USBH_ROOTHUBB_DEVREMOVE(x) (1 << (x))
147
148#define USBH_ROOTSTAT 0xf0
149#define USBH_ROOTSTAT_CLRRMTWUE (1 << 31)
150#define USBH_ROOTSTAT_OVRCURCHG (1 << 17)
151#define USBH_ROOTSTAT_DEVCONWUE (1 << 15)
152#define USBH_ROOTSTAT_OVRCURI (1 << 1)
153#define USBH_ROOTSTAT_LOCPWRS (1 << 0)
154
155#define USBH_PORTSTAT(x) (0xf4 + ((x) * 4))
156#define USBH_PORTSTAT_PRTRSTSC (1 << 20)
157#define USBH_PORTSTAT_OVRCURIC (1 << 19)
158#define USBH_PORTSTAT_PRTSTATSC (1 << 18)
159#define USBH_PORTSTAT_PRTENBLSC (1 << 17)
160#define USBH_PORTSTAT_CONNECTSC (1 << 16)
161#define USBH_PORTSTAT_LSDEVCON (1 << 9)
162#define USBH_PORTSTAT_PRTPWRST (1 << 8)
163#define USBH_PORTSTAT_PRTRSTST (1 << 4)
164#define USBH_PORTSTAT_PRTOVRCURI (1 << 3)
165#define USBH_PORTSTAT_PRTSUSPST (1 << 2)
166#define USBH_PORTSTAT_PRTENABST (1 << 1)
167#define USBH_PORTSTAT_CURCONST (1 << 0)
168
169#define USB_DMAREV 0x800
170#define USB_DMAINTSTAT 0x804
171#define USB_DMAINTSTAT_EPERR (1 << 1)
172#define USB_DMAINTSTAT_ETDERR (1 << 0)
173
174#define USB_DMAINTEN 0x808
175#define USB_DMAINTEN_EPERRINTEN (1 << 1)
176#define USB_DMAINTEN_ETDERRINTEN (1 << 0)
177
178#define USB_ETDDMAERSTAT 0x80c
179#define USB_EPDMAERSTAT 0x810
180#define USB_ETDDMAEN 0x820
181#define USB_EPDMAEN 0x824
182#define USB_ETDDMAXTEN 0x828
183#define USB_EPDMAXTEN 0x82c
184#define USB_ETDDMAENXYT 0x830
185#define USB_EPDMAENXYT 0x834
186#define USB_ETDDMABST4EN 0x838
187#define USB_EPDMABST4EN 0x83c
188
189#define USB_MISCCONTROL 0x840
190#define USB_MISCCONTROL_ISOPREVFRM (1 << 3)
191#define USB_MISCCONTROL_SKPRTRY (1 << 2)
192#define USB_MISCCONTROL_ARBMODE (1 << 1)
193#define USB_MISCCONTROL_FILTCC (1 << 0)
194
195#define USB_ETDDMACHANLCLR 0x848
196#define USB_EPDMACHANLCLR 0x84c
197#define USB_ETDSMSA(x) (0x900 + ((x) * 4))
198#define USB_EPSMSA(x) (0x980 + ((x) * 4))
199#define USB_ETDDMABUFPTR(x) (0xa00 + ((x) * 4))
200#define USB_EPDMABUFPTR(x) (0xa80 + ((x) * 4))
201
202#define USB_ETD_DWORD(x, w) (0x200 + ((x) * 16) + ((w) * 4))
203#define DW0_ADDRESS 0
204#define DW0_ENDPNT 7
205#define DW0_DIRECT 11
206#define DW0_SPEED 13
207#define DW0_FORMAT 14
208#define DW0_MAXPKTSIZ 16
209#define DW0_HALTED 27
210#define DW0_TOGCRY 28
211#define DW0_SNDNAK 30
212
213#define DW1_XBUFSRTAD 0
214#define DW1_YBUFSRTAD 16
215
216#define DW2_RTRYDELAY 0
217#define DW2_POLINTERV 0
218#define DW2_STARTFRM 0
219#define DW2_RELPOLPOS 8
220#define DW2_DIRPID 16
221#define DW2_BUFROUND 18
222#define DW2_DELAYINT 19
223#define DW2_DATATOG 22
224#define DW2_ERRORCNT 24
225#define DW2_COMPCODE 28
226
227#define DW3_TOTBYECNT 0
228#define DW3_PKTLEN0 0
229#define DW3_COMPCODE0 12
230#define DW3_PKTLEN1 16
231#define DW3_BUFSIZE 21
232#define DW3_COMPCODE1 28
233
234#define USBCTRL 0x600
235#define USBCTRL_I2C_WU_INT_STAT (1 << 27)
236#define USBCTRL_OTG_WU_INT_STAT (1 << 26)
237#define USBCTRL_HOST_WU_INT_STAT (1 << 25)
238#define USBCTRL_FNT_WU_INT_STAT (1 << 24)
239#define USBCTRL_I2C_WU_INT_EN (1 << 19)
240#define USBCTRL_OTG_WU_INT_EN (1 << 18)
241#define USBCTRL_HOST_WU_INT_EN (1 << 17)
242#define USBCTRL_FNT_WU_INT_EN (1 << 16)
243#define USBCTRL_OTC_RCV_RXDP (1 << 13)
244#define USBCTRL_HOST1_BYP_TLL (1 << 12)
245#define USBCTRL_OTG_BYP_VAL(x) ((x) << 10)
246#define USBCTRL_HOST1_BYP_VAL(x) ((x) << 8)
247#define USBCTRL_OTG_PWR_MASK (1 << 6)
248#define USBCTRL_HOST1_PWR_MASK (1 << 5)
249#define USBCTRL_HOST2_PWR_MASK (1 << 4)
250#define USBCTRL_USB_BYP (1 << 2)
251#define USBCTRL_HOST1_TXEN_OE (1 << 1)
252
253
254/* Values in TD blocks */
255#define TD_DIR_SETUP 0
256#define TD_DIR_OUT 1
257#define TD_DIR_IN 2
258#define TD_FORMAT_CONTROL 0
259#define TD_FORMAT_ISO 1
260#define TD_FORMAT_BULK 2
261#define TD_FORMAT_INT 3
262#define TD_TOGGLE_CARRY 0
263#define TD_TOGGLE_DATA0 2
264#define TD_TOGGLE_DATA1 3
265
266/* control transfer states */
267#define US_CTRL_SETUP 2
268#define US_CTRL_DATA 1
269#define US_CTRL_ACK 0
270
271/* bulk transfer main state and 0-length packet */
272#define US_BULK 1
273#define US_BULK0 0
274
275/* ETD format description */
276#define IMX_FMT_CTRL 0x0
277#define IMX_FMT_ISO 0x1
278#define IMX_FMT_BULK 0x2
279#define IMX_FMT_INT 0x3
280
281static char fmt_urb_to_etd[4] = {
282/*PIPE_ISOCHRONOUS*/ IMX_FMT_ISO,
283/*PIPE_INTERRUPT*/ IMX_FMT_INT,
284/*PIPE_CONTROL*/ IMX_FMT_CTRL,
285/*PIPE_BULK*/ IMX_FMT_BULK
286};
287
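/*
 * Editor's note -- fmt_urb_to_etd[] above is indexed by usb_pipetype(),
 * whose values are PIPE_ISOCHRONOUS = 0, PIPE_INTERRUPT = 1,
 * PIPE_CONTROL = 2 and PIPE_BULK = 3, e.g.:
 */
#if 0	/* illustrative sketch only */
	u8 format = fmt_urb_to_etd[usb_pipetype(urb->pipe)];
	/* a bulk URB yields IMX_FMT_BULK (0x2) for ETD DWORD0's FORMAT field */
#endif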
288/* condition (error) CC codes and mapping (OHCI like) */
289
290#define TD_CC_NOERROR 0x00
291#define TD_CC_CRC 0x01
292#define TD_CC_BITSTUFFING 0x02
293#define TD_CC_DATATOGGLEM 0x03
294#define TD_CC_STALL 0x04
295#define TD_DEVNOTRESP 0x05
296#define TD_PIDCHECKFAIL 0x06
297/*#define TD_UNEXPECTEDPID 0x07 - reserved, not active on MX2*/
298#define TD_DATAOVERRUN 0x08
299#define TD_DATAUNDERRUN 0x09
300#define TD_BUFFEROVERRUN 0x0C
301#define TD_BUFFERUNDERRUN 0x0D
302#define TD_SCHEDULEOVERRUN 0x0E
303#define TD_NOTACCESSED 0x0F
304
305static const int cc_to_error[16] = {
306 /* No Error */ 0,
307 /* CRC Error */ -EILSEQ,
308 /* Bit Stuff */ -EPROTO,
309 /* Data Togg */ -EILSEQ,
310 /* Stall */ -EPIPE,
311 /* DevNotResp */ -ETIMEDOUT,
312 /* PIDCheck */ -EPROTO,
313 /* UnExpPID */ -EPROTO,
314 /* DataOver */ -EOVERFLOW,
315 /* DataUnder */ -EREMOTEIO,
316 /* (for hw) */ -EIO,
317 /* (for hw) */ -EIO,
318 /* BufferOver */ -ECOMM,
319 /* BuffUnder */ -ENOSR,
320 /* (for HCD) */ -ENOSPC,
321 /* (for HCD) */ -EALREADY
322};
323
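/*
 * Editor's note -- example use of cc_to_error[] above (illustrative): a
 * completion code read from an ETD, say cc = TD_CC_STALL (0x04), maps to
 * cc_to_error[cc] = -EPIPE, which the HCD passes to usb_hcd_giveback_urb()
 * as the URB status.
 */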
324/* HCD data associated with a usb core URB */
325struct urb_priv {
326 struct urb *urb;
327 struct usb_host_endpoint *ep;
328 int active;
329 int state;
330 struct td *isoc_td;
331 int isoc_remaining;
332 int isoc_status;
333};
334
335/* HCD data associated with a usb core endpoint */
336struct ep_priv {
337 struct usb_host_endpoint *ep;
338 struct list_head td_list;
339 struct list_head queue;
340 int etd[NUM_ISO_ETDS];
341 int waiting_etd;
342};
343
344/* isoc packet */
345struct td {
346 struct list_head list;
347 struct urb *urb;
348 struct usb_host_endpoint *ep;
349 dma_addr_t data;
350 unsigned long buf_addr;
351 int len;
352 int frame;
353 int isoc_index;
354};
355
356/* HCD data associated with a hardware ETD */
357struct etd_priv {
358 struct usb_host_endpoint *ep;
359 struct urb *urb;
360 struct td *td;
361 struct list_head queue;
362 dma_addr_t dma_handle;
363 int alloc;
364 int len;
365 int dmem_size;
366 int dmem_offset;
367 int active_count;
368#ifdef DEBUG
369 int activated_frame;
370 int disactivated_frame;
371 int last_int_frame;
372 int last_req_frame;
373 u32 submitted_dwords[4];
374#endif
375};
376
377/* Hardware data memory info */
378struct imx21_dmem_area {
379 struct usb_host_endpoint *ep;
380 unsigned int offset;
381 unsigned int size;
382 struct list_head list;
383};
384
385#ifdef DEBUG
386struct debug_usage_stats {
387 unsigned int value;
388 unsigned int maximum;
389};
390
391struct debug_stats {
392 unsigned long submitted;
393 unsigned long completed_ok;
394 unsigned long completed_failed;
395 unsigned long unlinked;
396 unsigned long queue_etd;
397 unsigned long queue_dmem;
398};
399
400struct debug_isoc_trace {
401 int schedule_frame;
402 int submit_frame;
403 int request_len;
404 int done_frame;
405 int done_len;
406 int cc;
407 struct td *td;
408};
409#endif
410
411/* HCD data structure */
412struct imx21 {
413 spinlock_t lock;
414 struct device *dev;
415 struct mx21_usbh_platform_data *pdata;
416 struct list_head dmem_list;
417 struct list_head queue_for_etd; /* eps queued due to etd shortage */
418 struct list_head queue_for_dmem; /* etds queued due to dmem shortage */
419 struct etd_priv etd[USB_NUM_ETD];
420 struct clk *clk;
421 void __iomem *regs;
422#ifdef DEBUG
423 struct dentry *debug_root;
424 struct debug_stats nonisoc_stats;
425 struct debug_stats isoc_stats;
426 struct debug_usage_stats etd_usage;
427 struct debug_usage_stats dmem_usage;
428 struct debug_isoc_trace isoc_trace[20];
429 struct debug_isoc_trace isoc_trace_failed[20];
430 unsigned long debug_unblocks;
431 int isoc_trace_index;
432 int isoc_trace_index_failed;
433#endif
434};
435
436#endif
diff --git a/drivers/usb/host/isp1362-hcd.c b/drivers/usb/host/isp1362-hcd.c
index 42971657fde2..217fb5170200 100644
--- a/drivers/usb/host/isp1362-hcd.c
+++ b/drivers/usb/host/isp1362-hcd.c
@@ -1257,7 +1257,7 @@ static int isp1362_urb_enqueue(struct usb_hcd *hcd,
1257 1257
1258 /* avoid all allocations within spinlocks: request or endpoint */ 1258 /* avoid all allocations within spinlocks: request or endpoint */
1259 if (!hep->hcpriv) { 1259 if (!hep->hcpriv) {
1260 ep = kcalloc(1, sizeof *ep, mem_flags); 1260 ep = kzalloc(sizeof *ep, mem_flags);
1261 if (!ep) 1261 if (!ep)
1262 return -ENOMEM; 1262 return -ENOMEM;
1263 } 1263 }
@@ -2719,24 +2719,11 @@ static int __init isp1362_probe(struct platform_device *pdev)
2719 } 2719 }
2720 irq = irq_res->start; 2720 irq = irq_res->start;
2721 2721
2722#ifdef CONFIG_USB_HCD_DMA
2723 if (pdev->dev.dma_mask) {
2724 struct resource *dma_res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
2725
2726 if (!dma_res) {
2727 retval = -ENODEV;
2728 goto err1;
2729 }
2730 isp1362_hcd->data_dma = dma_res->start;
2731 isp1362_hcd->max_dma_size = resource_len(dma_res);
2732 }
2733#else
2734 if (pdev->dev.dma_mask) { 2722 if (pdev->dev.dma_mask) {
2735 DBG(1, "won't do DMA"); 2723 DBG(1, "won't do DMA");
2736 retval = -ENODEV; 2724 retval = -ENODEV;
2737 goto err1; 2725 goto err1;
2738 } 2726 }
2739#endif
2740 2727
2741 if (!request_mem_region(addr->start, resource_len(addr), hcd_name)) { 2728 if (!request_mem_region(addr->start, resource_len(addr), hcd_name)) {
2742 retval = -EBUSY; 2729 retval = -EBUSY;
diff --git a/drivers/usb/host/isp1760-hcd.c b/drivers/usb/host/isp1760-hcd.c
index 27b8f7cb4471..9f01293600b0 100644
--- a/drivers/usb/host/isp1760-hcd.c
+++ b/drivers/usb/host/isp1760-hcd.c
@@ -17,7 +17,9 @@
17#include <linux/debugfs.h> 17#include <linux/debugfs.h>
18#include <linux/uaccess.h> 18#include <linux/uaccess.h>
19#include <linux/io.h> 19#include <linux/io.h>
20#include <linux/mm.h>
20#include <asm/unaligned.h> 21#include <asm/unaligned.h>
22#include <asm/cacheflush.h>
21 23
22#include "../core/hcd.h" 24#include "../core/hcd.h"
23#include "isp1760-hcd.h" 25#include "isp1760-hcd.h"
@@ -904,6 +906,14 @@ __acquires(priv->lock)
904 status = 0; 906 status = 0;
905 } 907 }
906 908
909 if (usb_pipein(urb->pipe) && usb_pipetype(urb->pipe) != PIPE_CONTROL) {
910 void *ptr;
911 for (ptr = urb->transfer_buffer;
912 ptr < urb->transfer_buffer + urb->transfer_buffer_length;
913 ptr += PAGE_SIZE)
914 flush_dcache_page(virt_to_page(ptr));
915 }
916
907 /* complete() can reenter this HCD */ 917 /* complete() can reenter this HCD */
908 usb_hcd_unlink_urb_from_ep(priv_to_hcd(priv), urb); 918 usb_hcd_unlink_urb_from_ep(priv_to_hcd(priv), urb);
909 spin_unlock(&priv->lock); 919 spin_unlock(&priv->lock);
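/*
 * Editor's note on the isp1760 hunk above -- this controller is PIO-based,
 * so the CPU copies received data into the transfer buffer through the
 * kernel mapping.  Calling flush_dcache_page() on every page of an IN
 * transfer keeps aliasing (e.g. VIVT) data caches coherent with any user
 * mapping of the same buffer before the URB is given back.
 */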
diff --git a/drivers/usb/host/isp1760-if.c b/drivers/usb/host/isp1760-if.c
index 1c9f977a5c9c..4293cfd28d61 100644
--- a/drivers/usb/host/isp1760-if.c
+++ b/drivers/usb/host/isp1760-if.c
@@ -109,7 +109,7 @@ static int of_isp1760_remove(struct of_device *dev)
109 return 0; 109 return 0;
110} 110}
111 111
112static struct of_device_id of_isp1760_match[] = { 112static const struct of_device_id of_isp1760_match[] = {
113 { 113 {
114 .compatible = "nxp,usb-isp1760", 114 .compatible = "nxp,usb-isp1760",
115 }, 115 },
diff --git a/drivers/usb/host/ohci-da8xx.c b/drivers/usb/host/ohci-da8xx.c
new file mode 100644
index 000000000000..4aa08d36d077
--- /dev/null
+++ b/drivers/usb/host/ohci-da8xx.c
@@ -0,0 +1,456 @@
1/*
2 * OHCI HCD (Host Controller Driver) for USB.
3 *
4 * TI DA8xx (OMAP-L1x) Bus Glue
5 *
6 * Derived from: ohci-omap.c and ohci-s3c2410.c
7 * Copyright (C) 2008-2009 MontaVista Software, Inc. <source@mvista.com>
8 *
9 * This file is licensed under the terms of the GNU General Public License
10 * version 2. This program is licensed "as is" without any warranty of any
11 * kind, whether express or implied.
12 */
13
14#include <linux/interrupt.h>
15#include <linux/jiffies.h>
16#include <linux/platform_device.h>
17#include <linux/clk.h>
18
19#include <mach/da8xx.h>
20#include <mach/usb.h>
21
22#ifndef CONFIG_ARCH_DAVINCI_DA8XX
23#error "This file is DA8xx bus glue. Define CONFIG_ARCH_DAVINCI_DA8XX."
24#endif
25
26#define CFGCHIP2 DA8XX_SYSCFG_VIRT(DA8XX_CFGCHIP2_REG)
27
28static struct clk *usb11_clk;
29static struct clk *usb20_clk;
30
31/* Over-current indicator change bitmask */
32static volatile u16 ocic_mask;
33
34static void ohci_da8xx_clock(int on)
35{
36 u32 cfgchip2;
37
38 cfgchip2 = __raw_readl(CFGCHIP2);
39 if (on) {
40 clk_enable(usb11_clk);
41
42 /*
43 * If USB 1.1 reference clock is sourced from USB 2.0 PHY, we
44 * need to enable the USB 2.0 module clocking, start its PHY,
45 * and not allow it to stop the clock during USB 2.0 suspend.
46 */
47 if (!(cfgchip2 & CFGCHIP2_USB1PHYCLKMUX)) {
48 clk_enable(usb20_clk);
49
50 cfgchip2 &= ~(CFGCHIP2_RESET | CFGCHIP2_PHYPWRDN);
51 cfgchip2 |= CFGCHIP2_PHY_PLLON;
52 __raw_writel(cfgchip2, CFGCHIP2);
53
54 pr_info("Waiting for USB PHY clock good...\n");
55 while (!(__raw_readl(CFGCHIP2) & CFGCHIP2_PHYCLKGD))
56 cpu_relax();
57 }
58
59 /* Enable USB 1.1 PHY */
60 cfgchip2 |= CFGCHIP2_USB1SUSPENDM;
61 } else {
62 clk_disable(usb11_clk);
63 if (!(cfgchip2 & CFGCHIP2_USB1PHYCLKMUX))
64 clk_disable(usb20_clk);
65
66 /* Disable USB 1.1 PHY */
67 cfgchip2 &= ~CFGCHIP2_USB1SUSPENDM;
68 }
69 __raw_writel(cfgchip2, CFGCHIP2);
70}
71
72/*
73 * Handle the port over-current indicator change.
74 */
75static void ohci_da8xx_ocic_handler(struct da8xx_ohci_root_hub *hub,
76 unsigned port)
77{
78 ocic_mask |= 1 << port;
79
80 /* Once over-current is detected, the port needs to be powered down */
81 if (hub->get_oci(port) > 0)
82 hub->set_power(port, 0);
83}
84
85static int ohci_da8xx_init(struct usb_hcd *hcd)
86{
87 struct device *dev = hcd->self.controller;
88 struct da8xx_ohci_root_hub *hub = dev->platform_data;
89 struct ohci_hcd *ohci = hcd_to_ohci(hcd);
90 int result;
91 u32 rh_a;
92
93 dev_dbg(dev, "starting USB controller\n");
94
95 ohci_da8xx_clock(1);
96
97 /*
98 * DA8xx only has one port connected to the pins, but the HC root hub
99 * register A reports 2 ports, so we have to override it...
100 */
101 ohci->num_ports = 1;
102
103 result = ohci_init(ohci);
104 if (result < 0)
105 return result;
106
107 /*
108 * Since we're providing a board-specific root hub port power control
109 * and over-current reporting, we have to override the HC root hub A
110 * register's default value, so that ohci_hub_control() could return
111 * the correct hub descriptor...
112 */
113 rh_a = ohci_readl(ohci, &ohci->regs->roothub.a);
114 if (hub->set_power) {
115 rh_a &= ~RH_A_NPS;
116 rh_a |= RH_A_PSM;
117 }
118 if (hub->get_oci) {
119 rh_a &= ~RH_A_NOCP;
120 rh_a |= RH_A_OCPM;
121 }
122 rh_a &= ~RH_A_POTPGT;
123 rh_a |= hub->potpgt << 24;
124 ohci_writel(ohci, rh_a, &ohci->regs->roothub.a);
125
126 return result;
127}
128
129static void ohci_da8xx_stop(struct usb_hcd *hcd)
130{
131 ohci_stop(hcd);
132 ohci_da8xx_clock(0);
133}
134
135static int ohci_da8xx_start(struct usb_hcd *hcd)
136{
137 struct ohci_hcd *ohci = hcd_to_ohci(hcd);
138 int result;
139
140 result = ohci_run(ohci);
141 if (result < 0)
142 ohci_da8xx_stop(hcd);
143
144 return result;
145}
146
147/*
148 * Update the status data from the hub with the over-current indicator change.
149 */
150static int ohci_da8xx_hub_status_data(struct usb_hcd *hcd, char *buf)
151{
152 int length = ohci_hub_status_data(hcd, buf);
153
154 /* See if we have OCIC bit set on port 1 */
155 if (ocic_mask & (1 << 1)) {
156 dev_dbg(hcd->self.controller, "over-current indicator change "
157 "on port 1\n");
158
159 if (!length)
160 length = 1;
161
162 buf[0] |= 1 << 1;
163 }
164 return length;
165}
166
167/*
168 * Look at the control requests to the root hub and see if we need to override.
169 */
170static int ohci_da8xx_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
171 u16 wIndex, char *buf, u16 wLength)
172{
173 struct device *dev = hcd->self.controller;
174 struct da8xx_ohci_root_hub *hub = dev->platform_data;
175 int temp;
176
177 switch (typeReq) {
178 case GetPortStatus:
179 /* Check the port number */
180 if (wIndex != 1)
181 break;
182
183 dev_dbg(dev, "GetPortStatus(%u)\n", wIndex);
184
185 temp = roothub_portstatus(hcd_to_ohci(hcd), wIndex - 1);
186
187 /* The port power status (PPS) bit defaults to 1 */
188 if (hub->get_power && hub->get_power(wIndex) == 0)
189 temp &= ~RH_PS_PPS;
190
191 /* The port over-current indicator (POCI) bit is always 0 */
192 if (hub->get_oci && hub->get_oci(wIndex) > 0)
193 temp |= RH_PS_POCI;
194
195 /* The over-current indicator change (OCIC) bit is 0 too */
196 if (ocic_mask & (1 << wIndex))
197 temp |= RH_PS_OCIC;
198
199 put_unaligned(cpu_to_le32(temp), (__le32 *)buf);
200 return 0;
201 case SetPortFeature:
202 temp = 1;
203 goto check_port;
204 case ClearPortFeature:
205 temp = 0;
206
207check_port:
208 /* Check the port number */
209 if (wIndex != 1)
210 break;
211
212 switch (wValue) {
213 case USB_PORT_FEAT_POWER:
214 dev_dbg(dev, "%sPortFeature(%u): %s\n",
215 temp ? "Set" : "Clear", wIndex, "POWER");
216
217 if (!hub->set_power)
218 return -EPIPE;
219
220 return hub->set_power(wIndex, temp) ? -EPIPE : 0;
221 case USB_PORT_FEAT_C_OVER_CURRENT:
222 dev_dbg(dev, "%sPortFeature(%u): %s\n",
223 temp ? "Set" : "Clear", wIndex,
224 "C_OVER_CURRENT");
225
226 if (temp)
227 ocic_mask |= 1 << wIndex;
228 else
229 ocic_mask &= ~(1 << wIndex);
230 return 0;
231 }
232 }
233
234 return ohci_hub_control(hcd, typeReq, wValue, wIndex, buf, wLength);
235}
236
237static const struct hc_driver ohci_da8xx_hc_driver = {
238 .description = hcd_name,
239 .product_desc = "DA8xx OHCI",
240 .hcd_priv_size = sizeof(struct ohci_hcd),
241
242 /*
243 * generic hardware linkage
244 */
245 .irq = ohci_irq,
246 .flags = HCD_USB11 | HCD_MEMORY,
247
248 /*
249 * basic lifecycle operations
250 */
251 .reset = ohci_da8xx_init,
252 .start = ohci_da8xx_start,
253 .stop = ohci_da8xx_stop,
254 .shutdown = ohci_shutdown,
255
256 /*
257 * managing i/o requests and associated device resources
258 */
259 .urb_enqueue = ohci_urb_enqueue,
260 .urb_dequeue = ohci_urb_dequeue,
261 .endpoint_disable = ohci_endpoint_disable,
262
263 /*
264 * scheduling support
265 */
266 .get_frame_number = ohci_get_frame,
267
268 /*
269 * root hub support
270 */
271 .hub_status_data = ohci_da8xx_hub_status_data,
272 .hub_control = ohci_da8xx_hub_control,
273
274#ifdef CONFIG_PM
275 .bus_suspend = ohci_bus_suspend,
276 .bus_resume = ohci_bus_resume,
277#endif
278 .start_port_reset = ohci_start_port_reset,
279};
280
281/*-------------------------------------------------------------------------*/
282
283
284/**
285 * usb_hcd_da8xx_probe - initialize DA8xx-based HCDs
286 * Context: !in_interrupt()
287 *
288 * Allocates basic resources for this USB host controller, and
289 * then invokes the start() method for the HCD associated with it
290 * through the hotplug entry's driver_data.
291 */
292static int usb_hcd_da8xx_probe(const struct hc_driver *driver,
293 struct platform_device *pdev)
294{
295 struct da8xx_ohci_root_hub *hub = pdev->dev.platform_data;
296 struct usb_hcd *hcd;
297 struct resource *mem;
298 int error, irq;
299
300 if (hub == NULL)
301 return -ENODEV;
302
303 usb11_clk = clk_get(&pdev->dev, "usb11");
304 if (IS_ERR(usb11_clk))
305 return PTR_ERR(usb11_clk);
306
307 usb20_clk = clk_get(&pdev->dev, "usb20");
308 if (IS_ERR(usb20_clk)) {
309 error = PTR_ERR(usb20_clk);
310 goto err0;
311 }
312
313 hcd = usb_create_hcd(driver, &pdev->dev, dev_name(&pdev->dev));
314 if (!hcd) {
315 error = -ENOMEM;
316 goto err1;
317 }
318
319 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
320 if (!mem) {
321 error = -ENODEV;
322 goto err2;
323 }
324 hcd->rsrc_start = mem->start;
325 hcd->rsrc_len = mem->end - mem->start + 1;
326
327 if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
328 dev_dbg(&pdev->dev, "request_mem_region failed\n");
329 error = -EBUSY;
330 goto err2;
331 }
332
333 hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
334 if (!hcd->regs) {
335 dev_err(&pdev->dev, "ioremap failed\n");
336 error = -ENOMEM;
337 goto err3;
338 }
339
340 ohci_hcd_init(hcd_to_ohci(hcd));
341
342 irq = platform_get_irq(pdev, 0);
343 if (irq < 0) {
344 error = -ENODEV;
345 goto err4;
346 }
347 error = usb_add_hcd(hcd, irq, IRQF_DISABLED);
348 if (error)
349 goto err4;
350
351 if (hub->ocic_notify) {
352 error = hub->ocic_notify(ohci_da8xx_ocic_handler);
353 if (!error)
354 return 0;
355 }
356
357 usb_remove_hcd(hcd);
358err4:
359 iounmap(hcd->regs);
360err3:
361 release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
362err2:
363 usb_put_hcd(hcd);
364err1:
365 clk_put(usb20_clk);
366err0:
367 clk_put(usb11_clk);
368 return error;
369}
370
371/**
372 * usb_hcd_da8xx_remove - shutdown processing for DA8xx-based HCDs
373 * @dev: USB Host Controller being removed
374 * Context: !in_interrupt()
375 *
376 * Reverses the effect of usb_hcd_da8xx_probe(), first invoking
377 * the HCD's stop() method. It is always called from a thread
378 * context, normally "rmmod", "apmd", or something similar.
379 */
380static inline void
381usb_hcd_da8xx_remove(struct usb_hcd *hcd, struct platform_device *pdev)
382{
383 struct da8xx_ohci_root_hub *hub = pdev->dev.platform_data;
384
385 hub->ocic_notify(NULL);
386 usb_remove_hcd(hcd);
387 iounmap(hcd->regs);
388 release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
389 usb_put_hcd(hcd);
390 clk_put(usb20_clk);
391 clk_put(usb11_clk);
392}
393
394static int ohci_hcd_da8xx_drv_probe(struct platform_device *dev)
395{
396 return usb_hcd_da8xx_probe(&ohci_da8xx_hc_driver, dev);
397}
398
399static int ohci_hcd_da8xx_drv_remove(struct platform_device *dev)
400{
401 struct usb_hcd *hcd = platform_get_drvdata(dev);
402
403 usb_hcd_da8xx_remove(hcd, dev);
404 platform_set_drvdata(dev, NULL);
405
406 return 0;
407}
408
409#ifdef CONFIG_PM
410static int ohci_da8xx_suspend(struct platform_device *dev, pm_message_t message)
411{
412 struct usb_hcd *hcd = platform_get_drvdata(dev);
413 struct ohci_hcd *ohci = hcd_to_ohci(hcd);
414
415 if (time_before(jiffies, ohci->next_statechange))
416 msleep(5);
417 ohci->next_statechange = jiffies;
418
419 ohci_da8xx_clock(0);
420 hcd->state = HC_STATE_SUSPENDED;
421 dev->dev.power.power_state = PMSG_SUSPEND;
422 return 0;
423}
424
425static int ohci_da8xx_resume(struct platform_device *dev)
426{
427 struct usb_hcd *hcd = platform_get_drvdata(dev);
428 struct ohci_hcd *ohci = hcd_to_ohci(hcd);
429
430 if (time_before(jiffies, ohci->next_statechange))
431 msleep(5);
432 ohci->next_statechange = jiffies;
433
434 ohci_da8xx_clock(1);
435 dev->dev.power.power_state = PMSG_ON;
436 usb_hcd_resume_root_hub(hcd);
437 return 0;
438}
439#endif
440
441/*
442 * Driver definition to register with platform structure.
443 */
444static struct platform_driver ohci_hcd_da8xx_driver = {
445 .probe = ohci_hcd_da8xx_drv_probe,
446 .remove = ohci_hcd_da8xx_drv_remove,
447 .shutdown = usb_hcd_platform_shutdown,
448#ifdef CONFIG_PM
449 .suspend = ohci_da8xx_suspend,
450 .resume = ohci_da8xx_resume,
451#endif
452 .driver = {
453 .owner = THIS_MODULE,
454 .name = "ohci",
455 },
456};
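For reference, a minimal board-file sketch (not part of this patch; the GPIO numbers and helper names are hypothetical) showing how the da8xx_ohci_root_hub platform data consumed above might be wired up:

static int board_usb_set_power(unsigned port, int on)
{
	gpio_set_value(GPIO_USB1_POWER, on);	/* hypothetical board GPIO */
	return 0;
}

static int board_usb_get_oci(unsigned port)
{
	return gpio_get_value(GPIO_USB1_OCI);	/* 1 = over-current seen */
}

static struct da8xx_ohci_root_hub board_usb11_pdata = {
	.set_power	= board_usb_set_power,
	.get_oci	= board_usb_get_oci,
	.potpgt		= (250 + 1) / 2,	/* 250 ms, POTPGT is in 2 ms units */
};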
diff --git a/drivers/usb/host/ohci-dbg.c b/drivers/usb/host/ohci-dbg.c
index 811f5dfdc582..8ad2441b0284 100644
--- a/drivers/usb/host/ohci-dbg.c
+++ b/drivers/usb/host/ohci-dbg.c
@@ -53,13 +53,13 @@ urb_print(struct urb * urb, char * str, int small, int status)
 	int i, len;

 	if (usb_pipecontrol (pipe)) {
-		printk (KERN_DEBUG __FILE__ ": setup(8):");
+		printk (KERN_DEBUG "%s: setup(8):", __FILE__);
 		for (i = 0; i < 8 ; i++)
 			printk (" %02x", ((__u8 *) urb->setup_packet) [i]);
 		printk ("\n");
 	}
 	if (urb->transfer_buffer_length > 0 && urb->transfer_buffer) {
-		printk (KERN_DEBUG __FILE__ ": data(%d/%d):",
+		printk (KERN_DEBUG "%s: data(%d/%d):", __FILE__,
 			urb->actual_length,
 			urb->transfer_buffer_length);
 		len = usb_pipeout (pipe)?
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
index 24eb74781919..afe59be23645 100644
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -1051,6 +1051,11 @@ MODULE_LICENSE ("GPL");
 #define PLATFORM_DRIVER		usb_hcd_pnx4008_driver
 #endif

+#ifdef CONFIG_ARCH_DAVINCI_DA8XX
+#include "ohci-da8xx.c"
+#define PLATFORM_DRIVER		ohci_hcd_da8xx_driver
+#endif
+
 #if defined(CONFIG_CPU_SUBTYPE_SH7720) || \
 	defined(CONFIG_CPU_SUBTYPE_SH7721) || \
 	defined(CONFIG_CPU_SUBTYPE_SH7763) || \
diff --git a/drivers/usb/host/ohci-lh7a404.c b/drivers/usb/host/ohci-lh7a404.c
index de42283149c7..18d39f0463ee 100644
--- a/drivers/usb/host/ohci-lh7a404.c
+++ b/drivers/usb/host/ohci-lh7a404.c
@@ -28,8 +28,8 @@ extern int usb_disabled(void);

 static void lh7a404_start_hc(struct platform_device *dev)
 {
-	printk(KERN_DEBUG __FILE__
-	       ": starting LH7A404 OHCI USB Controller\n");
+	printk(KERN_DEBUG "%s: starting LH7A404 OHCI USB Controller\n",
+	       __FILE__);

 	/*
 	 * Now, carefully enable the USB clock, and take
@@ -39,14 +39,13 @@ static void lh7a404_start_hc(struct platform_device *dev)
 	udelay(1000);
 	USBH_CMDSTATUS = OHCI_HCR;

-	printk(KERN_DEBUG __FILE__
-	       ": Clock to USB host has been enabled \n");
+	printk(KERN_DEBUG "%s: Clock to USB host has been enabled \n", __FILE__);
 }

 static void lh7a404_stop_hc(struct platform_device *dev)
 {
-	printk(KERN_DEBUG __FILE__
-	       ": stopping LH7A404 OHCI USB Controller\n");
+	printk(KERN_DEBUG "%s: stopping LH7A404 OHCI USB Controller\n",
+	       __FILE__);

 	CSC_PWRCNT &= ~CSC_PWRCNT_USBH_EN; /* Disable clock */
 }
diff --git a/drivers/usb/host/ohci-pnx4008.c b/drivers/usb/host/ohci-pnx4008.c
index 2769326da42e..cd74bbdd007c 100644
--- a/drivers/usb/host/ohci-pnx4008.c
+++ b/drivers/usb/host/ohci-pnx4008.c
@@ -327,7 +327,7 @@ static int __devinit usb_hcd_pnx4008_probe(struct platform_device *pdev)
 	}
 	i2c_adap = i2c_get_adapter(2);
 	memset(&i2c_info, 0, sizeof(struct i2c_board_info));
-	strlcpy(i2c_info.name, "isp1301_pnx", I2C_NAME_SIZE);
+	strlcpy(i2c_info.type, "isp1301_pnx", I2C_NAME_SIZE);
 	isp1301_i2c_client = i2c_new_probed_device(i2c_adap, &i2c_info,
 						   normal_i2c);
 	i2c_put_adapter(i2c_adap);
@@ -411,7 +411,7 @@ out3:
 out2:
 	clk_put(usb_clk);
 out1:
-	i2c_unregister_client(isp1301_i2c_client);
+	i2c_unregister_device(isp1301_i2c_client);
 	isp1301_i2c_client = NULL;
 out_i2c_driver:
 	i2c_del_driver(&isp1301_driver);
@@ -430,7 +430,7 @@ static int usb_hcd_pnx4008_remove(struct platform_device *pdev)
 	pnx4008_unset_usb_bits();
 	clk_disable(usb_clk);
 	clk_put(usb_clk);
-	i2c_unregister_client(isp1301_i2c_client);
+	i2c_unregister_device(isp1301_i2c_client);
 	isp1301_i2c_client = NULL;
 	i2c_del_driver(&isp1301_driver);

diff --git a/drivers/usb/host/ohci-ppc-of.c b/drivers/usb/host/ohci-ppc-of.c
index 68a301710297..103263c230cf 100644
--- a/drivers/usb/host/ohci-ppc-of.c
+++ b/drivers/usb/host/ohci-ppc-of.c
@@ -114,21 +114,21 @@ ohci_hcd_ppc_of_probe(struct of_device *op, const struct of_device_id *match)
 	hcd->rsrc_len = res.end - res.start + 1;

 	if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
-		printk(KERN_ERR __FILE__ ": request_mem_region failed\n");
+		printk(KERN_ERR "%s: request_mem_region failed\n", __FILE__);
 		rv = -EBUSY;
 		goto err_rmr;
 	}

 	irq = irq_of_parse_and_map(dn, 0);
 	if (irq == NO_IRQ) {
-		printk(KERN_ERR __FILE__ ": irq_of_parse_and_map failed\n");
+		printk(KERN_ERR "%s: irq_of_parse_and_map failed\n", __FILE__);
 		rv = -EBUSY;
 		goto err_irq;
 	}

 	hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
 	if (!hcd->regs) {
-		printk(KERN_ERR __FILE__ ": ioremap failed\n");
+		printk(KERN_ERR "%s: ioremap failed\n", __FILE__);
 		rv = -ENOMEM;
 		goto err_ioremap;
 	}
@@ -169,7 +169,7 @@ ohci_hcd_ppc_of_probe(struct of_device *op, const struct of_device_id *match)
 		} else
 			release_mem_region(res.start, 0x4);
 	} else
-		pr_debug(__FILE__ ": cannot get ehci offset from fdt\n");
+		pr_debug("%s: cannot get ehci offset from fdt\n", __FILE__);
 	}

 	iounmap(hcd->regs);
@@ -212,7 +212,7 @@ static int ohci_hcd_ppc_of_shutdown(struct of_device *op)
 }


-static struct of_device_id ohci_hcd_ppc_of_match[] = {
+static const struct of_device_id ohci_hcd_ppc_of_match[] = {
 #ifdef CONFIG_USB_OHCI_HCD_PPC_OF_BE
 	{
 		.name = "usb",
diff --git a/drivers/usb/host/ohci-ppc-soc.c b/drivers/usb/host/ohci-ppc-soc.c
index cd3398b675b2..89e670e38c10 100644
--- a/drivers/usb/host/ohci-ppc-soc.c
+++ b/drivers/usb/host/ohci-ppc-soc.c
@@ -41,14 +41,14 @@ static int usb_hcd_ppc_soc_probe(const struct hc_driver *driver,

 	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
 	if (!res) {
-		pr_debug(__FILE__ ": no irq\n");
+		pr_debug("%s: no irq\n", __FILE__);
 		return -ENODEV;
 	}
 	irq = res->start;

 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	if (!res) {
-		pr_debug(__FILE__ ": no reg addr\n");
+		pr_debug("%s: no reg addr\n", __FILE__);
 		return -ENODEV;
 	}

@@ -59,14 +59,14 @@ static int usb_hcd_ppc_soc_probe(const struct hc_driver *driver,
 	hcd->rsrc_len = res->end - res->start + 1;

 	if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
-		pr_debug(__FILE__ ": request_mem_region failed\n");
+		pr_debug("%s: request_mem_region failed\n", __FILE__);
 		retval = -EBUSY;
 		goto err1;
 	}

 	hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
 	if (!hcd->regs) {
-		pr_debug(__FILE__ ": ioremap failed\n");
+		pr_debug("%s: ioremap failed\n", __FILE__);
 		retval = -ENOMEM;
 		goto err2;
 	}
diff --git a/drivers/usb/host/ohci-sa1111.c b/drivers/usb/host/ohci-sa1111.c
index e4bbe8e188e4..d8eb3bdafabb 100644
--- a/drivers/usb/host/ohci-sa1111.c
+++ b/drivers/usb/host/ohci-sa1111.c
@@ -31,8 +31,8 @@ static void sa1111_start_hc(struct sa1111_dev *dev)
 {
 	unsigned int usb_rst = 0;

-	printk(KERN_DEBUG __FILE__
-	       ": starting SA-1111 OHCI USB Controller\n");
+	printk(KERN_DEBUG "%s: starting SA-1111 OHCI USB Controller\n",
+	       __FILE__);

 #ifdef CONFIG_SA1100_BADGE4
 	if (machine_is_badge4()) {
@@ -65,8 +65,8 @@ static void sa1111_start_hc(struct sa1111_dev *dev)
 static void sa1111_stop_hc(struct sa1111_dev *dev)
 {
 	unsigned int usb_rst;
-	printk(KERN_DEBUG __FILE__
-	       ": stopping SA-1111 OHCI USB Controller\n");
+	printk(KERN_DEBUG "%s: stopping SA-1111 OHCI USB Controller\n",
+	       __FILE__);

 	/*
 	 * Put the USB host controller into reset.
diff --git a/drivers/usb/host/sl811-hcd.c b/drivers/usb/host/sl811-hcd.c
index 5b22a4d1c9e4..e11cc3aa4b82 100644
--- a/drivers/usb/host/sl811-hcd.c
+++ b/drivers/usb/host/sl811-hcd.c
@@ -51,6 +51,7 @@
 #include <asm/irq.h>
 #include <asm/system.h>
 #include <asm/byteorder.h>
+#include <asm/unaligned.h>

 #include "../core/hcd.h"
 #include "sl811.h"
@@ -1272,12 +1273,12 @@ sl811h_hub_control(
 		sl811h_hub_descriptor(sl811, (struct usb_hub_descriptor *) buf);
 		break;
 	case GetHubStatus:
-		*(__le32 *) buf = cpu_to_le32(0);
+		put_unaligned_le32(0, buf);
 		break;
 	case GetPortStatus:
 		if (wIndex != 1)
 			goto error;
-		*(__le32 *) buf = cpu_to_le32(sl811->port1);
+		put_unaligned_le32(sl811->port1, buf);

 #ifndef	VERBOSE
 	if (*(u16*)(buf+2))	/* only if wPortChange is interesting */
diff --git a/drivers/usb/host/uhci-hcd.c b/drivers/usb/host/uhci-hcd.c
index 99cd00fd3514..09197067fe6b 100644
--- a/drivers/usb/host/uhci-hcd.c
+++ b/drivers/usb/host/uhci-hcd.c
@@ -735,6 +735,7 @@ static void uhci_stop(struct usb_hcd *hcd)
 		uhci_hc_died(uhci);
 	uhci_scan_schedule(uhci);
 	spin_unlock_irq(&uhci->lock);
+	synchronize_irq(hcd->irq);

 	del_timer_sync(&uhci->fsbr_timer);
 	release_uhci(uhci);
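The new synchronize_irq() call closes a teardown race: the interrupt handler may still be running on another CPU when uhci_stop() drops the lock, so the driver must wait it out before freeing state. The pattern, recapping the patched code above:

	spin_unlock_irq(&uhci->lock);
	synchronize_irq(hcd->irq);	/* wait for any in-flight handler */
	del_timer_sync(&uhci->fsbr_timer);
	release_uhci(uhci);		/* now safe to free driver state */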
diff --git a/drivers/usb/host/xhci-dbg.c b/drivers/usb/host/xhci-dbg.c
index 33128d52f212..105fa8b025bb 100644
--- a/drivers/usb/host/xhci-dbg.c
+++ b/drivers/usb/host/xhci-dbg.c
@@ -406,6 +406,25 @@ static void dbg_rsvd64(struct xhci_hcd *xhci, u64 *ctx, dma_addr_t dma)
 	}
 }

+char *xhci_get_slot_state(struct xhci_hcd *xhci,
+		struct xhci_container_ctx *ctx)
+{
+	struct xhci_slot_ctx *slot_ctx = xhci_get_slot_ctx(xhci, ctx);
+
+	switch (GET_SLOT_STATE(slot_ctx->dev_state)) {
+	case 0:
+		return "enabled/disabled";
+	case 1:
+		return "default";
+	case 2:
+		return "addressed";
+	case 3:
+		return "configured";
+	default:
+		return "reserved";
+	}
+}
+
 void xhci_dbg_slot_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx)
 {
 	/* Fields are 32 bits wide, DMA addresses are in bytes */
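xhci_get_slot_state() maps the slot-context state field to a human-readable string; it is used later in this same series, for example in xhci_reset_device():

	xhci_info(xhci, "Can't reset device (slot ID %u) in %s state\n",
			slot_id, xhci_get_slot_state(xhci, virt_dev->out_ctx));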
diff --git a/drivers/usb/host/xhci-ext-caps.h b/drivers/usb/host/xhci-ext-caps.h
index ecc131c3fe33..78c4edac1db1 100644
--- a/drivers/usb/host/xhci-ext-caps.h
+++ b/drivers/usb/host/xhci-ext-caps.h
@@ -101,12 +101,15 @@ static inline int xhci_find_next_cap_offset(void __iomem *base, int ext_offset)

 	next = readl(base + ext_offset);

-	if (ext_offset == XHCI_HCC_PARAMS_OFFSET)
+	if (ext_offset == XHCI_HCC_PARAMS_OFFSET) {
 		/* Find the first extended capability */
 		next = XHCI_HCC_EXT_CAPS(next);
-	else
+		ext_offset = 0;
+	} else {
 		/* Find the next extended capability */
 		next = XHCI_EXT_CAPS_NEXT(next);
+	}
+
 	if (!next)
 		return 0;
 	/*
diff --git a/drivers/usb/host/xhci-hcd.c b/drivers/usb/host/xhci-hcd.c
index 5e92c72df642..4cb69e0af834 100644
--- a/drivers/usb/host/xhci-hcd.c
+++ b/drivers/usb/host/xhci-hcd.c
@@ -1007,7 +1007,7 @@ int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
 	 * for usb_set_interface() and usb_set_configuration() claim).
 	 */
 	if (xhci_endpoint_init(xhci, xhci->devs[udev->slot_id],
-				udev, ep, GFP_KERNEL) < 0) {
+				udev, ep, GFP_NOIO) < 0) {
 		dev_dbg(&udev->dev, "%s - could not initialize ep %#x\n",
 				__func__, ep->desc.bEndpointAddress);
 		return -ENOMEM;
@@ -1181,6 +1181,8 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
 		ret = xhci_queue_evaluate_context(xhci, in_ctx->dma,
 				udev->slot_id);
 	if (ret < 0) {
+		if (command)
+			list_del(&command->cmd_list);
 		spin_unlock_irqrestore(&xhci->lock, flags);
 		xhci_dbg(xhci, "FIXME allocate a new ring segment\n");
 		return -ENOMEM;
@@ -1264,30 +1266,13 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
 	xhci_zero_in_ctx(xhci, virt_dev);
 	/* Install new rings and free or cache any old rings */
 	for (i = 1; i < 31; ++i) {
-		int rings_cached;
-
 		if (!virt_dev->eps[i].new_ring)
 			continue;
 		/* Only cache or free the old ring if it exists.
 		 * It may not if this is the first add of an endpoint.
 		 */
 		if (virt_dev->eps[i].ring) {
-			rings_cached = virt_dev->num_rings_cached;
-			if (rings_cached < XHCI_MAX_RINGS_CACHED) {
-				virt_dev->num_rings_cached++;
-				rings_cached = virt_dev->num_rings_cached;
-				virt_dev->ring_cache[rings_cached] =
-					virt_dev->eps[i].ring;
-				xhci_dbg(xhci, "Cached old ring, "
-						"%d ring%s cached\n",
-						rings_cached,
-						(rings_cached > 1) ? "s" : "");
-			} else {
-				xhci_ring_free(xhci, virt_dev->eps[i].ring);
-				xhci_dbg(xhci, "Ring cache full (%d rings), "
-						"freeing ring\n",
-						virt_dev->num_rings_cached);
-			}
+			xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
 		}
 		virt_dev->eps[i].ring = virt_dev->eps[i].new_ring;
 		virt_dev->eps[i].new_ring = NULL;
@@ -1458,6 +1443,131 @@ void xhci_endpoint_reset(struct usb_hcd *hcd,
 }

 /*
+ * This submits a Reset Device Command, which will set the device state to 0,
+ * set the device address to 0, and disable all the endpoints except the default
+ * control endpoint.  The USB core should come back and call
+ * xhci_address_device(), and then re-set up the configuration.  If this is
+ * called because of a usb_reset_and_verify_device(), then the old alternate
+ * settings will be re-installed through the normal bandwidth allocation
+ * functions.
+ *
+ * Wait for the Reset Device command to finish.  Remove all structures
+ * associated with the endpoints that were disabled.  Clear the input device
+ * structure? Cache the rings? Reset the control endpoint 0 max packet size?
+ */
+int xhci_reset_device(struct usb_hcd *hcd, struct usb_device *udev)
+{
+	int ret, i;
+	unsigned long flags;
+	struct xhci_hcd *xhci;
+	unsigned int slot_id;
+	struct xhci_virt_device *virt_dev;
+	struct xhci_command *reset_device_cmd;
+	int timeleft;
+	int last_freed_endpoint;
+
+	ret = xhci_check_args(hcd, udev, NULL, 0, __func__);
+	if (ret <= 0)
+		return ret;
+	xhci = hcd_to_xhci(hcd);
+	slot_id = udev->slot_id;
+	virt_dev = xhci->devs[slot_id];
+	if (!virt_dev) {
+		xhci_dbg(xhci, "%s called with invalid slot ID %u\n",
+				__func__, slot_id);
+		return -EINVAL;
+	}
+
+	xhci_dbg(xhci, "Resetting device with slot ID %u\n", slot_id);
+	/* Allocate the command structure that holds the struct completion.
+	 * Assume we're in process context, since the normal device reset
+	 * process has to wait for the device anyway.  Storage devices are
+	 * reset as part of error handling, so use GFP_NOIO instead of
+	 * GFP_KERNEL.
+	 */
+	reset_device_cmd = xhci_alloc_command(xhci, false, true, GFP_NOIO);
+	if (!reset_device_cmd) {
+		xhci_dbg(xhci, "Couldn't allocate command structure.\n");
+		return -ENOMEM;
+	}
+
+	/* Attempt to submit the Reset Device command to the command ring */
+	spin_lock_irqsave(&xhci->lock, flags);
+	reset_device_cmd->command_trb = xhci->cmd_ring->enqueue;
+	list_add_tail(&reset_device_cmd->cmd_list, &virt_dev->cmd_list);
+	ret = xhci_queue_reset_device(xhci, slot_id);
+	if (ret) {
+		xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
+		list_del(&reset_device_cmd->cmd_list);
+		spin_unlock_irqrestore(&xhci->lock, flags);
+		goto command_cleanup;
+	}
+	xhci_ring_cmd_db(xhci);
+	spin_unlock_irqrestore(&xhci->lock, flags);
+
+	/* Wait for the Reset Device command to finish */
+	timeleft = wait_for_completion_interruptible_timeout(
+			reset_device_cmd->completion,
+			USB_CTRL_SET_TIMEOUT);
+	if (timeleft <= 0) {
+		xhci_warn(xhci, "%s while waiting for reset device command\n",
+				timeleft == 0 ? "Timeout" : "Signal");
+		spin_lock_irqsave(&xhci->lock, flags);
+		/* The timeout might have raced with the event ring handler, so
+		 * only delete from the list if the item isn't poisoned.
+		 */
+		if (reset_device_cmd->cmd_list.next != LIST_POISON1)
+			list_del(&reset_device_cmd->cmd_list);
+		spin_unlock_irqrestore(&xhci->lock, flags);
+		ret = -ETIME;
+		goto command_cleanup;
+	}
+
+	/* The Reset Device command can't fail, according to the 0.95/0.96 spec,
+	 * unless we tried to reset a slot ID that wasn't enabled,
+	 * or the device wasn't in the addressed or configured state.
+	 */
+	ret = reset_device_cmd->status;
+	switch (ret) {
+	case COMP_EBADSLT: /* 0.95 completion code for bad slot ID */
+	case COMP_CTX_STATE: /* 0.96 completion code for same thing */
+		xhci_info(xhci, "Can't reset device (slot ID %u) in %s state\n",
+				slot_id,
+				xhci_get_slot_state(xhci, virt_dev->out_ctx));
+		xhci_info(xhci, "Not freeing device rings.\n");
+		/* Don't treat this as an error.  May change my mind later. */
+		ret = 0;
+		goto command_cleanup;
+	case COMP_SUCCESS:
+		xhci_dbg(xhci, "Successful reset device command.\n");
+		break;
+	default:
+		if (xhci_is_vendor_info_code(xhci, ret))
+			break;
+		xhci_warn(xhci, "Unknown completion code %u for "
+				"reset device command.\n", ret);
+		ret = -EINVAL;
+		goto command_cleanup;
+	}
+
+	/* Everything but endpoint 0 is disabled, so free or cache the rings. */
+	last_freed_endpoint = 1;
+	for (i = 1; i < 31; ++i) {
+		if (!virt_dev->eps[i].ring)
+			continue;
+		xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
+		last_freed_endpoint = i;
+	}
+	xhci_dbg(xhci, "Output context after successful reset device cmd:\n");
+	xhci_dbg_ctx(xhci, virt_dev->out_ctx, last_freed_endpoint);
+	ret = 0;
+
+command_cleanup:
+	xhci_free_command(xhci, reset_device_cmd);
+	return ret;
+}
+
+/*
  * At this point, the struct usb_device is about to go away, the device has
  * disconnected, and all traffic has been stopped and the endpoints have been
  * disabled.  Free any HC data structures associated with that device.
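The new hook is exported through hc_driver (see the xhci-pci.c hunk below), so the USB core can reach it during a device reset; a minimal caller sketch (hypothetical, not part of this patch):

	if (hcd->driver->reset_device)
		ret = hcd->driver->reset_device(hcd, udev);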
@@ -1694,7 +1804,7 @@ int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
 		xhci_warn(xhci, "Cannot update hub desc for unknown device.\n");
 		return -EINVAL;
 	}
-	config_cmd = xhci_alloc_command(xhci, true, mem_flags);
+	config_cmd = xhci_alloc_command(xhci, true, true, mem_flags);
 	if (!config_cmd) {
 		xhci_dbg(xhci, "Could not allocate xHCI command structure.\n");
 		return -ENOMEM;
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index eac5b53aa9e7..208b805b80eb 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -129,6 +129,50 @@ static u32 xhci_port_state_to_neutral(u32 state)
 	return (state & XHCI_PORT_RO) | (state & XHCI_PORT_RWS);
 }

+static void xhci_disable_port(struct xhci_hcd *xhci, u16 wIndex,
+		u32 __iomem *addr, u32 port_status)
+{
+	/* Write 1 to disable the port */
+	xhci_writel(xhci, port_status | PORT_PE, addr);
+	port_status = xhci_readl(xhci, addr);
+	xhci_dbg(xhci, "disable port, actual port %d status  = 0x%x\n",
+			wIndex, port_status);
+}
+
+static void xhci_clear_port_change_bit(struct xhci_hcd *xhci, u16 wValue,
+		u16 wIndex, u32 __iomem *addr, u32 port_status)
+{
+	char *port_change_bit;
+	u32 status;
+
+	switch (wValue) {
+	case USB_PORT_FEAT_C_RESET:
+		status = PORT_RC;
+		port_change_bit = "reset";
+		break;
+	case USB_PORT_FEAT_C_CONNECTION:
+		status = PORT_CSC;
+		port_change_bit = "connect";
+		break;
+	case USB_PORT_FEAT_C_OVER_CURRENT:
+		status = PORT_OCC;
+		port_change_bit = "over-current";
+		break;
+	case USB_PORT_FEAT_C_ENABLE:
+		status = PORT_PEC;
+		port_change_bit = "enable/disable";
+		break;
+	default:
+		/* Should never happen */
+		return;
+	}
+	/* Change bits are all write 1 to clear */
+	xhci_writel(xhci, port_status | status, addr);
+	port_status = xhci_readl(xhci, addr);
+	xhci_dbg(xhci, "clear port %s change, actual port %d status  = 0x%x\n",
+			port_change_bit, wIndex, port_status);
+}
+
 int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
 		u16 wIndex, char *buf, u16 wLength)
 {
@@ -138,7 +182,6 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
 	u32 temp, status;
 	int retval = 0;
 	u32 __iomem *addr;
-	char *port_change_bit;

 	ports = HCS_MAX_PORTS(xhci->hcs_params1);

@@ -229,26 +272,18 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
 		temp = xhci_port_state_to_neutral(temp);
 		switch (wValue) {
 		case USB_PORT_FEAT_C_RESET:
-			status = PORT_RC;
-			port_change_bit = "reset";
-			break;
 		case USB_PORT_FEAT_C_CONNECTION:
-			status = PORT_CSC;
-			port_change_bit = "connect";
-			break;
 		case USB_PORT_FEAT_C_OVER_CURRENT:
-			status = PORT_OCC;
-			port_change_bit = "over-current";
+		case USB_PORT_FEAT_C_ENABLE:
+			xhci_clear_port_change_bit(xhci, wValue, wIndex,
+					addr, temp);
+			break;
+		case USB_PORT_FEAT_ENABLE:
+			xhci_disable_port(xhci, wIndex, addr, temp);
 			break;
 		default:
 			goto error;
 		}
-		/* Change bits are all write 1 to clear */
-		xhci_writel(xhci, temp | status, addr);
-		temp = xhci_readl(xhci, addr);
-		xhci_dbg(xhci, "clear port %s change, actual port %d status  = 0x%x\n",
-				port_change_bit, wIndex, temp);
-		temp = xhci_readl(xhci, addr);	/* unblock any posted writes */
 		break;
 	default:
 error:
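Both helpers rely on the RW1C (write-1-to-clear) semantics of the PORTSC change bits; a minimal sketch of the idiom they factor out, using names from the code above:

	temp = xhci_readl(xhci, addr);
	temp = xhci_port_state_to_neutral(temp);	/* avoid clobbering other RW1C bits */
	xhci_writel(xhci, temp | PORT_CSC, addr);	/* writing 1 clears the connect change */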
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index bffcef7a5545..49f7d72f8b1b 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -198,6 +198,31 @@ fail:
 	return 0;
 }

+void xhci_free_or_cache_endpoint_ring(struct xhci_hcd *xhci,
+		struct xhci_virt_device *virt_dev,
+		unsigned int ep_index)
+{
+	int rings_cached;
+
+	rings_cached = virt_dev->num_rings_cached;
+	if (rings_cached < XHCI_MAX_RINGS_CACHED) {
+		virt_dev->num_rings_cached++;
+		rings_cached = virt_dev->num_rings_cached;
+		virt_dev->ring_cache[rings_cached] =
+			virt_dev->eps[ep_index].ring;
+		xhci_dbg(xhci, "Cached old ring, "
+				"%d ring%s cached\n",
+				rings_cached,
+				(rings_cached > 1) ? "s" : "");
+	} else {
+		xhci_ring_free(xhci, virt_dev->eps[ep_index].ring);
+		xhci_dbg(xhci, "Ring cache full (%d rings), "
+				"freeing ring\n",
+				virt_dev->num_rings_cached);
+	}
+	virt_dev->eps[ep_index].ring = NULL;
+}
+
 /* Zero an endpoint ring (except for link TRBs) and move the enqueue and dequeue
  * pointers to the beginning of the ring.
  */
@@ -242,6 +267,8 @@ struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci,
 void xhci_free_container_ctx(struct xhci_hcd *xhci,
 		struct xhci_container_ctx *ctx)
 {
+	if (!ctx)
+		return;
 	dma_pool_free(xhci->device_pool, ctx->bytes, ctx->dma);
 	kfree(ctx);
 }
@@ -427,7 +454,7 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
 	case USB_SPEED_LOW:
 		slot_ctx->dev_info |= (u32) SLOT_SPEED_LS;
 		break;
-	case USB_SPEED_VARIABLE:
+	case USB_SPEED_WIRELESS:
 		xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n");
 		return -EINVAL;
 		break;
@@ -471,7 +498,7 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
 	case USB_SPEED_LOW:
 		ep0_ctx->ep_info2 |= MAX_PACKET(8);
 		break;
-	case USB_SPEED_VARIABLE:
+	case USB_SPEED_WIRELESS:
 		xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n");
 		return -EINVAL;
 		break;
@@ -819,7 +846,8 @@ static void scratchpad_free(struct xhci_hcd *xhci)
 }

 struct xhci_command *xhci_alloc_command(struct xhci_hcd *xhci,
-		bool allocate_completion, gfp_t mem_flags)
+		bool allocate_in_ctx, bool allocate_completion,
+		gfp_t mem_flags)
 {
 	struct xhci_command *command;

@@ -827,11 +855,14 @@ struct xhci_command *xhci_alloc_command(struct xhci_hcd *xhci,
 	if (!command)
 		return NULL;

-	command->in_ctx =
-		xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT, mem_flags);
-	if (!command->in_ctx) {
-		kfree(command);
-		return NULL;
+	if (allocate_in_ctx) {
+		command->in_ctx =
+			xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT,
+					mem_flags);
+		if (!command->in_ctx) {
+			kfree(command);
+			return NULL;
+		}
 	}

 	if (allocate_completion) {
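With the extra allocate_in_ctx flag, callers that only need the completion no longer pay for an input context. Both call styles appear in this patch:

	/* full command with input context (e.g. xhci_update_hub_device) */
	config_cmd = xhci_alloc_command(xhci, true, true, mem_flags);

	/* completion only, no input context (e.g. xhci_reset_device) */
	reset_device_cmd = xhci_alloc_command(xhci, false, true, GFP_NOIO);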
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index e097008d6fb1..417d37aff8d7 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -139,6 +139,7 @@ static const struct hc_driver xhci_pci_hc_driver = {
 	.reset_bandwidth =	xhci_reset_bandwidth,
 	.address_device =	xhci_address_device,
 	.update_hub_device =	xhci_update_hub_device,
+	.reset_device =		xhci_reset_device,

 	/*
 	 * scheduling support
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index ee7bc7ecbc59..6ba841bca4a2 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -953,6 +953,17 @@ bandwidth_change:
 	case TRB_TYPE(TRB_RESET_EP):
 		handle_reset_ep_completion(xhci, event, xhci->cmd_ring->dequeue);
 		break;
+	case TRB_TYPE(TRB_RESET_DEV):
+		xhci_dbg(xhci, "Completed reset device command.\n");
+		slot_id = TRB_TO_SLOT_ID(
+				xhci->cmd_ring->dequeue->generic.field[3]);
+		virt_dev = xhci->devs[slot_id];
+		if (virt_dev)
+			handle_cmd_in_cmd_wait_list(xhci, virt_dev, event);
+		else
+			xhci_warn(xhci, "Reset device command completion "
+					"for disabled slot %u\n", slot_id);
+		break;
 	default:
 		/* Skip over unknown commands on the event ring */
 		xhci->error_bitmask |= 1 << 6;
@@ -1080,6 +1091,20 @@ static int xhci_requires_manual_halt_cleanup(struct xhci_hcd *xhci,
 	return 0;
 }

+int xhci_is_vendor_info_code(struct xhci_hcd *xhci, unsigned int trb_comp_code)
+{
+	if (trb_comp_code >= 224 && trb_comp_code <= 255) {
+		/* Vendor defined "informational" completion code,
+		 * treat as not-an-error.
+		 */
+		xhci_dbg(xhci, "Vendor defined info completion code %u\n",
+				trb_comp_code);
+		xhci_dbg(xhci, "Treating code as success.\n");
+		return 1;
+	}
+	return 0;
+}
+
 /*
  * If this function returns an error condition, it means it got a Transfer
  * event with a corrupted Slot ID, Endpoint ID, or TRB DMA address.
@@ -1196,13 +1221,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
 		status = -ENOSR;
 		break;
 	default:
-		if (trb_comp_code >= 224 && trb_comp_code <= 255) {
-			/* Vendor defined "informational" completion code,
-			 * treat as not-an-error.
-			 */
-			xhci_dbg(xhci, "Vendor defined info completion code %u\n",
-					trb_comp_code);
-			xhci_dbg(xhci, "Treating code as success.\n");
+		if (xhci_is_vendor_info_code(xhci, trb_comp_code)) {
 			status = 0;
 			break;
 		}
@@ -2181,6 +2200,14 @@ int xhci_queue_address_device(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
 			false);
 }

+/* Queue a reset device command TRB */
+int xhci_queue_reset_device(struct xhci_hcd *xhci, u32 slot_id)
+{
+	return queue_command(xhci, 0, 0, 0,
+			TRB_TYPE(TRB_RESET_DEV) | SLOT_ID_FOR_TRB(slot_id),
+			false);
+}
+
 /* Queue a configure endpoint command TRB */
 int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
 		u32 slot_id, bool command_must_succeed)
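xhci_is_vendor_info_code() centralizes the rule that TRB completion codes 224-255 are vendor-defined "informational" codes; both of its callers in this patch treat them as success:

	if (xhci_is_vendor_info_code(xhci, trb_comp_code))
		status = 0;	/* informational, not an error */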
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 877813505ef2..e5eb09b2f38e 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1210,6 +1210,8 @@ void xhci_dbg_erst(struct xhci_hcd *xhci, struct xhci_erst *erst);
 void xhci_dbg_cmd_ptrs(struct xhci_hcd *xhci);
 void xhci_dbg_ring_ptrs(struct xhci_hcd *xhci, struct xhci_ring *ring);
 void xhci_dbg_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx, unsigned int last_ep);
+char *xhci_get_slot_state(struct xhci_hcd *xhci,
+		struct xhci_container_ctx *ctx);

 /* xHCI memory management */
 void xhci_mem_cleanup(struct xhci_hcd *xhci);
@@ -1233,8 +1235,12 @@ int xhci_endpoint_init(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev,
 		struct usb_device *udev, struct usb_host_endpoint *ep,
 		gfp_t mem_flags);
 void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring);
+void xhci_free_or_cache_endpoint_ring(struct xhci_hcd *xhci,
+		struct xhci_virt_device *virt_dev,
+		unsigned int ep_index);
 struct xhci_command *xhci_alloc_command(struct xhci_hcd *xhci,
-		bool allocate_completion, gfp_t mem_flags);
+		bool allocate_in_ctx, bool allocate_completion,
+		gfp_t mem_flags);
 void xhci_free_command(struct xhci_hcd *xhci,
 		struct xhci_command *command);

@@ -1264,6 +1270,7 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status);
 int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev, struct usb_host_endpoint *ep);
 int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev, struct usb_host_endpoint *ep);
 void xhci_endpoint_reset(struct usb_hcd *hcd, struct usb_host_endpoint *ep);
+int xhci_reset_device(struct usb_hcd *hcd, struct usb_device *udev);
 int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev);
 void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev);

@@ -1272,6 +1279,7 @@ dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg, union xhci_trb *trb);
 struct xhci_segment *trb_in_td(struct xhci_segment *start_seg,
 		union xhci_trb *start_trb, union xhci_trb *end_trb,
 		dma_addr_t suspect_dma);
+int xhci_is_vendor_info_code(struct xhci_hcd *xhci, unsigned int trb_comp_code);
 void xhci_ring_cmd_db(struct xhci_hcd *xhci);
 void *xhci_setup_one_noop(struct xhci_hcd *xhci);
 void xhci_handle_event(struct xhci_hcd *xhci);
@@ -1293,6 +1301,7 @@ int xhci_queue_evaluate_context(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
 		u32 slot_id);
 int xhci_queue_reset_ep(struct xhci_hcd *xhci, int slot_id,
 		unsigned int ep_index);
+int xhci_queue_reset_device(struct xhci_hcd *xhci, u32 slot_id);
 void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
 		unsigned int slot_id, unsigned int ep_index,
 		struct xhci_td *cur_td, struct xhci_dequeue_state *state);
diff --git a/drivers/usb/image/mdc800.c b/drivers/usb/image/mdc800.c
index eca355dccf65..e192e8f7c560 100644
--- a/drivers/usb/image/mdc800.c
+++ b/drivers/usb/image/mdc800.c
@@ -967,7 +967,7 @@ static const struct file_operations mdc800_device_ops =



-static struct usb_device_id mdc800_table [] = {
+static const struct usb_device_id mdc800_table[] = {
 	{ USB_DEVICE(MDC800_VENDOR_ID, MDC800_PRODUCT_ID) },
 	{ }						/* Terminating entry */
 };
diff --git a/drivers/usb/image/microtek.c b/drivers/usb/image/microtek.c
index 459a7287fe01..3a6bcd5fee09 100644
--- a/drivers/usb/image/microtek.c
+++ b/drivers/usb/image/microtek.c
@@ -155,7 +155,7 @@ static int mts_usb_probe(struct usb_interface *intf,
 			 const struct usb_device_id *id);
 static void mts_usb_disconnect(struct usb_interface *intf);

-static struct usb_device_id mts_usb_ids [];
+static const struct usb_device_id mts_usb_ids[];

 static struct usb_driver mts_usb_driver = {
 	.name		= "microtekX6",
@@ -656,7 +656,7 @@ static struct scsi_host_template mts_scsi_host_template = {
 /* The entries of microtek_table must correspond, line-by-line to
    the entries of mts_supported_products[]. */

-static struct usb_device_id mts_usb_ids [] =
+static const struct usb_device_id mts_usb_ids[] =
 {
 	{ USB_DEVICE(0x4ce, 0x0300) },
 	{ USB_DEVICE(0x5da, 0x0094) },
diff --git a/drivers/usb/misc/Kconfig b/drivers/usb/misc/Kconfig
index abe3aa67ed00..55660eaf947c 100644
--- a/drivers/usb/misc/Kconfig
+++ b/drivers/usb/misc/Kconfig
@@ -87,17 +87,6 @@ config USB_LCD
 	  To compile this driver as a module, choose M here: the
 	  module will be called usblcd.

-config USB_BERRY_CHARGE
-	tristate "USB BlackBerry recharge support"
-	depends on USB
-	help
-	  Say Y here if you want to connect a BlackBerry device to your
-	  computer's USB port and have it automatically switch to "recharge"
-	  mode.
-
-	  To compile this driver as a module, choose M here: the
-	  module will be called berry_charge.
-
 config USB_LED
 	tristate "USB LED driver support"
 	depends on USB
@@ -242,17 +231,3 @@ config USB_ISIGHTFW
 	  driver beforehand. Tools for doing so are available at
 	  http://bersace03.free.fr

-config USB_VST
-	tristate "USB VST driver"
-	depends on USB
-	help
-	  This driver is intended for Vernier Software Technologies
-	  bulk usb devices such as their Ocean-Optics spectrometers or
-	  Labquest.
-	  It is a bulk channel driver with configurable read and write
-	  timeouts.
-
-	  To compile this driver as a module, choose M here: the
-	  module will be called vstusb.
-
-
diff --git a/drivers/usb/misc/Makefile b/drivers/usb/misc/Makefile
index 0826aab8303f..717703e81425 100644
--- a/drivers/usb/misc/Makefile
+++ b/drivers/usb/misc/Makefile
@@ -5,7 +5,6 @@

 obj-$(CONFIG_USB_ADUTUX)	+= adutux.o
 obj-$(CONFIG_USB_APPLEDISPLAY)	+= appledisplay.o
-obj-$(CONFIG_USB_BERRY_CHARGE)	+= berry_charge.o
 obj-$(CONFIG_USB_CYPRESS_CY7C63)+= cypress_cy7c63.o
 obj-$(CONFIG_USB_CYTHERM)	+= cytherm.o
 obj-$(CONFIG_USB_EMI26)		+= emi26.o
@@ -23,7 +22,6 @@ obj-$(CONFIG_USB_TEST)		+= usbtest.o
 obj-$(CONFIG_USB_TRANCEVIBRATOR)	+= trancevibrator.o
 obj-$(CONFIG_USB_USS720)	+= uss720.o
 obj-$(CONFIG_USB_SEVSEG)	+= usbsevseg.o
-obj-$(CONFIG_USB_VST)		+= vstusb.o

 obj-$(CONFIG_USB_SISUSBVGA)	+= sisusbvga/

diff --git a/drivers/usb/misc/adutux.c b/drivers/usb/misc/adutux.c
index 203526542013..d240de097c62 100644
--- a/drivers/usb/misc/adutux.c
+++ b/drivers/usb/misc/adutux.c
@@ -38,7 +38,7 @@ static int debug = 1;
 #define dbg(lvl, format, arg...) \
 do { \
 	if (debug >= lvl) \
-		printk(KERN_DEBUG __FILE__ " : " format " \n", ## arg); \
+		printk(KERN_DEBUG "%s: " format "\n", __FILE__, ##arg); \
 } while (0)


@@ -56,7 +56,7 @@ MODULE_PARM_DESC(debug, "Debug enabled or not");
 #define ADU_PRODUCT_ID	0x0064

 /* table of devices that work with this driver */
-static struct usb_device_id device_table [] = {
+static const struct usb_device_id device_table[] = {
 	{ USB_DEVICE(ADU_VENDOR_ID, ADU_PRODUCT_ID) },		/* ADU100 */
 	{ USB_DEVICE(ADU_VENDOR_ID, ADU_PRODUCT_ID+20) }, 	/* ADU120 */
 	{ USB_DEVICE(ADU_VENDOR_ID, ADU_PRODUCT_ID+30) }, 	/* ADU130 */
@@ -132,8 +132,8 @@ static void adu_debug_data(int level, const char *function, int size,
 	if (debug < level)
 		return;

-	printk(KERN_DEBUG __FILE__": %s - length = %d, data = ",
-	       function, size);
+	printk(KERN_DEBUG "%s: %s - length = %d, data = ",
+	       __FILE__, function, size);
 	for (i = 0; i < size; ++i)
 		printk("%.2x ", data[i]);
 	printk("\n");
diff --git a/drivers/usb/misc/appledisplay.c b/drivers/usb/misc/appledisplay.c
index 1eb9e4162cc6..4d2952f1fb13 100644
--- a/drivers/usb/misc/appledisplay.c
+++ b/drivers/usb/misc/appledisplay.c
@@ -57,7 +57,7 @@
 	.bInterfaceProtocol = 0x00

 /* table of devices that work with this driver */
-static struct usb_device_id appledisplay_table [] = {
+static const struct usb_device_id appledisplay_table[] = {
 	{ APPLEDISPLAY_DEVICE(0x9218) },
 	{ APPLEDISPLAY_DEVICE(0x9219) },
 	{ APPLEDISPLAY_DEVICE(0x921c) },
@@ -179,7 +179,7 @@ static int appledisplay_bl_get_brightness(struct backlight_device *bd)
 	return pdata->msgdata[1];
 }

-static struct backlight_ops appledisplay_bl_data = {
+static const struct backlight_ops appledisplay_bl_data = {
 	.get_brightness	= appledisplay_bl_get_brightness,
 	.update_status	= appledisplay_bl_update_status,
 };
@@ -283,6 +283,7 @@ static int appledisplay_probe(struct usb_interface *iface,
 		&appledisplay_bl_data);
 	if (IS_ERR(pdata->bd)) {
 		dev_err(&iface->dev, "Backlight registration failed\n");
+		retval = PTR_ERR(pdata->bd);
 		goto error;
 	}

diff --git a/drivers/usb/misc/berry_charge.c b/drivers/usb/misc/berry_charge.c
deleted file mode 100644
index c05a85bc5925..000000000000
--- a/drivers/usb/misc/berry_charge.c
+++ /dev/null
@@ -1,183 +0,0 @@
1/*
2 * USB BlackBerry charging module
3 *
4 * Copyright (C) 2007 Greg Kroah-Hartman <gregkh@suse.de>
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License as
8 * published by the Free Software Foundation, version 2.
9 *
10 * Information on how to switch configs was taken by the bcharge.cc file
11 * created by the barry.sf.net project.
12 *
13 * bcharge.cc has the following copyright:
14 * Copyright (C) 2006, Net Direct Inc. (http://www.netdirect.ca/)
15 * and is released under the GPLv2.
16 *
17 *
18 */
19
20#include <linux/kernel.h>
21#include <linux/errno.h>
22#include <linux/init.h>
23#include <linux/slab.h>
24#include <linux/module.h>
25#include <linux/usb.h>
26
27#define RIM_VENDOR 0x0fca
28#define BLACKBERRY 0x0001
29#define BLACKBERRY_PEARL_DUAL 0x0004
30#define BLACKBERRY_PEARL 0x0006
31
32static int debug;
33static int pearl_dual_mode = 1;
34
35#ifdef dbg
36#undef dbg
37#endif
38#define dbg(dev, format, arg...) \
39 if (debug) \
40 dev_printk(KERN_DEBUG , dev , format , ## arg)
41
42static struct usb_device_id id_table [] = {
43 { USB_DEVICE(RIM_VENDOR, BLACKBERRY) },
44 { USB_DEVICE(RIM_VENDOR, BLACKBERRY_PEARL) },
45 { USB_DEVICE(RIM_VENDOR, BLACKBERRY_PEARL_DUAL) },
46 { }, /* Terminating entry */
47};
48MODULE_DEVICE_TABLE(usb, id_table);
49
50static int magic_charge(struct usb_device *udev)
51{
52 char *dummy_buffer = kzalloc(2, GFP_KERNEL);
53 int retval;
54
55 if (!dummy_buffer)
56 return -ENOMEM;
57
58 /* send two magic commands and then set the configuration. The device
59 * will then reset itself with the new power usage and should start
60 * charging. */
61
62 /* Note, with testing, it only seems that the first message is really
63 * needed (at least for the 8700c), but to be safe, we emulate what
64 * other operating systems seem to be sending to their device. We
65 * really need to get some specs for this device to be sure about what
66 * is going on here.
67 */
68 dbg(&udev->dev, "Sending first magic command\n");
69 retval = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
70 0xa5, 0xc0, 0, 1, dummy_buffer, 2, 100);
71 if (retval != 2) {
72 dev_err(&udev->dev, "First magic command failed: %d.\n",
73 retval);
74 goto exit;
75 }
76
77 dbg(&udev->dev, "Sending second magic command\n");
78 retval = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
79 0xa2, 0x40, 0, 1, dummy_buffer, 0, 100);
80 if (retval != 0) {
81 dev_err(&udev->dev, "Second magic command failed: %d.\n",
82 retval);
83 goto exit;
84 }
85
86 dbg(&udev->dev, "Calling set_configuration\n");
87 retval = usb_driver_set_configuration(udev, 1);
88 if (retval)
89 dev_err(&udev->dev, "Set Configuration failed :%d.\n", retval);
90
91exit:
92 kfree(dummy_buffer);
93 return retval;
94}
95
96static int magic_dual_mode(struct usb_device *udev)
97{
98 char *dummy_buffer = kzalloc(2, GFP_KERNEL);
99 int retval;
100
101 if (!dummy_buffer)
102 return -ENOMEM;
103
104 /* send magic command so that the Blackberry Pearl device exposes
105 * two interfaces: both the USB mass-storage one and one which can
106 * be used for database access. */
107 dbg(&udev->dev, "Sending magic pearl command\n");
108 retval = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
109 0xa9, 0xc0, 1, 1, dummy_buffer, 2, 100);
110 dbg(&udev->dev, "Magic pearl command returned %d\n", retval);
111
112 dbg(&udev->dev, "Calling set_configuration\n");
113 retval = usb_driver_set_configuration(udev, 1);
114 if (retval)
115 dev_err(&udev->dev, "Set Configuration failed :%d.\n", retval);
116
117 kfree(dummy_buffer);
118 return retval;
119}
120
121static int berry_probe(struct usb_interface *intf,
122 const struct usb_device_id *id)
123{
124 struct usb_device *udev = interface_to_usbdev(intf);
125
126 if (udev->bus_mA < 500) {
127 dbg(&udev->dev, "Not enough power to charge available\n");
128 return -ENODEV;
129 }
130
131 dbg(&udev->dev, "Power is set to %dmA\n",
132 udev->actconfig->desc.bMaxPower * 2);
133
134 /* check the power usage so we don't try to enable something that is
135 * already enabled */
136 if ((udev->actconfig->desc.bMaxPower * 2) == 500) {
137 dbg(&udev->dev, "device is already charging, power is "
138 "set to %dmA\n", udev->actconfig->desc.bMaxPower * 2);
139 return -ENODEV;
140 }
141
142 /* turn the power on */
143 magic_charge(udev);
144
145 if ((le16_to_cpu(udev->descriptor.idProduct) == BLACKBERRY_PEARL) &&
146 (pearl_dual_mode))
147 magic_dual_mode(udev);
148
149	/* We don't really want to bind to the device; userspace programs
150	 * can handle the syncing just fine, so get out of here. */
151 return -ENODEV;
152}
153
154static void berry_disconnect(struct usb_interface *intf)
155{
156}
157
158static struct usb_driver berry_driver = {
159 .name = "berry_charge",
160 .probe = berry_probe,
161 .disconnect = berry_disconnect,
162 .id_table = id_table,
163};
164
165static int __init berry_init(void)
166{
167 return usb_register(&berry_driver);
168}
169
170static void __exit berry_exit(void)
171{
172 usb_deregister(&berry_driver);
173}
174
175module_init(berry_init);
176module_exit(berry_exit);
177
178MODULE_LICENSE("GPL");
179MODULE_AUTHOR("Greg Kroah-Hartman <gregkh@suse.de>");
180module_param(debug, bool, S_IRUGO | S_IWUSR);
181MODULE_PARM_DESC(debug, "Debug enabled or not");
182module_param(pearl_dual_mode, bool, S_IRUGO | S_IWUSR);
183MODULE_PARM_DESC(pearl_dual_mode, "Change Blackberry Pearl to run in dual mode");
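The magic_charge() path above is easier to follow once the usb_control_msg() arguments are named. A minimal sketch, with the request values taken from the listing; the helper name and its factoring are ours, not the driver's:

	#include <linux/usb.h>

	/*
	 * Issue one of berry_charge's vendor requests.  bmRequestType 0xc0 is
	 * device-to-host | vendor | device; 0x40 is the host-to-device variant.
	 * The driver uses the receive control pipe for both requests, so this
	 * sketch mirrors that.
	 */
	static int berry_vendor_request(struct usb_device *udev, __u8 request,
					__u8 requesttype, void *buf, __u16 len)
	{
		return usb_control_msg(udev,
				       usb_rcvctrlpipe(udev, 0),
				       request,     /* bRequest: 0xa5, 0xa2 or 0xa9 */
				       requesttype, /* bmRequestType */
				       0,           /* wValue (1 for the Pearl cmd) */
				       1,           /* wIndex */
				       buf, len,    /* data stage, 0 or 2 bytes */
				       100);        /* timeout in milliseconds */
	}

On success usb_control_msg() returns the number of bytes transferred, which is why magic_charge() compares against 2 and 0 rather than simply testing for non-zero.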
diff --git a/drivers/usb/misc/cypress_cy7c63.c b/drivers/usb/misc/cypress_cy7c63.c
index 5720bfef6a38..1547d8cac5fb 100644
--- a/drivers/usb/misc/cypress_cy7c63.c
+++ b/drivers/usb/misc/cypress_cy7c63.c
@@ -56,7 +56,7 @@
56 56
57 57
58/* table of devices that work with this driver */ 58/* table of devices that work with this driver */
59static struct usb_device_id cypress_table [] = { 59static const struct usb_device_id cypress_table[] = {
60 { USB_DEVICE(CYPRESS_VENDOR_ID, CYPRESS_PRODUCT_ID) }, 60 { USB_DEVICE(CYPRESS_VENDOR_ID, CYPRESS_PRODUCT_ID) },
61 { } 61 { }
62}; 62};
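The cypress hunk above, like most of the hunks that follow, constifies a device ID table. The USB core only ever reads these tables, so const moves them into read-only data at no behavioural cost. A sketch of the post-patch shape, with placeholder vendor/product IDs:

	#include <linux/module.h>
	#include <linux/usb.h>

	static const struct usb_device_id example_table[] = {
		{ USB_DEVICE(0x1234, 0x5678) },	/* placeholder IDs */
		{ }				/* terminating entry */
	};
	MODULE_DEVICE_TABLE(usb, example_table);

MODULE_DEVICE_TABLE() emits the module alias information that modprobe uses for autoloading; it works the same on a const table.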
diff --git a/drivers/usb/misc/cytherm.c b/drivers/usb/misc/cytherm.c
index 4fb3c38b924b..b9cbbbda8245 100644
--- a/drivers/usb/misc/cytherm.c
+++ b/drivers/usb/misc/cytherm.c
@@ -27,7 +27,7 @@
27#define USB_SKEL_VENDOR_ID 0x04b4 27#define USB_SKEL_VENDOR_ID 0x04b4
28#define USB_SKEL_PRODUCT_ID 0x0002 28#define USB_SKEL_PRODUCT_ID 0x0002
29 29
30static struct usb_device_id id_table [] = { 30static const struct usb_device_id id_table[] = {
31 { USB_DEVICE(USB_SKEL_VENDOR_ID, USB_SKEL_PRODUCT_ID) }, 31 { USB_DEVICE(USB_SKEL_VENDOR_ID, USB_SKEL_PRODUCT_ID) },
32 { } 32 { }
33}; 33};
diff --git a/drivers/usb/misc/emi26.c b/drivers/usb/misc/emi26.c
index 879a980ca8c4..a6521c95f683 100644
--- a/drivers/usb/misc/emi26.c
+++ b/drivers/usb/misc/emi26.c
@@ -245,7 +245,7 @@ wraperr:
245 return err; 245 return err;
246} 246}
247 247
248static struct usb_device_id id_table [] = { 248static const struct usb_device_id id_table[] = {
249 { USB_DEVICE(EMI26_VENDOR_ID, EMI26_PRODUCT_ID) }, 249 { USB_DEVICE(EMI26_VENDOR_ID, EMI26_PRODUCT_ID) },
250 { USB_DEVICE(EMI26_VENDOR_ID, EMI26B_PRODUCT_ID) }, 250 { USB_DEVICE(EMI26_VENDOR_ID, EMI26B_PRODUCT_ID) },
251 { } /* Terminating entry */ 251 { } /* Terminating entry */
diff --git a/drivers/usb/misc/emi62.c b/drivers/usb/misc/emi62.c
index 59860b328534..fc15ad4c3139 100644
--- a/drivers/usb/misc/emi62.c
+++ b/drivers/usb/misc/emi62.c
@@ -259,7 +259,7 @@ wraperr:
259 return err; 259 return err;
260} 260}
261 261
262static __devinitdata struct usb_device_id id_table [] = { 262static const struct usb_device_id id_table[] __devinitconst = {
263 { USB_DEVICE(EMI62_VENDOR_ID, EMI62_PRODUCT_ID) }, 263 { USB_DEVICE(EMI62_VENDOR_ID, EMI62_PRODUCT_ID) },
264 { } /* Terminating entry */ 264 { } /* Terminating entry */
265}; 265};
diff --git a/drivers/usb/misc/ftdi-elan.c b/drivers/usb/misc/ftdi-elan.c
index 9d0675ed0d4c..1edb6d361896 100644
--- a/drivers/usb/misc/ftdi-elan.c
+++ b/drivers/usb/misc/ftdi-elan.c
@@ -86,7 +86,7 @@ static struct list_head ftdi_static_list;
86#define USB_FTDI_ELAN_VENDOR_ID 0x0403 86#define USB_FTDI_ELAN_VENDOR_ID 0x0403
87#define USB_FTDI_ELAN_PRODUCT_ID 0xd6ea 87#define USB_FTDI_ELAN_PRODUCT_ID 0xd6ea
88/* table of devices that work with this driver*/ 88/* table of devices that work with this driver*/
89static struct usb_device_id ftdi_elan_table[] = { 89static const struct usb_device_id ftdi_elan_table[] = {
90 {USB_DEVICE(USB_FTDI_ELAN_VENDOR_ID, USB_FTDI_ELAN_PRODUCT_ID)}, 90 {USB_DEVICE(USB_FTDI_ELAN_VENDOR_ID, USB_FTDI_ELAN_PRODUCT_ID)},
91 { /* Terminating entry */ } 91 { /* Terminating entry */ }
92}; 92};
@@ -623,9 +623,12 @@ static void ftdi_elan_status_work(struct work_struct *work)
623*/ 623*/
624static int ftdi_elan_open(struct inode *inode, struct file *file) 624static int ftdi_elan_open(struct inode *inode, struct file *file)
625{ 625{
626 int subminor = iminor(inode); 626 int subminor;
627 struct usb_interface *interface = usb_find_interface(&ftdi_elan_driver, 627 struct usb_interface *interface;
628 subminor); 628
629 subminor = iminor(inode);
630 interface = usb_find_interface(&ftdi_elan_driver, subminor);
631
629 if (!interface) { 632 if (!interface) {
630 printk(KERN_ERR "can't find device for minor %d\n", subminor); 633 printk(KERN_ERR "can't find device for minor %d\n", subminor);
631 return -ENODEV; 634 return -ENODEV;
diff --git a/drivers/usb/misc/idmouse.c b/drivers/usb/misc/idmouse.c
index 1337a9ce80b9..a54c3cb804ce 100644
--- a/drivers/usb/misc/idmouse.c
+++ b/drivers/usb/misc/idmouse.c
@@ -48,7 +48,7 @@
48#define ID_CHERRY 0x0010 48#define ID_CHERRY 0x0010
49 49
50/* device ID table */ 50/* device ID table */
51static struct usb_device_id idmouse_table[] = { 51static const struct usb_device_id idmouse_table[] = {
52 {USB_DEVICE(ID_SIEMENS, ID_IDMOUSE)}, /* Siemens ID Mouse (Professional) */ 52 {USB_DEVICE(ID_SIEMENS, ID_IDMOUSE)}, /* Siemens ID Mouse (Professional) */
53 {USB_DEVICE(ID_SIEMENS, ID_CHERRY )}, /* Cherry FingerTIP ID Board */ 53 {USB_DEVICE(ID_SIEMENS, ID_CHERRY )}, /* Cherry FingerTIP ID Board */
54 {} /* terminating null entry */ 54 {} /* terminating null entry */
diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c
index e75bb87ee92b..d3c852363883 100644
--- a/drivers/usb/misc/iowarrior.c
+++ b/drivers/usb/misc/iowarrior.c
@@ -139,7 +139,7 @@ static int usb_set_report(struct usb_interface *intf, unsigned char type,
139/* driver registration */ 139/* driver registration */
140/*---------------------*/ 140/*---------------------*/
141/* table of devices that work with this driver */ 141/* table of devices that work with this driver */
142static struct usb_device_id iowarrior_ids[] = { 142static const struct usb_device_id iowarrior_ids[] = {
143 {USB_DEVICE(USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOW40)}, 143 {USB_DEVICE(USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOW40)},
144 {USB_DEVICE(USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOW24)}, 144 {USB_DEVICE(USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOW24)},
145 {USB_DEVICE(USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOWPV1)}, 145 {USB_DEVICE(USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOWPV1)},
@@ -602,10 +602,12 @@ static int iowarrior_open(struct inode *inode, struct file *file)
602 602
603 dbg("%s", __func__); 603 dbg("%s", __func__);
604 604
605 lock_kernel();
605 subminor = iminor(inode); 606 subminor = iminor(inode);
606 607
607 interface = usb_find_interface(&iowarrior_driver, subminor); 608 interface = usb_find_interface(&iowarrior_driver, subminor);
608 if (!interface) { 609 if (!interface) {
610 unlock_kernel();
609 err("%s - error, can't find device for minor %d", __func__, 611 err("%s - error, can't find device for minor %d", __func__,
610 subminor); 612 subminor);
611 return -ENODEV; 613 return -ENODEV;
@@ -615,6 +617,7 @@ static int iowarrior_open(struct inode *inode, struct file *file)
615 dev = usb_get_intfdata(interface); 617 dev = usb_get_intfdata(interface);
616 if (!dev) { 618 if (!dev) {
617 mutex_unlock(&iowarrior_open_disc_lock); 619 mutex_unlock(&iowarrior_open_disc_lock);
620 unlock_kernel();
618 return -ENODEV; 621 return -ENODEV;
619 } 622 }
620 623
@@ -641,6 +644,7 @@ static int iowarrior_open(struct inode *inode, struct file *file)
641 644
642out: 645out:
643 mutex_unlock(&dev->mutex); 646 mutex_unlock(&dev->mutex);
647 unlock_kernel();
644 return retval; 648 return retval;
645} 649}
646 650
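The lock_kernel()/unlock_kernel() pairs added to iowarrior_open() above are part of the big-kernel-lock pushdown: the VFS stopped taking the BKL around chardev open(), so drivers that still relied on it had to take it themselves, to be removed one by one later. A condensed sketch of the resulting open() pattern; example_driver stands in for the real usb_driver:

	#include <linux/smp_lock.h>	/* lock_kernel()/unlock_kernel() */
	#include <linux/usb.h>

	static struct usb_driver example_driver;

	static int example_open(struct inode *inode, struct file *file)
	{
		struct usb_interface *interface;

		lock_kernel();		/* formerly taken by the chardev core */
		interface = usb_find_interface(&example_driver, iminor(inode));
		if (!interface) {
			unlock_kernel();	/* every early return must unlock */
			return -ENODEV;
		}
		file->private_data = usb_get_intfdata(interface);
		unlock_kernel();
		return 0;
	}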
diff --git a/drivers/usb/misc/isight_firmware.c b/drivers/usb/misc/isight_firmware.c
index b897f6554ecd..06e990adc6cd 100644
--- a/drivers/usb/misc/isight_firmware.c
+++ b/drivers/usb/misc/isight_firmware.c
@@ -26,7 +26,7 @@
26#include <linux/errno.h> 26#include <linux/errno.h>
27#include <linux/module.h> 27#include <linux/module.h>
28 28
29static struct usb_device_id id_table[] = { 29static const struct usb_device_id id_table[] = {
30 {USB_DEVICE(0x05ac, 0x8300)}, 30 {USB_DEVICE(0x05ac, 0x8300)},
31 {}, 31 {},
32}; 32};
@@ -112,6 +112,8 @@ out:
112 return ret; 112 return ret;
113} 113}
114 114
115MODULE_FIRMWARE("isight.fw");
116
115static void isight_firmware_disconnect(struct usb_interface *intf) 117static void isight_firmware_disconnect(struct usb_interface *intf)
116{ 118{
117} 119}
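MODULE_FIRMWARE("isight.fw") above is metadata only: it records the firmware name in the module image, where modinfo and initramfs tooling can find it. Loading still happens at runtime through request_firmware(), roughly as in this sketch; the function name and the elided transfer step are ours:

	#include <linux/firmware.h>
	#include <linux/module.h>

	MODULE_FIRMWARE("isight.fw");

	static int example_load_firmware(struct device *dev)
	{
		const struct firmware *fw;
		int ret;

		ret = request_firmware(&fw, "isight.fw", dev);
		if (ret)
			return ret;
		/* ... send fw->data, fw->size bytes, to the device ... */
		release_firmware(fw);
		return 0;
	}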
diff --git a/drivers/usb/misc/ldusb.c b/drivers/usb/misc/ldusb.c
index 90f130126c10..dd41d8710043 100644
--- a/drivers/usb/misc/ldusb.c
+++ b/drivers/usb/misc/ldusb.c
@@ -69,7 +69,7 @@
69#endif 69#endif
70 70
71/* table of devices that work with this driver */ 71/* table of devices that work with this driver */
72static struct usb_device_id ld_usb_table [] = { 72static const struct usb_device_id ld_usb_table[] = {
73 { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_CASSY) }, 73 { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_CASSY) },
74 { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_POCKETCASSY) }, 74 { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_POCKETCASSY) },
75 { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MOBILECASSY) }, 75 { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MOBILECASSY) },
@@ -798,7 +798,7 @@ static int __init ld_usb_init(void)
798 /* register this driver with the USB subsystem */ 798 /* register this driver with the USB subsystem */
799 retval = usb_register(&ld_usb_driver); 799 retval = usb_register(&ld_usb_driver);
800 if (retval) 800 if (retval)
801 err("usb_register failed for the "__FILE__" driver. Error number %d\n", retval); 801 err("usb_register failed for the %s driver. Error number %d\n", __FILE__, retval);
802 802
803 return retval; 803 return retval;
804} 804}
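The ldusb hunk above stops pasting __FILE__ into the format string and passes it as a "%s" argument instead. The difference matters when the build path contains a '%': pasted into the format, it would be parsed as a conversion specifier; passed as data, it is printed verbatim.

	/* Fragile: __FILE__ becomes part of the format string itself. */
	err("usb_register failed for the " __FILE__ " driver. Error number %d\n",
	    retval);

	/* Robust: the file name is an argument, never parsed as a format. */
	err("usb_register failed for the %s driver. Error number %d\n",
	    __FILE__, retval);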
diff --git a/drivers/usb/misc/legousbtower.c b/drivers/usb/misc/legousbtower.c
index faa6d623de78..8547bf9e3175 100644
--- a/drivers/usb/misc/legousbtower.c
+++ b/drivers/usb/misc/legousbtower.c
@@ -95,8 +95,11 @@
95 95
96/* Use our own dbg macro */ 96/* Use our own dbg macro */
97#undef dbg 97#undef dbg
98#define dbg(lvl, format, arg...) do { if (debug >= lvl) printk(KERN_DEBUG __FILE__ ": " format "\n", ## arg); } while (0) 98#define dbg(lvl, format, arg...) \
99 99do { \
100 if (debug >= lvl) \
101 printk(KERN_DEBUG "%s: " format "\n", __FILE__, ##arg); \
102} while (0)
100 103
101/* Version Information */ 104/* Version Information */
102#define DRIVER_VERSION "v0.96" 105#define DRIVER_VERSION "v0.96"
@@ -192,7 +195,7 @@ struct tower_get_version_reply {
192 195
193 196
194/* table of devices that work with this driver */ 197/* table of devices that work with this driver */
195static struct usb_device_id tower_table [] = { 198static const struct usb_device_id tower_table[] = {
196 { USB_DEVICE(LEGO_USB_TOWER_VENDOR_ID, LEGO_USB_TOWER_PRODUCT_ID) }, 199 { USB_DEVICE(LEGO_USB_TOWER_VENDOR_ID, LEGO_USB_TOWER_PRODUCT_ID) },
197 { } /* Terminating entry */ 200 { } /* Terminating entry */
198}; 201};
@@ -302,7 +305,7 @@ static inline void lego_usb_tower_debug_data (int level, const char *function, i
302 if (debug < level) 305 if (debug < level)
303 return; 306 return;
304 307
305 printk (KERN_DEBUG __FILE__": %s - length = %d, data = ", function, size); 308 printk (KERN_DEBUG "%s: %s - length = %d, data = ", __FILE__, function, size);
306 for (i = 0; i < size; ++i) { 309 for (i = 0; i < size; ++i) {
307 printk ("%.2x ", data[i]); 310 printk ("%.2x ", data[i]);
308 } 311 }
@@ -1055,7 +1058,7 @@ static int __init lego_usb_tower_init(void)
1055 /* register this driver with the USB subsystem */ 1058 /* register this driver with the USB subsystem */
1056 result = usb_register(&tower_driver); 1059 result = usb_register(&tower_driver);
1057 if (result < 0) { 1060 if (result < 0) {
1058 err("usb_register failed for the "__FILE__" driver. Error number %d", result); 1061 err("usb_register failed for the %s driver. Error number %d", __FILE__, result);
1059 retval = -1; 1062 retval = -1;
1060 goto exit; 1063 goto exit;
1061 } 1064 }
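The legousbtower dbg macro is rebuilt above as a do { ... } while (0) block. The wrapper makes the macro a single statement, which matters whenever it is used as an unbraced if body. A minimal demonstration of the pitfall the old one-line form left open:

	#define DBG_BAD(fmt, arg...) \
		if (debug >= 1) printk(KERN_DEBUG fmt "\n", ##arg)

	#define DBG_GOOD(fmt, arg...) \
	do { \
		if (debug >= 1) \
			printk(KERN_DEBUG fmt "\n", ##arg); \
	} while (0)

	/*
	 * With DBG_BAD, the caller's else binds to the macro's hidden if:
	 *
	 *	if (error)
	 *		DBG_BAD("failed");
	 *	else
	 *		retry();   // runs when error != 0 and debug == 0
	 *
	 * DBG_GOOD parses as one statement, so the else binds as intended.
	 */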
diff --git a/drivers/usb/misc/rio500.c b/drivers/usb/misc/rio500.c
index 32d0199d0c32..a85771b1563d 100644
--- a/drivers/usb/misc/rio500.c
+++ b/drivers/usb/misc/rio500.c
@@ -78,10 +78,13 @@ static int open_rio(struct inode *inode, struct file *file)
78{ 78{
79 struct rio_usb_data *rio = &rio_instance; 79 struct rio_usb_data *rio = &rio_instance;
80 80
81 /* against disconnect() */
82 lock_kernel();
81 mutex_lock(&(rio->lock)); 83 mutex_lock(&(rio->lock));
82 84
83 if (rio->isopen || !rio->present) { 85 if (rio->isopen || !rio->present) {
84 mutex_unlock(&(rio->lock)); 86 mutex_unlock(&(rio->lock));
87 unlock_kernel();
85 return -EBUSY; 88 return -EBUSY;
86 } 89 }
87 rio->isopen = 1; 90 rio->isopen = 1;
@@ -91,6 +94,7 @@ static int open_rio(struct inode *inode, struct file *file)
91 mutex_unlock(&(rio->lock)); 94 mutex_unlock(&(rio->lock));
92 95
93 dev_info(&rio->rio_dev->dev, "Rio opened.\n"); 96 dev_info(&rio->rio_dev->dev, "Rio opened.\n");
97 unlock_kernel();
94 98
95 return 0; 99 return 0;
96} 100}
@@ -115,7 +119,6 @@ static long ioctl_rio(struct file *file, unsigned int cmd, unsigned long arg)
115 int retries; 119 int retries;
116 int retval=0; 120 int retval=0;
117 121
118 lock_kernel();
119 mutex_lock(&(rio->lock)); 122 mutex_lock(&(rio->lock));
120 /* Sanity check to make sure rio is connected, powered, etc */ 123 /* Sanity check to make sure rio is connected, powered, etc */
121 if (rio->present == 0 || rio->rio_dev == NULL) { 124 if (rio->present == 0 || rio->rio_dev == NULL) {
@@ -254,7 +257,6 @@ static long ioctl_rio(struct file *file, unsigned int cmd, unsigned long arg)
254 257
255err_out: 258err_out:
256 mutex_unlock(&(rio->lock)); 259 mutex_unlock(&(rio->lock));
257 unlock_kernel();
258 return retval; 260 return retval;
259} 261}
260 262
@@ -489,6 +491,7 @@ static void disconnect_rio(struct usb_interface *intf)
489 struct rio_usb_data *rio = usb_get_intfdata (intf); 491 struct rio_usb_data *rio = usb_get_intfdata (intf);
490 492
491 usb_set_intfdata (intf, NULL); 493 usb_set_intfdata (intf, NULL);
494 lock_kernel();
492 if (rio) { 495 if (rio) {
493 usb_deregister_dev(intf, &usb_rio_class); 496 usb_deregister_dev(intf, &usb_rio_class);
494 497
@@ -498,6 +501,7 @@ static void disconnect_rio(struct usb_interface *intf)
498 /* better let it finish - the release will do what's needed */ 501 rio->rio_dev = NULL;
499 rio->rio_dev = NULL; 502 rio->rio_dev = NULL;
500 mutex_unlock(&(rio->lock)); 503 mutex_unlock(&(rio->lock));
504 unlock_kernel();
501 return; 505 return;
502 } 506 }
503 kfree(rio->ibuf); 507 kfree(rio->ibuf);
@@ -508,9 +512,10 @@ static void disconnect_rio(struct usb_interface *intf)
508 rio->present = 0; 512 rio->present = 0;
509 mutex_unlock(&(rio->lock)); 513 mutex_unlock(&(rio->lock));
510 } 514 }
515 unlock_kernel();
511} 516}
512 517
513static struct usb_device_id rio_table [] = { 518static const struct usb_device_id rio_table[] = {
514 { USB_DEVICE(0x0841, 1) }, /* Rio 500 */ 519 { USB_DEVICE(0x0841, 1) }, /* Rio 500 */
515 { } /* Terminating entry */ 520 { } /* Terminating entry */
516}; 521};
diff --git a/drivers/usb/misc/sisusbvga/sisusb.c b/drivers/usb/misc/sisusbvga/sisusb.c
index 8b37a4b9839e..aae95a009bd5 100644
--- a/drivers/usb/misc/sisusbvga/sisusb.c
+++ b/drivers/usb/misc/sisusbvga/sisusb.c
@@ -250,7 +250,7 @@ sisusb_bulkout_msg(struct sisusb_usb_data *sisusb, int index, unsigned int pipe,
250 sisusb->urbstatus[index] |= SU_URB_BUSY; 250 sisusb->urbstatus[index] |= SU_URB_BUSY;
251 251
252 /* Submit URB */ 252 /* Submit URB */
253 retval = usb_submit_urb(urb, GFP_ATOMIC); 253 retval = usb_submit_urb(urb, GFP_KERNEL);
254 254
255 /* If OK, and if timeout > 0, wait for completion */ 255 /* If OK, and if timeout > 0, wait for completion */
256 if ((retval == 0) && timeout) { 256 if ((retval == 0) && timeout) {
@@ -306,7 +306,7 @@ sisusb_bulkin_msg(struct sisusb_usb_data *sisusb, unsigned int pipe, void *data,
306 urb->actual_length = 0; 306 urb->actual_length = 0;
307 307
308 sisusb->completein = 0; 308 sisusb->completein = 0;
309 retval = usb_submit_urb(urb, GFP_ATOMIC); 309 retval = usb_submit_urb(urb, GFP_KERNEL);
310 if (retval == 0) { 310 if (retval == 0) {
311 wait_event_timeout(sisusb->wait_q, sisusb->completein, timeout); 311 wait_event_timeout(sisusb->wait_q, sisusb->completein, timeout);
312 if (!sisusb->completein) { 312 if (!sisusb->completein) {
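The two sisusb hunks above relax usb_submit_urb() from GFP_ATOMIC to GFP_KERNEL. Both call sites go on to sleep waiting for the URB to complete, so they are already in sleepable process context; GFP_KERNEL lets the allocator block and reclaim memory instead of failing under pressure the way the atomic pool can. The rule of thumb, sketched:

	/* Process context that may sleep (we wait for completion right after): */
	retval = usb_submit_urb(urb, GFP_KERNEL);

	/* Interrupt context, URB completion handlers, or under a spinlock:
	 * sleeping is forbidden, so the atomic pool is the only option. */
	retval = usb_submit_urb(urb, GFP_ATOMIC);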
@@ -2416,21 +2416,28 @@ sisusb_open(struct inode *inode, struct file *file)
2416 struct usb_interface *interface; 2416 struct usb_interface *interface;
2417 int subminor = iminor(inode); 2417 int subminor = iminor(inode);
2418 2418
2419 if (!(interface = usb_find_interface(&sisusb_driver, subminor))) 2419 lock_kernel();
2420 if (!(interface = usb_find_interface(&sisusb_driver, subminor))) {
2421 unlock_kernel();
2420 return -ENODEV; 2422 return -ENODEV;
2423 }
2421 2424
2422 if (!(sisusb = usb_get_intfdata(interface))) 2425 if (!(sisusb = usb_get_intfdata(interface))) {
2426 unlock_kernel();
2423 return -ENODEV; 2427 return -ENODEV;
2428 }
2424 2429
2425 mutex_lock(&sisusb->lock); 2430 mutex_lock(&sisusb->lock);
2426 2431
2427 if (!sisusb->present || !sisusb->ready) { 2432 if (!sisusb->present || !sisusb->ready) {
2428 mutex_unlock(&sisusb->lock); 2433 mutex_unlock(&sisusb->lock);
2434 unlock_kernel();
2429 return -ENODEV; 2435 return -ENODEV;
2430 } 2436 }
2431 2437
2432 if (sisusb->isopen) { 2438 if (sisusb->isopen) {
2433 mutex_unlock(&sisusb->lock); 2439 mutex_unlock(&sisusb->lock);
2440 unlock_kernel();
2434 return -EBUSY; 2441 return -EBUSY;
2435 } 2442 }
2436 2443
@@ -2439,11 +2446,13 @@ sisusb_open(struct inode *inode, struct file *file)
2439 if (sisusb_init_gfxdevice(sisusb, 0)) { 2446 if (sisusb_init_gfxdevice(sisusb, 0)) {
2440 mutex_unlock(&sisusb->lock); 2447 mutex_unlock(&sisusb->lock);
2441 dev_err(&sisusb->sisusb_dev->dev, "Failed to initialize device\n"); 2448 dev_err(&sisusb->sisusb_dev->dev, "Failed to initialize device\n");
2449 unlock_kernel();
2442 return -EIO; 2450 return -EIO;
2443 } 2451 }
2444 } else { 2452 } else {
2445 mutex_unlock(&sisusb->lock); 2453 mutex_unlock(&sisusb->lock);
2446 dev_err(&sisusb->sisusb_dev->dev, "Device not attached to USB 2.0 hub\n"); 2454 dev_err(&sisusb->sisusb_dev->dev, "Device not attached to USB 2.0 hub\n");
2455 unlock_kernel();
2447 return -EIO; 2456 return -EIO;
2448 } 2457 }
2449 } 2458 }
@@ -2456,6 +2465,7 @@ sisusb_open(struct inode *inode, struct file *file)
2456 file->private_data = sisusb; 2465 file->private_data = sisusb;
2457 2466
2458 mutex_unlock(&sisusb->lock); 2467 mutex_unlock(&sisusb->lock);
2468 unlock_kernel();
2459 2469
2460 return 0; 2470 return 0;
2461} 2471}
@@ -3238,7 +3248,7 @@ static void sisusb_disconnect(struct usb_interface *intf)
3238 kref_put(&sisusb->kref, sisusb_delete); 3248 kref_put(&sisusb->kref, sisusb_delete);
3239} 3249}
3240 3250
3241static struct usb_device_id sisusb_table [] = { 3251static const struct usb_device_id sisusb_table[] = {
3242 { USB_DEVICE(0x0711, 0x0550) }, 3252 { USB_DEVICE(0x0711, 0x0550) },
3243 { USB_DEVICE(0x0711, 0x0900) }, 3253 { USB_DEVICE(0x0711, 0x0900) },
3244 { USB_DEVICE(0x0711, 0x0901) }, 3254 { USB_DEVICE(0x0711, 0x0901) },
diff --git a/drivers/usb/misc/trancevibrator.c b/drivers/usb/misc/trancevibrator.c
index 2e14102955c5..5da28eaee314 100644
--- a/drivers/usb/misc/trancevibrator.c
+++ b/drivers/usb/misc/trancevibrator.c
@@ -33,7 +33,7 @@
33#define TRANCEVIBRATOR_VENDOR_ID 0x0b49 /* ASCII Corporation */ 33#define TRANCEVIBRATOR_VENDOR_ID 0x0b49 /* ASCII Corporation */
34#define TRANCEVIBRATOR_PRODUCT_ID 0x064f /* Trance Vibrator */ 34#define TRANCEVIBRATOR_PRODUCT_ID 0x064f /* Trance Vibrator */
35 35
36static struct usb_device_id id_table [] = { 36static const struct usb_device_id id_table[] = {
37 { USB_DEVICE(TRANCEVIBRATOR_VENDOR_ID, TRANCEVIBRATOR_PRODUCT_ID) }, 37 { USB_DEVICE(TRANCEVIBRATOR_VENDOR_ID, TRANCEVIBRATOR_PRODUCT_ID) },
38 { }, 38 { },
39}; 39};
diff --git a/drivers/usb/misc/usblcd.c b/drivers/usb/misc/usblcd.c
index 4fb120357c55..90aede90553e 100644
--- a/drivers/usb/misc/usblcd.c
+++ b/drivers/usb/misc/usblcd.c
@@ -30,7 +30,7 @@
30#define IOCTL_GET_DRV_VERSION 2 30#define IOCTL_GET_DRV_VERSION 2
31 31
32 32
33static struct usb_device_id id_table [] = { 33static const struct usb_device_id id_table[] = {
34 { .idVendor = 0x10D2, .match_flags = USB_DEVICE_ID_MATCH_VENDOR, }, 34 { .idVendor = 0x10D2, .match_flags = USB_DEVICE_ID_MATCH_VENDOR, },
35 { }, 35 { },
36}; 36};
@@ -74,10 +74,12 @@ static int lcd_open(struct inode *inode, struct file *file)
74 struct usb_interface *interface; 74 struct usb_interface *interface;
75 int subminor, r; 75 int subminor, r;
76 76
77 lock_kernel();
77 subminor = iminor(inode); 78 subminor = iminor(inode);
78 79
79 interface = usb_find_interface(&lcd_driver, subminor); 80 interface = usb_find_interface(&lcd_driver, subminor);
80 if (!interface) { 81 if (!interface) {
82 unlock_kernel();
81 err ("USBLCD: %s - error, can't find device for minor %d", 83 err ("USBLCD: %s - error, can't find device for minor %d",
82 __func__, subminor); 84 __func__, subminor);
83 return -ENODEV; 85 return -ENODEV;
@@ -87,6 +89,7 @@ static int lcd_open(struct inode *inode, struct file *file)
87 dev = usb_get_intfdata(interface); 89 dev = usb_get_intfdata(interface);
88 if (!dev) { 90 if (!dev) {
89 mutex_unlock(&open_disc_mutex); 91 mutex_unlock(&open_disc_mutex);
92 unlock_kernel();
90 return -ENODEV; 93 return -ENODEV;
91 } 94 }
92 95
@@ -98,11 +101,13 @@ static int lcd_open(struct inode *inode, struct file *file)
98 r = usb_autopm_get_interface(interface); 101 r = usb_autopm_get_interface(interface);
99 if (r < 0) { 102 if (r < 0) {
100 kref_put(&dev->kref, lcd_delete); 103 kref_put(&dev->kref, lcd_delete);
104 unlock_kernel();
101 return r; 105 return r;
102 } 106 }
103 107
104 /* save our object in the file's private structure */ 108 /* save our object in the file's private structure */
105 file->private_data = dev; 109 file->private_data = dev;
110 unlock_kernel();
106 111
107 return 0; 112 return 0;
108} 113}
diff --git a/drivers/usb/misc/usbled.c b/drivers/usb/misc/usbled.c
index 06cb71942dc7..63da2c3c838f 100644
--- a/drivers/usb/misc/usbled.c
+++ b/drivers/usb/misc/usbled.c
@@ -24,7 +24,7 @@
24#define PRODUCT_ID 0x1223 24#define PRODUCT_ID 0x1223
25 25
26/* table of devices that work with this driver */ 26/* table of devices that work with this driver */
27static struct usb_device_id id_table [] = { 27static const struct usb_device_id id_table[] = {
28 { USB_DEVICE(VENDOR_ID, PRODUCT_ID) }, 28 { USB_DEVICE(VENDOR_ID, PRODUCT_ID) },
29 { }, 29 { },
30}; 30};
diff --git a/drivers/usb/misc/usbsevseg.c b/drivers/usb/misc/usbsevseg.c
index 3db255537e79..a9555cb901a1 100644
--- a/drivers/usb/misc/usbsevseg.c
+++ b/drivers/usb/misc/usbsevseg.c
@@ -27,7 +27,7 @@
27#define MAXLEN 6 27#define MAXLEN 6
28 28
29/* table of devices that work with this driver */ 29/* table of devices that work with this driver */
30static struct usb_device_id id_table[] = { 30static const struct usb_device_id id_table[] = {
31 { USB_DEVICE(VENDOR_ID, PRODUCT_ID) }, 31 { USB_DEVICE(VENDOR_ID, PRODUCT_ID) },
32 { }, 32 { },
33}; 33};
diff --git a/drivers/usb/misc/usbtest.c b/drivers/usb/misc/usbtest.c
index 3dab0c0b196f..a21cce6f7403 100644
--- a/drivers/usb/misc/usbtest.c
+++ b/drivers/usb/misc/usbtest.c
@@ -1580,10 +1580,6 @@ usbtest_ioctl (struct usb_interface *intf, unsigned int code, void *buf)
1580 return -ERESTARTSYS; 1580 return -ERESTARTSYS;
1581 1581
1582 /* FIXME: What if a system sleep starts while a test is running? */ 1582 /* FIXME: What if a system sleep starts while a test is running? */
1583 if (!intf->is_active) {
1584 mutex_unlock(&dev->lock);
1585 return -EHOSTUNREACH;
1586 }
1587 1583
1588 /* some devices, like ez-usb default devices, need a non-default 1584 /* some devices, like ez-usb default devices, need a non-default
1589 * altsetting to have any active endpoints. some tests change 1585 * altsetting to have any active endpoints. some tests change
@@ -2101,7 +2097,7 @@ static struct usbtest_info generic_info = {
2101#endif 2097#endif
2102 2098
2103 2099
2104static struct usb_device_id id_table [] = { 2100static const struct usb_device_id id_table[] = {
2105 2101
2106 /*-------------------------------------------------------------*/ 2102 /*-------------------------------------------------------------*/
2107 2103
diff --git a/drivers/usb/misc/uss720.c b/drivers/usb/misc/uss720.c
index 9a6c27a01793..f56fed53f2dd 100644
--- a/drivers/usb/misc/uss720.c
+++ b/drivers/usb/misc/uss720.c
@@ -770,7 +770,7 @@ static void uss720_disconnect(struct usb_interface *intf)
770} 770}
771 771
772/* table of cables that work through this driver */ 772/* table of cables that work through this driver */
773static struct usb_device_id uss720_table [] = { 773static const struct usb_device_id uss720_table[] = {
774 { USB_DEVICE(0x047e, 0x1001) }, 774 { USB_DEVICE(0x047e, 0x1001) },
775 { USB_DEVICE(0x0557, 0x2001) }, 775 { USB_DEVICE(0x0557, 0x2001) },
776 { USB_DEVICE(0x0729, 0x1284) }, 776 { USB_DEVICE(0x0729, 0x1284) },
diff --git a/drivers/usb/misc/vstusb.c b/drivers/usb/misc/vstusb.c
deleted file mode 100644
index f26ea8dc1577..000000000000
--- a/drivers/usb/misc/vstusb.c
+++ /dev/null
@@ -1,783 +0,0 @@
1/*****************************************************************************
2 * File: drivers/usb/misc/vstusb.c
3 *
4 * Purpose: Support for the bulk USB Vernier Spectrophotometers
5 *
6 * Author: Johnnie Peters
7 * Axian Consulting
8 * Beaverton, OR, USA 97005
9 *
10 * Modified by: EQware Engineering, Inc.
11 * Oregon City, OR, USA 97045
12 *
13 * Copyright: 2007, 2008
14 * Vernier Software & Technology
15 * Beaverton, OR, USA 97005
16 *
17 * Web: www.vernier.com
18 *
19 * This program is free software; you can redistribute it and/or modify
20 * it under the terms of the GNU General Public License version 2 as
21 * published by the Free Software Foundation.
22 *
23 *****************************************************************************/
24#include <linux/kernel.h>
25#include <linux/errno.h>
26#include <linux/init.h>
27#include <linux/slab.h>
28#include <linux/module.h>
29#include <linux/mutex.h>
30#include <linux/uaccess.h>
31#include <linux/usb.h>
32
33#include <linux/usb/vstusb.h>
34
35#define DRIVER_VERSION "VST USB Driver Version 1.5"
36#define DRIVER_DESC "Vernier Software Technology Bulk USB Driver"
37
38#ifdef CONFIG_USB_DYNAMIC_MINORS
39 #define VSTUSB_MINOR_BASE 0
40#else
41 #define VSTUSB_MINOR_BASE 199
42#endif
43
44#define USB_VENDOR_OCEANOPTICS 0x2457
45#define USB_VENDOR_VERNIER 0x08F7 /* Vernier Software & Technology */
46
47#define USB_PRODUCT_USB2000 0x1002
48#define USB_PRODUCT_ADC1000_FW 0x1003 /* firmware download (renumerates) */
49#define USB_PRODUCT_ADC1000 0x1004
50#define USB_PRODUCT_HR2000_FW 0x1009 /* firmware download (renumerates) */
51#define USB_PRODUCT_HR2000 0x100A
52#define USB_PRODUCT_HR4000_FW 0x1011 /* firmware download (renumerates) */
53#define USB_PRODUCT_HR4000 0x1012
54#define USB_PRODUCT_USB650 0x1014 /* "Red Tide" */
55#define USB_PRODUCT_QE65000 0x1018
56#define USB_PRODUCT_USB4000 0x1022
57#define USB_PRODUCT_USB325 0x1024 /* "Vernier Spectrometer" */
58
59#define USB_PRODUCT_LABPRO 0x0001
60#define USB_PRODUCT_LABQUEST 0x0005
61
62#define VST_MAXBUFFER (64*1024)
63
64static struct usb_device_id id_table[] = {
65 { USB_DEVICE(USB_VENDOR_OCEANOPTICS, USB_PRODUCT_USB2000)},
66 { USB_DEVICE(USB_VENDOR_OCEANOPTICS, USB_PRODUCT_HR4000)},
67 { USB_DEVICE(USB_VENDOR_OCEANOPTICS, USB_PRODUCT_USB650)},
68 { USB_DEVICE(USB_VENDOR_OCEANOPTICS, USB_PRODUCT_USB4000)},
69 { USB_DEVICE(USB_VENDOR_OCEANOPTICS, USB_PRODUCT_USB325)},
70 { USB_DEVICE(USB_VENDOR_VERNIER, USB_PRODUCT_LABQUEST)},
71 { USB_DEVICE(USB_VENDOR_VERNIER, USB_PRODUCT_LABPRO)},
72 {},
73};
74
75MODULE_DEVICE_TABLE(usb, id_table);
76
77struct vstusb_device {
78 struct kref kref;
79 struct mutex lock;
80 struct usb_device *usb_dev;
81 char present;
82 char isopen;
83 struct usb_anchor submitted;
84 int rd_pipe;
85 int rd_timeout_ms;
86 int wr_pipe;
87 int wr_timeout_ms;
88};
89#define to_vst_dev(d) container_of(d, struct vstusb_device, kref)
90
91static struct usb_driver vstusb_driver;
92
93static void vstusb_delete(struct kref *kref)
94{
95 struct vstusb_device *vstdev = to_vst_dev(kref);
96
97 usb_put_dev(vstdev->usb_dev);
98 kfree(vstdev);
99}
100
101static int vstusb_open(struct inode *inode, struct file *file)
102{
103 struct vstusb_device *vstdev;
104 struct usb_interface *interface;
105
106 interface = usb_find_interface(&vstusb_driver, iminor(inode));
107
108 if (!interface) {
109 printk(KERN_ERR KBUILD_MODNAME
110 ": %s - error, can't find device for minor %d\n",
111 __func__, iminor(inode));
112 return -ENODEV;
113 }
114
115 vstdev = usb_get_intfdata(interface);
116
117 if (!vstdev)
118 return -ENODEV;
119
120 /* lock this device */
121 mutex_lock(&vstdev->lock);
122
123 /* can only open one time */
124 if ((!vstdev->present) || (vstdev->isopen)) {
125 mutex_unlock(&vstdev->lock);
126 return -EBUSY;
127 }
128
129 /* increment our usage count */
130 kref_get(&vstdev->kref);
131
132 vstdev->isopen = 1;
133
134 /* save device in the file's private structure */
135 file->private_data = vstdev;
136
137 dev_dbg(&vstdev->usb_dev->dev, "%s: opened\n", __func__);
138
139 mutex_unlock(&vstdev->lock);
140
141 return 0;
142}
143
144static int vstusb_release(struct inode *inode, struct file *file)
145{
146 struct vstusb_device *vstdev;
147
148 vstdev = file->private_data;
149
150 if (vstdev == NULL)
151 return -ENODEV;
152
153 mutex_lock(&vstdev->lock);
154
155 vstdev->isopen = 0;
156
157 dev_dbg(&vstdev->usb_dev->dev, "%s: released\n", __func__);
158
159 mutex_unlock(&vstdev->lock);
160
161 kref_put(&vstdev->kref, vstusb_delete);
162
163 return 0;
164}
165
166static void usb_api_blocking_completion(struct urb *urb)
167{
168 struct completion *completeit = urb->context;
169
170 complete(completeit);
171}
172
173static int vstusb_fill_and_send_urb(struct urb *urb,
174 struct usb_device *usb_dev,
175 unsigned int pipe, void *data,
176 unsigned int len, struct completion *done)
177{
178 struct usb_host_endpoint *ep;
179 struct usb_host_endpoint **hostep;
180 unsigned int pipend;
181
182 int status;
183
184 hostep = usb_pipein(pipe) ? usb_dev->ep_in : usb_dev->ep_out;
185 pipend = usb_pipeendpoint(pipe);
186 ep = hostep[pipend];
187
188 if (!ep || (len == 0))
189 return -EINVAL;
190
191 if ((ep->desc.bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
192 == USB_ENDPOINT_XFER_INT) {
193 pipe = (pipe & ~(3 << 30)) | (PIPE_INTERRUPT << 30);
194 usb_fill_int_urb(urb, usb_dev, pipe, data, len,
195 (usb_complete_t)usb_api_blocking_completion,
196 NULL, ep->desc.bInterval);
197 } else
198 usb_fill_bulk_urb(urb, usb_dev, pipe, data, len,
199 (usb_complete_t)usb_api_blocking_completion,
200 NULL);
201
202 init_completion(done);
203 urb->context = done;
204 urb->actual_length = 0;
205 status = usb_submit_urb(urb, GFP_KERNEL);
206
207 return status;
208}
209
210static int vstusb_complete_urb(struct urb *urb, struct completion *done,
211 int timeout, int *actual_length)
212{
213 unsigned long expire;
214 int status;
215
216 expire = timeout ? msecs_to_jiffies(timeout) : MAX_SCHEDULE_TIMEOUT;
217 if (!wait_for_completion_interruptible_timeout(done, expire)) {
218 usb_kill_urb(urb);
219 status = urb->status == -ENOENT ? -ETIMEDOUT : urb->status;
220
221 dev_dbg(&urb->dev->dev,
222 "%s timed out on ep%d%s len=%d/%d, urb status = %d\n",
223 current->comm,
224 usb_pipeendpoint(urb->pipe),
225 usb_pipein(urb->pipe) ? "in" : "out",
226 urb->actual_length,
227 urb->transfer_buffer_length,
228 urb->status);
229
230 } else {
231 if (signal_pending(current)) {
232 /* if really an error */
233 if (urb->status && !((urb->status == -ENOENT) ||
234 (urb->status == -ECONNRESET) ||
235 (urb->status == -ESHUTDOWN))) {
236 status = -EINTR;
237 usb_kill_urb(urb);
238 } else {
239 status = 0;
240 }
241
242 dev_dbg(&urb->dev->dev,
243 "%s: signal pending on ep%d%s len=%d/%d,"
244 "urb status = %d\n",
245 current->comm,
246 usb_pipeendpoint(urb->pipe),
247 usb_pipein(urb->pipe) ? "in" : "out",
248 urb->actual_length,
249 urb->transfer_buffer_length,
250 urb->status);
251
252 } else {
253 status = urb->status;
254 }
255 }
256
257 if (actual_length)
258 *actual_length = urb->actual_length;
259
260 return status;
261}
262
263static ssize_t vstusb_read(struct file *file, char __user *buffer,
264 size_t count, loff_t *ppos)
265{
266 struct vstusb_device *vstdev;
267 int cnt = -1;
268 void *buf;
269 int retval = 0;
270
271 struct urb *urb;
272 struct usb_device *dev;
273 unsigned int pipe;
274 int timeout;
275
276 DECLARE_COMPLETION_ONSTACK(done);
277
278 vstdev = file->private_data;
279
280 if (vstdev == NULL)
281 return -ENODEV;
282
283 /* verify that we actually want to read some data */
284 if ((count == 0) || (count > VST_MAXBUFFER))
285 return -EINVAL;
286
287 /* lock this object */
288 if (mutex_lock_interruptible(&vstdev->lock))
289 return -ERESTARTSYS;
290
291 /* anyone home */
292 if (!vstdev->present) {
293 mutex_unlock(&vstdev->lock);
294 printk(KERN_ERR KBUILD_MODNAME
295 ": %s: device not present\n", __func__);
296 return -ENODEV;
297 }
298
299 /* pull out the necessary data */
300 dev = vstdev->usb_dev;
301 pipe = usb_rcvbulkpipe(dev, vstdev->rd_pipe);
302 timeout = vstdev->rd_timeout_ms;
303
304 buf = kmalloc(count, GFP_KERNEL);
305 if (buf == NULL) {
306 mutex_unlock(&vstdev->lock);
307 return -ENOMEM;
308 }
309
310 urb = usb_alloc_urb(0, GFP_KERNEL);
311 if (!urb) {
312 kfree(buf);
313 mutex_unlock(&vstdev->lock);
314 return -ENOMEM;
315 }
316
317 usb_anchor_urb(urb, &vstdev->submitted);
318 retval = vstusb_fill_and_send_urb(urb, dev, pipe, buf, count, &done);
319 mutex_unlock(&vstdev->lock);
320 if (retval) {
321 usb_unanchor_urb(urb);
322 dev_err(&dev->dev, "%s: error %d filling and sending urb %d\n",
323 __func__, retval, pipe);
324 goto exit;
325 }
326
327 retval = vstusb_complete_urb(urb, &done, timeout, &cnt);
328 if (retval) {
329 dev_err(&dev->dev, "%s: error %d completing urb %d\n",
330 __func__, retval, pipe);
331 goto exit;
332 }
333
334 if (copy_to_user(buffer, buf, cnt)) {
335 dev_err(&dev->dev, "%s: can't copy_to_user\n", __func__);
336 retval = -EFAULT;
337 } else {
338 retval = cnt;
339 dev_dbg(&dev->dev, "%s: read %d bytes from pipe %d\n",
340 __func__, cnt, pipe);
341 }
342
343exit:
344 usb_free_urb(urb);
345 kfree(buf);
346 return retval;
347}
348
349static ssize_t vstusb_write(struct file *file, const char __user *buffer,
350 size_t count, loff_t *ppos)
351{
352 struct vstusb_device *vstdev;
353 int cnt = -1;
354 void *buf;
355 int retval = 0;
356
357 struct urb *urb;
358 struct usb_device *dev;
359 unsigned int pipe;
360 int timeout;
361
362 DECLARE_COMPLETION_ONSTACK(done);
363
364 vstdev = file->private_data;
365
366 if (vstdev == NULL)
367 return -ENODEV;
368
369 /* verify that we actually have some data to write */
370 if ((count == 0) || (count > VST_MAXBUFFER))
371 return retval;
372
373 /* lock this object */
374 if (mutex_lock_interruptible(&vstdev->lock))
375 return -ERESTARTSYS;
376
377 /* anyone home */
378 if (!vstdev->present) {
379 mutex_unlock(&vstdev->lock);
380 printk(KERN_ERR KBUILD_MODNAME
381 ": %s: device not present\n", __func__);
382 return -ENODEV;
383 }
384
385 /* pull out the necessary data */
386 dev = vstdev->usb_dev;
387 pipe = usb_sndbulkpipe(dev, vstdev->wr_pipe);
388 timeout = vstdev->wr_timeout_ms;
389
390 buf = kmalloc(count, GFP_KERNEL);
391 if (buf == NULL) {
392 mutex_unlock(&vstdev->lock);
393 return -ENOMEM;
394 }
395
396 urb = usb_alloc_urb(0, GFP_KERNEL);
397 if (!urb) {
398 kfree(buf);
399 mutex_unlock(&vstdev->lock);
400 return -ENOMEM;
401 }
402
403 if (copy_from_user(buf, buffer, count)) {
404 mutex_unlock(&vstdev->lock);
405 dev_err(&dev->dev, "%s: can't copy_from_user\n", __func__);
406 retval = -EFAULT;
407 goto exit;
408 }
409
410 usb_anchor_urb(urb, &vstdev->submitted);
411 retval = vstusb_fill_and_send_urb(urb, dev, pipe, buf, count, &done);
412 mutex_unlock(&vstdev->lock);
413 if (retval) {
414 usb_unanchor_urb(urb);
415 dev_err(&dev->dev, "%s: error %d filling and sending urb %d\n",
416 __func__, retval, pipe);
417 goto exit;
418 }
419
420 retval = vstusb_complete_urb(urb, &done, timeout, &cnt);
421 if (retval) {
422 dev_err(&dev->dev, "%s: error %d completing urb %d\n",
423 __func__, retval, pipe);
424 goto exit;
425 } else {
426 retval = cnt;
427 dev_dbg(&dev->dev, "%s: sent %d bytes to pipe %d\n",
428 __func__, cnt, pipe);
429 }
430
431exit:
432 usb_free_urb(urb);
433 kfree(buf);
434 return retval;
435}
436
437static long vstusb_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
438{
439 int retval = 0;
440 int cnt = -1;
441 void __user *data = (void __user *)arg;
442 struct vstusb_args usb_data;
443
444 struct vstusb_device *vstdev;
445 void *buffer = NULL; /* must be initialized. buffer is
446 * referenced on exit but not all
447 * ioctls allocate it */
448
449 struct urb *urb = NULL; /* must be initialized. urb is
450 * referenced on exit but not all
451 * ioctls allocate it */
452 struct usb_device *dev;
453 unsigned int pipe;
454 int timeout;
455
456 DECLARE_COMPLETION_ONSTACK(done);
457
458	vstdev = file->private_data;
459
460	if (vstdev == NULL)
461		return -ENODEV;
462
463	if (_IOC_TYPE(cmd) != VST_IOC_MAGIC) {
464		dev_warn(&vstdev->usb_dev->dev,
465			 "%s: ioctl command %x, bad ioctl magic %x, "
466			 "expected %x\n", __func__, cmd,
467			 _IOC_TYPE(cmd), VST_IOC_MAGIC);
468		return -EINVAL;
469	}
470
471 if (copy_from_user(&usb_data, data, sizeof(struct vstusb_args))) {
472 dev_err(&vstdev->usb_dev->dev, "%s: can't copy_from_user\n",
473 __func__);
474 return -EFAULT;
475 }
476
477 /* lock this object */
478 if (mutex_lock_interruptible(&vstdev->lock)) {
479 retval = -ERESTARTSYS;
480 goto exit;
481 }
482
483 /* anyone home */
484 if (!vstdev->present) {
485 mutex_unlock(&vstdev->lock);
486 dev_err(&vstdev->usb_dev->dev, "%s: device not present\n",
487 __func__);
488 retval = -ENODEV;
489 goto exit;
490 }
491
492 /* pull out the necessary data */
493 dev = vstdev->usb_dev;
494
495 switch (cmd) {
496
497 case IOCTL_VSTUSB_CONFIG_RW:
498
499 vstdev->rd_pipe = usb_data.rd_pipe;
500 vstdev->rd_timeout_ms = usb_data.rd_timeout_ms;
501 vstdev->wr_pipe = usb_data.wr_pipe;
502 vstdev->wr_timeout_ms = usb_data.wr_timeout_ms;
503
504 mutex_unlock(&vstdev->lock);
505
506 dev_dbg(&dev->dev, "%s: setting pipes/timeouts, "
507 "rdpipe = %d, rdtimeout = %d, "
508 "wrpipe = %d, wrtimeout = %d\n", __func__,
509 vstdev->rd_pipe, vstdev->rd_timeout_ms,
510 vstdev->wr_pipe, vstdev->wr_timeout_ms);
511 break;
512
513 case IOCTL_VSTUSB_SEND_PIPE:
514
515 if ((usb_data.count == 0) || (usb_data.count > VST_MAXBUFFER)) {
516 mutex_unlock(&vstdev->lock);
517 retval = -EINVAL;
518 goto exit;
519 }
520
521 buffer = kmalloc(usb_data.count, GFP_KERNEL);
522 if (buffer == NULL) {
523 mutex_unlock(&vstdev->lock);
524 retval = -ENOMEM;
525 goto exit;
526 }
527
528 urb = usb_alloc_urb(0, GFP_KERNEL);
529 if (!urb) {
530 mutex_unlock(&vstdev->lock);
531 retval = -ENOMEM;
532 goto exit;
533 }
534
535 timeout = usb_data.timeout_ms;
536
537 pipe = usb_sndbulkpipe(dev, usb_data.pipe);
538
539 if (copy_from_user(buffer, usb_data.buffer, usb_data.count)) {
540 dev_err(&dev->dev, "%s: can't copy_from_user\n",
541 __func__);
542 mutex_unlock(&vstdev->lock);
543 retval = -EFAULT;
544 goto exit;
545 }
546
547 usb_anchor_urb(urb, &vstdev->submitted);
548 retval = vstusb_fill_and_send_urb(urb, dev, pipe, buffer,
549 usb_data.count, &done);
550 mutex_unlock(&vstdev->lock);
551 if (retval) {
552 usb_unanchor_urb(urb);
553 dev_err(&dev->dev,
554 "%s: error %d filling and sending urb %d\n",
555 __func__, retval, pipe);
556 goto exit;
557 }
558
559 retval = vstusb_complete_urb(urb, &done, timeout, &cnt);
560 if (retval) {
561 dev_err(&dev->dev, "%s: error %d completing urb %d\n",
562 __func__, retval, pipe);
563 }
564
565 break;
566 case IOCTL_VSTUSB_RECV_PIPE:
567
568 if ((usb_data.count == 0) || (usb_data.count > VST_MAXBUFFER)) {
569 mutex_unlock(&vstdev->lock);
570 retval = -EINVAL;
571 goto exit;
572 }
573
574 buffer = kmalloc(usb_data.count, GFP_KERNEL);
575 if (buffer == NULL) {
576 mutex_unlock(&vstdev->lock);
577 retval = -ENOMEM;
578 goto exit;
579 }
580
581 urb = usb_alloc_urb(0, GFP_KERNEL);
582 if (!urb) {
583 mutex_unlock(&vstdev->lock);
584 retval = -ENOMEM;
585 goto exit;
586 }
587
588 timeout = usb_data.timeout_ms;
589
590 pipe = usb_rcvbulkpipe(dev, usb_data.pipe);
591
592 usb_anchor_urb(urb, &vstdev->submitted);
593 retval = vstusb_fill_and_send_urb(urb, dev, pipe, buffer,
594 usb_data.count, &done);
595 mutex_unlock(&vstdev->lock);
596 if (retval) {
597 usb_unanchor_urb(urb);
598 dev_err(&dev->dev,
599 "%s: error %d filling and sending urb %d\n",
600 __func__, retval, pipe);
601 goto exit;
602 }
603
604 retval = vstusb_complete_urb(urb, &done, timeout, &cnt);
605 if (retval) {
606 dev_err(&dev->dev, "%s: error %d completing urb %d\n",
607 __func__, retval, pipe);
608 goto exit;
609 }
610
611 if (copy_to_user(usb_data.buffer, buffer, cnt)) {
612 dev_err(&dev->dev, "%s: can't copy_to_user\n",
613 __func__);
614 retval = -EFAULT;
615 goto exit;
616 }
617
618 usb_data.count = cnt;
619 if (copy_to_user(data, &usb_data, sizeof(struct vstusb_args))) {
620 dev_err(&dev->dev, "%s: can't copy_to_user\n",
621 __func__);
622 retval = -EFAULT;
623 } else {
624 dev_dbg(&dev->dev, "%s: recv %zd bytes from pipe %d\n",
625 __func__, usb_data.count, usb_data.pipe);
626 }
627
628 break;
629
630 default:
631 mutex_unlock(&vstdev->lock);
632 dev_warn(&dev->dev, "ioctl_vstusb: invalid ioctl cmd %x\n",
633 cmd);
634 return -EINVAL;
635 break;
636 }
637exit:
638 usb_free_urb(urb);
639 kfree(buffer);
640 return retval;
641}
642
643static const struct file_operations vstusb_fops = {
644 .owner = THIS_MODULE,
645 .read = vstusb_read,
646 .write = vstusb_write,
647 .unlocked_ioctl = vstusb_ioctl,
648 .compat_ioctl = vstusb_ioctl,
649 .open = vstusb_open,
650 .release = vstusb_release,
651};
652
653static struct usb_class_driver usb_vstusb_class = {
654 .name = "usb/vstusb%d",
655 .fops = &vstusb_fops,
656 .minor_base = VSTUSB_MINOR_BASE,
657};
658
659static int vstusb_probe(struct usb_interface *intf,
660 const struct usb_device_id *id)
661{
662 struct usb_device *dev = interface_to_usbdev(intf);
663 struct vstusb_device *vstdev;
664 int i;
665 int retval = 0;
666
667	/* allocate memory for our device state and initialize it */
668
669 vstdev = kzalloc(sizeof(*vstdev), GFP_KERNEL);
670 if (vstdev == NULL)
671 return -ENOMEM;
672
673 /* must do usb_get_dev() prior to kref_init() since the kref_put()
674 * release function will do a usb_put_dev() */
675 usb_get_dev(dev);
676 kref_init(&vstdev->kref);
677 mutex_init(&vstdev->lock);
678
679 i = dev->descriptor.bcdDevice;
680
681 dev_dbg(&intf->dev, "Version %1d%1d.%1d%1d found at address %d\n",
682 (i & 0xF000) >> 12, (i & 0xF00) >> 8,
683 (i & 0xF0) >> 4, (i & 0xF), dev->devnum);
684
685 vstdev->present = 1;
686 vstdev->isopen = 0;
687 vstdev->usb_dev = dev;
688 init_usb_anchor(&vstdev->submitted);
689
690 usb_set_intfdata(intf, vstdev);
691 retval = usb_register_dev(intf, &usb_vstusb_class);
692 if (retval) {
693 dev_err(&intf->dev,
694 "%s: Not able to get a minor for this device.\n",
695 __func__);
696 usb_set_intfdata(intf, NULL);
697 kref_put(&vstdev->kref, vstusb_delete);
698 return retval;
699 }
700
701 /* let the user know what node this device is now attached to */
702 dev_info(&intf->dev,
703 "VST USB Device #%d now attached to major %d minor %d\n",
704 (intf->minor - VSTUSB_MINOR_BASE), USB_MAJOR, intf->minor);
705
706 dev_info(&intf->dev, "%s, %s\n", DRIVER_DESC, DRIVER_VERSION);
707
708 return retval;
709}
710
711static void vstusb_disconnect(struct usb_interface *intf)
712{
713 struct vstusb_device *vstdev = usb_get_intfdata(intf);
714
715 usb_deregister_dev(intf, &usb_vstusb_class);
716 usb_set_intfdata(intf, NULL);
717
718 if (vstdev) {
719
720 mutex_lock(&vstdev->lock);
721 vstdev->present = 0;
722
723 usb_kill_anchored_urbs(&vstdev->submitted);
724
725 mutex_unlock(&vstdev->lock);
726
727 kref_put(&vstdev->kref, vstusb_delete);
728 }
729
730}
731
732static int vstusb_suspend(struct usb_interface *intf, pm_message_t message)
733{
734 struct vstusb_device *vstdev = usb_get_intfdata(intf);
735 int time;
736 if (!vstdev)
737 return 0;
738
739 mutex_lock(&vstdev->lock);
740 time = usb_wait_anchor_empty_timeout(&vstdev->submitted, 1000);
741 if (!time)
742 usb_kill_anchored_urbs(&vstdev->submitted);
743 mutex_unlock(&vstdev->lock);
744
745 return 0;
746}
747
748static int vstusb_resume(struct usb_interface *intf)
749{
750 return 0;
751}
752
753static struct usb_driver vstusb_driver = {
754 .name = "vstusb",
755 .probe = vstusb_probe,
756 .disconnect = vstusb_disconnect,
757 .suspend = vstusb_suspend,
758 .resume = vstusb_resume,
759 .id_table = id_table,
760};
761
762static int __init vstusb_init(void)
763{
764 int rc;
765
766 rc = usb_register(&vstusb_driver);
767 if (rc)
768		printk(KERN_ERR "%s: failed to register (%d)\n", __func__, rc);
769
770 return rc;
771}
772
773static void __exit vstusb_exit(void)
774{
775 usb_deregister(&vstusb_driver);
776}
777
778module_init(vstusb_init);
779module_exit(vstusb_exit);
780
781MODULE_AUTHOR("Dennis O'Brien/Stephen Ware");
782MODULE_DESCRIPTION(DRIVER_VERSION);
783MODULE_LICENSE("GPL");
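The deleted vstusb.c above is a compact illustration of the anchored-URB pattern: every submitted URB is attached to the device's usb_anchor, so disconnect and suspend can cancel all in-flight I/O with one call. The lifecycle, condensed from the listing (vstdev is the driver's per-device struct, with the anchor initialised in probe):

	/* submit side, as in vstusb_read()/vstusb_write() */
	usb_anchor_urb(urb, &vstdev->submitted);
	retval = usb_submit_urb(urb, GFP_KERNEL);
	if (retval)
		usb_unanchor_urb(urb);	/* a failed submit must be unanchored */

	/* teardown side, as in vstusb_suspend() */
	if (!usb_wait_anchor_empty_timeout(&vstdev->submitted, 1000))
		usb_kill_anchored_urbs(&vstdev->submitted);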
diff --git a/drivers/usb/mon/mon_bin.c b/drivers/usb/mon/mon_bin.c
index 385ec0520167..6dd44bc1f5ff 100644
--- a/drivers/usb/mon/mon_bin.c
+++ b/drivers/usb/mon/mon_bin.c
@@ -460,8 +460,8 @@ static void mon_bin_event(struct mon_reader_bin *rp, struct urb *urb,
460 char ev_type, int status) 460 char ev_type, int status)
461{ 461{
462 const struct usb_endpoint_descriptor *epd = &urb->ep->desc; 462 const struct usb_endpoint_descriptor *epd = &urb->ep->desc;
463 unsigned long flags;
464 struct timeval ts; 463 struct timeval ts;
464 unsigned long flags;
465 unsigned int urb_length; 465 unsigned int urb_length;
466 unsigned int offset; 466 unsigned int offset;
467 unsigned int length; 467 unsigned int length;
@@ -600,10 +600,13 @@ static void mon_bin_complete(void *data, struct urb *urb, int status)
600static void mon_bin_error(void *data, struct urb *urb, int error) 600static void mon_bin_error(void *data, struct urb *urb, int error)
601{ 601{
602 struct mon_reader_bin *rp = data; 602 struct mon_reader_bin *rp = data;
603 struct timeval ts;
603 unsigned long flags; 604 unsigned long flags;
604 unsigned int offset; 605 unsigned int offset;
605 struct mon_bin_hdr *ep; 606 struct mon_bin_hdr *ep;
606 607
608 do_gettimeofday(&ts);
609
607 spin_lock_irqsave(&rp->b_lock, flags); 610 spin_lock_irqsave(&rp->b_lock, flags);
608 611
609 offset = mon_buff_area_alloc(rp, PKT_SIZE); 612 offset = mon_buff_area_alloc(rp, PKT_SIZE);
@@ -623,6 +626,8 @@ static void mon_bin_error(void *data, struct urb *urb, int error)
623 ep->devnum = urb->dev->devnum; 626 ep->devnum = urb->dev->devnum;
624 ep->busnum = urb->dev->bus->busnum; 627 ep->busnum = urb->dev->bus->busnum;
625 ep->id = (unsigned long) urb; 628 ep->id = (unsigned long) urb;
629 ep->ts_sec = ts.tv_sec;
630 ep->ts_usec = ts.tv_usec;
626 ep->status = error; 631 ep->status = error;
627 632
628 ep->flag_setup = '-'; 633 ep->flag_setup = '-';
diff --git a/drivers/usb/mon/mon_text.c b/drivers/usb/mon/mon_text.c
index 047568ff223d..31c11888ec6a 100644
--- a/drivers/usb/mon/mon_text.c
+++ b/drivers/usb/mon/mon_text.c
@@ -180,7 +180,7 @@ static inline unsigned int mon_get_timestamp(void)
180 unsigned int stamp; 180 unsigned int stamp;
181 181
182 do_gettimeofday(&tval); 182 do_gettimeofday(&tval);
183 stamp = tval.tv_sec & 0xFFFF; /* 2^32 = 4294967296. Limit to 4096s. */ 183 stamp = tval.tv_sec & 0xFFF; /* 2^32 = 4294967296. Limit to 4096s. */
184 stamp = stamp * 1000000 + tval.tv_usec; 184 stamp = stamp * 1000000 + tval.tv_usec;
185 return stamp; 185 return stamp;
186} 186}
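The mon_text hunk above narrows the seconds mask from 0xFFFF to 0xFFF, so the code finally matches its "limit to 4096s" comment. stamp is an unsigned 32-bit value, and the masked seconds are multiplied by one million before the microseconds are added, so the product must stay below 2^32:

	/* 0xFFF:   4095 * 1000000 + 999999 =  4,095,999,999  <  4,294,967,295 (fits) */
	/* 0xFFFF: 65535 * 1000000 + 999999 = 65,535,999,999  (overflows 32 bits)    */
	stamp = tval.tv_sec & 0xFFF;		/* wraps every 4096 seconds */
	stamp = stamp * 1000000 + tval.tv_usec;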
@@ -273,12 +273,12 @@ static void mon_text_error(void *data, struct urb *urb, int error)
273 273
274 ep->type = 'E'; 274 ep->type = 'E';
275 ep->id = (unsigned long) urb; 275 ep->id = (unsigned long) urb;
276 ep->busnum = 0; 276 ep->busnum = urb->dev->bus->busnum;
277 ep->devnum = urb->dev->devnum; 277 ep->devnum = urb->dev->devnum;
278 ep->epnum = usb_endpoint_num(&urb->ep->desc); 278 ep->epnum = usb_endpoint_num(&urb->ep->desc);
279 ep->xfertype = usb_endpoint_type(&urb->ep->desc); 279 ep->xfertype = usb_endpoint_type(&urb->ep->desc);
280 ep->is_in = usb_urb_dir_in(urb); 280 ep->is_in = usb_urb_dir_in(urb);
281 ep->tstamp = 0; 281 ep->tstamp = mon_get_timestamp();
282 ep->length = 0; 282 ep->length = 0;
283 ep->status = error; 283 ep->status = error;
284 284
diff --git a/drivers/usb/musb/blackfin.c b/drivers/usb/musb/blackfin.c
index ad26e6569665..bcee1339d4fd 100644
--- a/drivers/usb/musb/blackfin.c
+++ b/drivers/usb/musb/blackfin.c
@@ -30,7 +30,6 @@ void musb_write_fifo(struct musb_hw_ep *hw_ep, u16 len, const u8 *src)
30 void __iomem *fifo = hw_ep->fifo; 30 void __iomem *fifo = hw_ep->fifo;
31 void __iomem *epio = hw_ep->regs; 31 void __iomem *epio = hw_ep->regs;
32 u8 epnum = hw_ep->epnum; 32 u8 epnum = hw_ep->epnum;
33 u16 dma_reg = 0;
34 33
35 prefetch((u8 *)src); 34 prefetch((u8 *)src);
36 35
@@ -42,15 +41,17 @@ void musb_write_fifo(struct musb_hw_ep *hw_ep, u16 len, const u8 *src)
42 dump_fifo_data(src, len); 41 dump_fifo_data(src, len);
43 42
44 if (!ANOMALY_05000380 && epnum != 0) { 43 if (!ANOMALY_05000380 && epnum != 0) {
45 flush_dcache_range((unsigned int)src, 44 u16 dma_reg;
46 (unsigned int)(src + len)); 45
46 flush_dcache_range((unsigned long)src,
47 (unsigned long)(src + len));
47 48
48 /* Setup DMA address register */ 49 /* Setup DMA address register */
49 dma_reg = (u16) ((u32) src & 0xFFFF); 50 dma_reg = (u32)src;
50 bfin_write16(USB_DMA_REG(epnum, USB_DMAx_ADDR_LOW), dma_reg); 51 bfin_write16(USB_DMA_REG(epnum, USB_DMAx_ADDR_LOW), dma_reg);
51 SSYNC(); 52 SSYNC();
52 53
53 dma_reg = (u16) (((u32) src >> 16) & 0xFFFF); 54 dma_reg = (u32)src >> 16;
54 bfin_write16(USB_DMA_REG(epnum, USB_DMAx_ADDR_HIGH), dma_reg); 55 bfin_write16(USB_DMA_REG(epnum, USB_DMAx_ADDR_HIGH), dma_reg);
55 SSYNC(); 56 SSYNC();
56 57
@@ -79,12 +80,9 @@ void musb_write_fifo(struct musb_hw_ep *hw_ep, u16 len, const u8 *src)
79 SSYNC(); 80 SSYNC();
80 81
81 if (unlikely((unsigned long)src & 0x01)) 82 if (unlikely((unsigned long)src & 0x01))
82 outsw_8((unsigned long)fifo, src, 83 outsw_8((unsigned long)fifo, src, (len + 1) >> 1);
83 len & 0x01 ? (len >> 1) + 1 : len >> 1);
84 else 84 else
85 outsw((unsigned long)fifo, src, 85 outsw((unsigned long)fifo, src, (len + 1) >> 1);
86 len & 0x01 ? (len >> 1) + 1 : len >> 1);
87
88 } 86 }
89} 87}
90/* 88/*
@@ -94,19 +92,19 @@ void musb_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *dst)
94{ 92{
95 void __iomem *fifo = hw_ep->fifo; 93 void __iomem *fifo = hw_ep->fifo;
96 u8 epnum = hw_ep->epnum; 94 u8 epnum = hw_ep->epnum;
97 u16 dma_reg = 0;
98 95
99 if (ANOMALY_05000467 && epnum != 0) { 96 if (ANOMALY_05000467 && epnum != 0) {
97 u16 dma_reg;
100 98
101 invalidate_dcache_range((unsigned int)dst, 99 invalidate_dcache_range((unsigned long)dst,
102 (unsigned int)(dst + len)); 100 (unsigned long)(dst + len));
103 101
104 /* Setup DMA address register */ 102 /* Setup DMA address register */
105 dma_reg = (u16) ((u32) dst & 0xFFFF); 103 dma_reg = (u32)dst;
106 bfin_write16(USB_DMA_REG(epnum, USB_DMAx_ADDR_LOW), dma_reg); 104 bfin_write16(USB_DMA_REG(epnum, USB_DMAx_ADDR_LOW), dma_reg);
107 SSYNC(); 105 SSYNC();
108 106
109 dma_reg = (u16) (((u32) dst >> 16) & 0xFFFF); 107 dma_reg = (u32)dst >> 16;
110 bfin_write16(USB_DMA_REG(epnum, USB_DMAx_ADDR_HIGH), dma_reg); 108 bfin_write16(USB_DMA_REG(epnum, USB_DMAx_ADDR_HIGH), dma_reg);
111 SSYNC(); 109 SSYNC();
112 110
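The blackfin FIFO hunks above collapse `len & 0x01 ? (len >> 1) + 1 : len >> 1` into `(len + 1) >> 1`. The two expressions are equivalent for any non-negative byte count: adding one before the halving shift rounds odd lengths up to the next 16-bit word. A spot check:

	/* len = 5 (odd):  (5 + 1) >> 1 == 3, same as (5 >> 1) + 1 == 3 */
	/* len = 6 (even): (6 + 1) >> 1 == 3, same as  6 >> 1      == 3 */
	outsw((unsigned long)fifo, src, (len + 1) >> 1);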
diff --git a/drivers/usb/musb/cppi_dma.c b/drivers/usb/musb/cppi_dma.c
index a44a450c860d..3c69a76ec392 100644
--- a/drivers/usb/musb/cppi_dma.c
+++ b/drivers/usb/musb/cppi_dma.c
@@ -1191,8 +1191,13 @@ irqreturn_t cppi_interrupt(int irq, void *dev_id)
1191 1191
1192 bd = tx_ch->head; 1192 bd = tx_ch->head;
1193 1193
1194 /*
 1195	 * If head is NULL, it could mean there is an abort interrupt
 1196	 * that needs to be acknowledged.
1197 */
1194 if (NULL == bd) { 1198 if (NULL == bd) {
1195 DBG(1, "null BD\n"); 1199 DBG(1, "null BD\n");
1200 tx_ram->tx_complete = 0;
1196 continue; 1201 continue;
1197 } 1202 }
1198 1203
@@ -1412,15 +1417,6 @@ static int cppi_channel_abort(struct dma_channel *channel)
1412 1417
1413 if (cppi_ch->transmit) { 1418 if (cppi_ch->transmit) {
1414 struct cppi_tx_stateram __iomem *tx_ram; 1419 struct cppi_tx_stateram __iomem *tx_ram;
1415 int enabled;
1416
1417 /* mask interrupts raised to signal teardown complete. */
1418 enabled = musb_readl(tibase, DAVINCI_TXCPPI_INTENAB_REG)
1419 & (1 << cppi_ch->index);
1420 if (enabled)
1421 musb_writel(tibase, DAVINCI_TXCPPI_INTCLR_REG,
1422 (1 << cppi_ch->index));
1423
1424 /* REVISIT put timeouts on these controller handshakes */ 1420 /* REVISIT put timeouts on these controller handshakes */
1425 1421
1426 cppi_dump_tx(6, cppi_ch, " (teardown)"); 1422 cppi_dump_tx(6, cppi_ch, " (teardown)");
@@ -1435,7 +1431,6 @@ static int cppi_channel_abort(struct dma_channel *channel)
1435 do { 1431 do {
1436 value = musb_readl(&tx_ram->tx_complete, 0); 1432 value = musb_readl(&tx_ram->tx_complete, 0);
1437 } while (0xFFFFFFFC != value); 1433 } while (0xFFFFFFFC != value);
1438 musb_writel(&tx_ram->tx_complete, 0, 0xFFFFFFFC);
1439 1434
1440 /* FIXME clean up the transfer state ... here? 1435 /* FIXME clean up the transfer state ... here?
1441 * the completion routine should get called with 1436 * the completion routine should get called with
@@ -1448,23 +1443,15 @@ static int cppi_channel_abort(struct dma_channel *channel)
1448 musb_writew(regs, MUSB_TXCSR, value); 1443 musb_writew(regs, MUSB_TXCSR, value);
1449 musb_writew(regs, MUSB_TXCSR, value); 1444 musb_writew(regs, MUSB_TXCSR, value);
1450 1445
1451 /* While we scrub the TX state RAM, ensure that we clean 1446 /*
1452 * up any interrupt that's currently asserted:
1453 * 1. Write to completion Ptr value 0x1(bit 0 set) 1447 * 1. Write to completion Ptr value 0x1(bit 0 set)
1454 * (write back mode) 1448 * (write back mode)
1455 * 2. Write to completion Ptr value 0x0(bit 0 cleared) 1449 * 2. Wait for abort interrupt and then put the channel in
1456 * (compare mode) 1450 * compare mode by writing 1 to the tx_complete register.
1457 * Value written is compared(for bits 31:2) and when
1458 * equal, interrupt is deasserted.
1459 */ 1451 */
1460 cppi_reset_tx(tx_ram, 1); 1452 cppi_reset_tx(tx_ram, 1);
1461 musb_writel(&tx_ram->tx_complete, 0, 0); 1453 cppi_ch->head = 0;
1462 1454 musb_writel(&tx_ram->tx_complete, 0, 1);
1463 /* re-enable interrupt */
1464 if (enabled)
1465 musb_writel(tibase, DAVINCI_TXCPPI_INTENAB_REG,
1466 (1 << cppi_ch->index));
1467
1468 cppi_dump_tx(5, cppi_ch, " (done teardown)"); 1455 cppi_dump_tx(5, cppi_ch, " (done teardown)");
1469 1456
1470 /* REVISIT tx side _should_ clean up the same way 1457 /* REVISIT tx side _should_ clean up the same way
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index 738efd8063b5..b4bbf8f2c238 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -557,6 +557,69 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
557 handled = IRQ_HANDLED; 557 handled = IRQ_HANDLED;
558 } 558 }
559 559
560
561 if (int_usb & MUSB_INTR_SUSPEND) {
562 DBG(1, "SUSPEND (%s) devctl %02x power %02x\n",
563 otg_state_string(musb), devctl, power);
564 handled = IRQ_HANDLED;
565
566 switch (musb->xceiv->state) {
567#ifdef CONFIG_USB_MUSB_OTG
568 case OTG_STATE_A_PERIPHERAL:
569 /* We also come here if the cable is removed, since
570 * this silicon doesn't report ID-no-longer-grounded.
571 *
572 * We depend on T(a_wait_bcon) to shut us down, and
573 * hope users don't do anything dicey during this
574 * undesired detour through A_WAIT_BCON.
575 */
576 musb_hnp_stop(musb);
577 usb_hcd_resume_root_hub(musb_to_hcd(musb));
578 musb_root_disconnect(musb);
579 musb_platform_try_idle(musb, jiffies
580 + msecs_to_jiffies(musb->a_wait_bcon
581 ? : OTG_TIME_A_WAIT_BCON));
582
583 break;
584#endif
585 case OTG_STATE_B_IDLE:
586 if (!musb->is_active)
587 break;
588 case OTG_STATE_B_PERIPHERAL:
589 musb_g_suspend(musb);
590 musb->is_active = is_otg_enabled(musb)
591 && musb->xceiv->gadget->b_hnp_enable;
592 if (musb->is_active) {
593#ifdef CONFIG_USB_MUSB_OTG
594 musb->xceiv->state = OTG_STATE_B_WAIT_ACON;
595 DBG(1, "HNP: Setting timer for b_ase0_brst\n");
596 mod_timer(&musb->otg_timer, jiffies
597 + msecs_to_jiffies(
598 OTG_TIME_B_ASE0_BRST));
599#endif
600 }
601 break;
602 case OTG_STATE_A_WAIT_BCON:
603 if (musb->a_wait_bcon != 0)
604 musb_platform_try_idle(musb, jiffies
605 + msecs_to_jiffies(musb->a_wait_bcon));
606 break;
607 case OTG_STATE_A_HOST:
608 musb->xceiv->state = OTG_STATE_A_SUSPEND;
609 musb->is_active = is_otg_enabled(musb)
610 && musb->xceiv->host->b_hnp_enable;
611 break;
612 case OTG_STATE_B_HOST:
613 /* Transition to B_PERIPHERAL, see 6.8.2.6 p 44 */
614 DBG(1, "REVISIT: SUSPEND as B_HOST\n");
615 break;
616 default:
617 /* "should not happen" */
618 musb->is_active = 0;
619 break;
620 }
621 }
622
560 if (int_usb & MUSB_INTR_CONNECT) { 623 if (int_usb & MUSB_INTR_CONNECT) {
561 struct usb_hcd *hcd = musb_to_hcd(musb); 624 struct usb_hcd *hcd = musb_to_hcd(musb);
562 625
@@ -625,10 +688,61 @@ b_host:
625 } 688 }
626#endif /* CONFIG_USB_MUSB_HDRC_HCD */ 689#endif /* CONFIG_USB_MUSB_HDRC_HCD */
627 690
691 if ((int_usb & MUSB_INTR_DISCONNECT) && !musb->ignore_disconnect) {
692 DBG(1, "DISCONNECT (%s) as %s, devctl %02x\n",
693 otg_state_string(musb),
694 MUSB_MODE(musb), devctl);
695 handled = IRQ_HANDLED;
696
697 switch (musb->xceiv->state) {
698#ifdef CONFIG_USB_MUSB_HDRC_HCD
699 case OTG_STATE_A_HOST:
700 case OTG_STATE_A_SUSPEND:
701 usb_hcd_resume_root_hub(musb_to_hcd(musb));
702 musb_root_disconnect(musb);
703 if (musb->a_wait_bcon != 0 && is_otg_enabled(musb))
704 musb_platform_try_idle(musb, jiffies
705 + msecs_to_jiffies(musb->a_wait_bcon));
706 break;
707#endif /* HOST */
708#ifdef CONFIG_USB_MUSB_OTG
709 case OTG_STATE_B_HOST:
710 /* REVISIT this behaves for "real disconnect"
 711 * cases; make sure the other transitions
 712 * from B_HOST act right too. The B_HOST code
713 * in hnp_stop() is currently not used...
714 */
715 musb_root_disconnect(musb);
716 musb_to_hcd(musb)->self.is_b_host = 0;
717 musb->xceiv->state = OTG_STATE_B_PERIPHERAL;
718 MUSB_DEV_MODE(musb);
719 musb_g_disconnect(musb);
720 break;
721 case OTG_STATE_A_PERIPHERAL:
722 musb_hnp_stop(musb);
723 musb_root_disconnect(musb);
724 /* FALLTHROUGH */
725 case OTG_STATE_B_WAIT_ACON:
726 /* FALLTHROUGH */
727#endif /* OTG */
728#ifdef CONFIG_USB_GADGET_MUSB_HDRC
729 case OTG_STATE_B_PERIPHERAL:
730 case OTG_STATE_B_IDLE:
731 musb_g_disconnect(musb);
732 break;
733#endif /* GADGET */
734 default:
735 WARNING("unhandled DISCONNECT transition (%s)\n",
736 otg_state_string(musb));
737 break;
738 }
739 }
740
628 /* mentor saves a bit: bus reset and babble share the same irq. 741 /* mentor saves a bit: bus reset and babble share the same irq.
629 * only host sees babble; only peripheral sees bus reset. 742 * only host sees babble; only peripheral sees bus reset.
630 */ 743 */
631 if (int_usb & MUSB_INTR_RESET) { 744 if (int_usb & MUSB_INTR_RESET) {
745 handled = IRQ_HANDLED;
632 if (is_host_capable() && (devctl & MUSB_DEVCTL_HM) != 0) { 746 if (is_host_capable() && (devctl & MUSB_DEVCTL_HM) != 0) {
633 /* 747 /*
634 * Looks like non-HS BABBLE can be ignored, but 748 * Looks like non-HS BABBLE can be ignored, but
@@ -641,7 +755,7 @@ b_host:
641 DBG(1, "BABBLE devctl: %02x\n", devctl); 755 DBG(1, "BABBLE devctl: %02x\n", devctl);
642 else { 756 else {
643 ERR("Stopping host session -- babble\n"); 757 ERR("Stopping host session -- babble\n");
644 musb_writeb(mbase, MUSB_DEVCTL, 0); 758 musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
645 } 759 }
646 } else if (is_peripheral_capable()) { 760 } else if (is_peripheral_capable()) {
647 DBG(1, "BUS RESET as %s\n", otg_state_string(musb)); 761 DBG(1, "BUS RESET as %s\n", otg_state_string(musb));
@@ -686,29 +800,7 @@ b_host:
686 otg_state_string(musb)); 800 otg_state_string(musb));
687 } 801 }
688 } 802 }
689
690 handled = IRQ_HANDLED;
691 } 803 }
692 schedule_work(&musb->irq_work);
693
694 return handled;
695}
696
697/*
698 * Interrupt Service Routine to record USB "global" interrupts.
699 * Since these do not happen often and signify things of
700 * paramount importance, it seems OK to check them individually;
701 * the order of the tests is specified in the manual
702 *
703 * @param musb instance pointer
704 * @param int_usb register contents
705 * @param devctl
706 * @param power
707 */
708static irqreturn_t musb_stage2_irq(struct musb *musb, u8 int_usb,
709 u8 devctl, u8 power)
710{
711 irqreturn_t handled = IRQ_NONE;
712 804
713#if 0 805#if 0
714/* REVISIT ... this would be for multiplexing periodic endpoints, or 806/* REVISIT ... this would be for multiplexing periodic endpoints, or
@@ -755,117 +847,7 @@ static irqreturn_t musb_stage2_irq(struct musb *musb, u8 int_usb,
755 } 847 }
756#endif 848#endif
757 849
758 if ((int_usb & MUSB_INTR_DISCONNECT) && !musb->ignore_disconnect) { 850 schedule_work(&musb->irq_work);
759 DBG(1, "DISCONNECT (%s) as %s, devctl %02x\n",
760 otg_state_string(musb),
761 MUSB_MODE(musb), devctl);
762 handled = IRQ_HANDLED;
763
764 switch (musb->xceiv->state) {
765#ifdef CONFIG_USB_MUSB_HDRC_HCD
766 case OTG_STATE_A_HOST:
767 case OTG_STATE_A_SUSPEND:
768 usb_hcd_resume_root_hub(musb_to_hcd(musb));
769 musb_root_disconnect(musb);
770 if (musb->a_wait_bcon != 0 && is_otg_enabled(musb))
771 musb_platform_try_idle(musb, jiffies
772 + msecs_to_jiffies(musb->a_wait_bcon));
773 break;
774#endif /* HOST */
775#ifdef CONFIG_USB_MUSB_OTG
776 case OTG_STATE_B_HOST:
777 /* REVISIT this behaves for "real disconnect"
 778 * cases; make sure the other transitions
 779 * from B_HOST act right too. The B_HOST code
780 * in hnp_stop() is currently not used...
781 */
782 musb_root_disconnect(musb);
783 musb_to_hcd(musb)->self.is_b_host = 0;
784 musb->xceiv->state = OTG_STATE_B_PERIPHERAL;
785 MUSB_DEV_MODE(musb);
786 musb_g_disconnect(musb);
787 break;
788 case OTG_STATE_A_PERIPHERAL:
789 musb_hnp_stop(musb);
790 musb_root_disconnect(musb);
791 /* FALLTHROUGH */
792 case OTG_STATE_B_WAIT_ACON:
793 /* FALLTHROUGH */
794#endif /* OTG */
795#ifdef CONFIG_USB_GADGET_MUSB_HDRC
796 case OTG_STATE_B_PERIPHERAL:
797 case OTG_STATE_B_IDLE:
798 musb_g_disconnect(musb);
799 break;
800#endif /* GADGET */
801 default:
802 WARNING("unhandled DISCONNECT transition (%s)\n",
803 otg_state_string(musb));
804 break;
805 }
806
807 schedule_work(&musb->irq_work);
808 }
809
810 if (int_usb & MUSB_INTR_SUSPEND) {
811 DBG(1, "SUSPEND (%s) devctl %02x power %02x\n",
812 otg_state_string(musb), devctl, power);
813 handled = IRQ_HANDLED;
814
815 switch (musb->xceiv->state) {
816#ifdef CONFIG_USB_MUSB_OTG
817 case OTG_STATE_A_PERIPHERAL:
818 /* We also come here if the cable is removed, since
819 * this silicon doesn't report ID-no-longer-grounded.
820 *
821 * We depend on T(a_wait_bcon) to shut us down, and
822 * hope users don't do anything dicey during this
823 * undesired detour through A_WAIT_BCON.
824 */
825 musb_hnp_stop(musb);
826 usb_hcd_resume_root_hub(musb_to_hcd(musb));
827 musb_root_disconnect(musb);
828 musb_platform_try_idle(musb, jiffies
829 + msecs_to_jiffies(musb->a_wait_bcon
830 ? : OTG_TIME_A_WAIT_BCON));
831 break;
832#endif
833 case OTG_STATE_B_PERIPHERAL:
834 musb_g_suspend(musb);
835 musb->is_active = is_otg_enabled(musb)
836 && musb->xceiv->gadget->b_hnp_enable;
837 if (musb->is_active) {
838#ifdef CONFIG_USB_MUSB_OTG
839 musb->xceiv->state = OTG_STATE_B_WAIT_ACON;
840 DBG(1, "HNP: Setting timer for b_ase0_brst\n");
841 mod_timer(&musb->otg_timer, jiffies
842 + msecs_to_jiffies(
843 OTG_TIME_B_ASE0_BRST));
844#endif
845 }
846 break;
847 case OTG_STATE_A_WAIT_BCON:
848 if (musb->a_wait_bcon != 0)
849 musb_platform_try_idle(musb, jiffies
850 + msecs_to_jiffies(musb->a_wait_bcon));
851 break;
852 case OTG_STATE_A_HOST:
853 musb->xceiv->state = OTG_STATE_A_SUSPEND;
854 musb->is_active = is_otg_enabled(musb)
855 && musb->xceiv->host->b_hnp_enable;
856 break;
857 case OTG_STATE_B_HOST:
858 /* Transition to B_PERIPHERAL, see 6.8.2.6 p 44 */
859 DBG(1, "REVISIT: SUSPEND as B_HOST\n");
860 break;
861 default:
862 /* "should not happen" */
863 musb->is_active = 0;
864 break;
865 }
866 schedule_work(&musb->irq_work);
867 }
868
869 851
870 return handled; 852 return handled;
871} 853}
@@ -1095,6 +1077,36 @@ static struct fifo_cfg __initdata mode_4_cfg[] = {
1095{ .hw_ep_num = 15, .style = FIFO_RXTX, .maxpacket = 1024, }, 1077{ .hw_ep_num = 15, .style = FIFO_RXTX, .maxpacket = 1024, },
1096}; 1078};
1097 1079
1080/* mode 5 - fits in 8KB */
1081static struct fifo_cfg __initdata mode_5_cfg[] = {
1082{ .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, },
1083{ .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, },
1084{ .hw_ep_num = 2, .style = FIFO_TX, .maxpacket = 512, },
1085{ .hw_ep_num = 2, .style = FIFO_RX, .maxpacket = 512, },
1086{ .hw_ep_num = 3, .style = FIFO_TX, .maxpacket = 512, },
1087{ .hw_ep_num = 3, .style = FIFO_RX, .maxpacket = 512, },
1088{ .hw_ep_num = 4, .style = FIFO_TX, .maxpacket = 512, },
1089{ .hw_ep_num = 4, .style = FIFO_RX, .maxpacket = 512, },
1090{ .hw_ep_num = 5, .style = FIFO_TX, .maxpacket = 512, },
1091{ .hw_ep_num = 5, .style = FIFO_RX, .maxpacket = 512, },
1092{ .hw_ep_num = 6, .style = FIFO_TX, .maxpacket = 32, },
1093{ .hw_ep_num = 6, .style = FIFO_RX, .maxpacket = 32, },
1094{ .hw_ep_num = 7, .style = FIFO_TX, .maxpacket = 32, },
1095{ .hw_ep_num = 7, .style = FIFO_RX, .maxpacket = 32, },
1096{ .hw_ep_num = 8, .style = FIFO_TX, .maxpacket = 32, },
1097{ .hw_ep_num = 8, .style = FIFO_RX, .maxpacket = 32, },
1098{ .hw_ep_num = 9, .style = FIFO_TX, .maxpacket = 32, },
1099{ .hw_ep_num = 9, .style = FIFO_RX, .maxpacket = 32, },
1100{ .hw_ep_num = 10, .style = FIFO_TX, .maxpacket = 32, },
1101{ .hw_ep_num = 10, .style = FIFO_RX, .maxpacket = 32, },
1102{ .hw_ep_num = 11, .style = FIFO_TX, .maxpacket = 32, },
1103{ .hw_ep_num = 11, .style = FIFO_RX, .maxpacket = 32, },
1104{ .hw_ep_num = 12, .style = FIFO_TX, .maxpacket = 32, },
1105{ .hw_ep_num = 12, .style = FIFO_RX, .maxpacket = 32, },
1106{ .hw_ep_num = 13, .style = FIFO_RXTX, .maxpacket = 512, },
1107{ .hw_ep_num = 14, .style = FIFO_RXTX, .maxpacket = 1024, },
1108{ .hw_ep_num = 15, .style = FIFO_RXTX, .maxpacket = 1024, },
1109};
1098 1110
1099/* 1111/*
1100 * configure a fifo; for non-shared endpoints, this may be called 1112 * configure a fifo; for non-shared endpoints, this may be called
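
Note: the "fits in 8KB" claim for mode_5_cfg checks out by summing the table: ten 512-byte FIFOs on endpoints 1-5, fourteen 32-byte FIFOs on endpoints 6-12, and shared 512/1024/1024-byte FIFOs on endpoints 13-15 come to 8128 bytes; with endpoint 0's fixed 64-byte control FIFO that is exactly 8192. A throwaway userspace check, not part of the patch:

        #include <stdio.h>

        int main(void)
        {
                int total = 10 * 512    /* ep1..5: TX + RX, 512 each */
                          + 14 * 32    /* ep6..12: TX + RX, 32 each */
                          + 512        /* ep13: shared RXTX */
                          + 2 * 1024   /* ep14..15: shared RXTX */
                          + 64;        /* ep0: fixed control FIFO */

                printf("mode 5 uses %d of 8192 bytes\n", total); /* 8192 */
                return 0;
        }
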
@@ -1210,6 +1222,10 @@ static int __init ep_config_from_table(struct musb *musb)
1210 cfg = mode_4_cfg; 1222 cfg = mode_4_cfg;
1211 n = ARRAY_SIZE(mode_4_cfg); 1223 n = ARRAY_SIZE(mode_4_cfg);
1212 break; 1224 break;
1225 case 5:
1226 cfg = mode_5_cfg;
1227 n = ARRAY_SIZE(mode_5_cfg);
1228 break;
1213 } 1229 }
1214 1230
1215 printk(KERN_DEBUG "%s: setup fifo_mode %d\n", 1231 printk(KERN_DEBUG "%s: setup fifo_mode %d\n",
@@ -1314,9 +1330,6 @@ enum { MUSB_CONTROLLER_MHDRC, MUSB_CONTROLLER_HDRC, };
1314 */ 1330 */
1315static int __init musb_core_init(u16 musb_type, struct musb *musb) 1331static int __init musb_core_init(u16 musb_type, struct musb *musb)
1316{ 1332{
1317#ifdef MUSB_AHB_ID
1318 u32 data;
1319#endif
1320 u8 reg; 1333 u8 reg;
1321 char *type; 1334 char *type;
1322 char aInfo[90], aRevision[32], aDate[12]; 1335 char aInfo[90], aRevision[32], aDate[12];
@@ -1328,23 +1341,17 @@ static int __init musb_core_init(u16 musb_type, struct musb *musb)
1328 reg = musb_read_configdata(mbase); 1341 reg = musb_read_configdata(mbase);
1329 1342
1330 strcpy(aInfo, (reg & MUSB_CONFIGDATA_UTMIDW) ? "UTMI-16" : "UTMI-8"); 1343 strcpy(aInfo, (reg & MUSB_CONFIGDATA_UTMIDW) ? "UTMI-16" : "UTMI-8");
1331 if (reg & MUSB_CONFIGDATA_DYNFIFO) 1344 if (reg & MUSB_CONFIGDATA_DYNFIFO) {
1332 strcat(aInfo, ", dyn FIFOs"); 1345 strcat(aInfo, ", dyn FIFOs");
1346 musb->dyn_fifo = true;
1347 }
1333 if (reg & MUSB_CONFIGDATA_MPRXE) { 1348 if (reg & MUSB_CONFIGDATA_MPRXE) {
1334 strcat(aInfo, ", bulk combine"); 1349 strcat(aInfo, ", bulk combine");
1335#ifdef C_MP_RX
1336 musb->bulk_combine = true; 1350 musb->bulk_combine = true;
1337#else
1338 strcat(aInfo, " (X)"); /* no driver support */
1339#endif
1340 } 1351 }
1341 if (reg & MUSB_CONFIGDATA_MPTXE) { 1352 if (reg & MUSB_CONFIGDATA_MPTXE) {
1342 strcat(aInfo, ", bulk split"); 1353 strcat(aInfo, ", bulk split");
1343#ifdef C_MP_TX
1344 musb->bulk_split = true; 1354 musb->bulk_split = true;
1345#else
1346 strcat(aInfo, " (X)"); /* no driver support */
1347#endif
1348 } 1355 }
1349 if (reg & MUSB_CONFIGDATA_HBRXE) { 1356 if (reg & MUSB_CONFIGDATA_HBRXE) {
1350 strcat(aInfo, ", HB-ISO Rx"); 1357 strcat(aInfo, ", HB-ISO Rx");
@@ -1360,20 +1367,7 @@ static int __init musb_core_init(u16 musb_type, struct musb *musb)
1360 printk(KERN_DEBUG "%s: ConfigData=0x%02x (%s)\n", 1367 printk(KERN_DEBUG "%s: ConfigData=0x%02x (%s)\n",
1361 musb_driver_name, reg, aInfo); 1368 musb_driver_name, reg, aInfo);
1362 1369
1363#ifdef MUSB_AHB_ID
1364 data = musb_readl(mbase, 0x404);
1365 sprintf(aDate, "%04d-%02x-%02x", (data & 0xffff),
1366 (data >> 16) & 0xff, (data >> 24) & 0xff);
1367 /* FIXME ID2 and ID3 are unused */
1368 data = musb_readl(mbase, 0x408);
1369 printk(KERN_DEBUG "ID2=%lx\n", (long unsigned)data);
1370 data = musb_readl(mbase, 0x40c);
1371 printk(KERN_DEBUG "ID3=%lx\n", (long unsigned)data);
1372 reg = musb_readb(mbase, 0x400);
1373 musb_type = ('M' == reg) ? MUSB_CONTROLLER_MHDRC : MUSB_CONTROLLER_HDRC;
1374#else
1375 aDate[0] = 0; 1370 aDate[0] = 0;
1376#endif
1377 if (MUSB_CONTROLLER_MHDRC == musb_type) { 1371 if (MUSB_CONTROLLER_MHDRC == musb_type) {
1378 musb->is_multipoint = 1; 1372 musb->is_multipoint = 1;
1379 type = "M"; 1373 type = "M";
@@ -1404,21 +1398,10 @@ static int __init musb_core_init(u16 musb_type, struct musb *musb)
1404 musb->nr_endpoints = 1; 1398 musb->nr_endpoints = 1;
1405 musb->epmask = 1; 1399 musb->epmask = 1;
1406 1400
1407 if (reg & MUSB_CONFIGDATA_DYNFIFO) { 1401 if (musb->dyn_fifo)
1408 if (musb->config->dyn_fifo) 1402 status = ep_config_from_table(musb);
1409 status = ep_config_from_table(musb); 1403 else
1410 else { 1404 status = ep_config_from_hw(musb);
1411 ERR("reconfigure software for Dynamic FIFOs\n");
1412 status = -ENODEV;
1413 }
1414 } else {
1415 if (!musb->config->dyn_fifo)
1416 status = ep_config_from_hw(musb);
1417 else {
1418 ERR("reconfigure software for static FIFOs\n");
1419 return -ENODEV;
1420 }
1421 }
1422 1405
1423 if (status < 0) 1406 if (status < 0)
1424 return status; 1407 return status;
@@ -1587,11 +1570,6 @@ irqreturn_t musb_interrupt(struct musb *musb)
1587 ep_num++; 1570 ep_num++;
1588 } 1571 }
1589 1572
1590 /* finish handling "global" interrupts after handling fifos */
1591 if (musb->int_usb)
1592 retval |= musb_stage2_irq(musb,
1593 musb->int_usb, devctl, power);
1594
1595 return retval; 1573 return retval;
1596} 1574}
1597 1575
@@ -1696,7 +1674,7 @@ musb_vbus_store(struct device *dev, struct device_attribute *attr,
1696 unsigned long val; 1674 unsigned long val;
1697 1675
1698 if (sscanf(buf, "%lu", &val) < 1) { 1676 if (sscanf(buf, "%lu", &val) < 1) {
1699 printk(KERN_ERR "Invalid VBUS timeout ms value\n"); 1677 dev_err(dev, "Invalid VBUS timeout ms value\n");
1700 return -EINVAL; 1678 return -EINVAL;
1701 } 1679 }
1702 1680
@@ -1746,7 +1724,7 @@ musb_srp_store(struct device *dev, struct device_attribute *attr,
1746 1724
1747 if (sscanf(buf, "%hu", &srp) != 1 1725 if (sscanf(buf, "%hu", &srp) != 1
1748 || (srp != 1)) { 1726 || (srp != 1)) {
1749 printk(KERN_ERR "SRP: Value must be 1\n"); 1727 dev_err(dev, "SRP: Value must be 1\n");
1750 return -EINVAL; 1728 return -EINVAL;
1751 } 1729 }
1752 1730
@@ -1759,6 +1737,19 @@ static DEVICE_ATTR(srp, 0644, NULL, musb_srp_store);
1759 1737
1760#endif /* CONFIG_USB_GADGET_MUSB_HDRC */ 1738#endif /* CONFIG_USB_GADGET_MUSB_HDRC */
1761 1739
1740static struct attribute *musb_attributes[] = {
1741 &dev_attr_mode.attr,
1742 &dev_attr_vbus.attr,
1743#ifdef CONFIG_USB_GADGET_MUSB_HDRC
1744 &dev_attr_srp.attr,
1745#endif
1746 NULL
1747};
1748
1749static const struct attribute_group musb_attr_group = {
1750 .attrs = musb_attributes,
1751};
1752
1762#endif /* sysfs */ 1753#endif /* sysfs */
1763 1754
1764/* Only used to provide driver mode change events */ 1755/* Only used to provide driver mode change events */
@@ -1833,11 +1824,7 @@ static void musb_free(struct musb *musb)
1833 */ 1824 */
1834 1825
1835#ifdef CONFIG_SYSFS 1826#ifdef CONFIG_SYSFS
1836 device_remove_file(musb->controller, &dev_attr_mode); 1827 sysfs_remove_group(&musb->controller->kobj, &musb_attr_group);
1837 device_remove_file(musb->controller, &dev_attr_vbus);
1838#ifdef CONFIG_USB_GADGET_MUSB_HDRC
1839 device_remove_file(musb->controller, &dev_attr_srp);
1840#endif
1841#endif 1828#endif
1842 1829
1843#ifdef CONFIG_USB_GADGET_MUSB_HDRC 1830#ifdef CONFIG_USB_GADGET_MUSB_HDRC
@@ -2017,22 +2004,10 @@ bad_config:
2017 musb->irq_wake = 0; 2004 musb->irq_wake = 0;
2018 } 2005 }
2019 2006
2020 pr_info("%s: USB %s mode controller at %p using %s, IRQ %d\n",
2021 musb_driver_name,
2022 ({char *s;
2023 switch (musb->board_mode) {
2024 case MUSB_HOST: s = "Host"; break;
2025 case MUSB_PERIPHERAL: s = "Peripheral"; break;
2026 default: s = "OTG"; break;
2027 }; s; }),
2028 ctrl,
2029 (is_dma_capable() && musb->dma_controller)
2030 ? "DMA" : "PIO",
2031 musb->nIrq);
2032
2033 /* host side needs more setup */ 2007 /* host side needs more setup */
2034 if (is_host_enabled(musb)) { 2008 if (is_host_enabled(musb)) {
2035 struct usb_hcd *hcd = musb_to_hcd(musb); 2009 struct usb_hcd *hcd = musb_to_hcd(musb);
2010 u8 busctl;
2036 2011
2037 otg_set_host(musb->xceiv, &hcd->self); 2012 otg_set_host(musb->xceiv, &hcd->self);
2038 2013
@@ -2040,6 +2015,13 @@ bad_config:
2040 hcd->self.otg_port = 1; 2015 hcd->self.otg_port = 1;
2041 musb->xceiv->host = &hcd->self; 2016 musb->xceiv->host = &hcd->self;
2042 hcd->power_budget = 2 * (plat->power ? : 250); 2017 hcd->power_budget = 2 * (plat->power ? : 250);
2018
2019 /* program PHY to use external vBus if required */
2020 if (plat->extvbus) {
2021 busctl = musb_readb(musb->mregs, MUSB_ULPI_BUSCONTROL);
2022 busctl |= MUSB_ULPI_USE_EXTVBUS;
2023 musb_writeb(musb->mregs, MUSB_ULPI_BUSCONTROL, busctl);
2024 }
2043 } 2025 }
2044 2026
2045 /* For the host-only role, we can activate right away. 2027 /* For the host-only role, we can activate right away.
@@ -2079,26 +2061,26 @@ bad_config:
2079 } 2061 }
2080 2062
2081#ifdef CONFIG_SYSFS 2063#ifdef CONFIG_SYSFS
2082 status = device_create_file(dev, &dev_attr_mode); 2064 status = sysfs_create_group(&musb->controller->kobj, &musb_attr_group);
2083 status = device_create_file(dev, &dev_attr_vbus);
2084#ifdef CONFIG_USB_GADGET_MUSB_HDRC
2085 status = device_create_file(dev, &dev_attr_srp);
2086#endif /* CONFIG_USB_GADGET_MUSB_HDRC */
2087 status = 0;
2088#endif 2065#endif
2089 if (status) 2066 if (status)
2090 goto fail2; 2067 goto fail2;
2091 2068
2069 dev_info(dev, "USB %s mode controller at %p using %s, IRQ %d\n",
2070 ({char *s;
2071 switch (musb->board_mode) {
2072 case MUSB_HOST: s = "Host"; break;
2073 case MUSB_PERIPHERAL: s = "Peripheral"; break;
2074 default: s = "OTG"; break;
2075 }; s; }),
2076 ctrl,
2077 (is_dma_capable() && musb->dma_controller)
2078 ? "DMA" : "PIO",
2079 musb->nIrq);
2080
2092 return 0; 2081 return 0;
2093 2082
2094fail2: 2083fail2:
2095#ifdef CONFIG_SYSFS
2096 device_remove_file(musb->controller, &dev_attr_mode);
2097 device_remove_file(musb->controller, &dev_attr_vbus);
2098#ifdef CONFIG_USB_GADGET_MUSB_HDRC
2099 device_remove_file(musb->controller, &dev_attr_srp);
2100#endif
2101#endif
2102 musb_platform_exit(musb); 2084 musb_platform_exit(musb);
2103fail: 2085fail:
2104 dev_err(musb->controller, 2086 dev_err(musb->controller,
@@ -2127,6 +2109,7 @@ static int __init musb_probe(struct platform_device *pdev)
2127{ 2109{
2128 struct device *dev = &pdev->dev; 2110 struct device *dev = &pdev->dev;
2129 int irq = platform_get_irq(pdev, 0); 2111 int irq = platform_get_irq(pdev, 0);
2112 int status;
2130 struct resource *iomem; 2113 struct resource *iomem;
2131 void __iomem *base; 2114 void __iomem *base;
2132 2115
@@ -2134,7 +2117,7 @@ static int __init musb_probe(struct platform_device *pdev)
2134 if (!iomem || irq == 0) 2117 if (!iomem || irq == 0)
2135 return -ENODEV; 2118 return -ENODEV;
2136 2119
2137 base = ioremap(iomem->start, iomem->end - iomem->start + 1); 2120 base = ioremap(iomem->start, resource_size(iomem));
2138 if (!base) { 2121 if (!base) {
2139 dev_err(dev, "ioremap failed\n"); 2122 dev_err(dev, "ioremap failed\n");
2140 return -ENOMEM; 2123 return -ENOMEM;
@@ -2144,7 +2127,12 @@ static int __init musb_probe(struct platform_device *pdev)
2144 /* clobbered by use_dma=n */ 2127 /* clobbered by use_dma=n */
2145 orig_dma_mask = dev->dma_mask; 2128 orig_dma_mask = dev->dma_mask;
2146#endif 2129#endif
2147 return musb_init_controller(dev, irq, base); 2130
2131 status = musb_init_controller(dev, irq, base);
2132 if (status < 0)
2133 iounmap(base);
2134
2135 return status;
2148} 2136}
2149 2137
2150static int __exit musb_remove(struct platform_device *pdev) 2138static int __exit musb_remove(struct platform_device *pdev)
@@ -2173,6 +2161,148 @@ static int __exit musb_remove(struct platform_device *pdev)
2173 2161
2174#ifdef CONFIG_PM 2162#ifdef CONFIG_PM
2175 2163
2164static struct musb_context_registers musb_context;
2165
2166void musb_save_context(struct musb *musb)
2167{
2168 int i;
2169 void __iomem *musb_base = musb->mregs;
2170
2171 if (is_host_enabled(musb)) {
2172 musb_context.frame = musb_readw(musb_base, MUSB_FRAME);
2173 musb_context.testmode = musb_readb(musb_base, MUSB_TESTMODE);
2174 }
2175 musb_context.power = musb_readb(musb_base, MUSB_POWER);
2176 musb_context.intrtxe = musb_readw(musb_base, MUSB_INTRTXE);
2177 musb_context.intrrxe = musb_readw(musb_base, MUSB_INTRRXE);
2178 musb_context.intrusbe = musb_readb(musb_base, MUSB_INTRUSBE);
2179 musb_context.index = musb_readb(musb_base, MUSB_INDEX);
2180 musb_context.devctl = musb_readb(musb_base, MUSB_DEVCTL);
2181
2182 for (i = 0; i < MUSB_C_NUM_EPS; ++i) {
2183 musb_writeb(musb_base, MUSB_INDEX, i);
2184 musb_context.index_regs[i].txmaxp =
2185 musb_readw(musb_base, 0x10 + MUSB_TXMAXP);
2186 musb_context.index_regs[i].txcsr =
2187 musb_readw(musb_base, 0x10 + MUSB_TXCSR);
2188 musb_context.index_regs[i].rxmaxp =
2189 musb_readw(musb_base, 0x10 + MUSB_RXMAXP);
2190 musb_context.index_regs[i].rxcsr =
2191 musb_readw(musb_base, 0x10 + MUSB_RXCSR);
2192
2193 if (musb->dyn_fifo) {
2194 musb_context.index_regs[i].txfifoadd =
2195 musb_read_txfifoadd(musb_base);
2196 musb_context.index_regs[i].rxfifoadd =
2197 musb_read_rxfifoadd(musb_base);
2198 musb_context.index_regs[i].txfifosz =
2199 musb_read_txfifosz(musb_base);
2200 musb_context.index_regs[i].rxfifosz =
2201 musb_read_rxfifosz(musb_base);
2202 }
2203 if (is_host_enabled(musb)) {
2204 musb_context.index_regs[i].txtype =
2205 musb_readb(musb_base, 0x10 + MUSB_TXTYPE);
2206 musb_context.index_regs[i].txinterval =
2207 musb_readb(musb_base, 0x10 + MUSB_TXINTERVAL);
2208 musb_context.index_regs[i].rxtype =
2209 musb_readb(musb_base, 0x10 + MUSB_RXTYPE);
2210 musb_context.index_regs[i].rxinterval =
2211 musb_readb(musb_base, 0x10 + MUSB_RXINTERVAL);
2212
2213 musb_context.index_regs[i].txfunaddr =
2214 musb_read_txfunaddr(musb_base, i);
2215 musb_context.index_regs[i].txhubaddr =
2216 musb_read_txhubaddr(musb_base, i);
2217 musb_context.index_regs[i].txhubport =
2218 musb_read_txhubport(musb_base, i);
2219
2220 musb_context.index_regs[i].rxfunaddr =
2221 musb_read_rxfunaddr(musb_base, i);
2222 musb_context.index_regs[i].rxhubaddr =
2223 musb_read_rxhubaddr(musb_base, i);
2224 musb_context.index_regs[i].rxhubport =
2225 musb_read_rxhubport(musb_base, i);
2226 }
2227 }
2228
2229 musb_writeb(musb_base, MUSB_INDEX, musb_context.index);
2230
2231 musb_platform_save_context(musb, &musb_context);
2232}
2233
2234void musb_restore_context(struct musb *musb)
2235{
2236 int i;
2237 void __iomem *musb_base = musb->mregs;
2238 void __iomem *ep_target_regs;
2239
2240 musb_platform_restore_context(musb, &musb_context);
2241
2242 if (is_host_enabled(musb)) {
2243 musb_writew(musb_base, MUSB_FRAME, musb_context.frame);
2244 musb_writeb(musb_base, MUSB_TESTMODE, musb_context.testmode);
2245 }
2246 musb_writeb(musb_base, MUSB_POWER, musb_context.power);
2247 musb_writew(musb_base, MUSB_INTRTXE, musb_context.intrtxe);
2248 musb_writew(musb_base, MUSB_INTRRXE, musb_context.intrrxe);
2249 musb_writeb(musb_base, MUSB_INTRUSBE, musb_context.intrusbe);
2250 musb_writeb(musb_base, MUSB_DEVCTL, musb_context.devctl);
2251
2252 for (i = 0; i < MUSB_C_NUM_EPS; ++i) {
2253 musb_writeb(musb_base, MUSB_INDEX, i);
2254 musb_writew(musb_base, 0x10 + MUSB_TXMAXP,
2255 musb_context.index_regs[i].txmaxp);
2256 musb_writew(musb_base, 0x10 + MUSB_TXCSR,
2257 musb_context.index_regs[i].txcsr);
2258 musb_writew(musb_base, 0x10 + MUSB_RXMAXP,
2259 musb_context.index_regs[i].rxmaxp);
2260 musb_writew(musb_base, 0x10 + MUSB_RXCSR,
2261 musb_context.index_regs[i].rxcsr);
2262
2263 if (musb->dyn_fifo) {
2264 musb_write_txfifosz(musb_base,
2265 musb_context.index_regs[i].txfifosz);
2266 musb_write_rxfifosz(musb_base,
2267 musb_context.index_regs[i].rxfifosz);
2268 musb_write_txfifoadd(musb_base,
2269 musb_context.index_regs[i].txfifoadd);
2270 musb_write_rxfifoadd(musb_base,
2271 musb_context.index_regs[i].rxfifoadd);
2272 }
2273
2274 if (is_host_enabled(musb)) {
2275 musb_writeb(musb_base, 0x10 + MUSB_TXTYPE,
2276 musb_context.index_regs[i].txtype);
2277 musb_writeb(musb_base, 0x10 + MUSB_TXINTERVAL,
2278 musb_context.index_regs[i].txinterval);
2279 musb_writeb(musb_base, 0x10 + MUSB_RXTYPE,
2280 musb_context.index_regs[i].rxtype);
 2281 musb_writeb(musb_base, 0x10 + MUSB_RXINTERVAL,
 2282 musb_context.index_regs[i].rxinterval);
 2283
2284 musb_write_txfunaddr(musb_base, i,
2285 musb_context.index_regs[i].txfunaddr);
2286 musb_write_txhubaddr(musb_base, i,
2287 musb_context.index_regs[i].txhubaddr);
2288 musb_write_txhubport(musb_base, i,
2289 musb_context.index_regs[i].txhubport);
2290
2291 ep_target_regs =
2292 musb_read_target_reg_base(i, musb_base);
2293
2294 musb_write_rxfunaddr(ep_target_regs,
2295 musb_context.index_regs[i].rxfunaddr);
2296 musb_write_rxhubaddr(ep_target_regs,
2297 musb_context.index_regs[i].rxhubaddr);
2298 musb_write_rxhubport(ep_target_regs,
2299 musb_context.index_regs[i].rxhubport);
2300 }
2301 }
2302
2303 musb_writeb(musb_base, MUSB_INDEX, musb_context.index);
2304}
2305
2176static int musb_suspend(struct device *dev) 2306static int musb_suspend(struct device *dev)
2177{ 2307{
2178 struct platform_device *pdev = to_platform_device(dev); 2308 struct platform_device *pdev = to_platform_device(dev);
@@ -2194,6 +2324,8 @@ static int musb_suspend(struct device *dev)
2194 */ 2324 */
2195 } 2325 }
2196 2326
2327 musb_save_context(musb);
2328
2197 if (musb->set_clock) 2329 if (musb->set_clock)
2198 musb->set_clock(musb->clock, 0); 2330 musb->set_clock(musb->clock, 0);
2199 else 2331 else
@@ -2215,6 +2347,8 @@ static int musb_resume_noirq(struct device *dev)
2215 else 2347 else
2216 clk_enable(musb->clock); 2348 clk_enable(musb->clock);
2217 2349
2350 musb_restore_context(musb);
2351
2218 /* for static cmos like DaVinci, register values were preserved 2352 /* for static cmos like DaVinci, register values were preserved
2219 * unless for some reason the whole soc powered down or the USB 2353 * unless for some reason the whole soc powered down or the USB
2220 * module got reset through the PSC (vs just being disabled). 2354 * module got reset through the PSC (vs just being disabled).
diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h
index 5514c7ee85bd..d849fb81c131 100644
--- a/drivers/usb/musb/musb_core.h
+++ b/drivers/usb/musb/musb_core.h
@@ -52,6 +52,15 @@ struct musb;
52struct musb_hw_ep; 52struct musb_hw_ep;
53struct musb_ep; 53struct musb_ep;
54 54
55/* Helper defines for struct musb->hwvers */
56#define MUSB_HWVERS_MAJOR(x) ((x >> 10) & 0x1f)
57#define MUSB_HWVERS_MINOR(x) (x & 0x3ff)
58#define MUSB_HWVERS_RC 0x8000
59#define MUSB_HWVERS_1300 0x52C
60#define MUSB_HWVERS_1400 0x590
61#define MUSB_HWVERS_1800 0x720
62#define MUSB_HWVERS_1900 0x784
63#define MUSB_HWVERS_2000 0x800
55 64
56#include "musb_debug.h" 65#include "musb_debug.h"
57#include "musb_dma.h" 66#include "musb_dma.h"
@@ -322,13 +331,6 @@ struct musb {
322 struct clk *clock; 331 struct clk *clock;
323 irqreturn_t (*isr)(int, void *); 332 irqreturn_t (*isr)(int, void *);
324 struct work_struct irq_work; 333 struct work_struct irq_work;
325#define MUSB_HWVERS_MAJOR(x) ((x >> 10) & 0x1f)
326#define MUSB_HWVERS_MINOR(x) (x & 0x3ff)
327#define MUSB_HWVERS_RC 0x8000
328#define MUSB_HWVERS_1300 0x52C
329#define MUSB_HWVERS_1400 0x590
330#define MUSB_HWVERS_1800 0x720
331#define MUSB_HWVERS_2000 0x800
332 u16 hwvers; 334 u16 hwvers;
333 335
334/* this hub status bit is reserved by USB 2.0 and not seen by usbcore */ 336/* this hub status bit is reserved by USB 2.0 and not seen by usbcore */
@@ -411,22 +413,15 @@ struct musb {
411 413
412 unsigned hb_iso_rx:1; /* high bandwidth iso rx? */ 414 unsigned hb_iso_rx:1; /* high bandwidth iso rx? */
413 unsigned hb_iso_tx:1; /* high bandwidth iso tx? */ 415 unsigned hb_iso_tx:1; /* high bandwidth iso tx? */
416 unsigned dyn_fifo:1; /* dynamic FIFO supported? */
414 417
415#ifdef C_MP_TX 418 unsigned bulk_split:1;
416 unsigned bulk_split:1;
417#define can_bulk_split(musb,type) \ 419#define can_bulk_split(musb,type) \
418 (((type) == USB_ENDPOINT_XFER_BULK) && (musb)->bulk_split) 420 (((type) == USB_ENDPOINT_XFER_BULK) && (musb)->bulk_split)
419#else
420#define can_bulk_split(musb, type) 0
421#endif
422 421
423#ifdef C_MP_RX 422 unsigned bulk_combine:1;
424 unsigned bulk_combine:1;
425#define can_bulk_combine(musb,type) \ 423#define can_bulk_combine(musb,type) \
426 (((type) == USB_ENDPOINT_XFER_BULK) && (musb)->bulk_combine) 424 (((type) == USB_ENDPOINT_XFER_BULK) && (musb)->bulk_combine)
427#else
428#define can_bulk_combine(musb, type) 0
429#endif
430 425
431#ifdef CONFIG_USB_GADGET_MUSB_HDRC 426#ifdef CONFIG_USB_GADGET_MUSB_HDRC
432 /* is_suspended means USB B_PERIPHERAL suspend */ 427 /* is_suspended means USB B_PERIPHERAL suspend */
@@ -461,6 +456,45 @@ struct musb {
461#endif 456#endif
462}; 457};
463 458
459#ifdef CONFIG_PM
460struct musb_csr_regs {
461 /* FIFO registers */
462 u16 txmaxp, txcsr, rxmaxp, rxcsr;
463 u16 rxfifoadd, txfifoadd;
464 u8 txtype, txinterval, rxtype, rxinterval;
465 u8 rxfifosz, txfifosz;
466 u8 txfunaddr, txhubaddr, txhubport;
467 u8 rxfunaddr, rxhubaddr, rxhubport;
468};
469
470struct musb_context_registers {
471
472#if defined(CONFIG_ARCH_OMAP34XX) || defined(CONFIG_ARCH_OMAP2430)
473 u32 otg_sysconfig, otg_forcestandby;
474#endif
475 u8 power;
476 u16 intrtxe, intrrxe;
477 u8 intrusbe;
478 u16 frame;
479 u8 index, testmode;
480
481 u8 devctl, misc;
482
483 struct musb_csr_regs index_regs[MUSB_C_NUM_EPS];
484};
485
486#if defined(CONFIG_ARCH_OMAP34XX) || defined(CONFIG_ARCH_OMAP2430)
487extern void musb_platform_save_context(struct musb *musb,
488 struct musb_context_registers *musb_context);
489extern void musb_platform_restore_context(struct musb *musb,
490 struct musb_context_registers *musb_context);
491#else
492#define musb_platform_save_context(m, x) do {} while (0)
493#define musb_platform_restore_context(m, x) do {} while (0)
494#endif
495
496#endif
497
464static inline void musb_set_vbus(struct musb *musb, int is_on) 498static inline void musb_set_vbus(struct musb *musb, int is_on)
465{ 499{
466 musb->board_set_vbus(musb, is_on); 500 musb->board_set_vbus(musb, is_on);
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index cbcf14a236e6..a9f288cd70ed 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -895,7 +895,14 @@ static int musb_gadget_enable(struct usb_ep *ep,
895 /* REVISIT if can_bulk_split(), use by updating "tmp"; 895 /* REVISIT if can_bulk_split(), use by updating "tmp";
896 * likewise high bandwidth periodic tx 896 * likewise high bandwidth periodic tx
897 */ 897 */
898 musb_writew(regs, MUSB_TXMAXP, tmp); 898 /* Set TXMAXP with the FIFO size of the endpoint
 899 * to disable double buffering mode. Currently, it seems that
 900 * double buffering has a problem if the musb RTL revision is < 2.0.
901 */
902 if (musb->hwvers < MUSB_HWVERS_2000)
903 musb_writew(regs, MUSB_TXMAXP, hw_ep->max_packet_sz_tx);
904 else
905 musb_writew(regs, MUSB_TXMAXP, tmp);
899 906
900 csr = MUSB_TXCSR_MODE | MUSB_TXCSR_CLRDATATOG; 907 csr = MUSB_TXCSR_MODE | MUSB_TXCSR_CLRDATATOG;
901 if (musb_readw(regs, MUSB_TXCSR) 908 if (musb_readw(regs, MUSB_TXCSR)
@@ -925,7 +932,13 @@ static int musb_gadget_enable(struct usb_ep *ep,
925 /* REVISIT if can_bulk_combine() use by updating "tmp" 932 /* REVISIT if can_bulk_combine() use by updating "tmp"
926 * likewise high bandwidth periodic rx 933 * likewise high bandwidth periodic rx
927 */ 934 */
928 musb_writew(regs, MUSB_RXMAXP, tmp); 935 /* Set RXMAXP with the FIFO size of the endpoint
936 * to disable double buffering mode.
937 */
938 if (musb->hwvers < MUSB_HWVERS_2000)
939 musb_writew(regs, MUSB_RXMAXP, hw_ep->max_packet_sz_rx);
940 else
941 musb_writew(regs, MUSB_RXMAXP, tmp);
929 942
930 /* force shared fifo to OUT-only mode */ 943 /* force shared fifo to OUT-only mode */
931 if (hw_ep->is_shared_fifo) { 944 if (hw_ep->is_shared_fifo) {
@@ -1697,8 +1710,7 @@ int usb_gadget_register_driver(struct usb_gadget_driver *driver)
1697 return -EINVAL; 1710 return -EINVAL;
1698 1711
1699 /* driver must be initialized to support peripheral mode */ 1712 /* driver must be initialized to support peripheral mode */
1700 if (!musb || !(musb->board_mode == MUSB_OTG 1713 if (!musb) {
1701 || musb->board_mode != MUSB_OTG)) {
1702 DBG(1, "%s, no dev??\n", __func__); 1714 DBG(1, "%s, no dev??\n", __func__);
1703 return -ENODEV; 1715 return -ENODEV;
1704 } 1716 }
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index 74c4c3698f1e..3421cf9858b5 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -605,8 +605,14 @@ musb_rx_reinit(struct musb *musb, struct musb_qh *qh, struct musb_hw_ep *ep)
605 musb_writeb(ep->regs, MUSB_RXTYPE, qh->type_reg); 605 musb_writeb(ep->regs, MUSB_RXTYPE, qh->type_reg);
606 musb_writeb(ep->regs, MUSB_RXINTERVAL, qh->intv_reg); 606 musb_writeb(ep->regs, MUSB_RXINTERVAL, qh->intv_reg);
607 /* NOTE: bulk combining rewrites high bits of maxpacket */ 607 /* NOTE: bulk combining rewrites high bits of maxpacket */
608 musb_writew(ep->regs, MUSB_RXMAXP, 608 /* Set RXMAXP with the FIFO size of the endpoint
609 qh->maxpacket | ((qh->hb_mult - 1) << 11)); 609 * to disable double buffer mode.
610 */
611 if (musb->hwvers < MUSB_HWVERS_2000)
612 musb_writew(ep->regs, MUSB_RXMAXP, ep->max_packet_sz_rx);
613 else
614 musb_writew(ep->regs, MUSB_RXMAXP,
615 qh->maxpacket | ((qh->hb_mult - 1) << 11));
610 616
611 ep->rx_reinit = 0; 617 ep->rx_reinit = 0;
612} 618}
@@ -1771,6 +1777,9 @@ static int musb_schedule(
1771 int best_end, epnum; 1777 int best_end, epnum;
1772 struct musb_hw_ep *hw_ep = NULL; 1778 struct musb_hw_ep *hw_ep = NULL;
1773 struct list_head *head = NULL; 1779 struct list_head *head = NULL;
1780 u8 toggle;
1781 u8 txtype;
1782 struct urb *urb = next_urb(qh);
1774 1783
1775 /* use fixed hardware for control and bulk */ 1784 /* use fixed hardware for control and bulk */
1776 if (qh->type == USB_ENDPOINT_XFER_CONTROL) { 1785 if (qh->type == USB_ENDPOINT_XFER_CONTROL) {
@@ -1809,6 +1818,27 @@ static int musb_schedule(
1809 diff -= (qh->maxpacket * qh->hb_mult); 1818 diff -= (qh->maxpacket * qh->hb_mult);
1810 1819
1811 if (diff >= 0 && best_diff > diff) { 1820 if (diff >= 0 && best_diff > diff) {
1821
1822 /*
1823 * Mentor controller has a bug in that if we schedule
1824 * a BULK Tx transfer on an endpoint that had earlier
1825 * handled ISOC then the BULK transfer has to start on
1826 * a zero toggle. If the BULK transfer starts on a 1
1827 * toggle then this transfer will fail as the mentor
1828 * controller starts the Bulk transfer on a 0 toggle
1829 * irrespective of the programming of the toggle bits
1830 * in the TXCSR register. Check for this condition
1831 * while allocating the EP for a Tx Bulk transfer. If
 1832 * so, skip this EP.
1833 */
1834 hw_ep = musb->endpoints + epnum;
1835 toggle = usb_gettoggle(urb->dev, qh->epnum, !is_in);
1836 txtype = (musb_readb(hw_ep->regs, MUSB_TXTYPE)
1837 >> 4) & 0x3;
1838 if (!is_in && (qh->type == USB_ENDPOINT_XFER_BULK) &&
1839 toggle && (txtype == USB_ENDPOINT_XFER_ISOC))
1840 continue;
1841
1812 best_diff = diff; 1842 best_diff = diff;
1813 best_end = epnum; 1843 best_end = epnum;
1814 } 1844 }
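
Note: the workaround keys off usbcore's saved data toggle; usb_gettoggle(dev, ep, out) reads one bit per endpoint number from a per-direction bitmap, so the !is_in argument selects the OUT map for a Tx transfer. Roughly, paraphrasing the include/linux/usb.h macro of this era:

        /* toggle[0] covers IN endpoints, toggle[1] OUT endpoints */
        #define usb_gettoggle(dev, ep, out) \
                (((dev)->toggle[out] >> (ep)) & 1)

        toggle = usb_gettoggle(urb->dev, qh->epnum, !is_in);
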
diff --git a/drivers/usb/musb/musb_regs.h b/drivers/usb/musb/musb_regs.h
index 473a94ef905f..292894a2c247 100644
--- a/drivers/usb/musb/musb_regs.h
+++ b/drivers/usb/musb/musb_regs.h
@@ -72,6 +72,10 @@
72#define MUSB_DEVCTL_HR 0x02 72#define MUSB_DEVCTL_HR 0x02
73#define MUSB_DEVCTL_SESSION 0x01 73#define MUSB_DEVCTL_SESSION 0x01
74 74
75/* MUSB ULPI VBUSCONTROL */
76#define MUSB_ULPI_USE_EXTVBUS 0x01
77#define MUSB_ULPI_USE_EXTVBUSIND 0x02
78
75/* TESTMODE */ 79/* TESTMODE */
76#define MUSB_TEST_FORCE_HOST 0x80 80#define MUSB_TEST_FORCE_HOST 0x80
77#define MUSB_TEST_FIFO_ACCESS 0x40 81#define MUSB_TEST_FIFO_ACCESS 0x40
@@ -246,6 +250,7 @@
246 250
247/* REVISIT: vctrl/vstatus: optional vendor utmi+phy register at 0x68 */ 251/* REVISIT: vctrl/vstatus: optional vendor utmi+phy register at 0x68 */
248#define MUSB_HWVERS 0x6C /* 8 bit */ 252#define MUSB_HWVERS 0x6C /* 8 bit */
253#define MUSB_ULPI_BUSCONTROL 0x70 /* 8 bit */
249 254
250#define MUSB_EPINFO 0x78 /* 8 bit */ 255#define MUSB_EPINFO 0x78 /* 8 bit */
251#define MUSB_RAMINFO 0x79 /* 8 bit */ 256#define MUSB_RAMINFO 0x79 /* 8 bit */
@@ -321,6 +326,26 @@ static inline void musb_write_rxfifoadd(void __iomem *mbase, u16 c_off)
321 musb_writew(mbase, MUSB_RXFIFOADD, c_off); 326 musb_writew(mbase, MUSB_RXFIFOADD, c_off);
322} 327}
323 328
329static inline u8 musb_read_txfifosz(void __iomem *mbase)
330{
331 return musb_readb(mbase, MUSB_TXFIFOSZ);
332}
333
334static inline u16 musb_read_txfifoadd(void __iomem *mbase)
335{
336 return musb_readw(mbase, MUSB_TXFIFOADD);
337}
338
339static inline u8 musb_read_rxfifosz(void __iomem *mbase)
340{
341 return musb_readb(mbase, MUSB_RXFIFOSZ);
342}
343
344static inline u16 musb_read_rxfifoadd(void __iomem *mbase)
345{
346 return musb_readw(mbase, MUSB_RXFIFOADD);
347}
348
324static inline u8 musb_read_configdata(void __iomem *mbase) 349static inline u8 musb_read_configdata(void __iomem *mbase)
325{ 350{
326 musb_writeb(mbase, MUSB_INDEX, 0); 351 musb_writeb(mbase, MUSB_INDEX, 0);
@@ -376,6 +401,36 @@ static inline void musb_write_txhubport(void __iomem *mbase, u8 epnum,
376 qh_h_port_reg); 401 qh_h_port_reg);
377} 402}
378 403
404static inline u8 musb_read_rxfunaddr(void __iomem *mbase, u8 epnum)
405{
406 return musb_readb(mbase, MUSB_BUSCTL_OFFSET(epnum, MUSB_RXFUNCADDR));
407}
408
409static inline u8 musb_read_rxhubaddr(void __iomem *mbase, u8 epnum)
410{
411 return musb_readb(mbase, MUSB_BUSCTL_OFFSET(epnum, MUSB_RXHUBADDR));
412}
413
414static inline u8 musb_read_rxhubport(void __iomem *mbase, u8 epnum)
415{
416 return musb_readb(mbase, MUSB_BUSCTL_OFFSET(epnum, MUSB_RXHUBPORT));
417}
418
419static inline u8 musb_read_txfunaddr(void __iomem *mbase, u8 epnum)
420{
421 return musb_readb(mbase, MUSB_BUSCTL_OFFSET(epnum, MUSB_TXFUNCADDR));
422}
423
424static inline u8 musb_read_txhubaddr(void __iomem *mbase, u8 epnum)
425{
426 return musb_readb(mbase, MUSB_BUSCTL_OFFSET(epnum, MUSB_TXHUBADDR));
427}
428
429static inline u8 musb_read_txhubport(void __iomem *mbase, u8 epnum)
430{
431 return musb_readb(mbase, MUSB_BUSCTL_OFFSET(epnum, MUSB_TXHUBPORT));
432}
433
379#else /* CONFIG_BLACKFIN */ 434#else /* CONFIG_BLACKFIN */
380 435
381#define USB_BASE USB_FADDR 436#define USB_BASE USB_FADDR
@@ -455,6 +510,22 @@ static inline void musb_write_rxfifoadd(void __iomem *mbase, u16 c_off)
455{ 510{
456} 511}
457 512
513static inline u8 musb_read_txfifosz(void __iomem *mbase)
514{
 515 return 0; }
516
517static inline u16 musb_read_txfifoadd(void __iomem *mbase)
518{
 519 return 0; }
520
521static inline u8 musb_read_rxfifosz(void __iomem *mbase)
522{
 523 return 0; }
524
525static inline u16 musb_read_rxfifoadd(void __iomem *mbase)
526{
 527 return 0; }
528
458static inline u8 musb_read_configdata(void __iomem *mbase) 529static inline u8 musb_read_configdata(void __iomem *mbase)
459{ 530{
460 return 0; 531 return 0;
@@ -462,7 +533,11 @@ static inline u8 musb_read_configdata(void __iomem *mbase)
462 533
463static inline u16 musb_read_hwvers(void __iomem *mbase) 534static inline u16 musb_read_hwvers(void __iomem *mbase)
464{ 535{
465 return 0; 536 /*
 537 * This register is invisible on Blackfin; the MUSB RTL
 538 * version there is 1.9, so just hardcode its value.
539 */
540 return MUSB_HWVERS_1900;
466} 541}
467 542
468static inline void __iomem *musb_read_target_reg_base(u8 i, void __iomem *mbase) 543static inline void __iomem *musb_read_target_reg_base(u8 i, void __iomem *mbase)
@@ -500,6 +575,30 @@ static inline void musb_write_txhubport(void __iomem *mbase, u8 epnum,
500{ 575{
501} 576}
502 577
578static inline u8 musb_read_rxfunaddr(void __iomem *mbase, u8 epnum)
579{
 580 return 0; }
581
582static inline u8 musb_read_rxhubaddr(void __iomem *mbase, u8 epnum)
583{
 584 return 0; }
585
586static inline u8 musb_read_rxhubport(void __iomem *mbase, u8 epnum)
587{
 588 return 0; }
589
590static inline u8 musb_read_txfunaddr(void __iomem *mbase, u8 epnum)
591{
 592 return 0; }
593
594static inline u8 musb_read_txhubaddr(void __iomem *mbase, u8 epnum)
595{
 596 return 0; }
597
 598static inline u8 musb_read_txhubport(void __iomem *mbase, u8 epnum)
599{
 600 return 0; }
601
503#endif /* CONFIG_BLACKFIN */ 602#endif /* CONFIG_BLACKFIN */
504 603
505#endif /* __MUSB_REGS_H__ */ 604#endif /* __MUSB_REGS_H__ */
diff --git a/drivers/usb/musb/musbhsdma.c b/drivers/usb/musb/musbhsdma.c
index a237550f91bf..2fa7d5c00f31 100644
--- a/drivers/usb/musb/musbhsdma.c
+++ b/drivers/usb/musb/musbhsdma.c
@@ -250,20 +250,39 @@ static irqreturn_t dma_controller_irq(int irq, void *private_data)
250 u8 bchannel; 250 u8 bchannel;
251 u8 int_hsdma; 251 u8 int_hsdma;
252 252
253 u32 addr; 253 u32 addr, count;
254 u16 csr; 254 u16 csr;
255 255
256 spin_lock_irqsave(&musb->lock, flags); 256 spin_lock_irqsave(&musb->lock, flags);
257 257
258 int_hsdma = musb_readb(mbase, MUSB_HSDMA_INTR); 258 int_hsdma = musb_readb(mbase, MUSB_HSDMA_INTR);
259 if (!int_hsdma)
260 goto done;
261 259
262#ifdef CONFIG_BLACKFIN 260#ifdef CONFIG_BLACKFIN
263 /* Clear DMA interrupt flags */ 261 /* Clear DMA interrupt flags */
264 musb_writeb(mbase, MUSB_HSDMA_INTR, int_hsdma); 262 musb_writeb(mbase, MUSB_HSDMA_INTR, int_hsdma);
265#endif 263#endif
266 264
265 if (!int_hsdma) {
266 DBG(2, "spurious DMA irq\n");
267
268 for (bchannel = 0; bchannel < MUSB_HSDMA_CHANNELS; bchannel++) {
269 musb_channel = (struct musb_dma_channel *)
270 &(controller->channel[bchannel]);
271 channel = &musb_channel->channel;
272 if (channel->status == MUSB_DMA_STATUS_BUSY) {
273 count = musb_read_hsdma_count(mbase, bchannel);
274
275 if (count == 0)
276 int_hsdma |= (1 << bchannel);
277 }
278 }
279
280 DBG(2, "int_hsdma = 0x%x\n", int_hsdma);
281
282 if (!int_hsdma)
283 goto done;
284 }
285
267 for (bchannel = 0; bchannel < MUSB_HSDMA_CHANNELS; bchannel++) { 286 for (bchannel = 0; bchannel < MUSB_HSDMA_CHANNELS; bchannel++) {
268 if (int_hsdma & (1 << bchannel)) { 287 if (int_hsdma & (1 << bchannel)) {
269 musb_channel = (struct musb_dma_channel *) 288 musb_channel = (struct musb_dma_channel *)
diff --git a/drivers/usb/musb/musbhsdma.h b/drivers/usb/musb/musbhsdma.h
index 1299d92dc83f..613f95a058f7 100644
--- a/drivers/usb/musb/musbhsdma.h
+++ b/drivers/usb/musb/musbhsdma.h
@@ -55,6 +55,10 @@
55 MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_ADDRESS), \ 55 MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_ADDRESS), \
56 addr) 56 addr)
57 57
58#define musb_read_hsdma_count(mbase, bchannel) \
59 musb_readl(mbase, \
60 MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_COUNT))
61
58#define musb_write_hsdma_count(mbase, bchannel, len) \ 62#define musb_write_hsdma_count(mbase, bchannel, len) \
59 musb_writel(mbase, \ 63 musb_writel(mbase, \
60 MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_COUNT), \ 64 MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_COUNT), \
@@ -96,6 +100,19 @@ static inline void musb_write_hsdma_addr(void __iomem *mbase,
96 ((u16)(((u32) dma_addr >> 16) & 0xFFFF))); 100 ((u16)(((u32) dma_addr >> 16) & 0xFFFF)));
97} 101}
98 102
103static inline u32 musb_read_hsdma_count(void __iomem *mbase, u8 bchannel)
104{
105 u32 count = musb_readw(mbase,
106 MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_COUNT_HIGH));
107
108 count = count << 16;
109
110 count |= musb_readw(mbase,
111 MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_COUNT_LOW));
112
113 return count;
114}
115
99static inline void musb_write_hsdma_count(void __iomem *mbase, 116static inline void musb_write_hsdma_count(void __iomem *mbase,
100 u8 bchannel, u32 len) 117 u8 bchannel, u32 len)
101{ 118{
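
Note: this accessor pairs with the spurious-interrupt workaround in musbhsdma.c above; the channel's remaining byte count is split across two 16-bit registers, and a busy channel whose count has reached zero is treated as complete even when MUSB_HSDMA_INTR read back as zero. Usage sketch (handle_dma_completion is a hypothetical stand-in for the per-channel completion path):

        u32 remaining = musb_read_hsdma_count(mbase, bchannel);
        /* e.g. COUNT_HIGH = 0x0001, COUNT_LOW = 0x8000 -> 0x00018000 left */
        if (remaining == 0)
                handle_dma_completion(bchannel);  /* hypothetical helper */
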
diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c
index 83beeac5e7bf..3fe16867b5a8 100644
--- a/drivers/usb/musb/omap2430.c
+++ b/drivers/usb/musb/omap2430.c
@@ -220,7 +220,7 @@ int __init musb_platform_init(struct musb *musb)
220 220
221 musb_platform_resume(musb); 221 musb_platform_resume(musb);
222 222
223 l = omap_readl(OTG_SYSCONFIG); 223 l = musb_readl(musb->mregs, OTG_SYSCONFIG);
224 l &= ~ENABLEWAKEUP; /* disable wakeup */ 224 l &= ~ENABLEWAKEUP; /* disable wakeup */
225 l &= ~NOSTDBY; /* remove possible nostdby */ 225 l &= ~NOSTDBY; /* remove possible nostdby */
226 l |= SMARTSTDBY; /* enable smart standby */ 226 l |= SMARTSTDBY; /* enable smart standby */
@@ -233,17 +233,19 @@ int __init musb_platform_init(struct musb *musb)
233 */ 233 */
234 if (!cpu_is_omap3430()) 234 if (!cpu_is_omap3430())
235 l |= AUTOIDLE; /* enable auto idle */ 235 l |= AUTOIDLE; /* enable auto idle */
236 omap_writel(l, OTG_SYSCONFIG); 236 musb_writel(musb->mregs, OTG_SYSCONFIG, l);
237 237
238 l = omap_readl(OTG_INTERFSEL); 238 l = musb_readl(musb->mregs, OTG_INTERFSEL);
239 l |= ULPI_12PIN; 239 l |= ULPI_12PIN;
240 omap_writel(l, OTG_INTERFSEL); 240 musb_writel(musb->mregs, OTG_INTERFSEL, l);
241 241
242 pr_debug("HS USB OTG: revision 0x%x, sysconfig 0x%02x, " 242 pr_debug("HS USB OTG: revision 0x%x, sysconfig 0x%02x, "
243 "sysstatus 0x%x, intrfsel 0x%x, simenable 0x%x\n", 243 "sysstatus 0x%x, intrfsel 0x%x, simenable 0x%x\n",
244 omap_readl(OTG_REVISION), omap_readl(OTG_SYSCONFIG), 244 musb_readl(musb->mregs, OTG_REVISION),
245 omap_readl(OTG_SYSSTATUS), omap_readl(OTG_INTERFSEL), 245 musb_readl(musb->mregs, OTG_SYSCONFIG),
246 omap_readl(OTG_SIMENABLE)); 246 musb_readl(musb->mregs, OTG_SYSSTATUS),
247 musb_readl(musb->mregs, OTG_INTERFSEL),
248 musb_readl(musb->mregs, OTG_SIMENABLE));
247 249
248 omap_vbus_power(musb, musb->board_mode == MUSB_HOST, 1); 250 omap_vbus_power(musb, musb->board_mode == MUSB_HOST, 1);
249 251
@@ -255,6 +257,22 @@ int __init musb_platform_init(struct musb *musb)
255 return 0; 257 return 0;
256} 258}
257 259
260#ifdef CONFIG_PM
261void musb_platform_save_context(struct musb *musb,
262 struct musb_context_registers *musb_context)
263{
264 musb_context->otg_sysconfig = musb_readl(musb->mregs, OTG_SYSCONFIG);
265 musb_context->otg_forcestandby = musb_readl(musb->mregs, OTG_FORCESTDBY);
266}
267
268void musb_platform_restore_context(struct musb *musb,
269 struct musb_context_registers *musb_context)
270{
271 musb_writel(musb->mregs, OTG_SYSCONFIG, musb_context->otg_sysconfig);
272 musb_writel(musb->mregs, OTG_FORCESTDBY, musb_context->otg_forcestandby);
273}
274#endif
275
258int musb_platform_suspend(struct musb *musb) 276int musb_platform_suspend(struct musb *musb)
259{ 277{
260 u32 l; 278 u32 l;
@@ -263,13 +281,13 @@ int musb_platform_suspend(struct musb *musb)
263 return 0; 281 return 0;
264 282
265 /* in any role */ 283 /* in any role */
266 l = omap_readl(OTG_FORCESTDBY); 284 l = musb_readl(musb->mregs, OTG_FORCESTDBY);
267 l |= ENABLEFORCE; /* enable MSTANDBY */ 285 l |= ENABLEFORCE; /* enable MSTANDBY */
268 omap_writel(l, OTG_FORCESTDBY); 286 musb_writel(musb->mregs, OTG_FORCESTDBY, l);
269 287
270 l = omap_readl(OTG_SYSCONFIG); 288 l = musb_readl(musb->mregs, OTG_SYSCONFIG);
271 l |= ENABLEWAKEUP; /* enable wakeup */ 289 l |= ENABLEWAKEUP; /* enable wakeup */
272 omap_writel(l, OTG_SYSCONFIG); 290 musb_writel(musb->mregs, OTG_SYSCONFIG, l);
273 291
274 otg_set_suspend(musb->xceiv, 1); 292 otg_set_suspend(musb->xceiv, 1);
275 293
@@ -295,13 +313,13 @@ static int musb_platform_resume(struct musb *musb)
295 else 313 else
296 clk_enable(musb->clock); 314 clk_enable(musb->clock);
297 315
298 l = omap_readl(OTG_SYSCONFIG); 316 l = musb_readl(musb->mregs, OTG_SYSCONFIG);
299 l &= ~ENABLEWAKEUP; /* disable wakeup */ 317 l &= ~ENABLEWAKEUP; /* disable wakeup */
300 omap_writel(l, OTG_SYSCONFIG); 318 musb_writel(musb->mregs, OTG_SYSCONFIG, l);
301 319
302 l = omap_readl(OTG_FORCESTDBY); 320 l = musb_readl(musb->mregs, OTG_FORCESTDBY);
303 l &= ~ENABLEFORCE; /* disable MSTANDBY */ 321 l &= ~ENABLEFORCE; /* disable MSTANDBY */
304 omap_writel(l, OTG_FORCESTDBY); 322 musb_writel(musb->mregs, OTG_FORCESTDBY, l);
305 323
306 return 0; 324 return 0;
307} 325}
diff --git a/drivers/usb/musb/omap2430.h b/drivers/usb/musb/omap2430.h
index fbede7798aed..40b3c02ae9f0 100644
--- a/drivers/usb/musb/omap2430.h
+++ b/drivers/usb/musb/omap2430.h
@@ -10,47 +10,43 @@
10#ifndef __MUSB_OMAP243X_H__ 10#ifndef __MUSB_OMAP243X_H__
11#define __MUSB_OMAP243X_H__ 11#define __MUSB_OMAP243X_H__
12 12
13#if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3430)
14#include <mach/hardware.h>
15#include <plat/usb.h> 13#include <plat/usb.h>
16 14
17/* 15/*
18 * OMAP2430-specific definitions 16 * OMAP2430-specific definitions
19 */ 17 */
20 18
21#define MENTOR_BASE_OFFSET 0 19#define OTG_REVISION 0x400
22#if defined(CONFIG_ARCH_OMAP2430) 20
23#define OMAP_HSOTG_BASE (OMAP243X_HS_BASE) 21#define OTG_SYSCONFIG 0x404
24#elif defined(CONFIG_ARCH_OMAP3430)
25#define OMAP_HSOTG_BASE (OMAP34XX_HSUSB_OTG_BASE)
26#endif
27#define OMAP_HSOTG(offset) (OMAP_HSOTG_BASE + 0x400 + (offset))
28#define OTG_REVISION OMAP_HSOTG(0x0)
29#define OTG_SYSCONFIG OMAP_HSOTG(0x4)
30# define MIDLEMODE 12 /* bit position */ 22# define MIDLEMODE 12 /* bit position */
31# define FORCESTDBY (0 << MIDLEMODE) 23# define FORCESTDBY (0 << MIDLEMODE)
32# define NOSTDBY (1 << MIDLEMODE) 24# define NOSTDBY (1 << MIDLEMODE)
33# define SMARTSTDBY (2 << MIDLEMODE) 25# define SMARTSTDBY (2 << MIDLEMODE)
26
34# define SIDLEMODE 3 /* bit position */ 27# define SIDLEMODE 3 /* bit position */
35# define FORCEIDLE (0 << SIDLEMODE) 28# define FORCEIDLE (0 << SIDLEMODE)
36# define NOIDLE (1 << SIDLEMODE) 29# define NOIDLE (1 << SIDLEMODE)
37# define SMARTIDLE (2 << SIDLEMODE) 30# define SMARTIDLE (2 << SIDLEMODE)
31
38# define ENABLEWAKEUP (1 << 2) 32# define ENABLEWAKEUP (1 << 2)
39# define SOFTRST (1 << 1) 33# define SOFTRST (1 << 1)
40# define AUTOIDLE (1 << 0) 34# define AUTOIDLE (1 << 0)
41#define OTG_SYSSTATUS OMAP_HSOTG(0x8) 35
36#define OTG_SYSSTATUS 0x408
42# define RESETDONE (1 << 0) 37# define RESETDONE (1 << 0)
43#define OTG_INTERFSEL OMAP_HSOTG(0xc) 38
39#define OTG_INTERFSEL 0x40c
44# define EXTCP (1 << 2) 40# define EXTCP (1 << 2)
45# define PHYSEL 0 /* bit position */ 41# define PHYSEL 0 /* bit position */
46# define UTMI_8BIT (0 << PHYSEL) 42# define UTMI_8BIT (0 << PHYSEL)
47# define ULPI_12PIN (1 << PHYSEL) 43# define ULPI_12PIN (1 << PHYSEL)
48# define ULPI_8PIN (2 << PHYSEL) 44# define ULPI_8PIN (2 << PHYSEL)
49#define OTG_SIMENABLE OMAP_HSOTG(0x10) 45
46#define OTG_SIMENABLE 0x410
50# define TM1 (1 << 0) 47# define TM1 (1 << 0)
51#define OTG_FORCESTDBY OMAP_HSOTG(0x14)
52# define ENABLEFORCE (1 << 0)
53 48
54#endif /* CONFIG_ARCH_OMAP2430 */ 49#define OTG_FORCESTDBY 0x414
50# define ENABLEFORCE (1 << 0)
55 51
56#endif /* __MUSB_OMAP243X_H__ */ 52#endif /* __MUSB_OMAP243X_H__ */
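
Net effect of this header rework: the OTG_* constants become offsets from the MUSB register base instead of absolute physical addresses, so every access in omap2430.c goes through the ioremap()ed musb->mregs pointer rather than omap_readl()/omap_writel():

        /* before: absolute SoC address */
        l = omap_readl(OTG_SYSCONFIG);

        /* after: offset from the mapped MUSB base */
        l = musb_readl(musb->mregs, OTG_SYSCONFIG);

This is also what lets musb_platform_save_context() above use the same offsets on both OMAP2430 and OMAP3430 without per-SoC base-address #ifdefs.
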
diff --git a/drivers/usb/musb/tusb6010.c b/drivers/usb/musb/tusb6010.c
index 88b587c703e9..ab776a8d98ca 100644
--- a/drivers/usb/musb/tusb6010.c
+++ b/drivers/usb/musb/tusb6010.c
@@ -1118,7 +1118,7 @@ int __init musb_platform_init(struct musb *musb)
1118 } 1118 }
1119 musb->sync = mem->start; 1119 musb->sync = mem->start;
1120 1120
1121 sync = ioremap(mem->start, mem->end - mem->start + 1); 1121 sync = ioremap(mem->start, resource_size(mem));
1122 if (!sync) { 1122 if (!sync) {
1123 pr_debug("ioremap for sync failed\n"); 1123 pr_debug("ioremap for sync failed\n");
1124 ret = -ENOMEM; 1124 ret = -ENOMEM;
diff --git a/drivers/usb/musb/tusb6010_omap.c b/drivers/usb/musb/tusb6010_omap.c
index e13c77052e5e..1c868096bd6f 100644
--- a/drivers/usb/musb/tusb6010_omap.c
+++ b/drivers/usb/musb/tusb6010_omap.c
@@ -648,7 +648,7 @@ void dma_controller_destroy(struct dma_controller *c)
648 } 648 }
649 } 649 }
650 650
651 if (!tusb_dma->multichannel && tusb_dma && tusb_dma->ch >= 0) 651 if (tusb_dma && !tusb_dma->multichannel && tusb_dma->ch >= 0)
652 omap_free_dma(tusb_dma->ch); 652 omap_free_dma(tusb_dma->ch);
653 653
654 kfree(tusb_dma); 654 kfree(tusb_dma);
diff --git a/drivers/usb/otg/twl4030-usb.c b/drivers/usb/otg/twl4030-usb.c
index 2be9f2fa41f9..3e4e9f434d78 100644
--- a/drivers/usb/otg/twl4030-usb.c
+++ b/drivers/usb/otg/twl4030-usb.c
@@ -36,7 +36,7 @@
36#include <linux/i2c/twl.h> 36#include <linux/i2c/twl.h>
37#include <linux/regulator/consumer.h> 37#include <linux/regulator/consumer.h>
38#include <linux/err.h> 38#include <linux/err.h>
39 39#include <linux/notifier.h>
40 40
41/* Register defines */ 41/* Register defines */
42 42
@@ -236,15 +236,6 @@
236#define PMBR1 0x0D 236#define PMBR1 0x0D
237#define GPIO_USB_4PIN_ULPI_2430C (3 << 0) 237#define GPIO_USB_4PIN_ULPI_2430C (3 << 0)
238 238
239
240
241enum linkstat {
242 USB_LINK_UNKNOWN = 0,
243 USB_LINK_NONE,
244 USB_LINK_VBUS,
245 USB_LINK_ID,
246};
247
248struct twl4030_usb { 239struct twl4030_usb {
249 struct otg_transceiver otg; 240 struct otg_transceiver otg;
250 struct device *dev; 241 struct device *dev;
@@ -347,10 +338,10 @@ twl4030_usb_clear_bits(struct twl4030_usb *twl, u8 reg, u8 bits)
347 338
348/*-------------------------------------------------------------------------*/ 339/*-------------------------------------------------------------------------*/
349 340
350static enum linkstat twl4030_usb_linkstat(struct twl4030_usb *twl) 341static enum usb_xceiv_events twl4030_usb_linkstat(struct twl4030_usb *twl)
351{ 342{
352 int status; 343 int status;
353 int linkstat = USB_LINK_UNKNOWN; 344 int linkstat = USB_EVENT_NONE;
354 345
355 /* 346 /*
356 * For ID/VBUS sensing, see manual section 15.4.8 ... 347 * For ID/VBUS sensing, see manual section 15.4.8 ...
@@ -368,11 +359,11 @@ static enum linkstat twl4030_usb_linkstat(struct twl4030_usb *twl)
368 dev_err(twl->dev, "USB link status err %d\n", status); 359 dev_err(twl->dev, "USB link status err %d\n", status);
369 else if (status & (BIT(7) | BIT(2))) { 360 else if (status & (BIT(7) | BIT(2))) {
370 if (status & BIT(2)) 361 if (status & BIT(2))
371 linkstat = USB_LINK_ID; 362 linkstat = USB_EVENT_ID;
372 else 363 else
373 linkstat = USB_LINK_VBUS; 364 linkstat = USB_EVENT_VBUS;
374 } else 365 } else
375 linkstat = USB_LINK_NONE; 366 linkstat = USB_EVENT_NONE;
376 367
377 dev_dbg(twl->dev, "HW_CONDITIONS 0x%02x/%d; link %d\n", 368 dev_dbg(twl->dev, "HW_CONDITIONS 0x%02x/%d; link %d\n",
378 status, status, linkstat); 369 status, status, linkstat);
@@ -383,7 +374,7 @@ static enum linkstat twl4030_usb_linkstat(struct twl4030_usb *twl)
383 374
384 spin_lock_irq(&twl->lock); 375 spin_lock_irq(&twl->lock);
385 twl->linkstat = linkstat; 376 twl->linkstat = linkstat;
386 if (linkstat == USB_LINK_ID) { 377 if (linkstat == USB_EVENT_ID) {
387 twl->otg.default_a = true; 378 twl->otg.default_a = true;
388 twl->otg.state = OTG_STATE_A_IDLE; 379 twl->otg.state = OTG_STATE_A_IDLE;
389 } else { 380 } else {
@@ -564,7 +555,7 @@ static ssize_t twl4030_usb_vbus_show(struct device *dev,
564 555
565 spin_lock_irqsave(&twl->lock, flags); 556 spin_lock_irqsave(&twl->lock, flags);
566 ret = sprintf(buf, "%s\n", 557 ret = sprintf(buf, "%s\n",
567 (twl->linkstat == USB_LINK_VBUS) ? "on" : "off"); 558 (twl->linkstat == USB_EVENT_VBUS) ? "on" : "off");
568 spin_unlock_irqrestore(&twl->lock, flags); 559 spin_unlock_irqrestore(&twl->lock, flags);
569 560
570 return ret; 561 return ret;
@@ -576,17 +567,8 @@ static irqreturn_t twl4030_usb_irq(int irq, void *_twl)
576 struct twl4030_usb *twl = _twl; 567 struct twl4030_usb *twl = _twl;
577 int status; 568 int status;
578 569
579#ifdef CONFIG_LOCKDEP
580 /* WORKAROUND for lockdep forcing IRQF_DISABLED on us, which
581 * we don't want and can't tolerate. Although it might be
582 * friendlier not to borrow this thread context...
583 */
584 local_irq_enable();
585#endif
586
587 status = twl4030_usb_linkstat(twl); 570 status = twl4030_usb_linkstat(twl);
588 if (status != USB_LINK_UNKNOWN) { 571 if (status >= 0) {
589
590 /* FIXME add a set_power() method so that B-devices can 572 /* FIXME add a set_power() method so that B-devices can
591 * configure the charger appropriately. It's not always 573 * configure the charger appropriately. It's not always
592 * correct to consume VBUS power, and how much current to 574 * correct to consume VBUS power, and how much current to
@@ -598,12 +580,13 @@ static irqreturn_t twl4030_usb_irq(int irq, void *_twl)
598 * USB_LINK_VBUS state. musb_hdrc won't care until it 580 * USB_LINK_VBUS state. musb_hdrc won't care until it
599 * starts to handle softconnect right. 581 * starts to handle softconnect right.
600 */ 582 */
601 if (status == USB_LINK_NONE) 583 if (status == USB_EVENT_NONE)
602 twl4030_phy_suspend(twl, 0); 584 twl4030_phy_suspend(twl, 0);
603 else 585 else
604 twl4030_phy_resume(twl); 586 twl4030_phy_resume(twl);
605 587
606 twl4030charger_usb_en(status == USB_LINK_VBUS); 588 blocking_notifier_call_chain(&twl->otg.notifier, status,
589 twl->otg.gadget);
607 } 590 }
608 sysfs_notify(&twl->dev->kobj, NULL, "vbus"); 591 sysfs_notify(&twl->dev->kobj, NULL, "vbus");
609 592
@@ -693,6 +676,8 @@ static int __devinit twl4030_usb_probe(struct platform_device *pdev)
693 if (device_create_file(&pdev->dev, &dev_attr_vbus)) 676 if (device_create_file(&pdev->dev, &dev_attr_vbus))
694 dev_warn(&pdev->dev, "could not create sysfs file\n"); 677 dev_warn(&pdev->dev, "could not create sysfs file\n");
695 678
679 BLOCKING_INIT_NOTIFIER_HEAD(&twl->otg.notifier);
680
696 /* Our job is to use irqs and status from the power module 681 /* Our job is to use irqs and status from the power module
697 * to keep the transceiver disabled when nothing's connected. 682 * to keep the transceiver disabled when nothing's connected.
698 * 683 *
@@ -702,7 +687,7 @@ static int __devinit twl4030_usb_probe(struct platform_device *pdev)
702 * need both handles, otherwise just one suffices. 687 * need both handles, otherwise just one suffices.
703 */ 688 */
704 twl->irq_enabled = true; 689 twl->irq_enabled = true;
705 status = request_irq(twl->irq, twl4030_usb_irq, 690 status = request_threaded_irq(twl->irq, NULL, twl4030_usb_irq,
706 IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING, 691 IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
707 "twl4030_usb", twl); 692 "twl4030_usb", twl);
708 if (status < 0) { 693 if (status < 0) {
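[Editor's note] Two patterns are introduced in the twl4030-usb diff: the handler is registered with request_threaded_irq() so it runs in sleepable process context (which makes the CONFIG_LOCKDEP local_irq_enable() hack unnecessary), and link events are fanned out through a blocking notifier chain instead of a hard-wired twl4030charger_usb_en() call. A stripped-down sketch of the combination; struct my_xceiv and MY_EVENT_VBUS are illustrative names, and note that later kernels additionally require IRQF_ONESHOT when the primary handler is NULL:

#include <linux/interrupt.h>
#include <linux/notifier.h>

struct my_xceiv {				/* illustrative */
	int irq;
	struct blocking_notifier_head notifier;
};

#define MY_EVENT_VBUS	1			/* illustrative event code */

/* Threaded handler: process context, so sleeping (e.g. I2C reads) is fine. */
static irqreturn_t my_xceiv_irq(int irq, void *data)
{
	struct my_xceiv *xceiv = data;

	/* ... query link status over I2C here ... */
	blocking_notifier_call_chain(&xceiv->notifier, MY_EVENT_VBUS, NULL);
	return IRQ_HANDLED;
}

static int my_xceiv_setup(struct my_xceiv *xceiv)
{
	BLOCKING_INIT_NOTIFIER_HEAD(&xceiv->notifier);

	/*
	 * NULL primary handler: the genirq core acks the line and wakes
	 * the thread, so the driver code never runs with IRQs disabled.
	 */
	return request_threaded_irq(xceiv->irq, NULL, my_xceiv_irq,
			IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
			"my_xceiv", xceiv);
}

Consumers subscribe with blocking_notifier_chain_register() and receive the event code plus the void * payload (here the gadget pointer).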
diff --git a/drivers/usb/serial/Kconfig b/drivers/usb/serial/Kconfig
index c480ea4c19f2..c78b255e3f83 100644
--- a/drivers/usb/serial/Kconfig
+++ b/drivers/usb/serial/Kconfig
@@ -472,6 +472,17 @@ config USB_SERIAL_OTI6858
472 To compile this driver as a module, choose M here: the 472 To compile this driver as a module, choose M here: the
473 module will be called oti6858. 473 module will be called oti6858.
474 474
475config USB_SERIAL_QCAUX
476 tristate "USB Qualcomm Auxiliary Serial Port Driver"
477 ---help---
478 Say Y here if you want to use the auxiliary serial ports provided
479 by many modems based on Qualcomm chipsets. These ports often use
480 a proprietary protocol called DM and cannot be used for AT- or
481 PPP-based communication.
482
483 To compile this driver as a module, choose M here: the
484 module will be called qcaux. If unsure, choose N.
485
475config USB_SERIAL_QUALCOMM 486config USB_SERIAL_QUALCOMM
476 tristate "USB Qualcomm Serial modem" 487 tristate "USB Qualcomm Serial modem"
477 help 488 help
@@ -600,6 +611,14 @@ config USB_SERIAL_OPTICON
600 To compile this driver as a module, choose M here: the 611 To compile this driver as a module, choose M here: the
601 module will be called opticon. 612 module will be called opticon.
602 613
614config USB_SERIAL_VIVOPAY_SERIAL
615 tristate "USB ViVOpay serial interface driver"
616 help
617 Say Y here if you want to use a ViVOtech ViVOpay USB device.
618
619 To compile this driver as a module, choose M here: the
620 module will be called vivopay-serial.
621
603config USB_SERIAL_DEBUG 622config USB_SERIAL_DEBUG
604 tristate "USB Debugging Device" 623 tristate "USB Debugging Device"
605 help 624 help
diff --git a/drivers/usb/serial/Makefile b/drivers/usb/serial/Makefile
index 66619beb6cc0..83c9e431a568 100644
--- a/drivers/usb/serial/Makefile
+++ b/drivers/usb/serial/Makefile
@@ -45,6 +45,7 @@ obj-$(CONFIG_USB_SERIAL_OPTICON) += opticon.o
45obj-$(CONFIG_USB_SERIAL_OPTION) += option.o 45obj-$(CONFIG_USB_SERIAL_OPTION) += option.o
46obj-$(CONFIG_USB_SERIAL_OTI6858) += oti6858.o 46obj-$(CONFIG_USB_SERIAL_OTI6858) += oti6858.o
47obj-$(CONFIG_USB_SERIAL_PL2303) += pl2303.o 47obj-$(CONFIG_USB_SERIAL_PL2303) += pl2303.o
48obj-$(CONFIG_USB_SERIAL_QCAUX) += qcaux.o
48obj-$(CONFIG_USB_SERIAL_QUALCOMM) += qcserial.o 49obj-$(CONFIG_USB_SERIAL_QUALCOMM) += qcserial.o
49obj-$(CONFIG_USB_SERIAL_SAFE) += safe_serial.o 50obj-$(CONFIG_USB_SERIAL_SAFE) += safe_serial.o
50obj-$(CONFIG_USB_SERIAL_SIEMENS_MPI) += siemens_mpi.o 51obj-$(CONFIG_USB_SERIAL_SIEMENS_MPI) += siemens_mpi.o
@@ -55,4 +56,5 @@ obj-$(CONFIG_USB_SERIAL_TI) += ti_usb_3410_5052.o
55obj-$(CONFIG_USB_SERIAL_VISOR) += visor.o 56obj-$(CONFIG_USB_SERIAL_VISOR) += visor.o
56obj-$(CONFIG_USB_SERIAL_WHITEHEAT) += whiteheat.o 57obj-$(CONFIG_USB_SERIAL_WHITEHEAT) += whiteheat.o
57obj-$(CONFIG_USB_SERIAL_XIRCOM) += keyspan_pda.o 58obj-$(CONFIG_USB_SERIAL_XIRCOM) += keyspan_pda.o
59obj-$(CONFIG_USB_SERIAL_VIVOPAY_SERIAL) += vivopay-serial.o
58 60
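[Editor's note] qcaux and vivopay-serial are "dumb port" drivers: an ID table plus the generic usb-serial read/write paths. A rough sketch of everything such a driver needs under this era's usb-serial API; the VID/PID and all names are placeholders, not the real qcaux or vivopay IDs:

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>

static const struct usb_device_id id_table[] = {
	{ USB_DEVICE(0x1234, 0x5678) },	/* placeholder VID/PID */
	{ },
};
MODULE_DEVICE_TABLE(usb, id_table);

static struct usb_driver mini_driver = {
	.name		= "mini_serial",
	.probe		= usb_serial_probe,
	.disconnect	= usb_serial_disconnect,
	.id_table	= id_table,
	.no_dynamic_id	= 1,
};

/* The usb-serial core supplies generic open/close/read/write paths. */
static struct usb_serial_driver mini_device = {
	.driver = {
		.owner	= THIS_MODULE,
		.name	= "mini_serial",
	},
	.id_table	= id_table,
	.num_ports	= 1,
};

static int __init mini_init(void)
{
	int ret = usb_serial_register(&mini_device);

	if (ret)
		return ret;
	ret = usb_register(&mini_driver);
	if (ret)
		usb_serial_deregister(&mini_device);
	return ret;
}

static void __exit mini_exit(void)
{
	usb_deregister(&mini_driver);
	usb_serial_deregister(&mini_device);
}

module_init(mini_init);
module_exit(mini_exit);
MODULE_LICENSE("GPL");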
diff --git a/drivers/usb/serial/aircable.c b/drivers/usb/serial/aircable.c
index b10ac8409411..365db1097bfd 100644
--- a/drivers/usb/serial/aircable.c
+++ b/drivers/usb/serial/aircable.c
@@ -78,7 +78,7 @@ static int debug;
78#define DRIVER_DESC "AIRcable USB Driver" 78#define DRIVER_DESC "AIRcable USB Driver"
79 79
80/* ID table that will be registered with USB core */ 80/* ID table that will be registered with USB core */
81static struct usb_device_id id_table [] = { 81static const struct usb_device_id id_table[] = {
82 { USB_DEVICE(AIRCABLE_VID, AIRCABLE_USB_PID) }, 82 { USB_DEVICE(AIRCABLE_VID, AIRCABLE_USB_PID) },
83 { }, 83 { },
84}; 84};
@@ -468,10 +468,6 @@ static void aircable_read_bulk_callback(struct urb *urb)
468 468
469 if (status) { 469 if (status) {
470 dbg("%s - urb status = %d", __func__, status); 470 dbg("%s - urb status = %d", __func__, status);
471 if (!port->port.count) {
472 dbg("%s - port is closed, exiting.", __func__);
473 return;
474 }
475 if (status == -EPROTO) { 471 if (status == -EPROTO) {
476 dbg("%s - caught -EPROTO, resubmitting the urb", 472 dbg("%s - caught -EPROTO, resubmitting the urb",
477 __func__); 473 __func__);
@@ -530,23 +526,19 @@ static void aircable_read_bulk_callback(struct urb *urb)
530 } 526 }
531 tty_kref_put(tty); 527 tty_kref_put(tty);
532 528
533 /* Schedule the next read _if_ we are still open */ 529 /* Schedule the next read */
534 if (port->port.count) { 530 usb_fill_bulk_urb(port->read_urb, port->serial->dev,
535 usb_fill_bulk_urb(port->read_urb, port->serial->dev, 531 usb_rcvbulkpipe(port->serial->dev,
536 usb_rcvbulkpipe(port->serial->dev, 532 port->bulk_in_endpointAddress),
537 port->bulk_in_endpointAddress), 533 port->read_urb->transfer_buffer,
538 port->read_urb->transfer_buffer, 534 port->read_urb->transfer_buffer_length,
539 port->read_urb->transfer_buffer_length, 535 aircable_read_bulk_callback, port);
540 aircable_read_bulk_callback, port); 536
541 537 result = usb_submit_urb(urb, GFP_ATOMIC);
542 result = usb_submit_urb(urb, GFP_ATOMIC); 538 if (result && result != -EPERM)
543 if (result) 539 dev_err(&urb->dev->dev,
544 dev_err(&urb->dev->dev, 540 "%s - failed resubmitting read urb, error %d\n",
545 "%s - failed resubmitting read urb, error %d\n", 541 __func__, result);
546 __func__, result);
547 }
548
549 return;
550} 542}
551 543
552/* Based on ftdi_sio.c throttle */ 544/* Based on ftdi_sio.c throttle */
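[Editor's note] Two related cleanups in the aircable diff: the port->port.count checks go away (port lifetime is now the usb-serial core's job, and it kills the URB on close), and -EPERM from usb_submit_urb() is treated as expected, since that is what a resubmit from the completion handler returns once usb_kill_urb() has poisoned the URB. The callback shape, reduced to a sketch rather than the aircable code itself:

#include <linux/usb.h>
#include <linux/usb/serial.h>

static void sketch_read_callback(struct urb *urb)
{
	struct usb_serial_port *port = urb->context;
	int result;

	switch (urb->status) {
	case 0:
		/* ... push urb->transfer_buffer to the tty layer here ... */
		break;
	case -EPROTO:
		break;		/* transient bus error: just resubmit */
	default:
		return;		/* shutdown, unlink, etc.: stop reading */
	}

	/* GFP_ATOMIC: completion handlers run in interrupt context. */
	result = usb_submit_urb(urb, GFP_ATOMIC);
	if (result && result != -EPERM)
		dev_err(&port->dev, "resubmit failed: %d\n", result);
}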
diff --git a/drivers/usb/serial/ark3116.c b/drivers/usb/serial/ark3116.c
index a9c2dec8e3fb..547c9448c28c 100644
--- a/drivers/usb/serial/ark3116.c
+++ b/drivers/usb/serial/ark3116.c
@@ -50,7 +50,7 @@ static int debug;
50/* usb timeout of 1 second */ 50/* usb timeout of 1 second */
51#define ARK_TIMEOUT (1*HZ) 51#define ARK_TIMEOUT (1*HZ)
52 52
53static struct usb_device_id id_table [] = { 53static const struct usb_device_id id_table[] = {
54 { USB_DEVICE(0x6547, 0x0232) }, 54 { USB_DEVICE(0x6547, 0x0232) },
55 { USB_DEVICE(0x18ec, 0x3118) }, /* USB to IrDA adapter */ 55 { USB_DEVICE(0x18ec, 0x3118) }, /* USB to IrDA adapter */
56 { }, 56 { },
@@ -733,7 +733,6 @@ static void ark3116_read_bulk_callback(struct urb *urb)
733 733
734 tty = tty_port_tty_get(&port->port); 734 tty = tty_port_tty_get(&port->port);
735 if (tty) { 735 if (tty) {
736 tty_buffer_request_room(tty, urb->actual_length + 1);
737 /* overrun is special, not associated with a char */ 736 /* overrun is special, not associated with a char */
738 if (unlikely(lsr & UART_LSR_OE)) 737 if (unlikely(lsr & UART_LSR_OE))
739 tty_insert_flip_char(tty, 0, TTY_OVERRUN); 738 tty_insert_flip_char(tty, 0, TTY_OVERRUN);
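[Editor's note] The dropped tty_buffer_request_room() call is redundant rather than wrong: tty_insert_flip_char() and tty_insert_flip_string() grow the flip buffer on demand and return how much was actually accepted. A sketch of the resulting receive path (the helper name is illustrative):

#include <linux/serial_reg.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>

static void push_rx(struct tty_struct *tty, const unsigned char *data,
		    int len, unsigned char lsr)
{
	/* overrun is special: a flag with no character attached */
	if (lsr & UART_LSR_OE)
		tty_insert_flip_char(tty, 0, TTY_OVERRUN);

	/* no tty_buffer_request_room() needed; space is allocated here */
	tty_insert_flip_string(tty, data, len);
	tty_flip_buffer_push(tty);
}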
diff --git a/drivers/usb/serial/belkin_sa.c b/drivers/usb/serial/belkin_sa.c
index a0467bc61627..1295e44e3f1c 100644
--- a/drivers/usb/serial/belkin_sa.c
+++ b/drivers/usb/serial/belkin_sa.c
@@ -103,7 +103,7 @@ static int belkin_sa_tiocmset(struct tty_struct *tty, struct file *file,
103 unsigned int set, unsigned int clear); 103 unsigned int set, unsigned int clear);
104 104
105 105
106static struct usb_device_id id_table_combined [] = { 106static const struct usb_device_id id_table_combined[] = {
107 { USB_DEVICE(BELKIN_SA_VID, BELKIN_SA_PID) }, 107 { USB_DEVICE(BELKIN_SA_VID, BELKIN_SA_PID) },
108 { USB_DEVICE(BELKIN_OLD_VID, BELKIN_OLD_PID) }, 108 { USB_DEVICE(BELKIN_OLD_VID, BELKIN_OLD_PID) },
109 { USB_DEVICE(PERACOM_VID, PERACOM_PID) }, 109 { USB_DEVICE(PERACOM_VID, PERACOM_PID) },
diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c
index 59eff721fcc5..9f4fed1968b5 100644
--- a/drivers/usb/serial/ch341.c
+++ b/drivers/usb/serial/ch341.c
@@ -22,6 +22,7 @@
22#include <linux/usb.h> 22#include <linux/usb.h>
23#include <linux/usb/serial.h> 23#include <linux/usb/serial.h>
24#include <linux/serial.h> 24#include <linux/serial.h>
25#include <asm/unaligned.h>
25 26
26#define DEFAULT_BAUD_RATE 9600 27#define DEFAULT_BAUD_RATE 9600
27#define DEFAULT_TIMEOUT 1000 28#define DEFAULT_TIMEOUT 1000
@@ -70,7 +71,7 @@
70 71
71static int debug; 72static int debug;
72 73
73static struct usb_device_id id_table [] = { 74static const struct usb_device_id id_table[] = {
74 { USB_DEVICE(0x4348, 0x5523) }, 75 { USB_DEVICE(0x4348, 0x5523) },
75 { USB_DEVICE(0x1a86, 0x7523) }, 76 { USB_DEVICE(0x1a86, 0x7523) },
76 { }, 77 { },
@@ -392,16 +393,22 @@ static void ch341_break_ctl(struct tty_struct *tty, int break_state)
392 struct usb_serial_port *port = tty->driver_data; 393 struct usb_serial_port *port = tty->driver_data;
393 int r; 394 int r;
394 uint16_t reg_contents; 395 uint16_t reg_contents;
395 uint8_t break_reg[2]; 396 uint8_t *break_reg;
396 397
397 dbg("%s()", __func__); 398 dbg("%s()", __func__);
398 399
400 break_reg = kmalloc(2, GFP_KERNEL);
401 if (!break_reg) {
402 dev_err(&port->dev, "%s - kmalloc failed\n", __func__);
403 return;
404 }
405
399 r = ch341_control_in(port->serial->dev, CH341_REQ_READ_REG, 406 r = ch341_control_in(port->serial->dev, CH341_REQ_READ_REG,
400 ch341_break_reg, 0, break_reg, sizeof(break_reg)); 407 ch341_break_reg, 0, break_reg, 2);
401 if (r < 0) { 408 if (r < 0) {
402 printk(KERN_WARNING "%s: USB control read error whilst getting" 409 dev_err(&port->dev, "%s - USB control read error (%d)\n",
403 " break register contents.\n", __FILE__); 410 __func__, r);
404 return; 411 goto out;
405 } 412 }
406 dbg("%s - initial ch341 break register contents - reg1: %x, reg2: %x", 413 dbg("%s - initial ch341 break register contents - reg1: %x, reg2: %x",
407 __func__, break_reg[0], break_reg[1]); 414 __func__, break_reg[0], break_reg[1]);
@@ -416,12 +423,14 @@ static void ch341_break_ctl(struct tty_struct *tty, int break_state)
416 } 423 }
417 dbg("%s - New ch341 break register contents - reg1: %x, reg2: %x", 424 dbg("%s - New ch341 break register contents - reg1: %x, reg2: %x",
418 __func__, break_reg[0], break_reg[1]); 425 __func__, break_reg[0], break_reg[1]);
419 reg_contents = (uint16_t)break_reg[0] | ((uint16_t)break_reg[1] << 8); 426 reg_contents = get_unaligned_le16(break_reg);
420 r = ch341_control_out(port->serial->dev, CH341_REQ_WRITE_REG, 427 r = ch341_control_out(port->serial->dev, CH341_REQ_WRITE_REG,
421 ch341_break_reg, reg_contents); 428 ch341_break_reg, reg_contents);
422 if (r < 0) 429 if (r < 0)
423 printk(KERN_WARNING "%s: USB control write error whilst setting" 430 dev_err(&port->dev, "%s - USB control write error (%d)\n",
424 " break register contents.\n", __FILE__); 431 __func__, r);
432out:
433 kfree(break_reg);
425} 434}
426 435
427static int ch341_tiocmset(struct tty_struct *tty, struct file *file, 436static int ch341_tiocmset(struct tty_struct *tty, struct file *file,
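[Editor's note] break_reg moves from the stack to kmalloc() because buffers handed to usb_control_msg() may be DMA-mapped, and stack memory is not guaranteed DMA-safe (or cacheline-isolated) on all architectures; get_unaligned_le16() also replaces hand-assembled little-endian loads. A sketch of the same read-modify-write shape; the request and register codes are illustrative, not the ch341 definitions:

#include <linux/slab.h>
#include <linux/usb.h>
#include <asm/unaligned.h>

#define REQ_READ_REG	0x95		/* illustrative request codes */
#define REQ_WRITE_REG	0x9a
#define BREAK_REG	0x1805

static int rmw_reg16(struct usb_device *udev, u16 set_bits)
{
	u8 *buf;
	u16 val;
	int r;

	buf = kmalloc(2, GFP_KERNEL);	/* heap memory is DMA-safe */
	if (!buf)
		return -ENOMEM;

	r = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), REQ_READ_REG,
			USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
			BREAK_REG, 0, buf, 2, 1000);
	if (r < 0)
		goto out;

	val = get_unaligned_le16(buf) | set_bits;

	/* register contents travel in wIndex, ch341-style */
	r = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), REQ_WRITE_REG,
			USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT,
			BREAK_REG, val, NULL, 0, 1000);
out:
	kfree(buf);
	return r;
}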
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index bd254ec97d14..507382b0a9ed 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -55,7 +55,7 @@ static int cp210x_carrier_raised(struct usb_serial_port *p);
55 55
56static int debug; 56static int debug;
57 57
58static struct usb_device_id id_table [] = { 58static const struct usb_device_id id_table[] = {
59 { USB_DEVICE(0x0471, 0x066A) }, /* AKTAKOM ACE-1001 cable */ 59 { USB_DEVICE(0x0471, 0x066A) }, /* AKTAKOM ACE-1001 cable */
60 { USB_DEVICE(0x0489, 0xE000) }, /* Pirelli Broadband S.p.A, DP-L10 SIP/GSM Mobile */ 60 { USB_DEVICE(0x0489, 0xE000) }, /* Pirelli Broadband S.p.A, DP-L10 SIP/GSM Mobile */
61 { USB_DEVICE(0x0745, 0x1000) }, /* CipherLab USB CCD Barcode Scanner 1000 */ 61 { USB_DEVICE(0x0745, 0x1000) }, /* CipherLab USB CCD Barcode Scanner 1000 */
@@ -91,11 +91,12 @@ static struct usb_device_id id_table [] = {
91 { USB_DEVICE(0x10C4, 0x81C8) }, /* Lipowsky Industrie Elektronik GmbH, Baby-JTAG */ 91 { USB_DEVICE(0x10C4, 0x81C8) }, /* Lipowsky Industrie Elektronik GmbH, Baby-JTAG */
92 { USB_DEVICE(0x10C4, 0x81E2) }, /* Lipowsky Industrie Elektronik GmbH, Baby-LIN */ 92 { USB_DEVICE(0x10C4, 0x81E2) }, /* Lipowsky Industrie Elektronik GmbH, Baby-LIN */
93 { USB_DEVICE(0x10C4, 0x81E7) }, /* Aerocomm Radio */ 93 { USB_DEVICE(0x10C4, 0x81E7) }, /* Aerocomm Radio */
94 { USB_DEVICE(0x10C4, 0x81E8) }, /* Zephyr Bioharness */
94 { USB_DEVICE(0x10C4, 0x81F2) }, /* C1007 HF band RFID controller */ 95 { USB_DEVICE(0x10C4, 0x81F2) }, /* C1007 HF band RFID controller */
95 { USB_DEVICE(0x10C4, 0x8218) }, /* Lipowsky Industrie Elektronik GmbH, HARP-1 */ 96 { USB_DEVICE(0x10C4, 0x8218) }, /* Lipowsky Industrie Elektronik GmbH, HARP-1 */
96 { USB_DEVICE(0x10C4, 0x822B) }, /* Modem EDGE(GSM) Comander 2 */ 97 { USB_DEVICE(0x10C4, 0x822B) }, /* Modem EDGE(GSM) Comander 2 */
97 { USB_DEVICE(0x10C4, 0x826B) }, /* Cygnal Integrated Products, Inc., Fasttrax GPS demostration module */ 98 { USB_DEVICE(0x10C4, 0x826B) }, /* Cygnal Integrated Products, Inc., Fasttrax GPS demostration module */
98 { USB_DEVICE(0x10c4, 0x8293) }, /* Telegesys ETRX2USB */ 99 { USB_DEVICE(0x10C4, 0x8293) }, /* Telegesys ETRX2USB */
99 { USB_DEVICE(0x10C4, 0x82F9) }, /* Procyon AVS */ 100 { USB_DEVICE(0x10C4, 0x82F9) }, /* Procyon AVS */
100 { USB_DEVICE(0x10C4, 0x8341) }, /* Siemens MC35PU GPRS Modem */ 101 { USB_DEVICE(0x10C4, 0x8341) }, /* Siemens MC35PU GPRS Modem */
101 { USB_DEVICE(0x10C4, 0x8382) }, /* Cygnal Integrated Products, Inc. */ 102 { USB_DEVICE(0x10C4, 0x8382) }, /* Cygnal Integrated Products, Inc. */
@@ -612,7 +613,7 @@ static void cp210x_set_termios(struct tty_struct *tty,
612 baud); 613 baud);
613 if (cp210x_set_config_single(port, CP210X_SET_BAUDDIV, 614 if (cp210x_set_config_single(port, CP210X_SET_BAUDDIV,
614 ((BAUD_RATE_GEN_FREQ + baud/2) / baud))) { 615 ((BAUD_RATE_GEN_FREQ + baud/2) / baud))) {
615 dbg("Baud rate requested not supported by device\n"); 616 dbg("Baud rate requested not supported by device");
616 baud = tty_termios_baud_rate(old_termios); 617 baud = tty_termios_baud_rate(old_termios);
617 } 618 }
618 } 619 }
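[Editor's note] The recurring "static struct usb_device_id id_table[]" to "static const ..." change across these drivers moves tables the USB core only ever reads into read-only data. The full idiom, with placeholder IDs:

#include <linux/module.h>
#include <linux/usb.h>

/* const: the table lands in .rodata; the IDs below are placeholders */
static const struct usb_device_id sketch_ids[] = {
	{ USB_DEVICE(0x1234, 0x0001) },
	{ USB_DEVICE(0x1234, 0x0002) },
	{ }				/* terminating entry */
};
MODULE_DEVICE_TABLE(usb, sketch_ids);	/* lets udev autoload the module */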
diff --git a/drivers/usb/serial/cyberjack.c b/drivers/usb/serial/cyberjack.c
index b0f6402a91ca..f744ab7a3b19 100644
--- a/drivers/usb/serial/cyberjack.c
+++ b/drivers/usb/serial/cyberjack.c
@@ -70,7 +70,7 @@ static void cyberjack_read_int_callback(struct urb *urb);
70static void cyberjack_read_bulk_callback(struct urb *urb); 70static void cyberjack_read_bulk_callback(struct urb *urb);
71static void cyberjack_write_bulk_callback(struct urb *urb); 71static void cyberjack_write_bulk_callback(struct urb *urb);
72 72
73static struct usb_device_id id_table [] = { 73static const struct usb_device_id id_table[] = {
74 { USB_DEVICE(CYBERJACK_VENDOR_ID, CYBERJACK_PRODUCT_ID) }, 74 { USB_DEVICE(CYBERJACK_VENDOR_ID, CYBERJACK_PRODUCT_ID) },
75 { } /* Terminating entry */ 75 { } /* Terminating entry */
76}; 76};
@@ -391,11 +391,10 @@ static void cyberjack_read_bulk_callback(struct urb *urb)
391 391
392 tty = tty_port_tty_get(&port->port); 392 tty = tty_port_tty_get(&port->port);
393 if (!tty) { 393 if (!tty) {
394 dbg("%s - ignoring since device not open\n", __func__); 394 dbg("%s - ignoring since device not open", __func__);
395 return; 395 return;
396 } 396 }
397 if (urb->actual_length) { 397 if (urb->actual_length) {
398 tty_buffer_request_room(tty, urb->actual_length);
399 tty_insert_flip_string(tty, data, urb->actual_length); 398 tty_insert_flip_string(tty, data, urb->actual_length);
400 tty_flip_buffer_push(tty); 399 tty_flip_buffer_push(tty);
401 } 400 }
diff --git a/drivers/usb/serial/cypress_m8.c b/drivers/usb/serial/cypress_m8.c
index a591ebec0f89..baf74b44e6ed 100644
--- a/drivers/usb/serial/cypress_m8.c
+++ b/drivers/usb/serial/cypress_m8.c
@@ -66,17 +66,15 @@
66#include <linux/serial.h> 66#include <linux/serial.h>
67#include <linux/delay.h> 67#include <linux/delay.h>
68#include <linux/uaccess.h> 68#include <linux/uaccess.h>
69#include <asm/unaligned.h>
69 70
70#include "cypress_m8.h" 71#include "cypress_m8.h"
71 72
72 73
73#ifdef CONFIG_USB_SERIAL_DEBUG 74static int debug;
74 static int debug = 1;
75#else
76 static int debug;
77#endif
78static int stats; 75static int stats;
79static int interval; 76static int interval;
77static int unstable_bauds;
80 78
81/* 79/*
82 * Version Information 80 * Version Information
@@ -89,24 +87,24 @@ static int interval;
89#define CYPRESS_BUF_SIZE 1024 87#define CYPRESS_BUF_SIZE 1024
90#define CYPRESS_CLOSING_WAIT (30*HZ) 88#define CYPRESS_CLOSING_WAIT (30*HZ)
91 89
92static struct usb_device_id id_table_earthmate [] = { 90static const struct usb_device_id id_table_earthmate[] = {
93 { USB_DEVICE(VENDOR_ID_DELORME, PRODUCT_ID_EARTHMATEUSB) }, 91 { USB_DEVICE(VENDOR_ID_DELORME, PRODUCT_ID_EARTHMATEUSB) },
94 { USB_DEVICE(VENDOR_ID_DELORME, PRODUCT_ID_EARTHMATEUSB_LT20) }, 92 { USB_DEVICE(VENDOR_ID_DELORME, PRODUCT_ID_EARTHMATEUSB_LT20) },
95 { } /* Terminating entry */ 93 { } /* Terminating entry */
96}; 94};
97 95
98static struct usb_device_id id_table_cyphidcomrs232 [] = { 96static const struct usb_device_id id_table_cyphidcomrs232[] = {
99 { USB_DEVICE(VENDOR_ID_CYPRESS, PRODUCT_ID_CYPHIDCOM) }, 97 { USB_DEVICE(VENDOR_ID_CYPRESS, PRODUCT_ID_CYPHIDCOM) },
100 { USB_DEVICE(VENDOR_ID_POWERCOM, PRODUCT_ID_UPS) }, 98 { USB_DEVICE(VENDOR_ID_POWERCOM, PRODUCT_ID_UPS) },
101 { } /* Terminating entry */ 99 { } /* Terminating entry */
102}; 100};
103 101
104static struct usb_device_id id_table_nokiaca42v2 [] = { 102static const struct usb_device_id id_table_nokiaca42v2[] = {
105 { USB_DEVICE(VENDOR_ID_DAZZLE, PRODUCT_ID_CA42) }, 103 { USB_DEVICE(VENDOR_ID_DAZZLE, PRODUCT_ID_CA42) },
106 { } /* Terminating entry */ 104 { } /* Terminating entry */
107}; 105};
108 106
109static struct usb_device_id id_table_combined [] = { 107static const struct usb_device_id id_table_combined[] = {
110 { USB_DEVICE(VENDOR_ID_DELORME, PRODUCT_ID_EARTHMATEUSB) }, 108 { USB_DEVICE(VENDOR_ID_DELORME, PRODUCT_ID_EARTHMATEUSB) },
111 { USB_DEVICE(VENDOR_ID_DELORME, PRODUCT_ID_EARTHMATEUSB_LT20) }, 109 { USB_DEVICE(VENDOR_ID_DELORME, PRODUCT_ID_EARTHMATEUSB_LT20) },
112 { USB_DEVICE(VENDOR_ID_CYPRESS, PRODUCT_ID_CYPHIDCOM) }, 110 { USB_DEVICE(VENDOR_ID_CYPRESS, PRODUCT_ID_CYPHIDCOM) },
@@ -295,6 +293,9 @@ static int analyze_baud_rate(struct usb_serial_port *port, speed_t new_rate)
295 struct cypress_private *priv; 293 struct cypress_private *priv;
296 priv = usb_get_serial_port_data(port); 294 priv = usb_get_serial_port_data(port);
297 295
296 if (unstable_bauds)
297 return new_rate;
298
298 /* 299 /*
299 * The general purpose firmware for the Cypress M8 allows for 300 * The general purpose firmware for the Cypress M8 allows for
300 * a maximum speed of 57600bps (I have no idea whether DeLorme 301 * a maximum speed of 57600bps (I have no idea whether DeLorme
@@ -344,7 +345,8 @@ static int cypress_serial_control(struct tty_struct *tty,
344{ 345{
345 int new_baudrate = 0, retval = 0, tries = 0; 346 int new_baudrate = 0, retval = 0, tries = 0;
346 struct cypress_private *priv; 347 struct cypress_private *priv;
347 __u8 feature_buffer[5]; 348 u8 *feature_buffer;
349 const unsigned int feature_len = 5;
348 unsigned long flags; 350 unsigned long flags;
349 351
350 dbg("%s", __func__); 352 dbg("%s", __func__);
@@ -354,17 +356,18 @@ static int cypress_serial_control(struct tty_struct *tty,
354 if (!priv->comm_is_ok) 356 if (!priv->comm_is_ok)
355 return -ENODEV; 357 return -ENODEV;
356 358
359 feature_buffer = kcalloc(feature_len, sizeof(u8), GFP_KERNEL);
360 if (!feature_buffer)
361 return -ENOMEM;
362
357 switch (cypress_request_type) { 363 switch (cypress_request_type) {
358 case CYPRESS_SET_CONFIG: 364 case CYPRESS_SET_CONFIG:
359 new_baudrate = priv->baud_rate;
360 /* 0 means 'Hang up' so doesn't change the true bit rate */ 365 /* 0 means 'Hang up' so doesn't change the true bit rate */
361 if (baud_rate == 0) 366 new_baudrate = priv->baud_rate;
362 new_baudrate = priv->baud_rate; 367 if (baud_rate && baud_rate != priv->baud_rate) {
363 /* Change of speed ? */
364 else if (baud_rate != priv->baud_rate) {
365 dbg("%s - baud rate is changing", __func__); 368 dbg("%s - baud rate is changing", __func__);
366 retval = analyze_baud_rate(port, baud_rate); 369 retval = analyze_baud_rate(port, baud_rate);
367 if (retval >= 0) { 370 if (retval >= 0) {
368 new_baudrate = retval; 371 new_baudrate = retval;
369 dbg("%s - New baud rate set to %d", 372 dbg("%s - New baud rate set to %d",
370 __func__, new_baudrate); 373 __func__, new_baudrate);
@@ -373,9 +376,8 @@ static int cypress_serial_control(struct tty_struct *tty,
373 dbg("%s - baud rate is being sent as %d", 376 dbg("%s - baud rate is being sent as %d",
374 __func__, new_baudrate); 377 __func__, new_baudrate);
375 378
376 memset(feature_buffer, 0, sizeof(feature_buffer));
377 /* fill the feature_buffer with new configuration */ 379 /* fill the feature_buffer with new configuration */
378 *((u_int32_t *)feature_buffer) = new_baudrate; 380 put_unaligned_le32(new_baudrate, feature_buffer);
379 feature_buffer[4] |= data_bits; /* assign data bits in 2 bit space ( max 3 ) */ 381 feature_buffer[4] |= data_bits; /* assign data bits in 2 bit space ( max 3 ) */
380 /* 1 bit gap */ 382 /* 1 bit gap */
381 feature_buffer[4] |= (stop_bits << 3); /* assign stop bits in 1 bit space */ 383 feature_buffer[4] |= (stop_bits << 3); /* assign stop bits in 1 bit space */
@@ -397,15 +399,15 @@ static int cypress_serial_control(struct tty_struct *tty,
397 HID_REQ_SET_REPORT, 399 HID_REQ_SET_REPORT,
398 USB_DIR_OUT | USB_RECIP_INTERFACE | USB_TYPE_CLASS, 400 USB_DIR_OUT | USB_RECIP_INTERFACE | USB_TYPE_CLASS,
399 0x0300, 0, feature_buffer, 401 0x0300, 0, feature_buffer,
400 sizeof(feature_buffer), 500); 402 feature_len, 500);
401 403
402 if (tries++ >= 3) 404 if (tries++ >= 3)
403 break; 405 break;
404 406
405 } while (retval != sizeof(feature_buffer) && 407 } while (retval != feature_len &&
406 retval != -ENODEV); 408 retval != -ENODEV);
407 409
408 if (retval != sizeof(feature_buffer)) { 410 if (retval != feature_len) {
409 dev_err(&port->dev, "%s - failed sending serial " 411 dev_err(&port->dev, "%s - failed sending serial "
410 "line settings - %d\n", __func__, retval); 412 "line settings - %d\n", __func__, retval);
411 cypress_set_dead(port); 413 cypress_set_dead(port);
@@ -425,43 +427,42 @@ static int cypress_serial_control(struct tty_struct *tty,
425 /* Not implemented for this device, 427 /* Not implemented for this device,
426 and if we try to do it we're likely 428 and if we try to do it we're likely
427 to crash the hardware. */ 429 to crash the hardware. */
428 return -ENOTTY; 430 retval = -ENOTTY;
431 goto out;
429 } 432 }
430 dbg("%s - retreiving serial line settings", __func__); 433 dbg("%s - retreiving serial line settings", __func__);
431 /* set initial values in feature buffer */
432 memset(feature_buffer, 0, sizeof(feature_buffer));
433
434 do { 434 do {
435 retval = usb_control_msg(port->serial->dev, 435 retval = usb_control_msg(port->serial->dev,
436 usb_rcvctrlpipe(port->serial->dev, 0), 436 usb_rcvctrlpipe(port->serial->dev, 0),
437 HID_REQ_GET_REPORT, 437 HID_REQ_GET_REPORT,
438 USB_DIR_IN | USB_RECIP_INTERFACE | USB_TYPE_CLASS, 438 USB_DIR_IN | USB_RECIP_INTERFACE | USB_TYPE_CLASS,
439 0x0300, 0, feature_buffer, 439 0x0300, 0, feature_buffer,
440 sizeof(feature_buffer), 500); 440 feature_len, 500);
441 441
442 if (tries++ >= 3) 442 if (tries++ >= 3)
443 break; 443 break;
444 } while (retval != sizeof(feature_buffer) 444 } while (retval != feature_len
445 && retval != -ENODEV); 445 && retval != -ENODEV);
446 446
447 if (retval != sizeof(feature_buffer)) { 447 if (retval != feature_len) {
448 dev_err(&port->dev, "%s - failed to retrieve serial " 448 dev_err(&port->dev, "%s - failed to retrieve serial "
449 "line settings - %d\n", __func__, retval); 449 "line settings - %d\n", __func__, retval);
450 cypress_set_dead(port); 450 cypress_set_dead(port);
451 return retval; 451 goto out;
452 } else { 452 } else {
453 spin_lock_irqsave(&priv->lock, flags); 453 spin_lock_irqsave(&priv->lock, flags);
454 /* store the config in one byte, and later 454 /* store the config in one byte, and later
455 use bit masks to check values */ 455 use bit masks to check values */
456 priv->current_config = feature_buffer[4]; 456 priv->current_config = feature_buffer[4];
457 priv->baud_rate = *((u_int32_t *)feature_buffer); 457 priv->baud_rate = get_unaligned_le32(feature_buffer);
458 spin_unlock_irqrestore(&priv->lock, flags); 458 spin_unlock_irqrestore(&priv->lock, flags);
459 } 459 }
460 } 460 }
461 spin_lock_irqsave(&priv->lock, flags); 461 spin_lock_irqsave(&priv->lock, flags);
462 ++priv->cmd_count; 462 ++priv->cmd_count;
463 spin_unlock_irqrestore(&priv->lock, flags); 463 spin_unlock_irqrestore(&priv->lock, flags);
464 464out:
465 kfree(feature_buffer);
465 return retval; 466 return retval;
466} /* cypress_serial_control */ 467} /* cypress_serial_control */
467 468
@@ -690,7 +691,6 @@ static void cypress_dtr_rts(struct usb_serial_port *port, int on)
690{ 691{
691 struct cypress_private *priv = usb_get_serial_port_data(port); 692 struct cypress_private *priv = usb_get_serial_port_data(port);
692 /* drop dtr and rts */ 693 /* drop dtr and rts */
693 priv = usb_get_serial_port_data(port);
694 spin_lock_irq(&priv->lock); 694 spin_lock_irq(&priv->lock);
695 if (on == 0) 695 if (on == 0)
696 priv->line_control = 0; 696 priv->line_control = 0;
@@ -1307,13 +1307,9 @@ static void cypress_read_int_callback(struct urb *urb)
1307 spin_unlock_irqrestore(&priv->lock, flags); 1307 spin_unlock_irqrestore(&priv->lock, flags);
1308 1308
1309 /* process read if there is data other than line status */ 1309 /* process read if there is data other than line status */
1310 if (tty && (bytes > i)) { 1310 if (tty && bytes > i) {
1311 bytes = tty_buffer_request_room(tty, bytes); 1311 tty_insert_flip_string_fixed_flag(tty, data + i,
1312 for (; i < bytes ; ++i) { 1312 tty_flag, bytes - i);
1313 dbg("pushing byte number %d - %d - %c", i, data[i],
1314 data[i]);
1315 tty_insert_flip_char(tty, data[i], tty_flag);
1316 }
1317 tty_flip_buffer_push(tty); 1313 tty_flip_buffer_push(tty);
1318 } 1314 }
1319 1315
@@ -1325,9 +1321,9 @@ static void cypress_read_int_callback(struct urb *urb)
1325continue_read: 1321continue_read:
1326 tty_kref_put(tty); 1322 tty_kref_put(tty);
1327 1323
1328 /* Continue trying to always read... unless the port has closed. */ 1324 /* Continue trying to always read */
1329 1325
1330 if (port->port.count > 0 && priv->comm_is_ok) { 1326 if (priv->comm_is_ok) {
1331 usb_fill_int_urb(port->interrupt_in_urb, port->serial->dev, 1327 usb_fill_int_urb(port->interrupt_in_urb, port->serial->dev,
1332 usb_rcvintpipe(port->serial->dev, 1328 usb_rcvintpipe(port->serial->dev,
1333 port->interrupt_in_endpointAddress), 1329 port->interrupt_in_endpointAddress),
@@ -1336,7 +1332,7 @@ continue_read:
1336 cypress_read_int_callback, port, 1332 cypress_read_int_callback, port,
1337 priv->read_urb_interval); 1333 priv->read_urb_interval);
1338 result = usb_submit_urb(port->interrupt_in_urb, GFP_ATOMIC); 1334 result = usb_submit_urb(port->interrupt_in_urb, GFP_ATOMIC);
1339 if (result) { 1335 if (result && result != -EPERM) {
1340 dev_err(&urb->dev->dev, "%s - failed resubmitting " 1336 dev_err(&urb->dev->dev, "%s - failed resubmitting "
1341 "read urb, error %d\n", __func__, 1337 "read urb, error %d\n", __func__,
1342 result); 1338 result);
@@ -1650,3 +1646,5 @@ module_param(stats, bool, S_IRUGO | S_IWUSR);
1650MODULE_PARM_DESC(stats, "Enable statistics or not"); 1646MODULE_PARM_DESC(stats, "Enable statistics or not");
1651module_param(interval, int, S_IRUGO | S_IWUSR); 1647module_param(interval, int, S_IRUGO | S_IWUSR);
1652MODULE_PARM_DESC(interval, "Overrides interrupt interval"); 1648MODULE_PARM_DESC(interval, "Overrides interrupt interval");
1649module_param(unstable_bauds, bool, S_IRUGO | S_IWUSR);
1650MODULE_PARM_DESC(unstable_bauds, "Allow unstable baud rates");
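[Editor's note] Three things happen in the cypress rework: feature_buffer becomes a heap allocation (the same DMA-safety argument as in ch341 above), the u_int32_t casts become put_unaligned_le32()/get_unaligned_le32() so byte order and alignment are explicit, and every error path funnels through one out: label so the buffer is freed exactly once. A sketch of the encode/decode halves of such a 5-byte report; the field packing is simplified from the real layout:

#include <linux/slab.h>
#include <linux/types.h>
#include <asm/unaligned.h>

/* Bytes 0-3: little-endian baud rate.  Byte 4: packed line settings. */
static u8 *encode_report(u32 baud, u8 data_bits, u8 stop_bits)
{
	u8 *buf = kzalloc(5, GFP_KERNEL);	/* heap, zeroed */

	if (!buf)
		return NULL;
	put_unaligned_le32(baud, buf);	/* safe at any alignment, any CPU */
	buf[4] = data_bits | (stop_bits << 3);
	return buf;
}

static u32 decode_baud(const u8 *buf)
{
	return get_unaligned_le32(buf);
}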
diff --git a/drivers/usb/serial/digi_acceleport.c b/drivers/usb/serial/digi_acceleport.c
index 68e80be6b9e1..68b0aa5e516c 100644
--- a/drivers/usb/serial/digi_acceleport.c
+++ b/drivers/usb/serial/digi_acceleport.c
@@ -470,18 +470,18 @@ static int digi_read_oob_callback(struct urb *urb);
470 470
471static int debug; 471static int debug;
472 472
473static struct usb_device_id id_table_combined [] = { 473static const struct usb_device_id id_table_combined[] = {
474 { USB_DEVICE(DIGI_VENDOR_ID, DIGI_2_ID) }, 474 { USB_DEVICE(DIGI_VENDOR_ID, DIGI_2_ID) },
475 { USB_DEVICE(DIGI_VENDOR_ID, DIGI_4_ID) }, 475 { USB_DEVICE(DIGI_VENDOR_ID, DIGI_4_ID) },
476 { } /* Terminating entry */ 476 { } /* Terminating entry */
477}; 477};
478 478
479static struct usb_device_id id_table_2 [] = { 479static const struct usb_device_id id_table_2[] = {
480 { USB_DEVICE(DIGI_VENDOR_ID, DIGI_2_ID) }, 480 { USB_DEVICE(DIGI_VENDOR_ID, DIGI_2_ID) },
481 { } /* Terminating entry */ 481 { } /* Terminating entry */
482}; 482};
483 483
484static struct usb_device_id id_table_4 [] = { 484static const struct usb_device_id id_table_4[] = {
485 { USB_DEVICE(DIGI_VENDOR_ID, DIGI_4_ID) }, 485 { USB_DEVICE(DIGI_VENDOR_ID, DIGI_4_ID) },
486 { } /* Terminating entry */ 486 { } /* Terminating entry */
487}; 487};
@@ -1262,10 +1262,10 @@ static void digi_write_bulk_callback(struct urb *urb)
1262 return; 1262 return;
1263 } 1263 }
1264 1264
1265 /* try to send any buffered data on this port, if it is open */ 1265 /* try to send any buffered data on this port */
1266 spin_lock(&priv->dp_port_lock); 1266 spin_lock(&priv->dp_port_lock);
1267 priv->dp_write_urb_in_use = 0; 1267 priv->dp_write_urb_in_use = 0;
1268 if (port->port.count && priv->dp_out_buf_len > 0) { 1268 if (priv->dp_out_buf_len > 0) {
1269 *((unsigned char *)(port->write_urb->transfer_buffer)) 1269 *((unsigned char *)(port->write_urb->transfer_buffer))
1270 = (unsigned char)DIGI_CMD_SEND_DATA; 1270 = (unsigned char)DIGI_CMD_SEND_DATA;
1271 *((unsigned char *)(port->write_urb->transfer_buffer) + 1) 1271 *((unsigned char *)(port->write_urb->transfer_buffer) + 1)
@@ -1288,7 +1288,7 @@ static void digi_write_bulk_callback(struct urb *urb)
1288 schedule_work(&priv->dp_wakeup_work); 1288 schedule_work(&priv->dp_wakeup_work);
1289 1289
1290 spin_unlock(&priv->dp_port_lock); 1290 spin_unlock(&priv->dp_port_lock);
1291 if (ret) 1291 if (ret && ret != -EPERM)
1292 dev_err(&port->dev, 1292 dev_err(&port->dev,
1293 "%s: usb_submit_urb failed, ret=%d, port=%d\n", 1293 "%s: usb_submit_urb failed, ret=%d, port=%d\n",
1294 __func__, ret, priv->dp_port_num); 1294 __func__, ret, priv->dp_port_num);
@@ -1353,8 +1353,7 @@ static int digi_open(struct tty_struct *tty, struct usb_serial_port *port)
1353 struct digi_port *priv = usb_get_serial_port_data(port); 1353 struct digi_port *priv = usb_get_serial_port_data(port);
1354 struct ktermios not_termios; 1354 struct ktermios not_termios;
1355 1355
1356 dbg("digi_open: TOP: port=%d, open_count=%d", 1356 dbg("digi_open: TOP: port=%d", priv->dp_port_num);
1357 priv->dp_port_num, port->port.count);
1358 1357
1359 /* be sure the device is started up */ 1358 /* be sure the device is started up */
1360 if (digi_startup_device(port->serial) != 0) 1359 if (digi_startup_device(port->serial) != 0)
@@ -1393,8 +1392,7 @@ static void digi_close(struct usb_serial_port *port)
1393 unsigned char buf[32]; 1392 unsigned char buf[32];
1394 struct digi_port *priv = usb_get_serial_port_data(port); 1393 struct digi_port *priv = usb_get_serial_port_data(port);
1395 1394
1396 dbg("digi_close: TOP: port=%d, open_count=%d", 1395 dbg("digi_close: TOP: port=%d", priv->dp_port_num);
1397 priv->dp_port_num, port->port.count);
1398 1396
1399 mutex_lock(&port->serial->disc_mutex); 1397 mutex_lock(&port->serial->disc_mutex);
1400 /* if disconnected, just clear flags */ 1398 /* if disconnected, just clear flags */
@@ -1629,7 +1627,7 @@ static void digi_read_bulk_callback(struct urb *urb)
1629 /* continue read */ 1627 /* continue read */
1630 urb->dev = port->serial->dev; 1628 urb->dev = port->serial->dev;
1631 ret = usb_submit_urb(urb, GFP_ATOMIC); 1629 ret = usb_submit_urb(urb, GFP_ATOMIC);
1632 if (ret != 0) { 1630 if (ret != 0 && ret != -EPERM) {
1633 dev_err(&port->dev, 1631 dev_err(&port->dev,
1634 "%s: failed resubmitting urb, ret=%d, port=%d\n", 1632 "%s: failed resubmitting urb, ret=%d, port=%d\n",
1635 __func__, ret, priv->dp_port_num); 1633 __func__, ret, priv->dp_port_num);
@@ -1658,12 +1656,11 @@ static int digi_read_inb_callback(struct urb *urb)
1658 int port_status = ((unsigned char *)urb->transfer_buffer)[2]; 1656 int port_status = ((unsigned char *)urb->transfer_buffer)[2];
1659 unsigned char *data = ((unsigned char *)urb->transfer_buffer) + 3; 1657 unsigned char *data = ((unsigned char *)urb->transfer_buffer) + 3;
1660 int flag, throttled; 1658 int flag, throttled;
1661 int i;
1662 int status = urb->status; 1659 int status = urb->status;
1663 1660
1664 /* do not process callbacks on closed ports */ 1661 /* do not process callbacks on closed ports */
1665 /* but do continue the read chain */ 1662 /* but do continue the read chain */
1666 if (port->port.count == 0) 1663 if (urb->status == -ENOENT)
1667 return 0; 1664 return 0;
1668 1665
1669 /* short/multiple packet check */ 1666 /* short/multiple packet check */
@@ -1705,17 +1702,9 @@ static int digi_read_inb_callback(struct urb *urb)
1705 1702
1706 /* data length is len-1 (one byte of len is port_status) */ 1703 /* data length is len-1 (one byte of len is port_status) */
1707 --len; 1704 --len;
1708
1709 len = tty_buffer_request_room(tty, len);
1710 if (len > 0) { 1705 if (len > 0) {
1711 /* Hot path */ 1706 tty_insert_flip_string_fixed_flag(tty, data,
1712 if (flag == TTY_NORMAL) 1707 flag, len);
1713 tty_insert_flip_string(tty, data, len);
1714 else {
1715 for (i = 0; i < len; i++)
1716 tty_insert_flip_char(tty,
1717 data[i], flag);
1718 }
1719 tty_flip_buffer_push(tty); 1708 tty_flip_buffer_push(tty);
1720 } 1709 }
1721 } 1710 }
@@ -1776,8 +1765,8 @@ static int digi_read_oob_callback(struct urb *urb)
1776 1765
1777 tty = tty_port_tty_get(&port->port); 1766 tty = tty_port_tty_get(&port->port);
1778 rts = 0; 1767 rts = 0;
1779 if (port->port.count) 1768 if (tty)
1780 rts = tty->termios->c_cflag & CRTSCTS; 1769 rts = tty->termios->c_cflag & CRTSCTS;
1781 1770
1782 if (opcode == DIGI_CMD_READ_INPUT_SIGNALS) { 1771 if (opcode == DIGI_CMD_READ_INPUT_SIGNALS) {
1783 spin_lock(&priv->dp_port_lock); 1772 spin_lock(&priv->dp_port_lock);
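[Editor's note] tty_insert_flip_string_fixed_flag() replaces the per-character loops in digi and cypress: it inserts a whole run of bytes that all share one status flag in a single call. Note its signature puts the flag before the length. A sketch:

#include <linux/tty.h>
#include <linux/tty_flip.h>

/* Push len received bytes that all carry the same flag. */
static void push_with_flag(struct tty_struct *tty,
			   const unsigned char *data, int len, char flag)
{
	if (len <= 0)
		return;
	/* one call instead of a tty_insert_flip_char() loop */
	tty_insert_flip_string_fixed_flag(tty, data, flag, len);
	tty_flip_buffer_push(tty);
}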
diff --git a/drivers/usb/serial/empeg.c b/drivers/usb/serial/empeg.c
index 7dd0e3eadbe6..5f740a1eacab 100644
--- a/drivers/usb/serial/empeg.c
+++ b/drivers/usb/serial/empeg.c
@@ -93,7 +93,7 @@ static void empeg_init_termios(struct tty_struct *tty);
93static void empeg_write_bulk_callback(struct urb *urb); 93static void empeg_write_bulk_callback(struct urb *urb);
94static void empeg_read_bulk_callback(struct urb *urb); 94static void empeg_read_bulk_callback(struct urb *urb);
95 95
96static struct usb_device_id id_table [] = { 96static const struct usb_device_id id_table[] = {
97 { USB_DEVICE(EMPEG_VENDOR_ID, EMPEG_PRODUCT_ID) }, 97 { USB_DEVICE(EMPEG_VENDOR_ID, EMPEG_PRODUCT_ID) },
98 { } /* Terminating entry */ 98 { } /* Terminating entry */
99}; 99};
@@ -346,7 +346,6 @@ static void empeg_read_bulk_callback(struct urb *urb)
346 tty = tty_port_tty_get(&port->port); 346 tty = tty_port_tty_get(&port->port);
347 347
348 if (urb->actual_length) { 348 if (urb->actual_length) {
349 tty_buffer_request_room(tty, urb->actual_length);
350 tty_insert_flip_string(tty, data, urb->actual_length); 349 tty_insert_flip_string(tty, data, urb->actual_length);
351 tty_flip_buffer_push(tty); 350 tty_flip_buffer_push(tty);
352 bytes_in += urb->actual_length; 351 bytes_in += urb->actual_length;
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 7638828e7317..6af0dfa5f5ac 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -33,12 +33,12 @@
33#include <linux/errno.h> 33#include <linux/errno.h>
34#include <linux/init.h> 34#include <linux/init.h>
35#include <linux/slab.h> 35#include <linux/slab.h>
36#include <linux/smp_lock.h>
37#include <linux/tty.h> 36#include <linux/tty.h>
38#include <linux/tty_driver.h> 37#include <linux/tty_driver.h>
39#include <linux/tty_flip.h> 38#include <linux/tty_flip.h>
40#include <linux/module.h> 39#include <linux/module.h>
41#include <linux/spinlock.h> 40#include <linux/spinlock.h>
41#include <linux/mutex.h>
42#include <linux/uaccess.h> 42#include <linux/uaccess.h>
43#include <linux/usb.h> 43#include <linux/usb.h>
44#include <linux/serial.h> 44#include <linux/serial.h>
@@ -88,10 +88,10 @@ struct ftdi_private {
88 88
89 unsigned int latency; /* latency setting in use */ 89 unsigned int latency; /* latency setting in use */
90 spinlock_t tx_lock; /* spinlock for transmit state */ 90 spinlock_t tx_lock; /* spinlock for transmit state */
91 unsigned long tx_bytes;
92 unsigned long tx_outstanding_bytes; 91 unsigned long tx_outstanding_bytes;
93 unsigned long tx_outstanding_urbs; 92 unsigned long tx_outstanding_urbs;
94 unsigned short max_packet_size; 93 unsigned short max_packet_size;
94 struct mutex cfg_lock; /* Avoid mess by parallel calls of config ioctl() */
95}; 95};
96 96
97/* struct ftdi_sio_quirk is used by devices requiring special attention. */ 97/* struct ftdi_sio_quirk is used by devices requiring special attention. */
@@ -614,6 +614,7 @@ static struct usb_device_id id_table_combined [] = {
614 { USB_DEVICE(FTDI_VID, FTDI_OCEANIC_PID) }, 614 { USB_DEVICE(FTDI_VID, FTDI_OCEANIC_PID) },
615 { USB_DEVICE(TTI_VID, TTI_QL355P_PID) }, 615 { USB_DEVICE(TTI_VID, TTI_QL355P_PID) },
616 { USB_DEVICE(FTDI_VID, FTDI_RM_CANVIEW_PID) }, 616 { USB_DEVICE(FTDI_VID, FTDI_RM_CANVIEW_PID) },
617 { USB_DEVICE(CONTEC_VID, CONTEC_COM1USBH_PID) },
617 { USB_DEVICE(BANDB_VID, BANDB_USOTL4_PID) }, 618 { USB_DEVICE(BANDB_VID, BANDB_USOTL4_PID) },
618 { USB_DEVICE(BANDB_VID, BANDB_USTL4_PID) }, 619 { USB_DEVICE(BANDB_VID, BANDB_USTL4_PID) },
619 { USB_DEVICE(BANDB_VID, BANDB_USO9ML2_PID) }, 620 { USB_DEVICE(BANDB_VID, BANDB_USO9ML2_PID) },
@@ -737,6 +738,10 @@ static struct usb_device_id id_table_combined [] = {
737 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, 738 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
738 { USB_DEVICE(FTDI_VID, HAMEG_HO820_PID) }, 739 { USB_DEVICE(FTDI_VID, HAMEG_HO820_PID) },
739 { USB_DEVICE(FTDI_VID, HAMEG_HO870_PID) }, 740 { USB_DEVICE(FTDI_VID, HAMEG_HO870_PID) },
741 { USB_DEVICE(FTDI_VID, MJSG_GENERIC_PID) },
742 { USB_DEVICE(FTDI_VID, MJSG_SR_RADIO_PID) },
743 { USB_DEVICE(FTDI_VID, MJSG_HD_RADIO_PID) },
744 { USB_DEVICE(FTDI_VID, MJSG_XM_RADIO_PID) },
740 { }, /* Optional parameter entry */ 745 { }, /* Optional parameter entry */
741 { } /* Terminating entry */ 746 { } /* Terminating entry */
742}; 747};
@@ -812,7 +817,7 @@ static struct usb_serial_driver ftdi_sio_device = {
812 .name = "ftdi_sio", 817 .name = "ftdi_sio",
813 }, 818 },
814 .description = "FTDI USB Serial Device", 819 .description = "FTDI USB Serial Device",
815 .usb_driver = &ftdi_driver , 820 .usb_driver = &ftdi_driver,
816 .id_table = id_table_combined, 821 .id_table = id_table_combined,
817 .num_ports = 1, 822 .num_ports = 1,
818 .probe = ftdi_sio_probe, 823 .probe = ftdi_sio_probe,
@@ -828,8 +833,8 @@ static struct usb_serial_driver ftdi_sio_device = {
828 .chars_in_buffer = ftdi_chars_in_buffer, 833 .chars_in_buffer = ftdi_chars_in_buffer,
829 .read_bulk_callback = ftdi_read_bulk_callback, 834 .read_bulk_callback = ftdi_read_bulk_callback,
830 .write_bulk_callback = ftdi_write_bulk_callback, 835 .write_bulk_callback = ftdi_write_bulk_callback,
831 .tiocmget = ftdi_tiocmget, 836 .tiocmget = ftdi_tiocmget,
832 .tiocmset = ftdi_tiocmset, 837 .tiocmset = ftdi_tiocmset,
833 .ioctl = ftdi_ioctl, 838 .ioctl = ftdi_ioctl,
834 .set_termios = ftdi_set_termios, 839 .set_termios = ftdi_set_termios,
835 .break_ctl = ftdi_break_ctl, 840 .break_ctl = ftdi_break_ctl,
@@ -935,7 +940,6 @@ static int update_mctrl(struct usb_serial_port *port, unsigned int set,
935 unsigned int clear) 940 unsigned int clear)
936{ 941{
937 struct ftdi_private *priv = usb_get_serial_port_data(port); 942 struct ftdi_private *priv = usb_get_serial_port_data(port);
938 char *buf;
939 unsigned urb_value; 943 unsigned urb_value;
940 int rv; 944 int rv;
941 945
@@ -944,10 +948,6 @@ static int update_mctrl(struct usb_serial_port *port, unsigned int set,
944 return 0; /* no change */ 948 return 0; /* no change */
945 } 949 }
946 950
947 buf = kmalloc(1, GFP_NOIO);
948 if (!buf)
949 return -ENOMEM;
950
951 clear &= ~set; /* 'set' takes precedence over 'clear' */ 951 clear &= ~set; /* 'set' takes precedence over 'clear' */
952 urb_value = 0; 952 urb_value = 0;
953 if (clear & TIOCM_DTR) 953 if (clear & TIOCM_DTR)
@@ -963,9 +963,7 @@ static int update_mctrl(struct usb_serial_port *port, unsigned int set,
963 FTDI_SIO_SET_MODEM_CTRL_REQUEST, 963 FTDI_SIO_SET_MODEM_CTRL_REQUEST,
964 FTDI_SIO_SET_MODEM_CTRL_REQUEST_TYPE, 964 FTDI_SIO_SET_MODEM_CTRL_REQUEST_TYPE,
965 urb_value, priv->interface, 965 urb_value, priv->interface,
966 buf, 0, WDR_TIMEOUT); 966 NULL, 0, WDR_TIMEOUT);
967
968 kfree(buf);
969 if (rv < 0) { 967 if (rv < 0) {
970 dbg("%s Error from MODEM_CTRL urb: DTR %s, RTS %s", 968 dbg("%s Error from MODEM_CTRL urb: DTR %s, RTS %s",
971 __func__, 969 __func__,
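[Editor's note] The kmalloc(1, GFP_NOIO)/kfree() pairs removed around these usb_control_msg() calls bought nothing: a zero-length control transfer has no data stage, so the buffer pointer is never dereferenced and NULL is fine. (Reads of even one byte still need a real heap buffer, as the read_latency_timer() rework below shows.) A sketch with an illustrative vendor request, not an FTDI one:

#include <linux/usb.h>

/* Zero-length SET-style request: no data stage, so the buffer may be NULL. */
static int send_setting(struct usb_device *udev, u16 value, u16 index)
{
	return usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
			0x01 /* REQ_SET_THING, illustrative */,
			USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT,
			value, index, NULL, 0, 5000 /* ms timeout */);
}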
@@ -1124,16 +1122,11 @@ static __u32 get_ftdi_divisor(struct tty_struct *tty,
1124static int change_speed(struct tty_struct *tty, struct usb_serial_port *port) 1122static int change_speed(struct tty_struct *tty, struct usb_serial_port *port)
1125{ 1123{
1126 struct ftdi_private *priv = usb_get_serial_port_data(port); 1124 struct ftdi_private *priv = usb_get_serial_port_data(port);
1127 char *buf;
1128 __u16 urb_value; 1125 __u16 urb_value;
1129 __u16 urb_index; 1126 __u16 urb_index;
1130 __u32 urb_index_value; 1127 __u32 urb_index_value;
1131 int rv; 1128 int rv;
1132 1129
1133 buf = kmalloc(1, GFP_NOIO);
1134 if (!buf)
1135 return -ENOMEM;
1136
1137 urb_index_value = get_ftdi_divisor(tty, port); 1130 urb_index_value = get_ftdi_divisor(tty, port);
1138 urb_value = (__u16)urb_index_value; 1131 urb_value = (__u16)urb_index_value;
1139 urb_index = (__u16)(urb_index_value >> 16); 1132 urb_index = (__u16)(urb_index_value >> 16);
@@ -1146,9 +1139,7 @@ static int change_speed(struct tty_struct *tty, struct usb_serial_port *port)
1146 FTDI_SIO_SET_BAUDRATE_REQUEST, 1139 FTDI_SIO_SET_BAUDRATE_REQUEST,
1147 FTDI_SIO_SET_BAUDRATE_REQUEST_TYPE, 1140 FTDI_SIO_SET_BAUDRATE_REQUEST_TYPE,
1148 urb_value, urb_index, 1141 urb_value, urb_index,
1149 buf, 0, WDR_SHORT_TIMEOUT); 1142 NULL, 0, WDR_SHORT_TIMEOUT);
1150
1151 kfree(buf);
1152 return rv; 1143 return rv;
1153} 1144}
1154 1145
@@ -1156,8 +1147,7 @@ static int write_latency_timer(struct usb_serial_port *port)
1156{ 1147{
1157 struct ftdi_private *priv = usb_get_serial_port_data(port); 1148 struct ftdi_private *priv = usb_get_serial_port_data(port);
1158 struct usb_device *udev = port->serial->dev; 1149 struct usb_device *udev = port->serial->dev;
1159 char buf[1]; 1150 int rv;
1160 int rv = 0;
1161 int l = priv->latency; 1151 int l = priv->latency;
1162 1152
1163 if (priv->flags & ASYNC_LOW_LATENCY) 1153 if (priv->flags & ASYNC_LOW_LATENCY)
@@ -1170,8 +1160,7 @@ static int write_latency_timer(struct usb_serial_port *port)
1170 FTDI_SIO_SET_LATENCY_TIMER_REQUEST, 1160 FTDI_SIO_SET_LATENCY_TIMER_REQUEST,
1171 FTDI_SIO_SET_LATENCY_TIMER_REQUEST_TYPE, 1161 FTDI_SIO_SET_LATENCY_TIMER_REQUEST_TYPE,
1172 l, priv->interface, 1162 l, priv->interface,
1173 buf, 0, WDR_TIMEOUT); 1163 NULL, 0, WDR_TIMEOUT);
1174
1175 if (rv < 0) 1164 if (rv < 0)
1176 dev_err(&port->dev, "Unable to write latency timer: %i\n", rv); 1165 dev_err(&port->dev, "Unable to write latency timer: %i\n", rv);
1177 return rv; 1166 return rv;
@@ -1181,24 +1170,29 @@ static int read_latency_timer(struct usb_serial_port *port)
1181{ 1170{
1182 struct ftdi_private *priv = usb_get_serial_port_data(port); 1171 struct ftdi_private *priv = usb_get_serial_port_data(port);
1183 struct usb_device *udev = port->serial->dev; 1172 struct usb_device *udev = port->serial->dev;
1184 unsigned short latency = 0; 1173 unsigned char *buf;
1185 int rv = 0; 1174 int rv;
1186
1187 1175
1188 dbg("%s", __func__); 1176 dbg("%s", __func__);
1189 1177
1178 buf = kmalloc(1, GFP_KERNEL);
1179 if (!buf)
1180 return -ENOMEM;
1181
1190 rv = usb_control_msg(udev, 1182 rv = usb_control_msg(udev,
1191 usb_rcvctrlpipe(udev, 0), 1183 usb_rcvctrlpipe(udev, 0),
1192 FTDI_SIO_GET_LATENCY_TIMER_REQUEST, 1184 FTDI_SIO_GET_LATENCY_TIMER_REQUEST,
1193 FTDI_SIO_GET_LATENCY_TIMER_REQUEST_TYPE, 1185 FTDI_SIO_GET_LATENCY_TIMER_REQUEST_TYPE,
1194 0, priv->interface, 1186 0, priv->interface,
1195 (char *) &latency, 1, WDR_TIMEOUT); 1187 buf, 1, WDR_TIMEOUT);
1196 1188 if (rv < 0)
1197 if (rv < 0) {
1198 dev_err(&port->dev, "Unable to read latency timer: %i\n", rv); 1189 dev_err(&port->dev, "Unable to read latency timer: %i\n", rv);
1199 return -EIO; 1190 else
1200 } 1191 priv->latency = buf[0];
1201 return latency; 1192
1193 kfree(buf);
1194
1195 return rv;
1202} 1196}
1203 1197
1204static int get_serial_info(struct usb_serial_port *port, 1198static int get_serial_info(struct usb_serial_port *port,
@@ -1229,7 +1223,7 @@ static int set_serial_info(struct tty_struct *tty,
1229 if (copy_from_user(&new_serial, newinfo, sizeof(new_serial))) 1223 if (copy_from_user(&new_serial, newinfo, sizeof(new_serial)))
1230 return -EFAULT; 1224 return -EFAULT;
1231 1225
1232 lock_kernel(); 1226 mutex_lock(&priv->cfg_lock);
1233 old_priv = *priv; 1227 old_priv = *priv;
1234 1228
1235 /* Do error checking and permission checking */ 1229 /* Do error checking and permission checking */
@@ -1237,7 +1231,7 @@ static int set_serial_info(struct tty_struct *tty,
1237 if (!capable(CAP_SYS_ADMIN)) { 1231 if (!capable(CAP_SYS_ADMIN)) {
1238 if (((new_serial.flags & ~ASYNC_USR_MASK) != 1232 if (((new_serial.flags & ~ASYNC_USR_MASK) !=
1239 (priv->flags & ~ASYNC_USR_MASK))) { 1233 (priv->flags & ~ASYNC_USR_MASK))) {
1240 unlock_kernel(); 1234 mutex_unlock(&priv->cfg_lock);
1241 return -EPERM; 1235 return -EPERM;
1242 } 1236 }
1243 priv->flags = ((priv->flags & ~ASYNC_USR_MASK) | 1237 priv->flags = ((priv->flags & ~ASYNC_USR_MASK) |
@@ -1248,7 +1242,7 @@ static int set_serial_info(struct tty_struct *tty,
1248 1242
1249 if ((new_serial.baud_base != priv->baud_base) && 1243 if ((new_serial.baud_base != priv->baud_base) &&
1250 (new_serial.baud_base < 9600)) { 1244 (new_serial.baud_base < 9600)) {
1251 unlock_kernel(); 1245 mutex_unlock(&priv->cfg_lock);
1252 return -EINVAL; 1246 return -EINVAL;
1253 } 1247 }
1254 1248
@@ -1278,11 +1272,11 @@ check_and_exit:
1278 (priv->flags & ASYNC_SPD_MASK)) || 1272 (priv->flags & ASYNC_SPD_MASK)) ||
1279 (((priv->flags & ASYNC_SPD_MASK) == ASYNC_SPD_CUST) && 1273 (((priv->flags & ASYNC_SPD_MASK) == ASYNC_SPD_CUST) &&
1280 (old_priv.custom_divisor != priv->custom_divisor))) { 1274 (old_priv.custom_divisor != priv->custom_divisor))) {
1281 unlock_kernel(); 1275 mutex_unlock(&priv->cfg_lock);
1282 change_speed(tty, port); 1276 change_speed(tty, port);
1283 } 1277 }
1284 else 1278 else
1285 unlock_kernel(); 1279 mutex_unlock(&priv->cfg_lock);
1286 return 0; 1280 return 0;
1287 1281
1288} /* set_serial_info */ 1282} /* set_serial_info */
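[Editor's note] lock_kernel()/unlock_kernel() (the Big Kernel Lock) is replaced by a per-port mutex, priv->cfg_lock, initialised in port_probe below. The locking intent, serialising concurrent TIOCSSERIAL configuration calls, becomes explicit and local to one device instead of global. The pattern, sketched with illustrative names:

#include <linux/mutex.h>

struct sketch_priv {			/* illustrative */
	struct mutex cfg_lock;		/* serialises config ioctls */
	int custom_divisor;
};

static int sketch_set_config(struct sketch_priv *priv, int divisor)
{
	mutex_lock(&priv->cfg_lock);
	if (divisor < 0) {
		/* unlock on every early exit, as in the hunk above */
		mutex_unlock(&priv->cfg_lock);
		return -EINVAL;
	}
	priv->custom_divisor = divisor;
	mutex_unlock(&priv->cfg_lock);
	return 0;
}

/* at probe time: mutex_init(&priv->cfg_lock); */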
@@ -1338,20 +1332,20 @@ static void ftdi_determine_type(struct usb_serial_port *port)
1338 __func__); 1332 __func__);
1339 } 1333 }
1340 } else if (version < 0x200) { 1334 } else if (version < 0x200) {
1341 /* Old device. Assume its the original SIO. */ 1335 /* Old device. Assume it's the original SIO. */
1342 priv->chip_type = SIO; 1336 priv->chip_type = SIO;
1343 priv->baud_base = 12000000 / 16; 1337 priv->baud_base = 12000000 / 16;
1344 priv->write_offset = 1; 1338 priv->write_offset = 1;
1345 } else if (version < 0x400) { 1339 } else if (version < 0x400) {
1346 /* Assume its an FT8U232AM (or FT8U245AM) */ 1340 /* Assume it's an FT8U232AM (or FT8U245AM) */
1347 /* (It might be a BM because of the iSerialNumber bug, 1341 /* (It might be a BM because of the iSerialNumber bug,
1348 * but it will still work as an AM device.) */ 1342 * but it will still work as an AM device.) */
1349 priv->chip_type = FT8U232AM; 1343 priv->chip_type = FT8U232AM;
1350 } else if (version < 0x600) { 1344 } else if (version < 0x600) {
1351 /* Assume its an FT232BM (or FT245BM) */ 1345 /* Assume it's an FT232BM (or FT245BM) */
1352 priv->chip_type = FT232BM; 1346 priv->chip_type = FT232BM;
1353 } else { 1347 } else {
1354 /* Assume its an FT232R */ 1348 /* Assume it's an FT232R */
1355 priv->chip_type = FT232RL; 1349 priv->chip_type = FT232RL;
1356 } 1350 }
1357 dev_info(&udev->dev, "Detected %s\n", ftdi_chip_name[priv->chip_type]); 1351 dev_info(&udev->dev, "Detected %s\n", ftdi_chip_name[priv->chip_type]);
@@ -1371,7 +1365,7 @@ static void ftdi_set_max_packet_size(struct usb_serial_port *port)
1371 struct usb_endpoint_descriptor *ep_desc = &interface->cur_altsetting->endpoint[1].desc; 1365 struct usb_endpoint_descriptor *ep_desc = &interface->cur_altsetting->endpoint[1].desc;
1372 1366
1373 unsigned num_endpoints; 1367 unsigned num_endpoints;
1374 int i = 0; 1368 int i;
1375 1369
1376 num_endpoints = interface->cur_altsetting->desc.bNumEndpoints; 1370 num_endpoints = interface->cur_altsetting->desc.bNumEndpoints;
1377 dev_info(&udev->dev, "Number of endpoints %d\n", num_endpoints); 1371 dev_info(&udev->dev, "Number of endpoints %d\n", num_endpoints);
@@ -1423,7 +1417,7 @@ static ssize_t store_latency_timer(struct device *dev,
1423 struct usb_serial_port *port = to_usb_serial_port(dev); 1417 struct usb_serial_port *port = to_usb_serial_port(dev);
1424 struct ftdi_private *priv = usb_get_serial_port_data(port); 1418 struct ftdi_private *priv = usb_get_serial_port_data(port);
1425 int v = simple_strtoul(valbuf, NULL, 10); 1419 int v = simple_strtoul(valbuf, NULL, 10);
1426 int rv = 0; 1420 int rv;
1427 1421
1428 priv->latency = v; 1422 priv->latency = v;
1429 rv = write_latency_timer(port); 1423 rv = write_latency_timer(port);
@@ -1440,9 +1434,8 @@ static ssize_t store_event_char(struct device *dev,
1440 struct usb_serial_port *port = to_usb_serial_port(dev); 1434 struct usb_serial_port *port = to_usb_serial_port(dev);
1441 struct ftdi_private *priv = usb_get_serial_port_data(port); 1435 struct ftdi_private *priv = usb_get_serial_port_data(port);
1442 struct usb_device *udev = port->serial->dev; 1436 struct usb_device *udev = port->serial->dev;
1443 char buf[1];
1444 int v = simple_strtoul(valbuf, NULL, 10); 1437 int v = simple_strtoul(valbuf, NULL, 10);
1445 int rv = 0; 1438 int rv;
1446 1439
1447 dbg("%s: setting event char = %i", __func__, v); 1440 dbg("%s: setting event char = %i", __func__, v);
1448 1441
@@ -1451,8 +1444,7 @@ static ssize_t store_event_char(struct device *dev,
1451 FTDI_SIO_SET_EVENT_CHAR_REQUEST, 1444 FTDI_SIO_SET_EVENT_CHAR_REQUEST,
1452 FTDI_SIO_SET_EVENT_CHAR_REQUEST_TYPE, 1445 FTDI_SIO_SET_EVENT_CHAR_REQUEST_TYPE,
1453 v, priv->interface, 1446 v, priv->interface,
1454 buf, 0, WDR_TIMEOUT); 1447 NULL, 0, WDR_TIMEOUT);
1455
1456 if (rv < 0) { 1448 if (rv < 0) {
1457 dbg("Unable to write event character: %i", rv); 1449 dbg("Unable to write event character: %i", rv);
1458 return -EIO; 1450 return -EIO;
@@ -1551,9 +1543,9 @@ static int ftdi_sio_port_probe(struct usb_serial_port *port)
1551 1543
1552 kref_init(&priv->kref); 1544 kref_init(&priv->kref);
1553 spin_lock_init(&priv->tx_lock); 1545 spin_lock_init(&priv->tx_lock);
1546 mutex_init(&priv->cfg_lock);
1554 init_waitqueue_head(&priv->delta_msr_wait); 1547 init_waitqueue_head(&priv->delta_msr_wait);
1555 /* This will push the characters through immediately rather 1548
1556 than queue a task to deliver them */
1557 priv->flags = ASYNC_LOW_LATENCY; 1549 priv->flags = ASYNC_LOW_LATENCY;
1558 1550
1559 if (quirk && quirk->port_probe) 1551 if (quirk && quirk->port_probe)
@@ -1585,7 +1577,8 @@ static int ftdi_sio_port_probe(struct usb_serial_port *port)
1585 1577
1586 ftdi_determine_type(port); 1578 ftdi_determine_type(port);
1587 ftdi_set_max_packet_size(port); 1579 ftdi_set_max_packet_size(port);
1588 read_latency_timer(port); 1580 if (read_latency_timer(port) < 0)
1581 priv->latency = 16;
1589 create_sysfs_attrs(port); 1582 create_sysfs_attrs(port);
1590 return 0; 1583 return 0;
1591} 1584}
@@ -1630,8 +1623,6 @@ static int ftdi_NDI_device_setup(struct usb_serial *serial)
1630{ 1623{
1631 struct usb_device *udev = serial->dev; 1624 struct usb_device *udev = serial->dev;
1632 int latency = ndi_latency_timer; 1625 int latency = ndi_latency_timer;
1633 int rv = 0;
1634 char buf[1];
1635 1626
1636 if (latency == 0) 1627 if (latency == 0)
1637 latency = 1; 1628 latency = 1;
@@ -1641,10 +1632,11 @@ static int ftdi_NDI_device_setup(struct usb_serial *serial)
1641 dbg("%s setting NDI device latency to %d", __func__, latency); 1632 dbg("%s setting NDI device latency to %d", __func__, latency);
1642 dev_info(&udev->dev, "NDI device with a latency value of %d", latency); 1633 dev_info(&udev->dev, "NDI device with a latency value of %d", latency);
1643 1634
1644 rv = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 1635 /* FIXME: errors are not returned */
1636 usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
1645 FTDI_SIO_SET_LATENCY_TIMER_REQUEST, 1637 FTDI_SIO_SET_LATENCY_TIMER_REQUEST,
1646 FTDI_SIO_SET_LATENCY_TIMER_REQUEST_TYPE, 1638 FTDI_SIO_SET_LATENCY_TIMER_REQUEST_TYPE,
1647 latency, 0, buf, 0, WDR_TIMEOUT); 1639 latency, 0, NULL, 0, WDR_TIMEOUT);
1648 return 0; 1640 return 0;
1649} 1641}
1650 1642
@@ -1720,7 +1712,7 @@ static int ftdi_submit_read_urb(struct usb_serial_port *port, gfp_t mem_flags)
1720 urb->transfer_buffer_length, 1712 urb->transfer_buffer_length,
1721 ftdi_read_bulk_callback, port); 1713 ftdi_read_bulk_callback, port);
1722 result = usb_submit_urb(urb, mem_flags); 1714 result = usb_submit_urb(urb, mem_flags);
1723 if (result) 1715 if (result && result != -EPERM)
1724 dev_err(&port->dev, 1716 dev_err(&port->dev,
1725 "%s - failed submitting read urb, error %d\n", 1717 "%s - failed submitting read urb, error %d\n",
1726 __func__, result); 1718 __func__, result);
@@ -1732,16 +1724,10 @@ static int ftdi_open(struct tty_struct *tty, struct usb_serial_port *port)
1732 struct usb_device *dev = port->serial->dev; 1724 struct usb_device *dev = port->serial->dev;
1733 struct ftdi_private *priv = usb_get_serial_port_data(port); 1725 struct ftdi_private *priv = usb_get_serial_port_data(port);
1734 unsigned long flags; 1726 unsigned long flags;
1735 1727 int result;
1736 int result = 0;
1737 char buf[1]; /* Needed for the usb_control_msg I think */
1738 1728
1739 dbg("%s", __func__); 1729 dbg("%s", __func__);
1740 1730
1741 spin_lock_irqsave(&priv->tx_lock, flags);
1742 priv->tx_bytes = 0;
1743 spin_unlock_irqrestore(&priv->tx_lock, flags);
1744
1745 write_latency_timer(port); 1731 write_latency_timer(port);
1746 1732
1747 /* No error checking for this (will get errors later anyway) */ 1733 /* No error checking for this (will get errors later anyway) */
@@ -1749,7 +1735,7 @@ static int ftdi_open(struct tty_struct *tty, struct usb_serial_port *port)
1749 usb_control_msg(dev, usb_sndctrlpipe(dev, 0), 1735 usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
1750 FTDI_SIO_RESET_REQUEST, FTDI_SIO_RESET_REQUEST_TYPE, 1736 FTDI_SIO_RESET_REQUEST, FTDI_SIO_RESET_REQUEST_TYPE,
1751 FTDI_SIO_RESET_SIO, 1737 FTDI_SIO_RESET_SIO,
1752 priv->interface, buf, 0, WDR_TIMEOUT); 1738 priv->interface, NULL, 0, WDR_TIMEOUT);
1753 1739
1754 /* Termios defaults are set by usb_serial_init. We don't change 1740 /* Termios defaults are set by usb_serial_init. We don't change
1755 port->tty->termios - this would lose speed settings, etc. 1741 port->tty->termios - this would lose speed settings, etc.
@@ -1777,7 +1763,6 @@ static int ftdi_open(struct tty_struct *tty, struct usb_serial_port *port)
1777static void ftdi_dtr_rts(struct usb_serial_port *port, int on) 1763static void ftdi_dtr_rts(struct usb_serial_port *port, int on)
1778{ 1764{
1779 struct ftdi_private *priv = usb_get_serial_port_data(port); 1765 struct ftdi_private *priv = usb_get_serial_port_data(port);
1780 char buf[1];
1781 1766
1782 mutex_lock(&port->serial->disc_mutex); 1767 mutex_lock(&port->serial->disc_mutex);
1783 if (!port->serial->disconnected) { 1768 if (!port->serial->disconnected) {
@@ -1786,7 +1771,7 @@ static void ftdi_dtr_rts(struct usb_serial_port *port, int on)
1786 usb_sndctrlpipe(port->serial->dev, 0), 1771 usb_sndctrlpipe(port->serial->dev, 0),
1787 FTDI_SIO_SET_FLOW_CTRL_REQUEST, 1772 FTDI_SIO_SET_FLOW_CTRL_REQUEST,
1788 FTDI_SIO_SET_FLOW_CTRL_REQUEST_TYPE, 1773 FTDI_SIO_SET_FLOW_CTRL_REQUEST_TYPE,
1789 0, priv->interface, buf, 0, 1774 0, priv->interface, NULL, 0,
1790 WDR_TIMEOUT) < 0) { 1775 WDR_TIMEOUT) < 0) {
1791 dev_err(&port->dev, "error from flowcontrol urb\n"); 1776 dev_err(&port->dev, "error from flowcontrol urb\n");
1792 } 1777 }
@@ -1847,7 +1832,7 @@ static int ftdi_write(struct tty_struct *tty, struct usb_serial_port *port,
1847 spin_lock_irqsave(&priv->tx_lock, flags); 1832 spin_lock_irqsave(&priv->tx_lock, flags);
1848 if (priv->tx_outstanding_urbs > URB_UPPER_LIMIT) { 1833 if (priv->tx_outstanding_urbs > URB_UPPER_LIMIT) {
1849 spin_unlock_irqrestore(&priv->tx_lock, flags); 1834 spin_unlock_irqrestore(&priv->tx_lock, flags);
1850 dbg("%s - write limit hit\n", __func__); 1835 dbg("%s - write limit hit", __func__);
1851 return 0; 1836 return 0;
1852 } 1837 }
1853 priv->tx_outstanding_urbs++; 1838 priv->tx_outstanding_urbs++;
@@ -1927,7 +1912,6 @@ static int ftdi_write(struct tty_struct *tty, struct usb_serial_port *port,
1927 } else { 1912 } else {
1928 spin_lock_irqsave(&priv->tx_lock, flags); 1913 spin_lock_irqsave(&priv->tx_lock, flags);
1929 priv->tx_outstanding_bytes += count; 1914 priv->tx_outstanding_bytes += count;
1930 priv->tx_bytes += count;
1931 spin_unlock_irqrestore(&priv->tx_lock, flags); 1915 spin_unlock_irqrestore(&priv->tx_lock, flags);
1932 } 1916 }
1933 1917
@@ -2154,8 +2138,7 @@ static void ftdi_break_ctl(struct tty_struct *tty, int break_state)
2154{ 2138{
2155 struct usb_serial_port *port = tty->driver_data; 2139 struct usb_serial_port *port = tty->driver_data;
2156 struct ftdi_private *priv = usb_get_serial_port_data(port); 2140 struct ftdi_private *priv = usb_get_serial_port_data(port);
2157 __u16 urb_value = 0; 2141 __u16 urb_value;
2158 char buf[1];
2159 2142
2160 /* break_state = -1 to turn on break, and 0 to turn off break */ 2143 /* break_state = -1 to turn on break, and 0 to turn off break */
2161 /* see drivers/char/tty_io.c to see it used */ 2144 /* see drivers/char/tty_io.c to see it used */
@@ -2171,7 +2154,7 @@ static void ftdi_break_ctl(struct tty_struct *tty, int break_state)
2171 FTDI_SIO_SET_DATA_REQUEST, 2154 FTDI_SIO_SET_DATA_REQUEST,
2172 FTDI_SIO_SET_DATA_REQUEST_TYPE, 2155 FTDI_SIO_SET_DATA_REQUEST_TYPE,
2173 urb_value , priv->interface, 2156 urb_value , priv->interface,
2174 buf, 0, WDR_TIMEOUT) < 0) { 2157 NULL, 0, WDR_TIMEOUT) < 0) {
2175 dev_err(&port->dev, "%s FAILED to enable/disable break state " 2158 dev_err(&port->dev, "%s FAILED to enable/disable break state "
2176 "(state was %d)\n", __func__, break_state); 2159 "(state was %d)\n", __func__, break_state);
2177 } 2160 }
@@ -2195,7 +2178,6 @@ static void ftdi_set_termios(struct tty_struct *tty,
2195 struct ktermios *termios = tty->termios; 2178 struct ktermios *termios = tty->termios;
2196 unsigned int cflag = termios->c_cflag; 2179 unsigned int cflag = termios->c_cflag;
2197 __u16 urb_value; /* will hold the new flags */ 2180 __u16 urb_value; /* will hold the new flags */
2198 char buf[1]; /* Perhaps I should dynamically alloc this? */
2199 2181
2200 /* Added for xon/xoff support */ 2182 /* Added for xon/xoff support */
2201 unsigned int iflag = termios->c_iflag; 2183 unsigned int iflag = termios->c_iflag;
@@ -2246,12 +2228,10 @@ static void ftdi_set_termios(struct tty_struct *tty,
2246 } 2228 }
2247 if (cflag & CSIZE) { 2229 if (cflag & CSIZE) {
2248 switch (cflag & CSIZE) { 2230 switch (cflag & CSIZE) {
2249 case CS5: urb_value |= 5; dbg("Setting CS5"); break;
2250 case CS6: urb_value |= 6; dbg("Setting CS6"); break;
2251 case CS7: urb_value |= 7; dbg("Setting CS7"); break; 2231 case CS7: urb_value |= 7; dbg("Setting CS7"); break;
2252 case CS8: urb_value |= 8; dbg("Setting CS8"); break; 2232 case CS8: urb_value |= 8; dbg("Setting CS8"); break;
2253 default: 2233 default:
2254 dev_err(&port->dev, "CSIZE was set but not CS5-CS8\n"); 2234 dev_err(&port->dev, "CSIZE was set but not CS7-CS8\n");
2255 } 2235 }
2256 } 2236 }
2257 2237
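
Dropping the CS5 and CS6 cases appears to match the hardware: the FTDI data sheets list only 7- and 8-bit word lengths, so the 5- and 6-bit branches could never have produced a working configuration, and the error text is adjusted to say CS7-CS8. A defensive variant (the fallback to 8 bits is an illustration, not what this commit does) would look like:

    switch (cflag & CSIZE) {
    case CS7: urb_value |= 7; dbg("Setting CS7"); break;
    case CS8: urb_value |= 8; dbg("Setting CS8"); break;
    default:
        /* Illustration: pick a supported word length instead of
         * sending an out-of-range value to the chip. */
        urb_value |= 8;
        dev_err(&port->dev, "CSIZE was set but not CS7-CS8\n");
    }
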
@@ -2263,7 +2243,7 @@ static void ftdi_set_termios(struct tty_struct *tty,
2263 FTDI_SIO_SET_DATA_REQUEST, 2243 FTDI_SIO_SET_DATA_REQUEST,
2264 FTDI_SIO_SET_DATA_REQUEST_TYPE, 2244 FTDI_SIO_SET_DATA_REQUEST_TYPE,
2265 urb_value , priv->interface, 2245 urb_value , priv->interface,
2266 buf, 0, WDR_SHORT_TIMEOUT) < 0) { 2246 NULL, 0, WDR_SHORT_TIMEOUT) < 0) {
2267 dev_err(&port->dev, "%s FAILED to set " 2247 dev_err(&port->dev, "%s FAILED to set "
2268 "databits/stopbits/parity\n", __func__); 2248 "databits/stopbits/parity\n", __func__);
2269 } 2249 }
@@ -2275,7 +2255,7 @@ static void ftdi_set_termios(struct tty_struct *tty,
2275 FTDI_SIO_SET_FLOW_CTRL_REQUEST, 2255 FTDI_SIO_SET_FLOW_CTRL_REQUEST,
2276 FTDI_SIO_SET_FLOW_CTRL_REQUEST_TYPE, 2256 FTDI_SIO_SET_FLOW_CTRL_REQUEST_TYPE,
2277 0, priv->interface, 2257 0, priv->interface,
2278 buf, 0, WDR_TIMEOUT) < 0) { 2258 NULL, 0, WDR_TIMEOUT) < 0) {
2279 dev_err(&port->dev, 2259 dev_err(&port->dev,
2280 "%s error from disable flowcontrol urb\n", 2260 "%s error from disable flowcontrol urb\n",
2281 __func__); 2261 __func__);
@@ -2301,7 +2281,7 @@ static void ftdi_set_termios(struct tty_struct *tty,
2301 FTDI_SIO_SET_FLOW_CTRL_REQUEST, 2281 FTDI_SIO_SET_FLOW_CTRL_REQUEST,
2302 FTDI_SIO_SET_FLOW_CTRL_REQUEST_TYPE, 2282 FTDI_SIO_SET_FLOW_CTRL_REQUEST_TYPE,
2303 0 , (FTDI_SIO_RTS_CTS_HS | priv->interface), 2283 0 , (FTDI_SIO_RTS_CTS_HS | priv->interface),
2304 buf, 0, WDR_TIMEOUT) < 0) { 2284 NULL, 0, WDR_TIMEOUT) < 0) {
2305 dev_err(&port->dev, 2285 dev_err(&port->dev,
2306 "urb failed to set to rts/cts flow control\n"); 2286 "urb failed to set to rts/cts flow control\n");
2307 } 2287 }
@@ -2333,7 +2313,7 @@ static void ftdi_set_termios(struct tty_struct *tty,
2333 FTDI_SIO_SET_FLOW_CTRL_REQUEST_TYPE, 2313 FTDI_SIO_SET_FLOW_CTRL_REQUEST_TYPE,
2334 urb_value , (FTDI_SIO_XON_XOFF_HS 2314 urb_value , (FTDI_SIO_XON_XOFF_HS
2335 | priv->interface), 2315 | priv->interface),
2336 buf, 0, WDR_TIMEOUT) < 0) { 2316 NULL, 0, WDR_TIMEOUT) < 0) {
2337 dev_err(&port->dev, "urb failed to set to " 2317 dev_err(&port->dev, "urb failed to set to "
2338 "xon/xoff flow control\n"); 2318 "xon/xoff flow control\n");
2339 } 2319 }
@@ -2347,7 +2327,7 @@ static void ftdi_set_termios(struct tty_struct *tty,
2347 FTDI_SIO_SET_FLOW_CTRL_REQUEST, 2327 FTDI_SIO_SET_FLOW_CTRL_REQUEST,
2348 FTDI_SIO_SET_FLOW_CTRL_REQUEST_TYPE, 2328 FTDI_SIO_SET_FLOW_CTRL_REQUEST_TYPE,
2349 0, priv->interface, 2329 0, priv->interface,
2350 buf, 0, WDR_TIMEOUT) < 0) { 2330 NULL, 0, WDR_TIMEOUT) < 0) {
2351 dev_err(&port->dev, 2331 dev_err(&port->dev,
2352 "urb failed to clear flow control\n"); 2332 "urb failed to clear flow control\n");
2353 } 2333 }
@@ -2361,21 +2341,22 @@ static int ftdi_tiocmget(struct tty_struct *tty, struct file *file)
2361{ 2341{
2362 struct usb_serial_port *port = tty->driver_data; 2342 struct usb_serial_port *port = tty->driver_data;
2363 struct ftdi_private *priv = usb_get_serial_port_data(port); 2343 struct ftdi_private *priv = usb_get_serial_port_data(port);
2364 unsigned char buf[2]; 2344 unsigned char *buf;
2345 int len;
2365 int ret; 2346 int ret;
2366 2347
2367 dbg("%s TIOCMGET", __func__); 2348 dbg("%s TIOCMGET", __func__);
2349
2350 buf = kmalloc(2, GFP_KERNEL);
2351 if (!buf)
2352 return -ENOMEM;
2353 /*
2354 * The 8U232AM returns a two byte value (the SIO a 1 byte value) in
2355 * the same format as the data returned from the in point.
2356 */
2368 switch (priv->chip_type) { 2357 switch (priv->chip_type) {
2369 case SIO: 2358 case SIO:
2370 /* Request the status from the device */ 2359 len = 1;
2371 ret = usb_control_msg(port->serial->dev,
2372 usb_rcvctrlpipe(port->serial->dev, 0),
2373 FTDI_SIO_GET_MODEM_STATUS_REQUEST,
2374 FTDI_SIO_GET_MODEM_STATUS_REQUEST_TYPE,
2375 0, 0,
2376 buf, 1, WDR_TIMEOUT);
2377 if (ret < 0)
2378 return ret;
2379 break; 2360 break;
2380 case FT8U232AM: 2361 case FT8U232AM:
2381 case FT232BM: 2362 case FT232BM:
@@ -2383,27 +2364,30 @@ static int ftdi_tiocmget(struct tty_struct *tty, struct file *file)
2383 case FT232RL: 2364 case FT232RL:
2384 case FT2232H: 2365 case FT2232H:
2385 case FT4232H: 2366 case FT4232H:
2386 /* the 8U232AM returns a two byte value (the sio is a 1 byte 2367 len = 2;
2387 value) - in the same format as the data returned from the in
2388 point */
2389 ret = usb_control_msg(port->serial->dev,
2390 usb_rcvctrlpipe(port->serial->dev, 0),
2391 FTDI_SIO_GET_MODEM_STATUS_REQUEST,
2392 FTDI_SIO_GET_MODEM_STATUS_REQUEST_TYPE,
2393 0, priv->interface,
2394 buf, 2, WDR_TIMEOUT);
2395 if (ret < 0)
2396 return ret;
2397 break; 2368 break;
2398 default: 2369 default:
2399 return -EFAULT; 2370 ret = -EFAULT;
2371 goto out;
2400 } 2372 }
2401 2373
2402 return (buf[0] & FTDI_SIO_DSR_MASK ? TIOCM_DSR : 0) | 2374 ret = usb_control_msg(port->serial->dev,
2375 usb_rcvctrlpipe(port->serial->dev, 0),
2376 FTDI_SIO_GET_MODEM_STATUS_REQUEST,
2377 FTDI_SIO_GET_MODEM_STATUS_REQUEST_TYPE,
2378 0, priv->interface,
2379 buf, len, WDR_TIMEOUT);
2380 if (ret < 0)
2381 goto out;
2382
2383 ret = (buf[0] & FTDI_SIO_DSR_MASK ? TIOCM_DSR : 0) |
2403 (buf[0] & FTDI_SIO_CTS_MASK ? TIOCM_CTS : 0) | 2384 (buf[0] & FTDI_SIO_CTS_MASK ? TIOCM_CTS : 0) |
2404 (buf[0] & FTDI_SIO_RI_MASK ? TIOCM_RI : 0) | 2385 (buf[0] & FTDI_SIO_RI_MASK ? TIOCM_RI : 0) |
2405 (buf[0] & FTDI_SIO_RLSD_MASK ? TIOCM_CD : 0) | 2386 (buf[0] & FTDI_SIO_RLSD_MASK ? TIOCM_CD : 0) |
2406 priv->last_dtr_rts; 2387 priv->last_dtr_rts;
2388out:
2389 kfree(buf);
2390 return ret;
2407} 2391}
2408 2392
2409static int ftdi_tiocmset(struct tty_struct *tty, struct file *file, 2393static int ftdi_tiocmset(struct tty_struct *tty, struct file *file,
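
This rewrite is the heart of the commit. The modem-status bytes were previously read into a two-byte array on the kernel stack, but usb_control_msg() buffers may be mapped for DMA, and on architectures without cache-coherent DMA a stack buffer can be corrupted by cache-line sharing; kmalloc() memory is safe. As a side effect, the chip-type switch shrinks to choosing the read length (1 byte on the original SIO, 2 bytes elsewhere) and the control message itself is issued only once. A condensed sketch of the pattern, reusing the driver's request constants (the helper name is hypothetical):

    #include <linux/slab.h>
    #include <linux/usb.h>

    static int read_modem_status(struct usb_serial_port *port,
                                 struct ftdi_private *priv, u8 status[2])
    {
        u8 *buf;
        int len = (priv->chip_type == SIO) ? 1 : 2;
        int ret;

        buf = kmalloc(2, GFP_KERNEL);    /* heap memory: DMA-safe */
        if (!buf)
            return -ENOMEM;

        ret = usb_control_msg(port->serial->dev,
                              usb_rcvctrlpipe(port->serial->dev, 0),
                              FTDI_SIO_GET_MODEM_STATUS_REQUEST,
                              FTDI_SIO_GET_MODEM_STATUS_REQUEST_TYPE,
                              0, priv->interface, buf, len, WDR_TIMEOUT);
        if (ret >= 0) {
            status[0] = buf[0];
            status[1] = (len == 2) ? buf[1] : 0;
            ret = 0;
        }
        kfree(buf);    /* freed on success and failure alike */
        return ret;
    }
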
@@ -2508,8 +2492,7 @@ void ftdi_unthrottle(struct tty_struct *tty)
2508 port->throttled = port->throttle_req = 0; 2492 port->throttled = port->throttle_req = 0;
2509 spin_unlock_irqrestore(&port->lock, flags); 2493 spin_unlock_irqrestore(&port->lock, flags);
2510 2494
2511 /* Resubmit urb if throttled and open. */ 2495 if (was_throttled)
2512 if (was_throttled && test_bit(ASYNCB_INITIALIZED, &port->port.flags))
2513 ftdi_submit_read_urb(port, GFP_KERNEL); 2496 ftdi_submit_read_urb(port, GFP_KERNEL);
2514} 2497}
2515 2498
diff --git a/drivers/usb/serial/ftdi_sio.h b/drivers/usb/serial/ftdi_sio.h
index b0e0d64f822e..ff9bf80327a3 100644
--- a/drivers/usb/serial/ftdi_sio.h
+++ b/drivers/usb/serial/ftdi_sio.h
@@ -28,13 +28,13 @@
28#define FTDI_SIO_SET_FLOW_CTRL 2 /* Set flow control register */ 28#define FTDI_SIO_SET_FLOW_CTRL 2 /* Set flow control register */
29#define FTDI_SIO_SET_BAUD_RATE 3 /* Set baud rate */ 29#define FTDI_SIO_SET_BAUD_RATE 3 /* Set baud rate */
30#define FTDI_SIO_SET_DATA 4 /* Set the data characteristics of the port */ 30#define FTDI_SIO_SET_DATA 4 /* Set the data characteristics of the port */
31#define FTDI_SIO_GET_MODEM_STATUS 5 /* Retrieve current value of modern status register */ 31#define FTDI_SIO_GET_MODEM_STATUS 5 /* Retrieve current value of modem status register */
32#define FTDI_SIO_SET_EVENT_CHAR 6 /* Set the event character */ 32#define FTDI_SIO_SET_EVENT_CHAR 6 /* Set the event character */
33#define FTDI_SIO_SET_ERROR_CHAR 7 /* Set the error character */ 33#define FTDI_SIO_SET_ERROR_CHAR 7 /* Set the error character */
34#define FTDI_SIO_SET_LATENCY_TIMER 9 /* Set the latency timer */ 34#define FTDI_SIO_SET_LATENCY_TIMER 9 /* Set the latency timer */
35#define FTDI_SIO_GET_LATENCY_TIMER 10 /* Get the latency timer */ 35#define FTDI_SIO_GET_LATENCY_TIMER 10 /* Get the latency timer */
36 36
37/* Interface indicies for FT2232, FT2232H and FT4232H devices*/ 37/* Interface indices for FT2232, FT2232H and FT4232H devices */
38#define INTERFACE_A 1 38#define INTERFACE_A 1
39#define INTERFACE_B 2 39#define INTERFACE_B 2
40#define INTERFACE_C 3 40#define INTERFACE_C 3
@@ -270,7 +270,7 @@ typedef enum {
270 * BmRequestType: 0100 0000b 270 * BmRequestType: 0100 0000b
271 * bRequest: FTDI_SIO_SET_FLOW_CTRL 271 * bRequest: FTDI_SIO_SET_FLOW_CTRL
272 * wValue: Xoff/Xon 272 * wValue: Xoff/Xon
273 * wIndex: Protocol/Port - hIndex is protocl / lIndex is port 273 * wIndex: Protocol/Port - hIndex is protocol / lIndex is port
274 * wLength: 0 274 * wLength: 0
275 * Data: None 275 * Data: None
276 * 276 *
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index c8951aeed983..0727e198503e 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -22,7 +22,7 @@
22#define FTDI_8U232AM_ALT_PID 0x6006 /* FTDI's alternate PID for above */ 22#define FTDI_8U232AM_ALT_PID 0x6006 /* FTDI's alternate PID for above */
23#define FTDI_8U2232C_PID 0x6010 /* Dual channel device */ 23#define FTDI_8U2232C_PID 0x6010 /* Dual channel device */
24#define FTDI_4232H_PID 0x6011 /* Quad channel hi-speed device */ 24#define FTDI_4232H_PID 0x6011 /* Quad channel hi-speed device */
25#define FTDI_SIO_PID 0x8372 /* Product Id SIO application of 8U100AX */ 25#define FTDI_SIO_PID 0x8372 /* Product Id SIO application of 8U100AX */
26#define FTDI_232RL_PID 0xFBFA /* Product ID for FT232RL */ 26#define FTDI_232RL_PID 0xFBFA /* Product ID for FT232RL */
27 27
28 28
@@ -49,7 +49,7 @@
49#define LMI_LM3S_DEVEL_BOARD_PID 0xbcd8 49#define LMI_LM3S_DEVEL_BOARD_PID 0xbcd8
50#define LMI_LM3S_EVAL_BOARD_PID 0xbcd9 50#define LMI_LM3S_EVAL_BOARD_PID 0xbcd9
51 51
52#define FTDI_TURTELIZER_PID 0xBDC8 /* JTAG/RS-232 adapter by egnite GmBH */ 52#define FTDI_TURTELIZER_PID 0xBDC8 /* JTAG/RS-232 adapter by egnite GmbH */
53 53
54/* OpenDCC (www.opendcc.de) product id */ 54/* OpenDCC (www.opendcc.de) product id */
55#define FTDI_OPENDCC_PID 0xBFD8 55#define FTDI_OPENDCC_PID 0xBFD8
@@ -185,7 +185,7 @@
185#define FTDI_ELV_TFD128_PID 0xE0EC /* ELV Temperatur-Feuchte-Datenlogger TFD 128 */ 185#define FTDI_ELV_TFD128_PID 0xE0EC /* ELV Temperatur-Feuchte-Datenlogger TFD 128 */
186#define FTDI_ELV_FM3RX_PID 0xE0ED /* ELV Messwertuebertragung FM3 RX */ 186#define FTDI_ELV_FM3RX_PID 0xE0ED /* ELV Messwertuebertragung FM3 RX */
187#define FTDI_ELV_WS777_PID 0xE0EE /* Conrad WS 777 */ 187#define FTDI_ELV_WS777_PID 0xE0EE /* Conrad WS 777 */
188#define FTDI_ELV_EM1010PC_PID 0xE0EF /* Engery monitor EM 1010 PC */ 188#define FTDI_ELV_EM1010PC_PID 0xE0EF /* Energy monitor EM 1010 PC */
189#define FTDI_ELV_CSI8_PID 0xE0F0 /* Computer-Schalt-Interface (CSI 8) */ 189#define FTDI_ELV_CSI8_PID 0xE0F0 /* Computer-Schalt-Interface (CSI 8) */
190#define FTDI_ELV_EM1000DL_PID 0xE0F1 /* PC-Datenlogger fuer Energiemonitor (EM 1000 DL) */ 190#define FTDI_ELV_EM1000DL_PID 0xE0F1 /* PC-Datenlogger fuer Energiemonitor (EM 1000 DL) */
191#define FTDI_ELV_PCK100_PID 0xE0F2 /* PC-Kabeltester (PCK 100) */ 191#define FTDI_ELV_PCK100_PID 0xE0F2 /* PC-Kabeltester (PCK 100) */
@@ -212,8 +212,8 @@
212 * drivers, or possibly the Comedi drivers in some cases. */ 212 * drivers, or possibly the Comedi drivers in some cases. */
213#define FTDI_ELV_CLI7000_PID 0xFB59 /* Computer-Light-Interface (CLI 7000) */ 213#define FTDI_ELV_CLI7000_PID 0xFB59 /* Computer-Light-Interface (CLI 7000) */
214#define FTDI_ELV_PPS7330_PID 0xFB5C /* Processor-Power-Supply (PPS 7330) */ 214#define FTDI_ELV_PPS7330_PID 0xFB5C /* Processor-Power-Supply (PPS 7330) */
215#define FTDI_ELV_TFM100_PID 0xFB5D /* Temperartur-Feuchte Messgeraet (TFM 100) */ 215#define FTDI_ELV_TFM100_PID 0xFB5D /* Temperatur-Feuchte-Messgeraet (TFM 100) */
216#define FTDI_ELV_UDF77_PID 0xFB5E /* USB DCF Funkurh (UDF 77) */ 216#define FTDI_ELV_UDF77_PID 0xFB5E /* USB DCF Funkuhr (UDF 77) */
217#define FTDI_ELV_UIO88_PID 0xFB5F /* USB-I/O Interface (UIO 88) */ 217#define FTDI_ELV_UIO88_PID 0xFB5F /* USB-I/O Interface (UIO 88) */
218 218
219/* 219/*
@@ -320,7 +320,7 @@
320 320
321/* 321/*
322 * 4N-GALAXY.DE PIDs for CAN-USB, USB-RS232, USB-RS422, USB-RS485, 322 * 4N-GALAXY.DE PIDs for CAN-USB, USB-RS232, USB-RS422, USB-RS485,
323 * USB-TTY activ, USB-TTY passiv. Some PIDs are used by several devices 323 * USB-TTY aktiv, USB-TTY passiv. Some PIDs are used by several devices
324 * and I'm not entirely sure which are used by which. 324 * and I'm not entirely sure which are used by which.
325 */ 325 */
326#define FTDI_4N_GALAXY_DE_1_PID 0xF3C0 326#define FTDI_4N_GALAXY_DE_1_PID 0xF3C0
@@ -330,10 +330,10 @@
330 * Linx Technologies product ids 330 * Linx Technologies product ids
331 */ 331 */
332#define LINX_SDMUSBQSS_PID 0xF448 /* Linx SDM-USB-QS-S */ 332#define LINX_SDMUSBQSS_PID 0xF448 /* Linx SDM-USB-QS-S */
333#define LINX_MASTERDEVEL2_PID 0xF449 /* Linx Master Development 2.0 */ 333#define LINX_MASTERDEVEL2_PID 0xF449 /* Linx Master Development 2.0 */
334#define LINX_FUTURE_0_PID 0xF44A /* Linx future device */ 334#define LINX_FUTURE_0_PID 0xF44A /* Linx future device */
335#define LINX_FUTURE_1_PID 0xF44B /* Linx future device */ 335#define LINX_FUTURE_1_PID 0xF44B /* Linx future device */
336#define LINX_FUTURE_2_PID 0xF44C /* Linx future device */ 336#define LINX_FUTURE_2_PID 0xF44C /* Linx future device */
337 337
338/* 338/*
339 * Oceanic product ids 339 * Oceanic product ids
@@ -494,6 +494,13 @@
494#define RATOC_PRODUCT_ID_USB60F 0xb020 494#define RATOC_PRODUCT_ID_USB60F 0xb020
495 495
496/* 496/*
497 * Contec products (http://www.contec.com)
498 * Submitted by Daniel Sangorrin
499 */
500#define CONTEC_VID 0x06CE /* Vendor ID */
501#define CONTEC_COM1USBH_PID 0x8311 /* COM-1(USB)H */
502
503/*
497 * Definitions for B&B Electronics products. 504 * Definitions for B&B Electronics products.
498 */ 505 */
499#define BANDB_VID 0x0856 /* B&B Electronics Vendor ID */ 506#define BANDB_VID 0x0856 /* B&B Electronics Vendor ID */
@@ -642,7 +649,7 @@
642#define FALCOM_TWIST_PID 0x0001 /* Falcom Twist USB GPRS modem */ 649#define FALCOM_TWIST_PID 0x0001 /* Falcom Twist USB GPRS modem */
643#define FALCOM_SAMBA_PID 0x0005 /* Falcom Samba USB GPRS modem */ 650#define FALCOM_SAMBA_PID 0x0005 /* Falcom Samba USB GPRS modem */
644 651
645/* Larsen and Brusgaard AltiTrack/USBtrack */ 652/* Larsen and Brusgaard AltiTrack/USBtrack */
646#define LARSENBRUSGAARD_VID 0x0FD8 653#define LARSENBRUSGAARD_VID 0x0FD8
647#define LB_ALTITRACK_PID 0x0001 654#define LB_ALTITRACK_PID 0x0001
648 655
@@ -971,7 +978,7 @@
971#define ALTI2_N3_PID 0x6001 /* Neptune 3 */ 978#define ALTI2_N3_PID 0x6001 /* Neptune 3 */
972 979
973/* 980/*
974 * Dresden Elektronic Sensor Terminal Board 981 * Dresden Elektronik Sensor Terminal Board
975 */ 982 */
976#define DE_VID 0x1cf1 /* Vendor ID */ 983#define DE_VID 0x1cf1 /* Vendor ID */
977#define STB_PID 0x0001 /* Sensor Terminal Board */ 984#define STB_PID 0x0001 /* Sensor Terminal Board */
@@ -1002,3 +1009,11 @@
1002#define EVO_8U232AM_PID 0x02FF /* Evolution robotics RCM2 (FT232AM)*/ 1009#define EVO_8U232AM_PID 0x02FF /* Evolution robotics RCM2 (FT232AM)*/
1003#define EVO_HYBRID_PID 0x0302 /* Evolution robotics RCM4 PID (FT232BM)*/ 1010#define EVO_HYBRID_PID 0x0302 /* Evolution robotics RCM4 PID (FT232BM)*/
1004#define EVO_RCM4_PID 0x0303 /* Evolution robotics RCM4 PID */ 1011#define EVO_RCM4_PID 0x0303 /* Evolution robotics RCM4 PID */
1012
1013/*
1014 * MJS Gadgets HD Radio / XM Radio / Sirius Radio interfaces (using VID 0x0403)
1015 */
1016#define MJSG_GENERIC_PID 0x9378
1017#define MJSG_SR_RADIO_PID 0x9379
1018#define MJSG_XM_RADIO_PID 0x937A
1019#define MJSG_HD_RADIO_PID 0x937C
diff --git a/drivers/usb/serial/funsoft.c b/drivers/usb/serial/funsoft.c
index d30f736d2cc5..e21ce9ddfc63 100644
--- a/drivers/usb/serial/funsoft.c
+++ b/drivers/usb/serial/funsoft.c
@@ -18,7 +18,7 @@
18 18
19static int debug; 19static int debug;
20 20
21static struct usb_device_id id_table [] = { 21static const struct usb_device_id id_table[] = {
22 { USB_DEVICE(0x1404, 0xcddc) }, 22 { USB_DEVICE(0x1404, 0xcddc) },
23 { }, 23 { },
24}; 24};
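
This and the many identical hunks below constify the usb_device_id tables. The USB core only ever reads these arrays, so marking them const lets them live in .rodata, and MODULE_DEVICE_TABLE() is unaffected. The complete idiom, using the IDs from the hunk above:

    #include <linux/module.h>
    #include <linux/usb.h>

    static const struct usb_device_id id_table[] = {
        { USB_DEVICE(0x1404, 0xcddc) },
        { }    /* terminating entry */
    };
    MODULE_DEVICE_TABLE(usb, id_table);
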
diff --git a/drivers/usb/serial/garmin_gps.c b/drivers/usb/serial/garmin_gps.c
index 5ac900e5a50e..a42b29a695b2 100644
--- a/drivers/usb/serial/garmin_gps.c
+++ b/drivers/usb/serial/garmin_gps.c
@@ -210,7 +210,7 @@ static unsigned char const PRIVATE_REQ[]
210 210
211 211
212 212
213static struct usb_device_id id_table [] = { 213static const struct usb_device_id id_table[] = {
214 /* the same device id seems to be used by all 214 /* the same device id seems to be used by all
215 usb enabled GPS devices */ 215 usb enabled GPS devices */
216 { USB_DEVICE(GARMIN_VENDOR_ID, 3) }, 216 { USB_DEVICE(GARMIN_VENDOR_ID, 3) },
@@ -271,7 +271,6 @@ static void send_to_tty(struct usb_serial_port *port,
271 usb_serial_debug_data(debug, &port->dev, 271 usb_serial_debug_data(debug, &port->dev,
272 __func__, actual_length, data); 272 __func__, actual_length, data);
273 273
274 tty_buffer_request_room(tty, actual_length);
275 tty_insert_flip_string(tty, data, actual_length); 274 tty_insert_flip_string(tty, data, actual_length);
276 tty_flip_buffer_push(tty); 275 tty_flip_buffer_push(tty);
277 } 276 }
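
The dropped tty_buffer_request_room() call was redundant: tty_insert_flip_string() grows the flip buffer itself and returns the number of bytes actually queued. Callers that care about overflow compare that return value against the requested length, as the io_edgeport hunk further down does. A sketch:

    /* Sketch: let the flip buffer manage its own sizing. */
    copied = tty_insert_flip_string(tty, data, len);
    if (copied < len)
        dev_err(dev, "dropping data, %d bytes lost\n", len - copied);
    tty_flip_buffer_push(tty);
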
diff --git a/drivers/usb/serial/generic.c b/drivers/usb/serial/generic.c
index 83443d6306d6..89fac36684c5 100644
--- a/drivers/usb/serial/generic.c
+++ b/drivers/usb/serial/generic.c
@@ -20,6 +20,7 @@
20#include <linux/usb/serial.h> 20#include <linux/usb/serial.h>
21#include <linux/uaccess.h> 21#include <linux/uaccess.h>
22#include <linux/kfifo.h> 22#include <linux/kfifo.h>
23#include <linux/serial.h>
23 24
24static int debug; 25static int debug;
25 26
@@ -41,7 +42,7 @@ static struct usb_device_id generic_device_ids[2]; /* Initially all zeroes. */
41 42
42/* we want to look at all devices, as the vendor/product id can change 43/* we want to look at all devices, as the vendor/product id can change
43 * depending on the command line argument */ 44 * depending on the command line argument */
44static struct usb_device_id generic_serial_ids[] = { 45static const struct usb_device_id generic_serial_ids[] = {
45 {.driver_info = 42}, 46 {.driver_info = 42},
46 {} 47 {}
47}; 48};
@@ -194,7 +195,7 @@ static int usb_serial_multi_urb_write(struct tty_struct *tty,
194 if (port->urbs_in_flight > 195 if (port->urbs_in_flight >
195 port->serial->type->max_in_flight_urbs) { 196 port->serial->type->max_in_flight_urbs) {
196 spin_unlock_irqrestore(&port->lock, flags); 197 spin_unlock_irqrestore(&port->lock, flags);
197 dbg("%s - write limit hit\n", __func__); 198 dbg("%s - write limit hit", __func__);
198 return bwrite; 199 return bwrite;
199 } 200 }
200 port->tx_bytes_flight += towrite; 201 port->tx_bytes_flight += towrite;
@@ -585,7 +586,7 @@ int usb_serial_generic_resume(struct usb_serial *serial)
585 586
586 for (i = 0; i < serial->num_ports; i++) { 587 for (i = 0; i < serial->num_ports; i++) {
587 port = serial->port[i]; 588 port = serial->port[i];
588 if (!port->port.count) 589 if (!test_bit(ASYNCB_INITIALIZED, &port->port.flags))
589 continue; 590 continue;
590 591
591 if (port->read_urb) { 592 if (port->read_urb) {
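
This is what the new <linux/serial.h> include at the top of the file is for: ASYNCB_INITIALIZED lives there. The bit is set once a port is fully opened and cleared during shutdown, so testing it on resume is more reliable than sampling the raw open count, which can be nonzero while the port is still half torn down. The test, annotated:

    /* Skip ports that are not (or no longer) fully initialized;
     * the bit is set when open completes and cleared on shutdown. */
    if (!test_bit(ASYNCB_INITIALIZED, &port->port.flags))
        continue;
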
diff --git a/drivers/usb/serial/hp4x.c b/drivers/usb/serial/hp4x.c
index 431329275133..809379159b0e 100644
--- a/drivers/usb/serial/hp4x.c
+++ b/drivers/usb/serial/hp4x.c
@@ -29,7 +29,7 @@
29#define HP_VENDOR_ID 0x03f0 29#define HP_VENDOR_ID 0x03f0
30#define HP49GP_PRODUCT_ID 0x0121 30#define HP49GP_PRODUCT_ID 0x0121
31 31
32static struct usb_device_id id_table [] = { 32static const struct usb_device_id id_table[] = {
33 { USB_DEVICE(HP_VENDOR_ID, HP49GP_PRODUCT_ID) }, 33 { USB_DEVICE(HP_VENDOR_ID, HP49GP_PRODUCT_ID) },
34 { } /* Terminating entry */ 34 { } /* Terminating entry */
35}; 35};
diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c
index b97960ac92f2..3ef8df0ef888 100644
--- a/drivers/usb/serial/io_edgeport.c
+++ b/drivers/usb/serial/io_edgeport.c
@@ -364,42 +364,6 @@ static void update_edgeport_E2PROM(struct edgeport_serial *edge_serial)
364 release_firmware(fw); 364 release_firmware(fw);
365} 365}
366 366
367
368/************************************************************************
369 * *
370 * Get string descriptor from device *
371 * *
372 ************************************************************************/
373static int get_string(struct usb_device *dev, int Id, char *string, int buflen)
374{
375 struct usb_string_descriptor StringDesc;
376 struct usb_string_descriptor *pStringDesc;
377
378 dbg("%s - USB String ID = %d", __func__, Id);
379
380 if (!usb_get_descriptor(dev, USB_DT_STRING, Id,
381 &StringDesc, sizeof(StringDesc)))
382 return 0;
383
384 pStringDesc = kmalloc(StringDesc.bLength, GFP_KERNEL);
385 if (!pStringDesc)
386 return 0;
387
388 if (!usb_get_descriptor(dev, USB_DT_STRING, Id,
389 pStringDesc, StringDesc.bLength)) {
390 kfree(pStringDesc);
391 return 0;
392 }
393
394 unicode_to_ascii(string, buflen,
395 pStringDesc->wData, pStringDesc->bLength/2);
396
397 kfree(pStringDesc);
398 dbg("%s - USB String %s", __func__, string);
399 return strlen(string);
400}
401
402
403#if 0 367#if 0
404/************************************************************************ 368/************************************************************************
405 * 369 *
@@ -2007,7 +1971,7 @@ static void process_rcvd_status(struct edgeport_serial *edge_serial,
2007 return; 1971 return;
2008 1972
2009 case IOSP_EXT_STATUS_RX_CHECK_RSP: 1973 case IOSP_EXT_STATUS_RX_CHECK_RSP:
2010 dbg("%s ========== Port %u CHECK_RSP Sequence = %02x =============\n", __func__, edge_serial->rxPort, byte3); 1974 dbg("%s ========== Port %u CHECK_RSP Sequence = %02x =============", __func__, edge_serial->rxPort, byte3);
2011 /* Port->RxCheckRsp = true; */ 1975 /* Port->RxCheckRsp = true; */
2012 return; 1976 return;
2013 } 1977 }
@@ -2075,7 +2039,7 @@ static void process_rcvd_status(struct edgeport_serial *edge_serial,
2075 break; 2039 break;
2076 2040
2077 default: 2041 default:
2078 dbg("%s - Unrecognized IOSP status code %u\n", __func__, code); 2042 dbg("%s - Unrecognized IOSP status code %u", __func__, code);
2079 break; 2043 break;
2080 } 2044 }
2081 return; 2045 return;
@@ -2091,18 +2055,13 @@ static void edge_tty_recv(struct device *dev, struct tty_struct *tty,
2091{ 2055{
2092 int cnt; 2056 int cnt;
2093 2057
2094 do { 2058 cnt = tty_insert_flip_string(tty, data, length);
2095 cnt = tty_buffer_request_room(tty, length); 2059 if (cnt < length) {
2096 if (cnt < length) { 2060 dev_err(dev, "%s - dropping data, %d bytes lost\n",
2097 dev_err(dev, "%s - dropping data, %d bytes lost\n", 2061 __func__, length - cnt);
2098 __func__, length - cnt); 2062 }
2099 if (cnt == 0) 2063 data += cnt;
2100 break; 2064 length -= cnt;
2101 }
2102 tty_insert_flip_string(tty, data, cnt);
2103 data += cnt;
2104 length -= cnt;
2105 } while (length > 0);
2106 2065
2107 tty_flip_buffer_push(tty); 2066 tty_flip_buffer_push(tty);
2108} 2067}
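
Same simplification as in garmin_gps and ipaq above: since tty_insert_flip_string() requests room internally, the request/copy loop collapses to a single call and a single shortfall report. (The surviving data += cnt; length -= cnt; adjustments look like leftovers; with the loop gone they are dead stores.) The resulting receive path, condensed:

    cnt = tty_insert_flip_string(tty, data, length);
    if (cnt < length)
        dev_err(dev, "%s - dropping data, %d bytes lost\n",
                __func__, length - cnt);
    tty_flip_buffer_push(tty);
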
@@ -2530,7 +2489,7 @@ static int calc_baud_rate_divisor(int baudrate, int *divisor)
2530 2489
2531 *divisor = custom; 2490 *divisor = custom;
2532 2491
2533 dbg("%s - Baud %d = %d\n", __func__, baudrate, custom); 2492 dbg("%s - Baud %d = %d", __func__, baudrate, custom);
2534 return 0; 2493 return 0;
2535 } 2494 }
2536 2495
@@ -2915,7 +2874,7 @@ static void load_application_firmware(struct edgeport_serial *edge_serial)
2915 break; 2874 break;
2916 2875
2917 case EDGE_DOWNLOAD_FILE_NONE: 2876 case EDGE_DOWNLOAD_FILE_NONE:
2918 dbg ("No download file specified, skipping download\n"); 2877 dbg("No download file specified, skipping download");
2919 return; 2878 return;
2920 2879
2921 default: 2880 default:
@@ -2997,10 +2956,12 @@ static int edge_startup(struct usb_serial *serial)
2997 usb_set_serial_data(serial, edge_serial); 2956 usb_set_serial_data(serial, edge_serial);
2998 2957
2999 /* get the name for the device from the device */ 2958 /* get the name for the device from the device */
3000 i = get_string(dev, dev->descriptor.iManufacturer, 2959 i = usb_string(dev, dev->descriptor.iManufacturer,
3001 &edge_serial->name[0], MAX_NAME_LEN+1); 2960 &edge_serial->name[0], MAX_NAME_LEN+1);
2961 if (i < 0)
2962 i = 0;
3002 edge_serial->name[i++] = ' '; 2963 edge_serial->name[i++] = ' ';
3003 get_string(dev, dev->descriptor.iProduct, 2964 usb_string(dev, dev->descriptor.iProduct,
3004 &edge_serial->name[i], MAX_NAME_LEN+2 - i); 2965 &edge_serial->name[i], MAX_NAME_LEN+2 - i);
3005 2966
3006 dev_info(&serial->dev->dev, "%s detected\n", edge_serial->name); 2967 dev_info(&serial->dev->dev, "%s detected\n", edge_serial->name);
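
The removed get_string() was an open-coded copy of the core's usb_string(), which fetches the string descriptor and converts its UTF-16LE payload to a C string. The one behavioral difference is the failure convention: get_string() returned 0, while usb_string() returns a negative errno, hence the new clamp before the length is used as an index. Usage in a sketch:

    #include <linux/usb.h>

    /* usb_string() returns the string length, or a negative errno. */
    len = usb_string(dev, dev->descriptor.iManufacturer, name, sizeof(name));
    if (len < 0)
        len = 0;    /* treat a failed read as an empty name */
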
diff --git a/drivers/usb/serial/io_tables.h b/drivers/usb/serial/io_tables.h
index 9241d3147513..feb56a4ca799 100644
--- a/drivers/usb/serial/io_tables.h
+++ b/drivers/usb/serial/io_tables.h
@@ -14,7 +14,7 @@
14#ifndef IO_TABLES_H 14#ifndef IO_TABLES_H
15#define IO_TABLES_H 15#define IO_TABLES_H
16 16
17static struct usb_device_id edgeport_2port_id_table [] = { 17static const struct usb_device_id edgeport_2port_id_table[] = {
18 { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_2) }, 18 { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_2) },
19 { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_2I) }, 19 { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_2I) },
20 { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_421) }, 20 { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_421) },
@@ -23,7 +23,7 @@ static struct usb_device_id edgeport_2port_id_table [] = {
23 { } 23 { }
24}; 24};
25 25
26static struct usb_device_id edgeport_4port_id_table [] = { 26static const struct usb_device_id edgeport_4port_id_table[] = {
27 { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_4) }, 27 { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_4) },
28 { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_RAPIDPORT_4) }, 28 { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_RAPIDPORT_4) },
29 { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_4T) }, 29 { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_4T) },
@@ -37,7 +37,7 @@ static struct usb_device_id edgeport_4port_id_table [] = {
37 { } 37 { }
38}; 38};
39 39
40static struct usb_device_id edgeport_8port_id_table [] = { 40static const struct usb_device_id edgeport_8port_id_table[] = {
41 { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_8) }, 41 { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_8) },
42 { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_16_DUAL_CPU) }, 42 { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_16_DUAL_CPU) },
43 { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_8I) }, 43 { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_8I) },
@@ -47,7 +47,7 @@ static struct usb_device_id edgeport_8port_id_table [] = {
47 { } 47 { }
48}; 48};
49 49
50static struct usb_device_id Epic_port_id_table [] = { 50static const struct usb_device_id Epic_port_id_table[] = {
51 { USB_DEVICE(USB_VENDOR_ID_NCR, NCR_DEVICE_ID_EPIC_0202) }, 51 { USB_DEVICE(USB_VENDOR_ID_NCR, NCR_DEVICE_ID_EPIC_0202) },
52 { USB_DEVICE(USB_VENDOR_ID_NCR, NCR_DEVICE_ID_EPIC_0203) }, 52 { USB_DEVICE(USB_VENDOR_ID_NCR, NCR_DEVICE_ID_EPIC_0203) },
53 { USB_DEVICE(USB_VENDOR_ID_NCR, NCR_DEVICE_ID_EPIC_0310) }, 53 { USB_DEVICE(USB_VENDOR_ID_NCR, NCR_DEVICE_ID_EPIC_0310) },
@@ -60,7 +60,7 @@ static struct usb_device_id Epic_port_id_table [] = {
60}; 60};
61 61
62/* Devices that this driver supports */ 62/* Devices that this driver supports */
63static struct usb_device_id id_table_combined [] = { 63static const struct usb_device_id id_table_combined[] = {
64 { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_4) }, 64 { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_4) },
65 { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_RAPIDPORT_4) }, 65 { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_RAPIDPORT_4) },
66 { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_4T) }, 66 { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_4T) },
diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c
index d4cc0f7af400..aa876f71f228 100644
--- a/drivers/usb/serial/io_ti.c
+++ b/drivers/usb/serial/io_ti.c
@@ -134,7 +134,7 @@ struct edgeport_serial {
134 134
135 135
136/* Devices that this driver supports */ 136/* Devices that this driver supports */
137static struct usb_device_id edgeport_1port_id_table [] = { 137static const struct usb_device_id edgeport_1port_id_table[] = {
138 { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_1) }, 138 { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_1) },
139 { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_TI3410_EDGEPORT_1) }, 139 { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_TI3410_EDGEPORT_1) },
140 { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_TI3410_EDGEPORT_1I) }, 140 { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_TI3410_EDGEPORT_1I) },
@@ -154,7 +154,7 @@ static struct usb_device_id edgeport_1port_id_table [] = {
154 { } 154 { }
155}; 155};
156 156
157static struct usb_device_id edgeport_2port_id_table [] = { 157static const struct usb_device_id edgeport_2port_id_table[] = {
158 { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_2) }, 158 { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_2) },
159 { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_2C) }, 159 { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_2C) },
160 { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_2I) }, 160 { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_2I) },
@@ -177,7 +177,7 @@ static struct usb_device_id edgeport_2port_id_table [] = {
177}; 177};
178 178
179/* Devices that this driver supports */ 179/* Devices that this driver supports */
180static struct usb_device_id id_table_combined [] = { 180static const struct usb_device_id id_table_combined[] = {
181 { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_1) }, 181 { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_1) },
182 { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_TI3410_EDGEPORT_1) }, 182 { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_TI3410_EDGEPORT_1) },
183 { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_TI3410_EDGEPORT_1I) }, 183 { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_TI3410_EDGEPORT_1I) },
@@ -413,11 +413,18 @@ static int write_boot_mem(struct edgeport_serial *serial,
413{ 413{
414 int status = 0; 414 int status = 0;
415 int i; 415 int i;
416 __u8 temp; 416 u8 *temp;
417 417
418 /* Must do a read before write */ 418 /* Must do a read before write */
419 if (!serial->TiReadI2C) { 419 if (!serial->TiReadI2C) {
420 status = read_boot_mem(serial, 0, 1, &temp); 420 temp = kmalloc(1, GFP_KERNEL);
421 if (!temp) {
422 dev_err(&serial->serial->dev->dev,
423 "%s - out of memory\n", __func__);
424 return -ENOMEM;
425 }
426 status = read_boot_mem(serial, 0, 1, temp);
427 kfree(temp);
421 if (status) 428 if (status)
422 return status; 429 return status;
423 } 430 }
@@ -935,37 +942,47 @@ static int build_i2c_fw_hdr(__u8 *header, struct device *dev)
935static int i2c_type_bootmode(struct edgeport_serial *serial) 942static int i2c_type_bootmode(struct edgeport_serial *serial)
936{ 943{
937 int status; 944 int status;
938 __u8 data; 945 u8 *data;
946
947 data = kmalloc(1, GFP_KERNEL);
948 if (!data) {
949 dev_err(&serial->serial->dev->dev,
950 "%s - out of memory\n", __func__);
951 return -ENOMEM;
952 }
939 953
940 /* Try to read type 2 */ 954 /* Try to read type 2 */
941 status = ti_vread_sync(serial->serial->dev, UMPC_MEMORY_READ, 955 status = ti_vread_sync(serial->serial->dev, UMPC_MEMORY_READ,
942 DTK_ADDR_SPACE_I2C_TYPE_II, 0, &data, 0x01); 956 DTK_ADDR_SPACE_I2C_TYPE_II, 0, data, 0x01);
943 if (status) 957 if (status)
944 dbg("%s - read 2 status error = %d", __func__, status); 958 dbg("%s - read 2 status error = %d", __func__, status);
945 else 959 else
946 dbg("%s - read 2 data = 0x%x", __func__, data); 960 dbg("%s - read 2 data = 0x%x", __func__, *data);
947 if ((!status) && (data == UMP5152 || data == UMP3410)) { 961 if ((!status) && (*data == UMP5152 || *data == UMP3410)) {
948 dbg("%s - ROM_TYPE_II", __func__); 962 dbg("%s - ROM_TYPE_II", __func__);
949 serial->TI_I2C_Type = DTK_ADDR_SPACE_I2C_TYPE_II; 963 serial->TI_I2C_Type = DTK_ADDR_SPACE_I2C_TYPE_II;
950 return 0; 964 goto out;
951 } 965 }
952 966
953 /* Try to read type 3 */ 967 /* Try to read type 3 */
954 status = ti_vread_sync(serial->serial->dev, UMPC_MEMORY_READ, 968 status = ti_vread_sync(serial->serial->dev, UMPC_MEMORY_READ,
955 DTK_ADDR_SPACE_I2C_TYPE_III, 0, &data, 0x01); 969 DTK_ADDR_SPACE_I2C_TYPE_III, 0, data, 0x01);
956 if (status) 970 if (status)
957 dbg("%s - read 3 status error = %d", __func__, status); 971 dbg("%s - read 3 status error = %d", __func__, status);
958 else 972 else
959 dbg("%s - read 2 data = 0x%x", __func__, data); 973 dbg("%s - read 2 data = 0x%x", __func__, *data);
960 if ((!status) && (data == UMP5152 || data == UMP3410)) { 974 if ((!status) && (*data == UMP5152 || *data == UMP3410)) {
961 dbg("%s - ROM_TYPE_III", __func__); 975 dbg("%s - ROM_TYPE_III", __func__);
962 serial->TI_I2C_Type = DTK_ADDR_SPACE_I2C_TYPE_III; 976 serial->TI_I2C_Type = DTK_ADDR_SPACE_I2C_TYPE_III;
963 return 0; 977 goto out;
964 } 978 }
965 979
966 dbg("%s - Unknown", __func__); 980 dbg("%s - Unknown", __func__);
967 serial->TI_I2C_Type = DTK_ADDR_SPACE_I2C_TYPE_II; 981 serial->TI_I2C_Type = DTK_ADDR_SPACE_I2C_TYPE_II;
968 return -ENODEV; 982 status = -ENODEV;
983out:
984 kfree(data);
985 return status;
969} 986}
970 987
971static int bulk_xfer(struct usb_serial *serial, void *buffer, 988static int bulk_xfer(struct usb_serial *serial, void *buffer,
@@ -1113,7 +1130,7 @@ static int download_fw(struct edgeport_serial *serial)
1113 I2C_DESC_TYPE_FIRMWARE_BASIC, rom_desc); 1130 I2C_DESC_TYPE_FIRMWARE_BASIC, rom_desc);
1114 if (start_address != 0) { 1131 if (start_address != 0) {
1115 struct ti_i2c_firmware_rec *firmware_version; 1132 struct ti_i2c_firmware_rec *firmware_version;
1116 __u8 record; 1133 u8 *record;
1117 1134
1118 dbg("%s - Found Type FIRMWARE (Type 2) record", 1135 dbg("%s - Found Type FIRMWARE (Type 2) record",
1119 __func__); 1136 __func__);
@@ -1165,6 +1182,15 @@ static int download_fw(struct edgeport_serial *serial)
1165 OperationalMajorVersion, 1182 OperationalMajorVersion,
1166 OperationalMinorVersion); 1183 OperationalMinorVersion);
1167 1184
1185 record = kmalloc(1, GFP_KERNEL);
1186 if (!record) {
1187 dev_err(dev, "%s - out of memory.\n",
1188 __func__);
1189 kfree(firmware_version);
1190 kfree(rom_desc);
1191 kfree(ti_manuf_desc);
1192 return -ENOMEM;
1193 }
1168 /* In order to update the I2C firmware we must 1194 /* In order to update the I2C firmware we must
1169 * change the type 2 record to type 0xF2. This 1195 * change the type 2 record to type 0xF2. This
1170 * will force the UMP to come up in Boot Mode. 1196 * will force the UMP to come up in Boot Mode.
@@ -1177,13 +1203,14 @@ static int download_fw(struct edgeport_serial *serial)
1177 * firmware will update the record type from 1203 * firmware will update the record type from
1178 * 0xf2 to 0x02. 1204 * 0xf2 to 0x02.
1179 */ 1205 */
1180 record = I2C_DESC_TYPE_FIRMWARE_BLANK; 1206 *record = I2C_DESC_TYPE_FIRMWARE_BLANK;
1181 1207
1182 /* Change the I2C Firmware record type to 1208 /* Change the I2C Firmware record type to
1183 0xf2 to trigger an update */ 1209 0xf2 to trigger an update */
1184 status = write_rom(serial, start_address, 1210 status = write_rom(serial, start_address,
1185 sizeof(record), &record); 1211 sizeof(*record), record);
1186 if (status) { 1212 if (status) {
1213 kfree(record);
1187 kfree(firmware_version); 1214 kfree(firmware_version);
1188 kfree(rom_desc); 1215 kfree(rom_desc);
1189 kfree(ti_manuf_desc); 1216 kfree(ti_manuf_desc);
@@ -1196,19 +1223,21 @@ static int download_fw(struct edgeport_serial *serial)
1196 */ 1223 */
1197 status = read_rom(serial, 1224 status = read_rom(serial,
1198 start_address, 1225 start_address,
1199 sizeof(record), 1226 sizeof(*record),
1200 &record); 1227 record);
1201 if (status) { 1228 if (status) {
1229 kfree(record);
1202 kfree(firmware_version); 1230 kfree(firmware_version);
1203 kfree(rom_desc); 1231 kfree(rom_desc);
1204 kfree(ti_manuf_desc); 1232 kfree(ti_manuf_desc);
1205 return status; 1233 return status;
1206 } 1234 }
1207 1235
1208 if (record != I2C_DESC_TYPE_FIRMWARE_BLANK) { 1236 if (*record != I2C_DESC_TYPE_FIRMWARE_BLANK) {
1209 dev_err(dev, 1237 dev_err(dev,
1210 "%s - error resetting device\n", 1238 "%s - error resetting device\n",
1211 __func__); 1239 __func__);
1240 kfree(record);
1212 kfree(firmware_version); 1241 kfree(firmware_version);
1213 kfree(rom_desc); 1242 kfree(rom_desc);
1214 kfree(ti_manuf_desc); 1243 kfree(ti_manuf_desc);
@@ -1226,6 +1255,7 @@ static int download_fw(struct edgeport_serial *serial)
1226 __func__, status); 1255 __func__, status);
1227 1256
1228 /* return an error on purpose. */ 1257 /* return an error on purpose. */
1258 kfree(record);
1229 kfree(firmware_version); 1259 kfree(firmware_version);
1230 kfree(rom_desc); 1260 kfree(rom_desc);
1231 kfree(ti_manuf_desc); 1261 kfree(ti_manuf_desc);
@@ -1686,7 +1716,7 @@ static void edge_interrupt_callback(struct urb *urb)
1686 case TIUMP_INTERRUPT_CODE_MSR: /* MSR */ 1716 case TIUMP_INTERRUPT_CODE_MSR: /* MSR */
1687 /* Copy MSR from UMP */ 1717 /* Copy MSR from UMP */
1688 msr = data[1]; 1718 msr = data[1];
1689 dbg("%s - ===== Port %u MSR Status = %02x ======\n", 1719 dbg("%s - ===== Port %u MSR Status = %02x ======",
1690 __func__, port_number, msr); 1720 __func__, port_number, msr);
1691 handle_new_msr(edge_port, msr); 1721 handle_new_msr(edge_port, msr);
1692 break; 1722 break;
@@ -1790,7 +1820,6 @@ static void edge_tty_recv(struct device *dev, struct tty_struct *tty,
1790{ 1820{
1791 int queued; 1821 int queued;
1792 1822
1793 tty_buffer_request_room(tty, length);
1794 queued = tty_insert_flip_string(tty, data, length); 1823 queued = tty_insert_flip_string(tty, data, length);
1795 if (queued < length) 1824 if (queued < length)
1796 dev_err(dev, "%s - dropping data, %d bytes lost\n", 1825 dev_err(dev, "%s - dropping data, %d bytes lost\n",
diff --git a/drivers/usb/serial/ipaq.c b/drivers/usb/serial/ipaq.c
index d6231c38813e..3fea9298eb15 100644
--- a/drivers/usb/serial/ipaq.c
+++ b/drivers/usb/serial/ipaq.c
@@ -747,7 +747,6 @@ static void ipaq_read_bulk_callback(struct urb *urb)
747 747
748 tty = tty_port_tty_get(&port->port); 748 tty = tty_port_tty_get(&port->port);
749 if (tty && urb->actual_length) { 749 if (tty && urb->actual_length) {
750 tty_buffer_request_room(tty, urb->actual_length);
751 tty_insert_flip_string(tty, data, urb->actual_length); 750 tty_insert_flip_string(tty, data, urb->actual_length);
752 tty_flip_buffer_push(tty); 751 tty_flip_buffer_push(tty);
753 bytes_in += urb->actual_length; 752 bytes_in += urb->actual_length;
diff --git a/drivers/usb/serial/ipw.c b/drivers/usb/serial/ipw.c
index 727d323f092a..e1d07840cee6 100644
--- a/drivers/usb/serial/ipw.c
+++ b/drivers/usb/serial/ipw.c
@@ -134,7 +134,7 @@ enum {
134 134
135#define IPW_WANTS_TO_SEND 0x30 135#define IPW_WANTS_TO_SEND 0x30
136 136
137static struct usb_device_id usb_ipw_ids[] = { 137static const struct usb_device_id usb_ipw_ids[] = {
138 { USB_DEVICE(IPW_VID, IPW_PID) }, 138 { USB_DEVICE(IPW_VID, IPW_PID) },
139 { }, 139 { },
140}; 140};
@@ -172,7 +172,6 @@ static void ipw_read_bulk_callback(struct urb *urb)
172 172
173 tty = tty_port_tty_get(&port->port); 173 tty = tty_port_tty_get(&port->port);
174 if (tty && urb->actual_length) { 174 if (tty && urb->actual_length) {
175 tty_buffer_request_room(tty, urb->actual_length);
176 tty_insert_flip_string(tty, data, urb->actual_length); 175 tty_insert_flip_string(tty, data, urb->actual_length);
177 tty_flip_buffer_push(tty); 176 tty_flip_buffer_push(tty);
178 } 177 }
diff --git a/drivers/usb/serial/ir-usb.c b/drivers/usb/serial/ir-usb.c
index 95d8d26b9a44..4a0f51974232 100644
--- a/drivers/usb/serial/ir-usb.c
+++ b/drivers/usb/serial/ir-usb.c
@@ -100,7 +100,7 @@ static u8 ir_baud;
100static u8 ir_xbof; 100static u8 ir_xbof;
101static u8 ir_add_bof; 101static u8 ir_add_bof;
102 102
103static struct usb_device_id ir_id_table[] = { 103static const struct usb_device_id ir_id_table[] = {
104 { USB_DEVICE(0x050f, 0x0180) }, /* KC Technology, KC-180 */ 104 { USB_DEVICE(0x050f, 0x0180) }, /* KC Technology, KC-180 */
105 { USB_DEVICE(0x08e9, 0x0100) }, /* XTNDAccess */ 105 { USB_DEVICE(0x08e9, 0x0100) }, /* XTNDAccess */
106 { USB_DEVICE(0x09c4, 0x0011) }, /* ACTiSys ACT-IR2000U */ 106 { USB_DEVICE(0x09c4, 0x0011) }, /* ACTiSys ACT-IR2000U */
@@ -445,11 +445,6 @@ static void ir_read_bulk_callback(struct urb *urb)
445 445
446 dbg("%s - port %d", __func__, port->number); 446 dbg("%s - port %d", __func__, port->number);
447 447
448 if (!port->port.count) {
449 dbg("%s - port closed.", __func__);
450 return;
451 }
452
453 switch (status) { 448 switch (status) {
454 case 0: /* Successful */ 449 case 0: /* Successful */
455 /* 450 /*
@@ -462,10 +457,8 @@ static void ir_read_bulk_callback(struct urb *urb)
462 usb_serial_debug_data(debug, &port->dev, __func__, 457 usb_serial_debug_data(debug, &port->dev, __func__,
463 urb->actual_length, data); 458 urb->actual_length, data);
464 tty = tty_port_tty_get(&port->port); 459 tty = tty_port_tty_get(&port->port);
465 if (tty_buffer_request_room(tty, urb->actual_length - 1)) { 460 tty_insert_flip_string(tty, data+1, urb->actual_length - 1);
466 tty_insert_flip_string(tty, data+1, urb->actual_length - 1); 461 tty_flip_buffer_push(tty);
467 tty_flip_buffer_push(tty);
468 }
469 tty_kref_put(tty); 462 tty_kref_put(tty);
470 463
471 /* 464 /*
diff --git a/drivers/usb/serial/iuu_phoenix.c b/drivers/usb/serial/iuu_phoenix.c
index e6e02b178d2b..43f13cf2f016 100644
--- a/drivers/usb/serial/iuu_phoenix.c
+++ b/drivers/usb/serial/iuu_phoenix.c
@@ -43,7 +43,7 @@ static int debug;
43#define DRIVER_VERSION "v0.11" 43#define DRIVER_VERSION "v0.11"
44#define DRIVER_DESC "Infinity USB Unlimited Phoenix driver" 44#define DRIVER_DESC "Infinity USB Unlimited Phoenix driver"
45 45
46static struct usb_device_id id_table[] = { 46static const struct usb_device_id id_table[] = {
47 {USB_DEVICE(IUU_USB_VENDOR_ID, IUU_USB_PRODUCT_ID)}, 47 {USB_DEVICE(IUU_USB_VENDOR_ID, IUU_USB_PRODUCT_ID)},
48 {} /* Terminating entry */ 48 {} /* Terminating entry */
49}; 49};
diff --git a/drivers/usb/serial/keyspan.c b/drivers/usb/serial/keyspan.c
index f8c4b07033ff..297163c3c610 100644
--- a/drivers/usb/serial/keyspan.c
+++ b/drivers/usb/serial/keyspan.c
@@ -464,13 +464,9 @@ static void usa26_indat_callback(struct urb *urb)
464 464
465 /* Resubmit urb so we continue receiving */ 465 /* Resubmit urb so we continue receiving */
466 urb->dev = port->serial->dev; 466 urb->dev = port->serial->dev;
467 if (port->port.count) { 467 err = usb_submit_urb(urb, GFP_ATOMIC);
468 err = usb_submit_urb(urb, GFP_ATOMIC); 468 if (err != 0)
469 if (err != 0) 469 dbg("%s - resubmit read urb failed. (%d)", __func__, err);
470 dbg("%s - resubmit read urb failed. (%d)",
471 __func__, err);
472 }
473 return;
474} 470}
475 471
476/* Outdat handling is common for all devices */ 472/* Outdat handling is common for all devices */
@@ -483,8 +479,7 @@ static void usa2x_outdat_callback(struct urb *urb)
483 p_priv = usb_get_serial_port_data(port); 479 p_priv = usb_get_serial_port_data(port);
484 dbg("%s - urb %d", __func__, urb == p_priv->out_urbs[1]); 480 dbg("%s - urb %d", __func__, urb == p_priv->out_urbs[1]);
485 481
486 if (port->port.count) 482 usb_serial_port_softint(port);
487 usb_serial_port_softint(port);
488} 483}
489 484
490static void usa26_inack_callback(struct urb *urb) 485static void usa26_inack_callback(struct urb *urb)
@@ -615,12 +610,10 @@ static void usa28_indat_callback(struct urb *urb)
615 610
616 /* Resubmit urb so we continue receiving */ 611 /* Resubmit urb so we continue receiving */
617 urb->dev = port->serial->dev; 612 urb->dev = port->serial->dev;
618 if (port->port.count) { 613 err = usb_submit_urb(urb, GFP_ATOMIC);
619 err = usb_submit_urb(urb, GFP_ATOMIC); 614 if (err != 0)
620 if (err != 0) 615 dbg("%s - resubmit read urb failed. (%d)",
621 dbg("%s - resubmit read urb failed. (%d)", 616 __func__, err);
622 __func__, err);
623 }
624 p_priv->in_flip ^= 1; 617 p_priv->in_flip ^= 1;
625 618
626 urb = p_priv->in_urbs[p_priv->in_flip]; 619 urb = p_priv->in_urbs[p_priv->in_flip];
@@ -856,12 +849,9 @@ static void usa49_indat_callback(struct urb *urb)
856 849
857 /* Resubmit urb so we continue receiving */ 850 /* Resubmit urb so we continue receiving */
858 urb->dev = port->serial->dev; 851 urb->dev = port->serial->dev;
859 if (port->port.count) { 852 err = usb_submit_urb(urb, GFP_ATOMIC);
860 err = usb_submit_urb(urb, GFP_ATOMIC); 853 if (err != 0)
861 if (err != 0) 854 dbg("%s - resubmit read urb failed. (%d)", __func__, err);
862 dbg("%s - resubmit read urb failed. (%d)",
863 __func__, err);
864 }
865} 855}
866 856
867static void usa49wg_indat_callback(struct urb *urb) 857static void usa49wg_indat_callback(struct urb *urb)
@@ -904,11 +894,7 @@ static void usa49wg_indat_callback(struct urb *urb)
904 /* no error on any byte */ 894 /* no error on any byte */
905 i++; 895 i++;
906 for (x = 1; x < len ; ++x) 896 for (x = 1; x < len ; ++x)
907 if (port->port.count) 897 tty_insert_flip_char(tty, data[i++], 0);
908 tty_insert_flip_char(tty,
909 data[i++], 0);
910 else
911 i++;
912 } else { 898 } else {
913 /* 899 /*
914 * some bytes had errors, every byte has status 900 * some bytes had errors, every byte has status
@@ -922,14 +908,12 @@ static void usa49wg_indat_callback(struct urb *urb)
922 if (stat & RXERROR_PARITY) 908 if (stat & RXERROR_PARITY)
923 flag |= TTY_PARITY; 909 flag |= TTY_PARITY;
924 /* XXX should handle break (0x10) */ 910 /* XXX should handle break (0x10) */
925 if (port->port.count) 911 tty_insert_flip_char(tty,
926 tty_insert_flip_char(tty,
927 data[i+1], flag); 912 data[i+1], flag);
928 i += 2; 913 i += 2;
929 } 914 }
930 } 915 }
931 if (port->port.count) 916 tty_flip_buffer_push(tty);
932 tty_flip_buffer_push(tty);
933 tty_kref_put(tty); 917 tty_kref_put(tty);
934 } 918 }
935 } 919 }
@@ -1013,13 +997,9 @@ static void usa90_indat_callback(struct urb *urb)
1013 997
1014 /* Resubmit urb so we continue receiving */ 998 /* Resubmit urb so we continue receiving */
1015 urb->dev = port->serial->dev; 999 urb->dev = port->serial->dev;
1016 if (port->port.count) { 1000 err = usb_submit_urb(urb, GFP_ATOMIC);
1017 err = usb_submit_urb(urb, GFP_ATOMIC); 1001 if (err != 0)
1018 if (err != 0) 1002 dbg("%s - resubmit read urb failed. (%d)", __func__, err);
1019 dbg("%s - resubmit read urb failed. (%d)",
1020 __func__, err);
1021 }
1022 return;
1023} 1003}
1024 1004
1025 1005
@@ -2418,8 +2398,7 @@ static int keyspan_usa90_send_setup(struct usb_serial *serial,
2418 msg.portEnabled = 0; 2398 msg.portEnabled = 0;
2419 /* Sending intermediate configs */ 2399 /* Sending intermediate configs */
2420 else { 2400 else {
2421 if (port->port.count) 2401 msg.portEnabled = 1;
2422 msg.portEnabled = 1;
2423 msg.txBreak = (p_priv->break_on); 2402 msg.txBreak = (p_priv->break_on);
2424 } 2403 }
2425 2404
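
All the if (port->port.count) guards in keyspan's completion handlers disappear for the same reason: sampling the raw open count from interrupt context was never a reliable liveness test, and it is no longer needed because the core kills outstanding URBs before tearing a port down, while TTY access goes through counted references. The safe pattern for pushing data from a callback:

    /* Take a counted tty reference instead of peeking at
     * port->port.count from a URB completion handler. */
    struct tty_struct *tty = tty_port_tty_get(&port->port);
    if (tty) {
        tty_insert_flip_string(tty, data, len);
        tty_flip_buffer_push(tty);
        tty_kref_put(tty);
    }
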
diff --git a/drivers/usb/serial/keyspan.h b/drivers/usb/serial/keyspan.h
index 30771e5b3973..bf3297ddd186 100644
--- a/drivers/usb/serial/keyspan.h
+++ b/drivers/usb/serial/keyspan.h
@@ -456,7 +456,7 @@ static const struct keyspan_device_details *keyspan_devices[] = {
456 NULL, 456 NULL,
457}; 457};
458 458
459static struct usb_device_id keyspan_ids_combined[] = { 459static const struct usb_device_id keyspan_ids_combined[] = {
460 { USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa18x_pre_product_id) }, 460 { USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa18x_pre_product_id) },
461 { USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa19_pre_product_id) }, 461 { USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa19_pre_product_id) },
462 { USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa19w_pre_product_id) }, 462 { USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa19w_pre_product_id) },
@@ -497,7 +497,7 @@ static struct usb_driver keyspan_driver = {
497}; 497};
498 498
499/* usb_device_id table for the pre-firmware download keyspan devices */ 499/* usb_device_id table for the pre-firmware download keyspan devices */
500static struct usb_device_id keyspan_pre_ids[] = { 500static const struct usb_device_id keyspan_pre_ids[] = {
501 { USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa18x_pre_product_id) }, 501 { USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa18x_pre_product_id) },
502 { USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa19_pre_product_id) }, 502 { USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa19_pre_product_id) },
503 { USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa19qi_pre_product_id) }, 503 { USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa19qi_pre_product_id) },
@@ -513,7 +513,7 @@ static struct usb_device_id keyspan_pre_ids[] = {
513 { } /* Terminating entry */ 513 { } /* Terminating entry */
514}; 514};
515 515
516static struct usb_device_id keyspan_1port_ids[] = { 516static const struct usb_device_id keyspan_1port_ids[] = {
517 { USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa18x_product_id) }, 517 { USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa18x_product_id) },
518 { USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa19_product_id) }, 518 { USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa19_product_id) },
519 { USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa19qi_product_id) }, 519 { USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa19qi_product_id) },
@@ -524,7 +524,7 @@ static struct usb_device_id keyspan_1port_ids[] = {
524 { } /* Terminating entry */ 524 { } /* Terminating entry */
525}; 525};
526 526
527static struct usb_device_id keyspan_2port_ids[] = { 527static const struct usb_device_id keyspan_2port_ids[] = {
528 { USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa28_product_id) }, 528 { USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa28_product_id) },
529 { USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa28x_product_id) }, 529 { USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa28x_product_id) },
530 { USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa28xa_product_id) }, 530 { USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa28xa_product_id) },
@@ -532,7 +532,7 @@ static struct usb_device_id keyspan_2port_ids[] = {
532 { } /* Terminating entry */ 532 { } /* Terminating entry */
533}; 533};
534 534
535static struct usb_device_id keyspan_4port_ids[] = { 535static const struct usb_device_id keyspan_4port_ids[] = {
536 { USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa49w_product_id) }, 536 { USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa49w_product_id) },
537 { USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa49wlc_product_id)}, 537 { USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa49wlc_product_id)},
538 { USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa49wg_product_id)}, 538 { USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa49wg_product_id)},
diff --git a/drivers/usb/serial/keyspan_pda.c b/drivers/usb/serial/keyspan_pda.c
index 1296a097f5c3..185fe9a7d4e0 100644
--- a/drivers/usb/serial/keyspan_pda.c
+++ b/drivers/usb/serial/keyspan_pda.c
@@ -125,7 +125,7 @@ struct keyspan_pda_private {
 #define ENTREGRA_VENDOR_ID		0x1645
 #define ENTREGRA_FAKE_ID		0x8093
 
-static struct usb_device_id id_table_combined [] = {
+static const struct usb_device_id id_table_combined[] = {
 #ifdef KEYSPAN
 	{ USB_DEVICE(KEYSPAN_VENDOR_ID, KEYSPAN_PDA_FAKE_ID) },
 #endif
@@ -147,20 +147,20 @@ static struct usb_driver keyspan_pda_driver = {
 	.no_dynamic_id =	1,
 };
 
-static struct usb_device_id id_table_std [] = {
+static const struct usb_device_id id_table_std[] = {
 	{ USB_DEVICE(KEYSPAN_VENDOR_ID, KEYSPAN_PDA_ID) },
 	{ }			/* Terminating entry */
 };
 
 #ifdef KEYSPAN
-static struct usb_device_id id_table_fake [] = {
+static const struct usb_device_id id_table_fake[] = {
 	{ USB_DEVICE(KEYSPAN_VENDOR_ID, KEYSPAN_PDA_FAKE_ID) },
 	{ }			/* Terminating entry */
 };
 #endif
 
 #ifdef XIRCOM
-static struct usb_device_id id_table_fake_xircom [] = {
+static const struct usb_device_id id_table_fake_xircom[] = {
 	{ USB_DEVICE(XIRCOM_VENDOR_ID, XIRCOM_FAKE_ID) },
 	{ USB_DEVICE(ENTREGRA_VENDOR_ID, ENTREGRA_FAKE_ID) },
 	{ }
@@ -429,13 +429,20 @@ static int keyspan_pda_get_modem_info(struct usb_serial *serial,
 				      unsigned char *value)
 {
 	int rc;
-	unsigned char data;
+	u8 *data;
+
+	data = kmalloc(1, GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+
 	rc = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
 			     3, /* get pins */
 			     USB_TYPE_VENDOR|USB_RECIP_INTERFACE|USB_DIR_IN,
-			     0, 0, &data, 1, 2000);
+			     0, 0, data, 1, 2000);
 	if (rc >= 0)
-		*value = data;
+		*value = *data;
+
+	kfree(data);
 	return rc;
 }
 
@@ -543,7 +550,14 @@ static int keyspan_pda_write(struct tty_struct *tty,
 	   device how much room it really has.  This is done only on
 	   scheduler time, since usb_control_msg() sleeps. */
 	if (count > priv->tx_room && !in_interrupt()) {
-		unsigned char room;
+		u8 *room;
+
+		room = kmalloc(1, GFP_KERNEL);
+		if (!room) {
+			rc = -ENOMEM;
+			goto exit;
+		}
+
 		rc = usb_control_msg(serial->dev,
 				     usb_rcvctrlpipe(serial->dev, 0),
 				     6, /* write_room */
@@ -551,9 +565,14 @@ static int keyspan_pda_write(struct tty_struct *tty,
 				     | USB_DIR_IN,
 				     0, /* value: 0 means "remaining room" */
 				     0, /* index */
-				     &room,
+				     room,
 				     1,
 				     2000);
+		if (rc > 0) {
+			dbg(" roomquery says %d", *room);
+			priv->tx_room = *room;
+		}
+		kfree(room);
 		if (rc < 0) {
 			dbg(" roomquery failed");
 			goto exit;
@@ -563,8 +582,6 @@ static int keyspan_pda_write(struct tty_struct *tty,
 			rc = -EIO; /* device didn't return any data */
 			goto exit;
 		}
-		dbg(" roomquery says %d", room);
-		priv->tx_room = room;
 	}
 	if (count > priv->tx_room) {
 		/* we're about to completely fill the Tx buffer, so
@@ -684,18 +701,22 @@ static int keyspan_pda_open(struct tty_struct *tty,
 			    struct usb_serial_port *port)
 {
 	struct usb_serial *serial = port->serial;
-	unsigned char room;
+	u8 *room;
 	int rc = 0;
 	struct keyspan_pda_private *priv;
 
 	/* find out how much room is in the Tx ring */
+	room = kmalloc(1, GFP_KERNEL);
+	if (!room)
+		return -ENOMEM;
+
 	rc = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
 			     6, /* write_room */
 			     USB_TYPE_VENDOR | USB_RECIP_INTERFACE
 			     | USB_DIR_IN,
 			     0, /* value */
 			     0, /* index */
-			     &room,
+			     room,
 			     1,
 			     2000);
 	if (rc < 0) {
@@ -708,8 +729,8 @@ static int keyspan_pda_open(struct tty_struct *tty,
 		goto error;
 	}
 	priv = usb_get_serial_port_data(port);
-	priv->tx_room = room;
-	priv->tx_throttled = room ? 0 : 1;
+	priv->tx_room = *room;
+	priv->tx_throttled = *room ? 0 : 1;
 
 	/*Start reading from the device*/
 	port->interrupt_in_urb->dev = serial->dev;
@@ -718,8 +739,8 @@ static int keyspan_pda_open(struct tty_struct *tty,
 		dbg("%s - usb_submit_urb(read int) failed", __func__);
 		goto error;
 	}
-
 error:
+	kfree(room);
 	return rc;
 }
 static void keyspan_pda_close(struct usb_serial_port *port)
@@ -789,6 +810,13 @@ static int keyspan_pda_fake_startup(struct usb_serial *serial)
 	return 1;
 }
 
+#ifdef KEYSPAN
+MODULE_FIRMWARE("keyspan_pda/keyspan_pda.fw");
+#endif
+#ifdef XIRCOM
+MODULE_FIRMWARE("keyspan_pda/xircom_pgs.fw");
+#endif
+
 static int keyspan_pda_startup(struct usb_serial *serial)
 {
 
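
Editor's note: the keyspan_pda hunks above replace on-stack transfer buffers with heap allocations, because buffers handed to usb_control_msg() may be used for DMA and the kernel stack is not guaranteed to be DMA-safe. A condensed sketch of the pattern under that assumption; example_read_byte() is a hypothetical helper, though the request number 3 and the 2000 ms timeout mirror the driver's "get pins" query:

#include <linux/slab.h>
#include <linux/usb.h>

static int example_read_byte(struct usb_device *udev, u8 *value)
{
	u8 *buf;
	int rc;

	buf = kmalloc(1, GFP_KERNEL);	/* heap memory is DMA-safe */
	if (!buf)
		return -ENOMEM;

	rc = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
			     3,		/* vendor "get pins" request */
			     USB_TYPE_VENDOR | USB_RECIP_INTERFACE |
			     USB_DIR_IN,
			     0, 0, buf, 1, 2000);
	if (rc >= 0)
		*value = *buf;		/* copy out before freeing */

	kfree(buf);
	return rc;
}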
diff --git a/drivers/usb/serial/kl5kusb105.c b/drivers/usb/serial/kl5kusb105.c
index 3a7873806f46..8eef91ba4b1c 100644
--- a/drivers/usb/serial/kl5kusb105.c
+++ b/drivers/usb/serial/kl5kusb105.c
@@ -94,7 +94,7 @@ static int klsi_105_tiocmset(struct tty_struct *tty, struct file *file,
 /*
  * All of the device info needed for the KLSI converters.
  */
-static struct usb_device_id id_table [] = {
+static const struct usb_device_id id_table[] = {
 	{ USB_DEVICE(PALMCONNECT_VID, PALMCONNECT_PID) },
 	{ USB_DEVICE(KLSI_VID, KLSI_KL5KUSB105D_PID) },
 	{ }		/* Terminating entry */
@@ -212,10 +212,19 @@ static int klsi_105_get_line_state(struct usb_serial_port *port,
 				   unsigned long *line_state_p)
 {
 	int rc;
-	__u8 status_buf[KLSI_STATUSBUF_LEN] = { -1, -1};
+	u8 *status_buf;
 	__u16 status;
 
 	dev_info(&port->serial->dev->dev, "sending SIO Poll request\n");
+
+	status_buf = kmalloc(KLSI_STATUSBUF_LEN, GFP_KERNEL);
+	if (!status_buf) {
+		dev_err(&port->dev, "%s - out of memory for status buffer.\n",
+				__func__);
+		return -ENOMEM;
+	}
+	status_buf[0] = 0xff;
+	status_buf[1] = 0xff;
 	rc = usb_control_msg(port->serial->dev,
 			     usb_rcvctrlpipe(port->serial->dev, 0),
 			     KL5KUSB105A_SIO_POLL,
@@ -236,6 +245,8 @@ static int klsi_105_get_line_state(struct usb_serial_port *port,
 
 		*line_state_p = klsi_105_status2linestate(status);
 	}
+
+	kfree(status_buf);
 	return rc;
 }
 
@@ -364,7 +375,7 @@ static int klsi_105_open(struct tty_struct *tty, struct usb_serial_port *port)
 	int rc;
 	int i;
 	unsigned long line_state;
-	struct klsi_105_port_settings cfg;
+	struct klsi_105_port_settings *cfg;
 	unsigned long flags;
 
 	dbg("%s port %d", __func__, port->number);
@@ -376,12 +387,18 @@ static int klsi_105_open(struct tty_struct *tty, struct usb_serial_port *port)
 	 * Then read the modem line control and store values in
 	 * priv->line_state.
 	 */
-	cfg.pktlen   = 5;
-	cfg.baudrate = kl5kusb105a_sio_b9600;
-	cfg.databits = kl5kusb105a_dtb_8;
-	cfg.unknown1 = 0;
-	cfg.unknown2 = 1;
-	klsi_105_chg_port_settings(port, &cfg);
+	cfg = kmalloc(sizeof(*cfg), GFP_KERNEL);
+	if (!cfg) {
+		dev_err(&port->dev, "%s - out of memory for config buffer.\n",
+				__func__);
+		return -ENOMEM;
+	}
+	cfg->pktlen   = 5;
+	cfg->baudrate = kl5kusb105a_sio_b9600;
+	cfg->databits = kl5kusb105a_dtb_8;
+	cfg->unknown1 = 0;
+	cfg->unknown2 = 1;
+	klsi_105_chg_port_settings(port, cfg);
 
 	/* set up termios structure */
 	spin_lock_irqsave(&priv->lock, flags);
@@ -391,11 +408,11 @@ static int klsi_105_open(struct tty_struct *tty, struct usb_serial_port *port)
 	priv->termios.c_lflag = tty->termios->c_lflag;
 	for (i = 0; i < NCCS; i++)
 		priv->termios.c_cc[i] = tty->termios->c_cc[i];
-	priv->cfg.pktlen   = cfg.pktlen;
-	priv->cfg.baudrate = cfg.baudrate;
-	priv->cfg.databits = cfg.databits;
-	priv->cfg.unknown1 = cfg.unknown1;
-	priv->cfg.unknown2 = cfg.unknown2;
+	priv->cfg.pktlen   = cfg->pktlen;
+	priv->cfg.baudrate = cfg->baudrate;
+	priv->cfg.databits = cfg->databits;
+	priv->cfg.unknown1 = cfg->unknown1;
+	priv->cfg.unknown2 = cfg->unknown2;
 	spin_unlock_irqrestore(&priv->lock, flags);
 
 	/* READ_ON and urb submission */
@@ -441,6 +458,7 @@ static int klsi_105_open(struct tty_struct *tty, struct usb_serial_port *port)
 		retval = rc;
 
 exit:
+	kfree(cfg);
 	return retval;
 } /* klsi_105_open */
 
@@ -681,7 +699,6 @@ static void klsi_105_read_bulk_callback(struct urb *urb)
 		bytes_sent = urb->actual_length - 2;
 	}
 
-	tty_buffer_request_room(tty, bytes_sent);
 	tty_insert_flip_string(tty, data + 2, bytes_sent);
 	tty_flip_buffer_push(tty);
 	tty_kref_put(tty);
@@ -714,10 +731,17 @@ static void klsi_105_set_termios(struct tty_struct *tty,
 	unsigned int old_iflag = old_termios->c_iflag;
 	unsigned int cflag = tty->termios->c_cflag;
 	unsigned int old_cflag = old_termios->c_cflag;
-	struct klsi_105_port_settings cfg;
+	struct klsi_105_port_settings *cfg;
 	unsigned long flags;
 	speed_t baud;
 
+	cfg = kmalloc(sizeof(*cfg), GFP_KERNEL);
+	if (!cfg) {
+		dev_err(&port->dev, "%s - out of memory for config buffer.\n",
+				__func__);
+		return;
+	}
+
 	/* lock while we are modifying the settings */
 	spin_lock_irqsave(&priv->lock, flags);
 
@@ -793,11 +817,11 @@ static void klsi_105_set_termios(struct tty_struct *tty,
 	case CS5:
 		dbg("%s - 5 bits/byte not supported", __func__);
 		spin_unlock_irqrestore(&priv->lock, flags);
-		return ;
+		goto err;
 	case CS6:
 		dbg("%s - 6 bits/byte not supported", __func__);
 		spin_unlock_irqrestore(&priv->lock, flags);
-		return ;
+		goto err;
 	case CS7:
 		priv->cfg.databits = kl5kusb105a_dtb_7;
 		break;
@@ -856,11 +880,13 @@ static void klsi_105_set_termios(struct tty_struct *tty,
 #endif
 		;
 	}
-	memcpy(&cfg, &priv->cfg, sizeof(cfg));
+	memcpy(cfg, &priv->cfg, sizeof(*cfg));
 	spin_unlock_irqrestore(&priv->lock, flags);
 
 	/* now commit changes to device */
-	klsi_105_chg_port_settings(port, &cfg);
+	klsi_105_chg_port_settings(port, cfg);
+err:
+	kfree(cfg);
 } /* klsi_105_set_termios */
 
 
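
Editor's note: the klsi_105 changes pair each kmalloc'd settings buffer with a single cleanup label so every exit path frees it. A self-contained sketch of that goto-cleanup idiom under assumed types; struct example_cfg, its fields, and the values are placeholders, not the driver's definitions:

#include <linux/slab.h>
#include <linux/types.h>

struct example_cfg {
	u8 pktlen;
	u8 baudrate;
	u8 databits;
};

static int example_apply_cfg(void (*commit)(struct example_cfg *))
{
	struct example_cfg *cfg;
	int rc = 0;

	cfg = kmalloc(sizeof(*cfg), GFP_KERNEL);
	if (!cfg)
		return -ENOMEM;	/* nothing else held yet, plain return */

	cfg->pktlen   = 5;
	cfg->baudrate = 9;	/* made-up encoding for the sketch */
	cfg->databits = 8;
	if (cfg->databits != 7 && cfg->databits != 8) {
		rc = -EINVAL;
		goto err;	/* every later failure frees cfg here */
	}
	commit(cfg);
err:
	kfree(cfg);
	return rc;
}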
diff --git a/drivers/usb/serial/kobil_sct.c b/drivers/usb/serial/kobil_sct.c
index 45ea694b3ae6..c113a2a0e10c 100644
--- a/drivers/usb/serial/kobil_sct.c
+++ b/drivers/usb/serial/kobil_sct.c
@@ -86,7 +86,7 @@ static void kobil_set_termios(struct tty_struct *tty,
 			struct usb_serial_port *port, struct ktermios *old);
 static void kobil_init_termios(struct tty_struct *tty);
 
-static struct usb_device_id id_table [] = {
+static const struct usb_device_id id_table[] = {
 	{ USB_DEVICE(KOBIL_VENDOR_ID, KOBIL_ADAPTER_B_PRODUCT_ID) },
 	{ USB_DEVICE(KOBIL_VENDOR_ID, KOBIL_ADAPTER_K_PRODUCT_ID) },
 	{ USB_DEVICE(KOBIL_VENDOR_ID, KOBIL_USBTWIN_PRODUCT_ID) },
@@ -388,7 +388,6 @@ static void kobil_read_int_callback(struct urb *urb)
 	*/
 	/* END DEBUG */
 
-		tty_buffer_request_room(tty, urb->actual_length);
 		tty_insert_flip_string(tty, data, urb->actual_length);
 		tty_flip_buffer_push(tty);
 	}
@@ -624,7 +623,6 @@ static void kobil_set_termios(struct tty_struct *tty,
 	unsigned short urb_val = 0;
 	int c_cflag = tty->termios->c_cflag;
 	speed_t speed;
-	void *settings;
 
 	priv = usb_get_serial_port_data(port);
 	if (priv->device_type == KOBIL_USBTWIN_PRODUCT_ID ||
@@ -647,25 +645,13 @@ static void kobil_set_termios(struct tty_struct *tty,
 	}
 	urb_val |= (c_cflag & CSTOPB) ? SUSBCR_SPASB_2StopBits :
 		SUSBCR_SPASB_1StopBit;
-
-	settings = kzalloc(50, GFP_KERNEL);
-	if (!settings)
-		return;
-
-	sprintf(settings, "%d ", speed);
-
 	if (c_cflag & PARENB) {
-		if (c_cflag & PARODD) {
+		if (c_cflag & PARODD)
 			urb_val |= SUSBCR_SPASB_OddParity;
-			strcat(settings, "Odd Parity");
-		} else {
+		else
 			urb_val |= SUSBCR_SPASB_EvenParity;
-			strcat(settings, "Even Parity");
-		}
-	} else {
+	} else
 		urb_val |= SUSBCR_SPASB_NoParity;
-		strcat(settings, "No Parity");
-	}
 	tty->termios->c_cflag &= ~CMSPAR;
 	tty_encode_baud_rate(tty, speed, speed);
 
@@ -675,11 +661,10 @@ static void kobil_set_termios(struct tty_struct *tty,
 			  USB_TYPE_VENDOR | USB_RECIP_ENDPOINT | USB_DIR_OUT,
 			  urb_val,
 			  0,
-			  settings,
+			  NULL,
 			  0,
 			  KOBIL_TIMEOUT
 		);
-	kfree(settings);
 }
 
 static int kobil_ioctl(struct tty_struct *tty, struct file *file,
diff --git a/drivers/usb/serial/mct_u232.c b/drivers/usb/serial/mct_u232.c
index cd009cb280a5..2849f8c32015 100644
--- a/drivers/usb/serial/mct_u232.c
+++ b/drivers/usb/serial/mct_u232.c
@@ -75,6 +75,7 @@
 #include <linux/module.h>
 #include <linux/spinlock.h>
 #include <linux/uaccess.h>
+#include <asm/unaligned.h>
 #include <linux/usb.h>
 #include <linux/usb/serial.h>
 #include "mct_u232.h"
@@ -110,7 +111,7 @@ static void mct_u232_unthrottle(struct tty_struct *tty);
 /*
  * All of the device info needed for the MCT USB-RS232 converter.
  */
-static struct usb_device_id id_table_combined [] = {
+static const struct usb_device_id id_table_combined[] = {
 	{ USB_DEVICE(MCT_U232_VID, MCT_U232_PID) },
 	{ USB_DEVICE(MCT_U232_VID, MCT_U232_SITECOM_PID) },
 	{ USB_DEVICE(MCT_U232_VID, MCT_U232_DU_H3SP_PID) },
@@ -231,19 +232,22 @@ static int mct_u232_calculate_baud_rate(struct usb_serial *serial,
 static int mct_u232_set_baud_rate(struct tty_struct *tty,
 	struct usb_serial *serial, struct usb_serial_port *port, speed_t value)
 {
-	__le32 divisor;
+	unsigned int divisor;
 	int rc;
-	unsigned char zero_byte = 0;
+	unsigned char *buf;
 	unsigned char cts_enable_byte = 0;
 	speed_t speed;
 
-	divisor = cpu_to_le32(mct_u232_calculate_baud_rate(serial, value,
-						&speed));
+	buf = kmalloc(MCT_U232_MAX_SIZE, GFP_KERNEL);
+	if (buf == NULL)
+		return -ENOMEM;
 
+	divisor = mct_u232_calculate_baud_rate(serial, value, &speed);
+	put_unaligned_le32(cpu_to_le32(divisor), buf);
 	rc = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
 				MCT_U232_SET_BAUD_RATE_REQUEST,
 				MCT_U232_SET_REQUEST_TYPE,
-				0, 0, &divisor, MCT_U232_SET_BAUD_RATE_SIZE,
+				0, 0, buf, MCT_U232_SET_BAUD_RATE_SIZE,
 				WDR_TIMEOUT);
 	if (rc < 0)	/*FIXME: What value speed results */
 		dev_err(&port->dev, "Set BAUD RATE %d failed (error = %d)\n",
@@ -269,10 +273,11 @@ static int mct_u232_set_baud_rate(struct tty_struct *tty,
 	   a device which is not asserting 'CTS'.
 	*/
 
+	buf[0] = 0;
 	rc = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
 				MCT_U232_SET_UNKNOWN1_REQUEST,
 				MCT_U232_SET_REQUEST_TYPE,
-				0, 0, &zero_byte, MCT_U232_SET_UNKNOWN1_SIZE,
+				0, 0, buf, MCT_U232_SET_UNKNOWN1_SIZE,
 				WDR_TIMEOUT);
 	if (rc < 0)
 		dev_err(&port->dev, "Sending USB device request code %d "
@@ -284,30 +289,40 @@ static int mct_u232_set_baud_rate(struct tty_struct *tty,
 
 	dbg("set_baud_rate: send second control message, data = %02X",
 	    cts_enable_byte);
+	buf[0] = cts_enable_byte;
 	rc = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
 			MCT_U232_SET_CTS_REQUEST,
 			MCT_U232_SET_REQUEST_TYPE,
-			0, 0, &cts_enable_byte, MCT_U232_SET_CTS_SIZE,
+			0, 0, buf, MCT_U232_SET_CTS_SIZE,
 			WDR_TIMEOUT);
 	if (rc < 0)
 		dev_err(&port->dev, "Sending USB device request code %d "
			"failed (error = %d)\n", MCT_U232_SET_CTS_REQUEST, rc);
 
+	kfree(buf);
 	return rc;
 } /* mct_u232_set_baud_rate */
 
 static int mct_u232_set_line_ctrl(struct usb_serial *serial, unsigned char lcr)
 {
 	int rc;
+	unsigned char *buf;
+
+	buf = kmalloc(MCT_U232_MAX_SIZE, GFP_KERNEL);
+	if (buf == NULL)
+		return -ENOMEM;
+
+	buf[0] = lcr;
 	rc = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
 			MCT_U232_SET_LINE_CTRL_REQUEST,
 			MCT_U232_SET_REQUEST_TYPE,
-			0, 0, &lcr, MCT_U232_SET_LINE_CTRL_SIZE,
+			0, 0, buf, MCT_U232_SET_LINE_CTRL_SIZE,
 			WDR_TIMEOUT);
 	if (rc < 0)
 		dev_err(&serial->dev->dev,
 			"Set LINE CTRL 0x%x failed (error = %d)\n", lcr, rc);
 	dbg("set_line_ctrl: 0x%x", lcr);
+	kfree(buf);
 	return rc;
 } /* mct_u232_set_line_ctrl */
 
@@ -315,23 +330,31 @@ static int mct_u232_set_modem_ctrl(struct usb_serial *serial,
 						unsigned int control_state)
 {
 	int rc;
-	unsigned char mcr = MCT_U232_MCR_NONE;
+	unsigned char mcr;
+	unsigned char *buf;
+
+	buf = kmalloc(MCT_U232_MAX_SIZE, GFP_KERNEL);
+	if (buf == NULL)
+		return -ENOMEM;
 
+	mcr = MCT_U232_MCR_NONE;
 	if (control_state & TIOCM_DTR)
 		mcr |= MCT_U232_MCR_DTR;
 	if (control_state & TIOCM_RTS)
 		mcr |= MCT_U232_MCR_RTS;
 
+	buf[0] = mcr;
 	rc = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
 			MCT_U232_SET_MODEM_CTRL_REQUEST,
 			MCT_U232_SET_REQUEST_TYPE,
-			0, 0, &mcr, MCT_U232_SET_MODEM_CTRL_SIZE,
+			0, 0, buf, MCT_U232_SET_MODEM_CTRL_SIZE,
 			WDR_TIMEOUT);
 	if (rc < 0)
 		dev_err(&serial->dev->dev,
 			"Set MODEM CTRL 0x%x failed (error = %d)\n", mcr, rc);
 	dbg("set_modem_ctrl: state=0x%x ==> mcr=0x%x", control_state, mcr);
 
+	kfree(buf);
 	return rc;
 } /* mct_u232_set_modem_ctrl */
 
@@ -339,17 +362,27 @@ static int mct_u232_get_modem_stat(struct usb_serial *serial,
 					unsigned char *msr)
 {
 	int rc;
+	unsigned char *buf;
+
+	buf = kmalloc(MCT_U232_MAX_SIZE, GFP_KERNEL);
+	if (buf == NULL) {
+		*msr = 0;
+		return -ENOMEM;
+	}
 	rc = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
 			MCT_U232_GET_MODEM_STAT_REQUEST,
 			MCT_U232_GET_REQUEST_TYPE,
-			0, 0, msr, MCT_U232_GET_MODEM_STAT_SIZE,
+			0, 0, buf, MCT_U232_GET_MODEM_STAT_SIZE,
 			WDR_TIMEOUT);
 	if (rc < 0) {
 		dev_err(&serial->dev->dev,
 			"Get MODEM STATus failed (error = %d)\n", rc);
 		*msr = 0;
+	} else {
+		*msr = buf[0];
 	}
 	dbg("get_modem_stat: 0x%x", *msr);
+	kfree(buf);
 	return rc;
 } /* mct_u232_get_modem_stat */
 
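
Editor's note: mct_u232 now serializes the divisor into the transfer buffer with put_unaligned_le32() from <asm/unaligned.h>. A sketch of what the helper does; the function name below is hypothetical:

#include <asm/unaligned.h>
#include <linux/types.h>

/* buf needs no particular alignment */
static void example_pack(u8 *buf, u32 divisor)
{
	put_unaligned_le32(divisor, buf);  /* buf[0] = LSB ... buf[3] = MSB */
}

Note that put_unaligned_le32() itself performs the CPU-to-little-endian conversion, so no separate cpu_to_le32() is required on the value passed in.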
diff --git a/drivers/usb/serial/mct_u232.h b/drivers/usb/serial/mct_u232.h
index 07b6bec31dc8..7417d5ce1e23 100644
--- a/drivers/usb/serial/mct_u232.h
+++ b/drivers/usb/serial/mct_u232.h
@@ -73,6 +73,8 @@
 #define MCT_U232_SET_CTS_REQUEST		12
 #define MCT_U232_SET_CTS_SIZE			1
 
+#define MCT_U232_MAX_SIZE			4	/* of MCT_XXX_SIZE */
+
 /*
  * Baud rate (divisor)
  * Actually, there are two of them, MCT website calls them "Philips solution"
diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c
index 763e32a44be0..0d47f2c4d59f 100644
--- a/drivers/usb/serial/mos7720.c
+++ b/drivers/usb/serial/mos7720.c
@@ -81,12 +81,15 @@ struct moschip_serial {
 
 static int debug;
 
+static struct usb_serial_driver moschip7720_2port_driver;
+
 #define USB_VENDOR_ID_MOSCHIP		0x9710
 #define MOSCHIP_DEVICE_ID_7720		0x7720
 #define MOSCHIP_DEVICE_ID_7715		0x7715
 
-static struct usb_device_id moschip_port_id_table[] = {
+static const struct usb_device_id moschip_port_id_table[] = {
 	{ USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7720) },
+	{ USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7715) },
 	{ } /* terminating entry */
 };
 MODULE_DEVICE_TABLE(usb, moschip_port_id_table);
@@ -106,7 +109,7 @@ static void mos7720_interrupt_callback(struct urb *urb)
 	__u8 sp1;
 	__u8 sp2;
 
-	dbg("%s", " : Entering\n");
+	dbg(" : Entering");
 
 	switch (status) {
 	case 0:
@@ -186,6 +189,75 @@ exit:
 }
 
 /*
+ * mos7715_interrupt_callback
+ *	this is the 7715's callback function for when we have received data on
+ *	the interrupt endpoint.
+ */
+static void mos7715_interrupt_callback(struct urb *urb)
+{
+	int result;
+	int length;
+	int status = urb->status;
+	__u8 *data;
+	__u8 iir;
+
+	switch (status) {
+	case 0:
+		/* success */
+		break;
+	case -ECONNRESET:
+	case -ENOENT:
+	case -ESHUTDOWN:
+		/* this urb is terminated, clean up */
+		dbg("%s - urb shutting down with status: %d", __func__,
+		    status);
+		return;
+	default:
+		dbg("%s - nonzero urb status received: %d", __func__,
+		    status);
+		goto exit;
+	}
+
+	length = urb->actual_length;
+	data = urb->transfer_buffer;
+
+	/* Structure of data from 7715 device:
+	 * Byte 1: IIR serial Port
+	 * Byte 2: unused
+	 * Byte 2: DSR parallel port
+	 * Byte 4: FIFO status for both */
+
+	if (unlikely(length != 4)) {
+		dbg("Wrong data !!!");
+		return;
+	}
+
+	iir = data[0];
+	if (!(iir & 0x01)) {	/* serial port interrupt pending */
+		switch (iir & 0x0f) {
+		case SERIAL_IIR_RLS:
+			dbg("Serial Port: Receiver status error or address "
+			    "bit detected in 9-bit mode\n");
+			break;
+		case SERIAL_IIR_CTI:
+			dbg("Serial Port: Receiver time out");
+			break;
+		case SERIAL_IIR_MS:
+			dbg("Serial Port: Modem status change");
+			break;
+		}
+	}
+
+exit:
+	result = usb_submit_urb(urb, GFP_ATOMIC);
+	if (result)
+		dev_err(&urb->dev->dev,
+			"%s - Error %d submitting control urb\n",
+			__func__, result);
+	return;
+}
+
+/*
  * mos7720_bulk_in_callback
  *	this is the callback function for when we have received data on the
  *	bulk in endpoint.
@@ -206,7 +278,7 @@ static void mos7720_bulk_in_callback(struct urb *urb)
 
 	mos7720_port = urb->context;
 	if (!mos7720_port) {
-		dbg("%s", "NULL mos7720_port pointer \n");
+		dbg("NULL mos7720_port pointer");
 		return ;
 	}
 
@@ -218,7 +290,6 @@ static void mos7720_bulk_in_callback(struct urb *urb)
 
 	tty = tty_port_tty_get(&port->port);
 	if (tty && urb->actual_length) {
-		tty_buffer_request_room(tty, urb->actual_length);
 		tty_insert_flip_string(tty, data, urb->actual_length);
 		tty_flip_buffer_push(tty);
 	}
@@ -275,17 +346,15 @@ static void mos7720_bulk_out_data_callback(struct urb *urb)
  *	this function will be used for sending command to device
  */
 static int send_mos_cmd(struct usb_serial *serial, __u8 request, __u16 value,
-			__u16 index, void *data)
+			__u16 index, u8 *data)
 {
 	int status;
-	unsigned int pipe;
+	u8 *buf;
 	u16 product = le16_to_cpu(serial->dev->descriptor.idProduct);
-	__u8 requesttype;
-	__u16 size = 0x0000;
 
 	if (value < MOS_MAX_PORT) {
 		if (product == MOSCHIP_DEVICE_ID_7715)
-			value = value*0x100+0x100;
+			value = 0x0200; /* identifies the 7715's serial port */
 		else
 			value = value*0x100+0x200;
 	} else {
@@ -298,27 +367,58 @@ static int send_mos_cmd(struct usb_serial *serial, __u8 request, __u16 value,
 	}
 
 	if (request == MOS_WRITE) {
-		request = (__u8)MOS_WRITE;
-		requesttype = (__u8)0x40;
-		value = value + (__u16)*((unsigned char *)data);
-		data = NULL;
-		pipe = usb_sndctrlpipe(serial->dev, 0);
+		value = value + *data;
+		status = usb_control_msg(serial->dev,
+				usb_sndctrlpipe(serial->dev, 0), MOS_WRITE,
+				0x40, value, index, NULL, 0, MOS_WDR_TIMEOUT);
 	} else {
-		request = (__u8)MOS_READ;
-		requesttype = (__u8)0xC0;
-		size = 0x01;
-		pipe = usb_rcvctrlpipe(serial->dev, 0);
+		buf = kmalloc(1, GFP_KERNEL);
+		if (!buf) {
+			status = -ENOMEM;
+			goto out;
+		}
+		status = usb_control_msg(serial->dev,
+				usb_rcvctrlpipe(serial->dev, 0), MOS_READ,
+				0xc0, value, index, buf, 1, MOS_WDR_TIMEOUT);
+		*data = *buf;
+		kfree(buf);
 	}
-
-	status = usb_control_msg(serial->dev, pipe, request, requesttype,
-				value, index, data, size, MOS_WDR_TIMEOUT);
-
+out:
 	if (status < 0)
-		dbg("Command Write failed Value %x index %x\n", value, index);
+		dbg("Command Write failed Value %x index %x", value, index);
 
 	return status;
 }
 
+
+/*
+ * mos77xx_probe
+ *	this function installs the appropriate read interrupt endpoint callback
+ *	depending on whether the device is a 7720 or 7715, thus avoiding costly
+ *	run-time checks in the high-frequency callback routine itself.
+ */
+static int mos77xx_probe(struct usb_serial *serial,
+			 const struct usb_device_id *id)
+{
+	if (id->idProduct == MOSCHIP_DEVICE_ID_7715)
+		moschip7720_2port_driver.read_int_callback =
+			mos7715_interrupt_callback;
+	else
+		moschip7720_2port_driver.read_int_callback =
+			mos7720_interrupt_callback;
+
+	return 0;
+}
+
+static int mos77xx_calc_num_ports(struct usb_serial *serial)
+{
+	u16 product = le16_to_cpu(serial->dev->descriptor.idProduct);
+	if (product == MOSCHIP_DEVICE_ID_7715)
+		return 1;
+
+	return 2;
+}
+
 static int mos7720_open(struct tty_struct *tty, struct usb_serial_port *port)
 {
 	struct usb_serial *serial;
@@ -390,7 +490,7 @@ static int mos7720_open(struct tty_struct *tty, struct usb_serial_port *port)
 	 */
 	port_number = port->number - port->serial->minor;
 	send_mos_cmd(port->serial, MOS_READ, port_number, UART_LSR, &data);
-	dbg("SS::%p LSR:%x\n", mos7720_port, data);
+	dbg("SS::%p LSR:%x", mos7720_port, data);
 
 	dbg("Check:Sending Command ..........");
 
@@ -729,7 +829,7 @@ static void mos7720_throttle(struct tty_struct *tty)
 	struct moschip_port *mos7720_port;
 	int status;
 
-	dbg("%s- port %d\n", __func__, port->number);
+	dbg("%s- port %d", __func__, port->number);
 
 	mos7720_port = usb_get_serial_port_data(port);
 
@@ -1208,7 +1308,7 @@ static void mos7720_set_termios(struct tty_struct *tty,
 		return;
 	}
 
-	dbg("%s\n", "setting termios - ASPIRE");
+	dbg("setting termios - ASPIRE");
 
 	cflag = tty->termios->c_cflag;
 
@@ -1226,7 +1326,7 @@ static void mos7720_set_termios(struct tty_struct *tty,
 	change_port_settings(tty, mos7720_port, old_termios);
 
 	if (!port->read_urb) {
-		dbg("%s", "URB KILLED !!!!!\n");
+		dbg("URB KILLED !!!!!");
 		return;
 	}
 
@@ -1495,6 +1595,7 @@ static int mos7720_startup(struct usb_serial *serial)
 	struct usb_device *dev;
 	int i;
 	char data;
+	u16 product = le16_to_cpu(serial->dev->descriptor.idProduct);
 
 	dbg("%s: Entering ..........", __func__);
 
@@ -1514,6 +1615,29 @@ static int mos7720_startup(struct usb_serial *serial)
 
 	usb_set_serial_data(serial, mos7720_serial);
 
+	/*
+	 * The 7715 uses the first bulk in/out endpoint pair for the parallel
+	 * port, and the second for the serial port. Because the usbserial core
+	 * assumes both pairs are serial ports, we must engage in a bit of
+	 * subterfuge and swap the pointers for ports 0 and 1 in order to make
+	 * port 0 point to the serial port. However, both moschip devices use a
+	 * single interrupt-in endpoint for both ports (as mentioned a little
+	 * further down), and this endpoint was assigned to port 0. So after
+	 * the swap, we must copy the interrupt endpoint elements from port 1
+	 * (as newly assigned) to port 0, and null out port 1 pointers.
+	 */
+	if (product == MOSCHIP_DEVICE_ID_7715) {
+		struct usb_serial_port *tmp = serial->port[0];
+		serial->port[0] = serial->port[1];
+		serial->port[1] = tmp;
+		serial->port[0]->interrupt_in_urb = tmp->interrupt_in_urb;
+		serial->port[0]->interrupt_in_buffer = tmp->interrupt_in_buffer;
+		serial->port[0]->interrupt_in_endpointAddress =
+			tmp->interrupt_in_endpointAddress;
+		serial->port[1]->interrupt_in_urb = NULL;
+		serial->port[1]->interrupt_in_buffer = NULL;
+	}
+
 	/* we set up the pointers to the endpoints in the mos7720_open *
 	 * function, as the structures aren't created yet. */
 
@@ -1529,7 +1653,7 @@ static int mos7720_startup(struct usb_serial *serial)
 
 		/* Initialize all port interrupt end point to port 0 int
 		 * endpoint. Our device has only one interrupt endpoint
-		 * comman to all ports */
+		 * common to all ports */
 		serial->port[i]->interrupt_in_endpointAddress =
 			serial->port[0]->interrupt_in_endpointAddress;
 
@@ -1584,11 +1708,12 @@ static struct usb_serial_driver moschip7720_2port_driver = {
 	.description		= "Moschip 2 port adapter",
 	.usb_driver		= &usb_driver,
 	.id_table		= moschip_port_id_table,
-	.num_ports		= 2,
+	.calc_num_ports		= mos77xx_calc_num_ports,
 	.open			= mos7720_open,
 	.close			= mos7720_close,
 	.throttle		= mos7720_throttle,
 	.unthrottle		= mos7720_unthrottle,
+	.probe			= mos77xx_probe,
 	.attach			= mos7720_startup,
 	.release		= mos7720_release,
 	.ioctl			= mos7720_ioctl,
@@ -1600,7 +1725,7 @@ static struct usb_serial_driver moschip7720_2port_driver = {
 	.chars_in_buffer	= mos7720_chars_in_buffer,
 	.break_ctl		= mos7720_break,
 	.read_bulk_callback	= mos7720_bulk_in_callback,
-	.read_int_callback	= mos7720_interrupt_callback,
+	.read_int_callback	= NULL  /* dynamically assigned in probe() */
};
 
 static int __init moschip7720_init(void)
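
Editor's note: the 7715/7720 support above selects the interrupt callback once at probe time and reports the port count through calc_num_ports(), keeping product checks out of the per-URB hot path. A stripped-down sketch of that dispatch; the driver struct, callbacks, and IDs below are placeholders, not the mos7720 code:

#include <linux/usb.h>
#include <linux/usb/serial.h>

static void one_port_cb(struct urb *urb);	/* hypothetical callbacks */
static void two_port_cb(struct urb *urb);
static struct usb_serial_driver example_driver;

static int example_probe(struct usb_serial *serial,
			 const struct usb_device_id *id)
{
	/* pick the interrupt handler once, outside the hot path */
	example_driver.read_int_callback =
		(id->idProduct == 0x7715) ? one_port_cb : two_port_cb;
	return 0;
}

static int example_calc_num_ports(struct usb_serial *serial)
{
	/* one device variant exposes one serial port, the other two */
	u16 product = le16_to_cpu(serial->dev->descriptor.idProduct);

	return product == 0x7715 ? 1 : 2;
}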
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
index 2cfe2451ed97..2fda1c0182b7 100644
--- a/drivers/usb/serial/mos7840.c
+++ b/drivers/usb/serial/mos7840.c
@@ -181,7 +181,7 @@
 #define URB_TRANSFER_BUFFER_SIZE	32	/* URB Size  */
 
 
-static struct usb_device_id moschip_port_id_table[] = {
+static const struct usb_device_id moschip_port_id_table[] = {
 	{USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7840)},
 	{USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7820)},
 	{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_2)},
@@ -198,7 +198,7 @@ static struct usb_device_id moschip_port_id_table[] = {
 	{}			/* terminating entry */
 };
 
-static __devinitdata struct usb_device_id moschip_id_table_combined[] = {
+static const struct usb_device_id moschip_id_table_combined[] __devinitconst = {
 	{USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7840)},
 	{USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7820)},
 	{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_2)},
@@ -283,12 +283,19 @@ static int mos7840_get_reg_sync(struct usb_serial_port *port, __u16 reg,
 {
 	struct usb_device *dev = port->serial->dev;
 	int ret = 0;
+	u8 *buf;
+
+	buf = kmalloc(VENDOR_READ_LENGTH, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
 
 	ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), MCS_RDREQ,
-			      MCS_RD_RTYPE, 0, reg, val, VENDOR_READ_LENGTH,
+			      MCS_RD_RTYPE, 0, reg, buf, VENDOR_READ_LENGTH,
 			      MOS_WDR_TIMEOUT);
+	*val = buf[0];
 	dbg("mos7840_get_reg_sync offset is %x, return val %x", reg, *val);
-	*val = (*val) & 0x00ff;
+
+	kfree(buf);
 	return ret;
 }
 
@@ -341,6 +348,11 @@ static int mos7840_get_uart_reg(struct usb_serial_port *port, __u16 reg,
 	struct usb_device *dev = port->serial->dev;
 	int ret = 0;
 	__u16 Wval;
+	u8 *buf;
+
+	buf = kmalloc(VENDOR_READ_LENGTH, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
 
 	/* dbg("application number is %4x",
 	   (((__u16)port->number - (__u16)(port->serial->minor))+1)<<8); */
@@ -364,9 +376,11 @@ static int mos7840_get_uart_reg(struct usb_serial_port *port, __u16 reg,
 		}
 	}
 	ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), MCS_RDREQ,
-			      MCS_RD_RTYPE, Wval, reg, val, VENDOR_READ_LENGTH,
+			      MCS_RD_RTYPE, Wval, reg, buf, VENDOR_READ_LENGTH,
 			      MOS_WDR_TIMEOUT);
-	*val = (*val) & 0x00ff;
+	*val = buf[0];
+
+	kfree(buf);
 	return ret;
 }
 
@@ -750,7 +764,6 @@ static void mos7840_bulk_in_callback(struct urb *urb)
 	if (urb->actual_length) {
 		tty = tty_port_tty_get(&mos7840_port->port->port);
 		if (tty) {
-			tty_buffer_request_room(tty, urb->actual_length);
 			tty_insert_flip_string(tty, data, urb->actual_length);
 			dbg(" %s ", data);
 			tty_flip_buffer_push(tty);
diff --git a/drivers/usb/serial/moto_modem.c b/drivers/usb/serial/moto_modem.c
index 99bd00f5188a..cf1718394e18 100644
--- a/drivers/usb/serial/moto_modem.c
+++ b/drivers/usb/serial/moto_modem.c
@@ -21,7 +21,7 @@
 #include <linux/usb.h>
 #include <linux/usb/serial.h>
 
-static struct usb_device_id id_table [] = {
+static const struct usb_device_id id_table[] = {
 	{ USB_DEVICE(0x05c6, 0x3197) },	/* unknown Motorola phone */
 	{ USB_DEVICE(0x0c44, 0x0022) },	/* unknown Mororola phone */
 	{ USB_DEVICE(0x22b8, 0x2a64) },	/* Motorola KRZR K1m */
diff --git a/drivers/usb/serial/navman.c b/drivers/usb/serial/navman.c
index 5ceaa4c6be09..04a6cbbed2c0 100644
--- a/drivers/usb/serial/navman.c
+++ b/drivers/usb/serial/navman.c
@@ -22,7 +22,7 @@
 
 static int debug;
 
-static struct usb_device_id id_table [] = {
+static const struct usb_device_id id_table[] = {
 	{ USB_DEVICE(0x0a99, 0x0001) },	/* Talon Technology device */
 	{ },
 };
@@ -66,7 +66,6 @@ static void navman_read_int_callback(struct urb *urb)
 
 	tty = tty_port_tty_get(&port->port);
 	if (tty && urb->actual_length) {
-		tty_buffer_request_room(tty, urb->actual_length);
 		tty_insert_flip_string(tty, data, urb->actual_length);
 		tty_flip_buffer_push(tty);
 	}
diff --git a/drivers/usb/serial/omninet.c b/drivers/usb/serial/omninet.c
index 062265038bf0..89c724c0ac0a 100644
--- a/drivers/usb/serial/omninet.c
+++ b/drivers/usb/serial/omninet.c
@@ -75,7 +75,7 @@ static void omninet_disconnect(struct usb_serial *serial);
 static void omninet_release(struct usb_serial *serial);
 static int omninet_attach(struct usb_serial *serial);
 
-static struct usb_device_id id_table[] = {
+static const struct usb_device_id id_table[] = {
 	{ USB_DEVICE(ZYXEL_VENDOR_ID, ZYXEL_OMNINET_ID) },
 	{ USB_DEVICE(ZYXEL_VENDOR_ID, BT_IGNITIONPRO_ID) },
 	{ } /* Terminating entry */
@@ -218,8 +218,8 @@ static void omninet_read_bulk_callback(struct urb *urb)
 
 	if (debug && header->oh_xxx != 0x30) {
 		if (urb->actual_length) {
-			printk(KERN_DEBUG __FILE__
-			       ": omninet_read %d: ", header->oh_len);
+			printk(KERN_DEBUG "%s: omninet_read %d: ",
+			       __FILE__, header->oh_len);
 			for (i = 0; i < (header->oh_len +
 						OMNINET_HEADERLEN); i++)
 				printk("%.2x ", data[i]);
@@ -332,7 +332,7 @@ static void omninet_write_bulk_callback(struct urb *urb)
 	struct usb_serial_port *port = urb->context;
 	int status = urb->status;
 
-	dbg("%s - port %0x\n", __func__, port->number);
+	dbg("%s - port %0x", __func__, port->number);
 
 	port->write_urb_busy = 0;
 	if (status) {
diff --git a/drivers/usb/serial/opticon.c b/drivers/usb/serial/opticon.c
index 4cdb975caa89..f37476e22684 100644
--- a/drivers/usb/serial/opticon.c
+++ b/drivers/usb/serial/opticon.c
@@ -22,7 +22,7 @@
 
 static int debug;
 
-static struct usb_device_id id_table[] = {
+static const struct usb_device_id id_table[] = {
 	{ USB_DEVICE(0x065a, 0x0009) },
 	{ },
 };
@@ -55,7 +55,6 @@ static void opticon_bulk_callback(struct urb *urb)
 	int status = urb->status;
 	struct tty_struct *tty;
 	int result;
-	int available_room = 0;
 	int data_length;
 
 	dbg("%s - port %d", __func__, port->number);
@@ -96,13 +95,9 @@ static void opticon_bulk_callback(struct urb *urb)
 			/* real data, send it to the tty layer */
 			tty = tty_port_tty_get(&port->port);
 			if (tty) {
-				available_room = tty_buffer_request_room(tty,
-							data_length);
-				if (available_room) {
-					tty_insert_flip_string(tty, data,
-							available_room);
-					tty_flip_buffer_push(tty);
-				}
+				tty_insert_flip_string(tty, data,
+						       data_length);
+				tty_flip_buffer_push(tty);
 				tty_kref_put(tty);
 			}
 		} else {
@@ -217,7 +212,7 @@ static int opticon_write(struct tty_struct *tty, struct usb_serial_port *port,
 	spin_lock_irqsave(&priv->lock, flags);
 	if (priv->outstanding_urbs > URB_UPPER_LIMIT) {
 		spin_unlock_irqrestore(&priv->lock, flags);
-		dbg("%s - write limit hit\n", __func__);
+		dbg("%s - write limit hit", __func__);
 		return 0;
 	}
 	priv->outstanding_urbs++;
@@ -288,7 +283,7 @@ static int opticon_write_room(struct tty_struct *tty)
 	spin_lock_irqsave(&priv->lock, flags);
 	if (priv->outstanding_urbs > URB_UPPER_LIMIT * 2 / 3) {
 		spin_unlock_irqrestore(&priv->lock, flags);
-		dbg("%s - write limit hit\n", __func__);
+		dbg("%s - write limit hit", __func__);
 		return 0;
 	}
 	spin_unlock_irqrestore(&priv->lock, flags);
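
Editor's note: the opticon hunk above, like several others in this series, drops the tty_buffer_request_room() call before tty_insert_flip_string(): the insert path sizes the flip buffer itself and returns how many bytes it actually copied. A sketch of the resulting receive path; example_rx() is a hypothetical helper:

#include <linux/tty.h>
#include <linux/tty_flip.h>

static void example_rx(struct tty_struct *tty, const unsigned char *data,
		       int len)
{
	/* no tty_buffer_request_room() needed; the insert resizes */
	int copied = tty_insert_flip_string(tty, data, len);

	if (copied < len)
		printk(KERN_WARNING "example: dropped %d bytes\n",
		       len - copied);
	tty_flip_buffer_push(tty);
}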
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 6e94a6711f08..847b805d63a3 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -336,15 +336,42 @@ static int option_resume(struct usb_serial *serial);
 #define AIRPLUS_VENDOR_ID			0x1011
 #define AIRPLUS_PRODUCT_MCD650			0x3198
 
+/* Longcheer/Longsung vendor ID; makes whitelabel devices that
+ * many other vendors like 4G Systems, Alcatel, ChinaBird,
+ * Mobidata, etc sell under their own brand names.
+ */
+#define LONGCHEER_VENDOR_ID			0x1c9e
+
 /* 4G Systems products */
-#define FOUR_G_SYSTEMS_VENDOR_ID		0x1c9e
+/* This is the 4G XS Stick W14 a.k.a. Mobilcom Debitel Surf-Stick *
+ * It seems to contain a Qualcomm QSC6240/6290 chipset            */
 #define FOUR_G_SYSTEMS_PRODUCT_W14		0x9603
 
 /* Haier products */
 #define HAIER_VENDOR_ID				0x201e
 #define HAIER_PRODUCT_CE100			0x2009
 
-static struct usb_device_id option_ids[] = {
+/* some devices interfaces need special handling due to a number of reasons */
+enum option_blacklist_reason {
+	OPTION_BLACKLIST_NONE = 0,
+	OPTION_BLACKLIST_SENDSETUP = 1,
+	OPTION_BLACKLIST_RESERVED_IF = 2
+};
+
+struct option_blacklist_info {
+	const u32 infolen;	/* number of interface numbers on blacklist */
+	const u8 *ifaceinfo;	/* pointer to the array holding the numbers */
+	enum option_blacklist_reason reason;
+};
+
+static const u8 four_g_w14_no_sendsetup[] = { 0, 1 };
+static const struct option_blacklist_info four_g_w14_blacklist = {
+	.infolen = ARRAY_SIZE(four_g_w14_no_sendsetup),
+	.ifaceinfo = four_g_w14_no_sendsetup,
+	.reason = OPTION_BLACKLIST_SENDSETUP
+};
+
+static const struct usb_device_id option_ids[] = {
 	{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) },
 	{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) },
 	{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA_LIGHT) },
@@ -644,7 +671,9 @@ static struct usb_device_id option_ids[] = {
 	{ USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S) },
 	{ USB_DEVICE(AIRPLUS_VENDOR_ID, AIRPLUS_PRODUCT_MCD650) },
 	{ USB_DEVICE(TLAYTECH_VENDOR_ID, TLAYTECH_PRODUCT_TEU800) },
-	{ USB_DEVICE(FOUR_G_SYSTEMS_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W14) },
+	{ USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W14),
+	  .driver_info = (kernel_ulong_t)&four_g_w14_blacklist
+	},
 	{ USB_DEVICE(HAIER_VENDOR_ID, HAIER_PRODUCT_CE100) },
 	{ } /* Terminating entry */
 };
@@ -709,6 +738,7 @@ struct option_intf_private {
 	spinlock_t susp_lock;
 	unsigned int suspended:1;
 	int in_flight;
+	struct option_blacklist_info *blacklist_info;
 };
 
 struct option_port_private {
@@ -778,9 +808,27 @@ static int option_probe(struct usb_serial *serial,
 	if (!data)
 		return -ENOMEM;
 	spin_lock_init(&data->susp_lock);
+	data->blacklist_info = (struct option_blacklist_info*) id->driver_info;
 	return 0;
 }
 
+static enum option_blacklist_reason is_blacklisted(const u8 ifnum,
+				const struct option_blacklist_info *blacklist)
+{
+	const u8 *info;
+	int i;
+
+	if (blacklist) {
+		info = blacklist->ifaceinfo;
+
+		for (i = 0; i < blacklist->infolen; i++) {
+			if (info[i] == ifnum)
+				return blacklist->reason;
+		}
+	}
+	return OPTION_BLACKLIST_NONE;
+}
+
 static void option_set_termios(struct tty_struct *tty,
 		struct usb_serial_port *port, struct ktermios *old_termios)
 {
@@ -921,7 +969,6 @@ static void option_indat_callback(struct urb *urb)
 	} else {
 		tty = tty_port_tty_get(&port->port);
 		if (urb->actual_length) {
-			tty_buffer_request_room(tty, urb->actual_length);
 			tty_insert_flip_string(tty, data, urb->actual_length);
 			tty_flip_buffer_push(tty);
 		} else
@@ -929,9 +976,9 @@ static void option_indat_callback(struct urb *urb)
 		tty_kref_put(tty);
 
 		/* Resubmit urb so we continue receiving */
-		if (port->port.count && status != -ESHUTDOWN) {
+		if (status != -ESHUTDOWN) {
 			err = usb_submit_urb(urb, GFP_ATOMIC);
-			if (err)
+			if (err && err != -EPERM)
 				printk(KERN_ERR "%s: resubmit read urb failed. "
 					"(%d)", __func__, err);
 			else
@@ -985,7 +1032,7 @@ static void option_instat_callback(struct urb *urb)
 		(struct usb_ctrlrequest *)urb->transfer_buffer;
 
 	if (!req_pkt) {
-		dbg("%s: NULL req_pkt\n", __func__);
+		dbg("%s: NULL req_pkt", __func__);
 		return;
 	}
 	if ((req_pkt->bRequestType == 0xA1) &&
@@ -1211,11 +1258,19 @@ static void option_setup_urbs(struct usb_serial *serial)
 static int option_send_setup(struct usb_serial_port *port)
 {
 	struct usb_serial *serial = port->serial;
+	struct option_intf_private *intfdata =
+		(struct option_intf_private *) serial->private;
 	struct option_port_private *portdata;
 	int ifNum = serial->interface->cur_altsetting->desc.bInterfaceNumber;
 	int val = 0;
 	dbg("%s", __func__);
 
+	if (is_blacklisted(ifNum, intfdata->blacklist_info) ==
+			OPTION_BLACKLIST_SENDSETUP) {
+		dbg("No send_setup on blacklisted interface #%d\n", ifNum);
+		return -EIO;
+	}
+
 	portdata = usb_get_serial_port_data(port);
 
 	if (portdata->dtr_state)
@@ -1401,7 +1456,7 @@ static int option_resume(struct usb_serial *serial)
 	for (i = 0; i < serial->num_ports; i++) {
 		port = serial->port[i];
 		if (!port->interrupt_in_urb) {
-			dbg("%s: No interrupt URB for port %d\n", __func__, i);
+			dbg("%s: No interrupt URB for port %d", __func__, i);
 			continue;
 		}
 		err = usb_submit_urb(port->interrupt_in_urb, GFP_NOIO);
diff --git a/drivers/usb/serial/oti6858.c b/drivers/usb/serial/oti6858.c
index c644e26394b4..deeacdea05db 100644
--- a/drivers/usb/serial/oti6858.c
+++ b/drivers/usb/serial/oti6858.c
@@ -58,7 +58,7 @@
58#define OTI6858_AUTHOR "Tomasz Michal Lukaszewski <FIXME@FIXME>" 58#define OTI6858_AUTHOR "Tomasz Michal Lukaszewski <FIXME@FIXME>"
59#define OTI6858_VERSION "0.1" 59#define OTI6858_VERSION "0.1"
60 60
61static struct usb_device_id id_table [] = { 61static const struct usb_device_id id_table[] = {
62 { USB_DEVICE(OTI6858_VENDOR_ID, OTI6858_PRODUCT_ID) }, 62 { USB_DEVICE(OTI6858_VENDOR_ID, OTI6858_PRODUCT_ID) },
63 { } 63 { }
64}; 64};
@@ -302,7 +302,7 @@ void send_data(struct work_struct *work)
302 struct usb_serial_port *port = priv->port; 302 struct usb_serial_port *port = priv->port;
303 int count = 0, result; 303 int count = 0, result;
304 unsigned long flags; 304 unsigned long flags;
305 unsigned char allow; 305 u8 *allow;
306 306
307 dbg("%s(port = %d)", __func__, port->number); 307 dbg("%s(port = %d)", __func__, port->number);
308 308
@@ -321,13 +321,20 @@ void send_data(struct work_struct *work)
321 count = port->bulk_out_size; 321 count = port->bulk_out_size;
322 322
323 if (count != 0) { 323 if (count != 0) {
324 allow = kmalloc(1, GFP_KERNEL);
325 if (!allow) {
326 dev_err(&port->dev, "%s(): kmalloc failed\n",
327 __func__);
328 return;
329 }
324 result = usb_control_msg(port->serial->dev, 330 result = usb_control_msg(port->serial->dev,
325 usb_rcvctrlpipe(port->serial->dev, 0), 331 usb_rcvctrlpipe(port->serial->dev, 0),
326 OTI6858_REQ_T_CHECK_TXBUFF, 332 OTI6858_REQ_T_CHECK_TXBUFF,
327 OTI6858_REQ_CHECK_TXBUFF, 333 OTI6858_REQ_CHECK_TXBUFF,
328 count, 0, &allow, 1, 100); 334 count, 0, allow, 1, 100);
329 if (result != 1 || allow != 0) 335 if (result != 1 || *allow != 0)
330 count = 0; 336 count = 0;
337 kfree(allow);
331 } 338 }
332 339
333 if (count == 0) { 340 if (count == 0) {
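The kmalloc() is the real fix in this hunk: the old code handed &allow, a stack variable, to usb_control_msg(), but the USB core DMA-maps transfer buffers and stack memory is not reliably DMA-safe. Control-message data must come from the heap. A self-contained sketch of the rule (helper name and request constants hypothetical):

	static int sample_read_one_byte(struct usb_device *udev, u16 value)
	{
		u8 *buf;
		int ret;

		buf = kmalloc(1, GFP_KERNEL);	/* heap memory is DMA-able */
		if (!buf)
			return -ENOMEM;

		ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
				      0x01, USB_DIR_IN | USB_TYPE_VENDOR,
				      value, 0, buf, 1, 100);
		if (ret == 1)
			ret = *buf;
		kfree(buf);
		return ret;
	}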
@@ -578,9 +585,6 @@ static int oti6858_open(struct tty_struct *tty, struct usb_serial_port *port)
578 usb_clear_halt(serial->dev, port->write_urb->pipe); 585 usb_clear_halt(serial->dev, port->write_urb->pipe);
579 usb_clear_halt(serial->dev, port->read_urb->pipe); 586 usb_clear_halt(serial->dev, port->read_urb->pipe);
580 587
581 if (port->port.count != 1)
582 return 0;
583
584 buf = kmalloc(OTI6858_CTRL_PKT_SIZE, GFP_KERNEL); 588 buf = kmalloc(OTI6858_CTRL_PKT_SIZE, GFP_KERNEL);
585 if (buf == NULL) { 589 if (buf == NULL) {
586 dev_err(&port->dev, "%s(): out of memory!\n", __func__); 590 dev_err(&port->dev, "%s(): out of memory!\n", __func__);
@@ -927,10 +931,6 @@ static void oti6858_read_bulk_callback(struct urb *urb)
927 spin_unlock_irqrestore(&priv->lock, flags); 931 spin_unlock_irqrestore(&priv->lock, flags);
928 932
929 if (status != 0) { 933 if (status != 0) {
930 if (!port->port.count) {
931 dbg("%s(): port is closed, exiting", __func__);
932 return;
933 }
934 /* 934 /*
935 if (status == -EPROTO) { 935 if (status == -EPROTO) {
936 * PL2303 mysteriously fails with -EPROTO reschedule 936 * PL2303 mysteriously fails with -EPROTO reschedule
@@ -954,14 +954,12 @@ static void oti6858_read_bulk_callback(struct urb *urb)
954 } 954 }
955 tty_kref_put(tty); 955 tty_kref_put(tty);
956 956
957 /* schedule the interrupt urb if we are still open */ 957 /* schedule the interrupt urb */
958 if (port->port.count != 0) { 958 port->interrupt_in_urb->dev = port->serial->dev;
959 port->interrupt_in_urb->dev = port->serial->dev; 959 result = usb_submit_urb(port->interrupt_in_urb, GFP_ATOMIC);
960 result = usb_submit_urb(port->interrupt_in_urb, GFP_ATOMIC); 960 if (result != 0 && result != -EPERM) {
961 if (result != 0) { 961 dev_err(&port->dev, "%s(): usb_submit_urb() failed,"
962 dev_err(&port->dev, "%s(): usb_submit_urb() failed," 962 " error %d\n", __func__, result);
963 " error %d\n", __func__, result);
964 }
965 } 963 }
966} 964}
967 965
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index 9ec1a49e2362..73d5f346d3e0 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -50,7 +50,7 @@ struct pl2303_buf {
50 char *buf_put; 50 char *buf_put;
51}; 51};
52 52
53static struct usb_device_id id_table [] = { 53static const struct usb_device_id id_table[] = {
54 { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID) }, 54 { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID) },
55 { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_RSAQ2) }, 55 { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_RSAQ2) },
56 { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_DCU11) }, 56 { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_DCU11) },
@@ -451,7 +451,6 @@ static void pl2303_send(struct usb_serial_port *port)
451 port->write_urb->transfer_buffer); 451 port->write_urb->transfer_buffer);
452 452
453 port->write_urb->transfer_buffer_length = count; 453 port->write_urb->transfer_buffer_length = count;
454 port->write_urb->dev = port->serial->dev;
455 result = usb_submit_urb(port->write_urb, GFP_ATOMIC); 454 result = usb_submit_urb(port->write_urb, GFP_ATOMIC);
456 if (result) { 455 if (result) {
457 dev_err(&port->dev, "%s - failed submitting write urb," 456 dev_err(&port->dev, "%s - failed submitting write urb,"
@@ -769,7 +768,6 @@ static int pl2303_open(struct tty_struct *tty, struct usb_serial_port *port)
769 pl2303_set_termios(tty, port, &tmp_termios); 768 pl2303_set_termios(tty, port, &tmp_termios);
770 769
771 dbg("%s - submitting read urb", __func__); 770 dbg("%s - submitting read urb", __func__);
772 port->read_urb->dev = serial->dev;
773 result = usb_submit_urb(port->read_urb, GFP_KERNEL); 771 result = usb_submit_urb(port->read_urb, GFP_KERNEL);
774 if (result) { 772 if (result) {
775 dev_err(&port->dev, "%s - failed submitting read urb," 773 dev_err(&port->dev, "%s - failed submitting read urb,"
@@ -779,7 +777,6 @@ static int pl2303_open(struct tty_struct *tty, struct usb_serial_port *port)
779 } 777 }
780 778
781 dbg("%s - submitting interrupt urb", __func__); 779 dbg("%s - submitting interrupt urb", __func__);
782 port->interrupt_in_urb->dev = serial->dev;
783 result = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL); 780 result = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
784 if (result) { 781 if (result) {
785 dev_err(&port->dev, "%s - failed submitting interrupt urb," 782 dev_err(&port->dev, "%s - failed submitting interrupt urb,"
@@ -895,10 +892,23 @@ static int wait_modem_info(struct usb_serial_port *port, unsigned int arg)
895static int pl2303_ioctl(struct tty_struct *tty, struct file *file, 892static int pl2303_ioctl(struct tty_struct *tty, struct file *file,
896 unsigned int cmd, unsigned long arg) 893 unsigned int cmd, unsigned long arg)
897{ 894{
895 struct serial_struct ser;
898 struct usb_serial_port *port = tty->driver_data; 896 struct usb_serial_port *port = tty->driver_data;
899 dbg("%s (%d) cmd = 0x%04x", __func__, port->number, cmd); 897 dbg("%s (%d) cmd = 0x%04x", __func__, port->number, cmd);
900 898
901 switch (cmd) { 899 switch (cmd) {
900 case TIOCGSERIAL:
901 memset(&ser, 0, sizeof ser);
902 ser.type = PORT_16654;
903 ser.line = port->serial->minor;
904 ser.port = port->number;
905 ser.baud_base = 460800;
906
907 if (copy_to_user((void __user *)arg, &ser, sizeof ser))
908 return -EFAULT;
909
910 return 0;
911
902 case TIOCMIWAIT: 912 case TIOCMIWAIT:
903 dbg("%s (%d) TIOCMIWAIT", __func__, port->number); 913 dbg("%s (%d) TIOCMIWAIT", __func__, port->number);
904 return wait_modem_info(port, arg); 914 return wait_modem_info(port, arg);
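With the TIOCGSERIAL case filled in, user-space tools that probe serial ports (setserial and friends) get plausible answers instead of an error. A minimal user-space check might look like this (device path hypothetical):

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <linux/serial.h>

	int main(void)
	{
		struct serial_struct ser;
		int fd = open("/dev/ttyUSB0", O_RDONLY | O_NOCTTY);

		if (fd < 0 || ioctl(fd, TIOCGSERIAL, &ser) < 0) {
			perror("TIOCGSERIAL");
			return 1;
		}
		printf("type=%d line=%d baud_base=%d\n",
		       ser.type, ser.line, ser.baud_base);
		return 0;
	}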
@@ -1042,7 +1052,6 @@ static void pl2303_push_data(struct tty_struct *tty,
1042 tty_flag = TTY_FRAME; 1052 tty_flag = TTY_FRAME;
1043 dbg("%s - tty_flag = %d", __func__, tty_flag); 1053 dbg("%s - tty_flag = %d", __func__, tty_flag);
1044 1054
1045 tty_buffer_request_room(tty, urb->actual_length + 1);
1046 /* overrun is special, not associated with a char */ 1055 /* overrun is special, not associated with a char */
1047 if (line_status & UART_OVERRUN_ERROR) 1056 if (line_status & UART_OVERRUN_ERROR)
1048 tty_insert_flip_char(tty, 0, TTY_OVERRUN); 1057 tty_insert_flip_char(tty, 0, TTY_OVERRUN);
@@ -1072,16 +1081,11 @@ static void pl2303_read_bulk_callback(struct urb *urb)
1072 1081
1073 if (status) { 1082 if (status) {
1074 dbg("%s - urb status = %d", __func__, status); 1083 dbg("%s - urb status = %d", __func__, status);
1075 if (!port->port.count) {
1076 dbg("%s - port is closed, exiting.", __func__);
1077 return;
1078 }
1079 if (status == -EPROTO) { 1084 if (status == -EPROTO) {
1080 /* PL2303 mysteriously fails with -EPROTO reschedule 1085 /* PL2303 mysteriously fails with -EPROTO reschedule
1081 * the read */ 1086 * the read */
1082 dbg("%s - caught -EPROTO, resubmitting the urb", 1087 dbg("%s - caught -EPROTO, resubmitting the urb",
1083 __func__); 1088 __func__);
1084 urb->dev = port->serial->dev;
1085 result = usb_submit_urb(urb, GFP_ATOMIC); 1089 result = usb_submit_urb(urb, GFP_ATOMIC);
1086 if (result) 1090 if (result)
1087 dev_err(&urb->dev->dev, "%s - failed" 1091 dev_err(&urb->dev->dev, "%s - failed"
@@ -1108,15 +1112,10 @@ static void pl2303_read_bulk_callback(struct urb *urb)
1108 } 1112 }
1109 tty_kref_put(tty); 1113 tty_kref_put(tty);
1110 /* Schedule the next read _if_ we are still open */ 1114 /* Schedule the next read _if_ we are still open */
1111 if (port->port.count) { 1115 result = usb_submit_urb(urb, GFP_ATOMIC);
1112 urb->dev = port->serial->dev; 1116 if (result && result != -EPERM)
1113 result = usb_submit_urb(urb, GFP_ATOMIC); 1117 dev_err(&urb->dev->dev, "%s - failed resubmitting"
1114 if (result) 1118 " read urb, error %d\n", __func__, result);
1115 dev_err(&urb->dev->dev, "%s - failed resubmitting"
1116 " read urb, error %d\n", __func__, result);
1117 }
1118
1119 return;
1120} 1119}
1121 1120
1122static void pl2303_write_bulk_callback(struct urb *urb) 1121static void pl2303_write_bulk_callback(struct urb *urb)
@@ -1146,7 +1145,6 @@ static void pl2303_write_bulk_callback(struct urb *urb)
1146 dbg("%s - nonzero write bulk status received: %d", __func__, 1145 dbg("%s - nonzero write bulk status received: %d", __func__,
1147 status); 1146 status);
1148 port->write_urb->transfer_buffer_length = 1; 1147 port->write_urb->transfer_buffer_length = 1;
1149 port->write_urb->dev = port->serial->dev;
1150 result = usb_submit_urb(port->write_urb, GFP_ATOMIC); 1148 result = usb_submit_urb(port->write_urb, GFP_ATOMIC);
1151 if (result) 1149 if (result)
1152 dev_err(&urb->dev->dev, "%s - failed resubmitting write" 1150 dev_err(&urb->dev->dev, "%s - failed resubmitting write"
diff --git a/drivers/usb/serial/qcaux.c b/drivers/usb/serial/qcaux.c
new file mode 100644
index 000000000000..0b9362061713
--- /dev/null
+++ b/drivers/usb/serial/qcaux.c
@@ -0,0 +1,96 @@
1/*
2 * Qualcomm USB Auxiliary Serial Port driver
3 *
4 * Copyright (C) 2008 Greg Kroah-Hartman <greg@kroah.com>
5 * Copyright (C) 2010 Dan Williams <dcbw@redhat.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * Devices listed here usually provide a CDC ACM port on which normal modem
12 * AT commands and PPP can be used. But when that port is in use by PPP it
13 * cannot be used simultaneously for status or signal strength. Instead, the
14 * ports here can be queried for that information using the Qualcomm DM
15 * protocol.
16 */
17
18#include <linux/kernel.h>
19#include <linux/init.h>
20#include <linux/tty.h>
21#include <linux/module.h>
22#include <linux/usb.h>
23#include <linux/usb/serial.h>
24
25/* NOTE: for now, only use this driver for devices that provide a CDC-ACM port
26 * for normal AT commands, but also provide secondary USB interfaces for the
27 * QCDM-capable ports. Devices that do not provide a CDC-ACM port should
28 * probably be driven by option.ko.
29 */
30
31/* UTStarcom/Pantech/Curitel devices */
32#define UTSTARCOM_VENDOR_ID 0x106c
33#define UTSTARCOM_PRODUCT_PC5740 0x3701
34#define UTSTARCOM_PRODUCT_PC5750 0x3702 /* aka Pantech PX-500 */
35#define UTSTARCOM_PRODUCT_UM150 0x3711
36#define UTSTARCOM_PRODUCT_UM175_V1 0x3712
37#define UTSTARCOM_PRODUCT_UM175_V2 0x3714
38#define UTSTARCOM_PRODUCT_UM175_ALLTEL 0x3715
39
40/* CMOTECH devices */
41#define CMOTECH_VENDOR_ID 0x16d8
42#define CMOTECH_PRODUCT_CDU550 0x5553
43#define CMOTECH_PRODUCT_CDX650 0x6512
44
45static struct usb_device_id id_table[] = {
46 { USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, UTSTARCOM_PRODUCT_PC5740, 0xff, 0x00, 0x00) },
47 { USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, UTSTARCOM_PRODUCT_PC5750, 0xff, 0x00, 0x00) },
48 { USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, UTSTARCOM_PRODUCT_UM150, 0xff, 0x00, 0x00) },
49 { USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, UTSTARCOM_PRODUCT_UM175_V1, 0xff, 0x00, 0x00) },
50 { USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, UTSTARCOM_PRODUCT_UM175_V2, 0xff, 0x00, 0x00) },
51 { USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, UTSTARCOM_PRODUCT_UM175_ALLTEL, 0xff, 0x00, 0x00) },
52 { USB_DEVICE_AND_INTERFACE_INFO(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CDU550, 0xff, 0xff, 0x00) },
53 { USB_DEVICE_AND_INTERFACE_INFO(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CDX650, 0xff, 0xff, 0x00) },
54 { },
55};
56MODULE_DEVICE_TABLE(usb, id_table);
57
58static struct usb_driver qcaux_driver = {
59 .name = "qcaux",
60 .probe = usb_serial_probe,
61 .disconnect = usb_serial_disconnect,
62 .id_table = id_table,
63 .no_dynamic_id = 1,
64};
65
66static struct usb_serial_driver qcaux_device = {
67 .driver = {
68 .owner = THIS_MODULE,
69 .name = "qcaux",
70 },
71 .id_table = id_table,
72 .num_ports = 1,
73};
74
75static int __init qcaux_init(void)
76{
77 int retval;
78
79 retval = usb_serial_register(&qcaux_device);
80 if (retval)
81 return retval;
82 retval = usb_register(&qcaux_driver);
83 if (retval)
84 usb_serial_deregister(&qcaux_device);
85 return retval;
86}
87
88static void __exit qcaux_exit(void)
89{
90 usb_deregister(&qcaux_driver);
91 usb_serial_deregister(&qcaux_device);
92}
93
94module_init(qcaux_init);
95module_exit(qcaux_exit);
96MODULE_LICENSE("GPL");
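One detail of the boilerplate above is load ordering: usb_serial_register() must succeed before usb_register(), because a probe can fire the instant the USB driver registers and expects the serial side to be ready; on failure the init unwinds in reverse. The skeleton, with hypothetical names:

	static int __init sample_init(void)
	{
		int retval;

		retval = usb_serial_register(&sample_device);	/* tty side first */
		if (retval)
			return retval;

		retval = usb_register(&sample_driver);	/* probing may start now */
		if (retval)
			usb_serial_deregister(&sample_device);	/* unwind */
		return retval;
	}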
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
index 7528b8d57f1c..310ff6ec6567 100644
--- a/drivers/usb/serial/qcserial.c
+++ b/drivers/usb/serial/qcserial.c
@@ -21,7 +21,7 @@
21 21
22static int debug; 22static int debug;
23 23
24static struct usb_device_id id_table[] = { 24static const struct usb_device_id id_table[] = {
25 {USB_DEVICE(0x05c6, 0x9211)}, /* Acer Gobi QDL device */ 25 {USB_DEVICE(0x05c6, 0x9211)}, /* Acer Gobi QDL device */
26 {USB_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */ 26 {USB_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */
27 {USB_DEVICE(0x03f0, 0x1f1d)}, /* HP un2400 Gobi Modem Device */ 27 {USB_DEVICE(0x03f0, 0x1f1d)}, /* HP un2400 Gobi Modem Device */
diff --git a/drivers/usb/serial/siemens_mpi.c b/drivers/usb/serial/siemens_mpi.c
index 951ea0c6ba77..cb8195cabfde 100644
--- a/drivers/usb/serial/siemens_mpi.c
+++ b/drivers/usb/serial/siemens_mpi.c
@@ -22,7 +22,7 @@
22#define DRIVER_DESC "Driver for Siemens USB/MPI adapter" 22#define DRIVER_DESC "Driver for Siemens USB/MPI adapter"
23 23
24 24
25static struct usb_device_id id_table[] = { 25static const struct usb_device_id id_table[] = {
26 /* Vendor and product id for 6ES7-972-0CB20-0XA0 */ 26 /* Vendor and product id for 6ES7-972-0CB20-0XA0 */
27 { USB_DEVICE(0x908, 0x0004) }, 27 { USB_DEVICE(0x908, 0x0004) },
28 { }, 28 { },
diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c
index 3eb6143bb646..34e6f894cba9 100644
--- a/drivers/usb/serial/sierra.c
+++ b/drivers/usb/serial/sierra.c
@@ -226,7 +226,7 @@ static const struct sierra_iface_info direct_ip_interface_blacklist = {
226 .ifaceinfo = direct_ip_non_serial_ifaces, 226 .ifaceinfo = direct_ip_non_serial_ifaces,
227}; 227};
228 228
229static struct usb_device_id id_table [] = { 229static const struct usb_device_id id_table[] = {
230 { USB_DEVICE(0x0F3D, 0x0112) }, /* Airprime/Sierra PC 5220 */ 230 { USB_DEVICE(0x0F3D, 0x0112) }, /* Airprime/Sierra PC 5220 */
231 { USB_DEVICE(0x03F0, 0x1B1D) }, /* HP ev2200 a.k.a MC5720 */ 231 { USB_DEVICE(0x03F0, 0x1B1D) }, /* HP ev2200 a.k.a MC5720 */
232 { USB_DEVICE(0x03F0, 0x1E1D) }, /* HP hs2300 a.k.a MC8775 */ 232 { USB_DEVICE(0x03F0, 0x1E1D) }, /* HP hs2300 a.k.a MC8775 */
@@ -304,16 +304,6 @@ static struct usb_device_id id_table [] = {
304}; 304};
305MODULE_DEVICE_TABLE(usb, id_table); 305MODULE_DEVICE_TABLE(usb, id_table);
306 306
307static struct usb_driver sierra_driver = {
308 .name = "sierra",
309 .probe = usb_serial_probe,
310 .disconnect = usb_serial_disconnect,
311 .suspend = usb_serial_suspend,
312 .resume = usb_serial_resume,
313 .id_table = id_table,
314 .no_dynamic_id = 1,
315 .supports_autosuspend = 1,
316};
317 307
318struct sierra_port_private { 308struct sierra_port_private {
319 spinlock_t lock; /* lock the structure */ 309 spinlock_t lock; /* lock the structure */
@@ -477,7 +467,7 @@ static void sierra_outdat_callback(struct urb *urb)
477static int sierra_write(struct tty_struct *tty, struct usb_serial_port *port, 467static int sierra_write(struct tty_struct *tty, struct usb_serial_port *port,
478 const unsigned char *buf, int count) 468 const unsigned char *buf, int count)
479{ 469{
480 struct sierra_port_private *portdata = usb_get_serial_port_data(port); 470 struct sierra_port_private *portdata;
481 struct sierra_intf_private *intfdata; 471 struct sierra_intf_private *intfdata;
482 struct usb_serial *serial = port->serial; 472 struct usb_serial *serial = port->serial;
483 unsigned long flags; 473 unsigned long flags;
@@ -604,14 +594,15 @@ static void sierra_indat_callback(struct urb *urb)
604 } else { 594 } else {
605 if (urb->actual_length) { 595 if (urb->actual_length) {
606 tty = tty_port_tty_get(&port->port); 596 tty = tty_port_tty_get(&port->port);
607 597 if (tty) {
608 tty_buffer_request_room(tty, urb->actual_length); 598 tty_insert_flip_string(tty, data,
609 tty_insert_flip_string(tty, data, urb->actual_length); 599 urb->actual_length);
610 tty_flip_buffer_push(tty); 600 tty_flip_buffer_push(tty);
611 601
612 tty_kref_put(tty); 602 tty_kref_put(tty);
613 usb_serial_debug_data(debug, &port->dev, __func__, 603 usb_serial_debug_data(debug, &port->dev,
614 urb->actual_length, data); 604 __func__, urb->actual_length, data);
605 }
615 } else { 606 } else {
616 dev_dbg(&port->dev, "%s: empty read urb" 607 dev_dbg(&port->dev, "%s: empty read urb"
617 " received\n", __func__); 608 " received\n", __func__);
@@ -619,10 +610,10 @@ static void sierra_indat_callback(struct urb *urb)
619 } 610 }
620 611
621 /* Resubmit urb so we continue receiving */ 612 /* Resubmit urb so we continue receiving */
622 if (port->port.count && status != -ESHUTDOWN && status != -EPERM) { 613 if (status != -ESHUTDOWN && status != -EPERM) {
623 usb_mark_last_busy(port->serial->dev); 614 usb_mark_last_busy(port->serial->dev);
624 err = usb_submit_urb(urb, GFP_ATOMIC); 615 err = usb_submit_urb(urb, GFP_ATOMIC);
625 if (err) 616 if (err && err != -EPERM)
626 dev_err(&port->dev, "resubmit read urb failed." 617 dev_err(&port->dev, "resubmit read urb failed."
627 "(%d)\n", err); 618 "(%d)\n", err);
628 } 619 }
@@ -681,11 +672,11 @@ static void sierra_instat_callback(struct urb *urb)
681 dev_dbg(&port->dev, "%s: error %d\n", __func__, status); 672 dev_dbg(&port->dev, "%s: error %d\n", __func__, status);
682 673
683 /* Resubmit urb so we continue receiving IRQ data */ 674 /* Resubmit urb so we continue receiving IRQ data */
684 if (port->port.count && status != -ESHUTDOWN && status != -ENOENT) { 675 if (status != -ESHUTDOWN && status != -ENOENT) {
685 usb_mark_last_busy(serial->dev); 676 usb_mark_last_busy(serial->dev);
686 urb->dev = serial->dev; 677 urb->dev = serial->dev;
687 err = usb_submit_urb(urb, GFP_ATOMIC); 678 err = usb_submit_urb(urb, GFP_ATOMIC);
688 if (err) 679 if (err && err != -EPERM)
689 dev_err(&port->dev, "%s: resubmit intr urb " 680 dev_err(&port->dev, "%s: resubmit intr urb "
690 "failed. (%d)\n", __func__, err); 681 "failed. (%d)\n", __func__, err);
691 } 682 }
@@ -1061,11 +1052,31 @@ static int sierra_resume(struct usb_serial *serial)
1061 1052
1062 return ec ? -EIO : 0; 1053 return ec ? -EIO : 0;
1063} 1054}
1055
1056static int sierra_reset_resume(struct usb_interface *intf)
1057{
1058 struct usb_serial *serial = usb_get_intfdata(intf);
1059 dev_err(&serial->dev->dev, "%s\n", __func__);
1060 return usb_serial_resume(intf);
1061}
1064#else 1062#else
1065#define sierra_suspend NULL 1063#define sierra_suspend NULL
1066#define sierra_resume NULL 1064#define sierra_resume NULL
1065#define sierra_reset_resume NULL
1067#endif 1066#endif
1068 1067
1068static struct usb_driver sierra_driver = {
1069 .name = "sierra",
1070 .probe = usb_serial_probe,
1071 .disconnect = usb_serial_disconnect,
1072 .suspend = usb_serial_suspend,
1073 .resume = usb_serial_resume,
1074 .reset_resume = sierra_reset_resume,
1075 .id_table = id_table,
1076 .no_dynamic_id = 1,
1077 .supports_autosuspend = 1,
1078};
1079
1069static struct usb_serial_driver sierra_device = { 1080static struct usb_serial_driver sierra_device = {
1070 .driver = { 1081 .driver = {
1071 .owner = THIS_MODULE, 1082 .owner = THIS_MODULE,
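The new reset_resume hook matters for autosuspended modems: when the device loses its power session while suspended, usbcore resets it and calls reset_resume() instead of resume(), and a driver without one is typically unbound and re-probed. Sierra's handler just logs and falls through to the normal resume path; the general shape, as a sketch:

	static int sample_reset_resume(struct usb_interface *intf)
	{
		struct usb_serial *serial = usb_get_intfdata(intf);

		dev_dbg(&serial->dev->dev, "reset-resume, revalidating\n");
		return usb_serial_resume(intf);	/* state survived the reset */
	}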
diff --git a/drivers/usb/serial/spcp8x5.c b/drivers/usb/serial/spcp8x5.c
index 1e58220403d1..5d39191e7244 100644
--- a/drivers/usb/serial/spcp8x5.c
+++ b/drivers/usb/serial/spcp8x5.c
@@ -45,7 +45,7 @@ static int debug;
45#define SPCP8x5_835_VID 0x04fc 45#define SPCP8x5_835_VID 0x04fc
46#define SPCP8x5_835_PID 0x0231 46#define SPCP8x5_835_PID 0x0231
47 47
48static struct usb_device_id id_table [] = { 48static const struct usb_device_id id_table[] = {
49 { USB_DEVICE(SPCP8x5_PHILIPS_VID , SPCP8x5_PHILIPS_PID)}, 49 { USB_DEVICE(SPCP8x5_PHILIPS_VID , SPCP8x5_PHILIPS_PID)},
50 { USB_DEVICE(SPCP8x5_INTERMATIC_VID, SPCP8x5_INTERMATIC_PID)}, 50 { USB_DEVICE(SPCP8x5_INTERMATIC_VID, SPCP8x5_INTERMATIC_PID)},
51 { USB_DEVICE(SPCP8x5_835_VID, SPCP8x5_835_PID)}, 51 { USB_DEVICE(SPCP8x5_835_VID, SPCP8x5_835_PID)},
@@ -609,7 +609,7 @@ static void spcp8x5_set_termios(struct tty_struct *tty,
609 if (i < 0) 609 if (i < 0)
610 dev_err(&port->dev, "Set UART format %#x failed (error = %d)\n", 610 dev_err(&port->dev, "Set UART format %#x failed (error = %d)\n",
611 uartdata, i); 611 uartdata, i);
612 dbg("0x21:0x40:0:0 %d\n", i); 612 dbg("0x21:0x40:0:0 %d", i);
613 613
614 if (cflag & CRTSCTS) { 614 if (cflag & CRTSCTS) {
615 /* enable hardware flow control */ 615 /* enable hardware flow control */
@@ -677,7 +677,6 @@ static void spcp8x5_read_bulk_callback(struct urb *urb)
677 struct tty_struct *tty; 677 struct tty_struct *tty;
678 unsigned char *data = urb->transfer_buffer; 678 unsigned char *data = urb->transfer_buffer;
679 unsigned long flags; 679 unsigned long flags;
680 int i;
681 int result = urb->status; 680 int result = urb->status;
682 u8 status; 681 u8 status;
683 char tty_flag; 682 char tty_flag;
@@ -687,8 +686,6 @@ static void spcp8x5_read_bulk_callback(struct urb *urb)
687 686
688 /* check the urb status */ 687 /* check the urb status */
689 if (result) { 688 if (result) {
690 if (!port->port.count)
691 return;
692 if (result == -EPROTO) { 689 if (result == -EPROTO) {
693 /* spcp8x5 mysteriously fails with -EPROTO */ 690 /* spcp8x5 mysteriously fails with -EPROTO */
694 /* reschedule the read */ 691 /* reschedule the read */
@@ -726,26 +723,20 @@ static void spcp8x5_read_bulk_callback(struct urb *urb)
726 723
727 tty = tty_port_tty_get(&port->port); 724 tty = tty_port_tty_get(&port->port);
728 if (tty && urb->actual_length) { 725 if (tty && urb->actual_length) {
729 tty_buffer_request_room(tty, urb->actual_length + 1);
730 /* overrun is special, not associated with a char */ 726 /* overrun is special, not associated with a char */
731 if (status & UART_OVERRUN_ERROR) 727 if (status & UART_OVERRUN_ERROR)
732 tty_insert_flip_char(tty, 0, TTY_OVERRUN); 728 tty_insert_flip_char(tty, 0, TTY_OVERRUN);
733 for (i = 0; i < urb->actual_length; ++i) 729 tty_insert_flip_string_fixed_flag(tty, data,
734 tty_insert_flip_char(tty, data[i], tty_flag); 730 urb->actual_length, tty_flag);
735 tty_flip_buffer_push(tty); 731 tty_flip_buffer_push(tty);
736 } 732 }
737 tty_kref_put(tty); 733 tty_kref_put(tty);
738 734
739 /* Schedule the next read _if_ we are still open */ 735 /* Schedule the next read */
740 if (port->port.count) { 736 urb->dev = port->serial->dev;
741 urb->dev = port->serial->dev; 737 result = usb_submit_urb(urb , GFP_ATOMIC);
742 result = usb_submit_urb(urb , GFP_ATOMIC); 738 if (result)
743 if (result) 739 dev_dbg(&port->dev, "failed submitting read urb %d\n", result);
744 dev_dbg(&port->dev, "failed submitting read urb %d\n",
745 result);
746 }
747
748 return;
749} 740}
750 741
751/* get data from ring buffer and then write to usb bus */ 742/* get data from ring buffer and then write to usb bus */
diff --git a/drivers/usb/serial/symbolserial.c b/drivers/usb/serial/symbolserial.c
index b282c0f2d8e5..72398888858f 100644
--- a/drivers/usb/serial/symbolserial.c
+++ b/drivers/usb/serial/symbolserial.c
@@ -21,7 +21,7 @@
21 21
22static int debug; 22static int debug;
23 23
24static struct usb_device_id id_table[] = { 24static const struct usb_device_id id_table[] = {
25 { USB_DEVICE(0x05e0, 0x0600) }, 25 { USB_DEVICE(0x05e0, 0x0600) },
26 { }, 26 { },
27}; 27};
@@ -51,7 +51,6 @@ static void symbol_int_callback(struct urb *urb)
51 int status = urb->status; 51 int status = urb->status;
52 struct tty_struct *tty; 52 struct tty_struct *tty;
53 int result; 53 int result;
54 int available_room = 0;
55 int data_length; 54 int data_length;
56 55
57 dbg("%s - port %d", __func__, port->number); 56 dbg("%s - port %d", __func__, port->number);
@@ -89,13 +88,8 @@ static void symbol_int_callback(struct urb *urb)
89 */ 88 */
90 tty = tty_port_tty_get(&port->port); 89 tty = tty_port_tty_get(&port->port);
91 if (tty) { 90 if (tty) {
92 available_room = tty_buffer_request_room(tty, 91 tty_insert_flip_string(tty, &data[1], data_length);
93 data_length); 92 tty_flip_buffer_push(tty);
94 if (available_room) {
95 tty_insert_flip_string(tty, &data[1],
96 available_room);
97 tty_flip_buffer_push(tty);
98 }
99 tty_kref_put(tty); 93 tty_kref_put(tty);
100 } 94 }
101 } else { 95 } else {
diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
index 1e9dc8821698..0afe5c71c17e 100644
--- a/drivers/usb/serial/ti_usb_3410_5052.c
+++ b/drivers/usb/serial/ti_usb_3410_5052.c
@@ -1271,14 +1271,13 @@ static void ti_recv(struct device *dev, struct tty_struct *tty,
1271 int cnt; 1271 int cnt;
1272 1272
1273 do { 1273 do {
1274 cnt = tty_buffer_request_room(tty, length); 1274 cnt = tty_insert_flip_string(tty, data, length);
1275 if (cnt < length) { 1275 if (cnt < length) {
1276 dev_err(dev, "%s - dropping data, %d bytes lost\n", 1276 dev_err(dev, "%s - dropping data, %d bytes lost\n",
1277 __func__, length - cnt); 1277 __func__, length - cnt);
1278 if (cnt == 0) 1278 if (cnt == 0)
1279 break; 1279 break;
1280 } 1280 }
1281 tty_insert_flip_string(tty, data, cnt);
1282 tty_flip_buffer_push(tty); 1281 tty_flip_buffer_push(tty);
1283 data += cnt; 1282 data += cnt;
1284 length -= cnt; 1283 length -= cnt;
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
index 33c85f7084f8..3873660d8217 100644
--- a/drivers/usb/serial/usb-serial.c
+++ b/drivers/usb/serial/usb-serial.c
@@ -358,10 +358,6 @@ static int serial_write(struct tty_struct *tty, const unsigned char *buf,
358 358
359 dbg("%s - port %d, %d byte(s)", __func__, port->number, count); 359 dbg("%s - port %d, %d byte(s)", __func__, port->number, count);
360 360
361 /* count is managed under the mutex lock for the tty so cannot
362 drop to zero until after the last close completes */
363 WARN_ON(!port->port.count);
364
365 /* pass on to the driver specific version of this function */ 361 /* pass on to the driver specific version of this function */
366 retval = port->serial->type->write(tty, port, buf, count); 362 retval = port->serial->type->write(tty, port, buf, count);
367 363
@@ -373,7 +369,6 @@ static int serial_write_room(struct tty_struct *tty)
373{ 369{
374 struct usb_serial_port *port = tty->driver_data; 370 struct usb_serial_port *port = tty->driver_data;
375 dbg("%s - port %d", __func__, port->number); 371 dbg("%s - port %d", __func__, port->number);
376 WARN_ON(!port->port.count);
377 /* pass on to the driver specific version of this function */ 372 /* pass on to the driver specific version of this function */
378 return port->serial->type->write_room(tty); 373 return port->serial->type->write_room(tty);
379} 374}
@@ -381,7 +376,7 @@ static int serial_write_room(struct tty_struct *tty)
381static int serial_chars_in_buffer(struct tty_struct *tty) 376static int serial_chars_in_buffer(struct tty_struct *tty)
382{ 377{
383 struct usb_serial_port *port = tty->driver_data; 378 struct usb_serial_port *port = tty->driver_data;
384 dbg("%s = port %d", __func__, port->number); 379 dbg("%s - port %d", __func__, port->number);
385 380
386 /* if the device was unplugged then any remaining characters 381 /* if the device was unplugged then any remaining characters
387 fell out of the connector ;) */ 382 fell out of the connector ;) */
@@ -396,7 +391,6 @@ static void serial_throttle(struct tty_struct *tty)
396 struct usb_serial_port *port = tty->driver_data; 391 struct usb_serial_port *port = tty->driver_data;
397 dbg("%s - port %d", __func__, port->number); 392 dbg("%s - port %d", __func__, port->number);
398 393
399 WARN_ON(!port->port.count);
400 /* pass on to the driver specific version of this function */ 394 /* pass on to the driver specific version of this function */
401 if (port->serial->type->throttle) 395 if (port->serial->type->throttle)
402 port->serial->type->throttle(tty); 396 port->serial->type->throttle(tty);
@@ -407,7 +401,6 @@ static void serial_unthrottle(struct tty_struct *tty)
407 struct usb_serial_port *port = tty->driver_data; 401 struct usb_serial_port *port = tty->driver_data;
408 dbg("%s - port %d", __func__, port->number); 402 dbg("%s - port %d", __func__, port->number);
409 403
410 WARN_ON(!port->port.count);
411 /* pass on to the driver specific version of this function */ 404 /* pass on to the driver specific version of this function */
412 if (port->serial->type->unthrottle) 405 if (port->serial->type->unthrottle)
413 port->serial->type->unthrottle(tty); 406 port->serial->type->unthrottle(tty);
@@ -421,8 +414,6 @@ static int serial_ioctl(struct tty_struct *tty, struct file *file,
421 414
422 dbg("%s - port %d, cmd 0x%.4x", __func__, port->number, cmd); 415 dbg("%s - port %d, cmd 0x%.4x", __func__, port->number, cmd);
423 416
424 WARN_ON(!port->port.count);
425
426 /* pass on to the driver specific version of this function 417 /* pass on to the driver specific version of this function
427 if it is available */ 418 if it is available */
428 if (port->serial->type->ioctl) { 419 if (port->serial->type->ioctl) {
@@ -437,7 +428,6 @@ static void serial_set_termios(struct tty_struct *tty, struct ktermios *old)
437 struct usb_serial_port *port = tty->driver_data; 428 struct usb_serial_port *port = tty->driver_data;
438 dbg("%s - port %d", __func__, port->number); 429 dbg("%s - port %d", __func__, port->number);
439 430
440 WARN_ON(!port->port.count);
441 /* pass on to the driver specific version of this function 431 /* pass on to the driver specific version of this function
442 if it is available */ 432 if it is available */
443 if (port->serial->type->set_termios) 433 if (port->serial->type->set_termios)
@@ -452,7 +442,6 @@ static int serial_break(struct tty_struct *tty, int break_state)
452 442
453 dbg("%s - port %d", __func__, port->number); 443 dbg("%s - port %d", __func__, port->number);
454 444
455 WARN_ON(!port->port.count);
456 /* pass on to the driver specific version of this function 445 /* pass on to the driver specific version of this function
457 if it is available */ 446 if it is available */
458 if (port->serial->type->break_ctl) 447 if (port->serial->type->break_ctl)
@@ -513,7 +502,6 @@ static int serial_tiocmget(struct tty_struct *tty, struct file *file)
513 502
514 dbg("%s - port %d", __func__, port->number); 503 dbg("%s - port %d", __func__, port->number);
515 504
516 WARN_ON(!port->port.count);
517 if (port->serial->type->tiocmget) 505 if (port->serial->type->tiocmget)
518 return port->serial->type->tiocmget(tty, file); 506 return port->serial->type->tiocmget(tty, file);
519 return -EINVAL; 507 return -EINVAL;
@@ -526,7 +514,6 @@ static int serial_tiocmset(struct tty_struct *tty, struct file *file,
526 514
527 dbg("%s - port %d", __func__, port->number); 515 dbg("%s - port %d", __func__, port->number);
528 516
529 WARN_ON(!port->port.count);
530 if (port->serial->type->tiocmset) 517 if (port->serial->type->tiocmset)
531 return port->serial->type->tiocmset(tty, file, set, clear); 518 return port->serial->type->tiocmset(tty, file, set, clear);
532 return -EINVAL; 519 return -EINVAL;
diff --git a/drivers/usb/serial/usb_debug.c b/drivers/usb/serial/usb_debug.c
index 7b5bfc4edd3d..252cc2d993b2 100644
--- a/drivers/usb/serial/usb_debug.c
+++ b/drivers/usb/serial/usb_debug.c
@@ -29,7 +29,7 @@ static char USB_DEBUG_BRK[USB_DEBUG_BRK_SIZE] = {
29 0xff, 29 0xff,
30}; 30};
31 31
32static struct usb_device_id id_table [] = { 32static const struct usb_device_id id_table[] = {
33 { USB_DEVICE(0x0525, 0x127a) }, 33 { USB_DEVICE(0x0525, 0x127a) },
34 { }, 34 { },
35}; 35};
diff --git a/drivers/usb/serial/visor.c b/drivers/usb/serial/visor.c
index ad1f9232292d..094942707c7d 100644
--- a/drivers/usb/serial/visor.c
+++ b/drivers/usb/serial/visor.c
@@ -368,7 +368,7 @@ static int visor_write(struct tty_struct *tty, struct usb_serial_port *port,
368 spin_lock_irqsave(&priv->lock, flags); 368 spin_lock_irqsave(&priv->lock, flags);
369 if (priv->outstanding_urbs > URB_UPPER_LIMIT) { 369 if (priv->outstanding_urbs > URB_UPPER_LIMIT) {
370 spin_unlock_irqrestore(&priv->lock, flags); 370 spin_unlock_irqrestore(&priv->lock, flags);
371 dbg("%s - write limit hit\n", __func__); 371 dbg("%s - write limit hit", __func__);
372 return 0; 372 return 0;
373 } 373 }
374 priv->outstanding_urbs++; 374 priv->outstanding_urbs++;
@@ -446,7 +446,7 @@ static int visor_write_room(struct tty_struct *tty)
446 spin_lock_irqsave(&priv->lock, flags); 446 spin_lock_irqsave(&priv->lock, flags);
447 if (priv->outstanding_urbs > URB_UPPER_LIMIT * 2 / 3) { 447 if (priv->outstanding_urbs > URB_UPPER_LIMIT * 2 / 3) {
448 spin_unlock_irqrestore(&priv->lock, flags); 448 spin_unlock_irqrestore(&priv->lock, flags);
449 dbg("%s - write limit hit\n", __func__); 449 dbg("%s - write limit hit", __func__);
450 return 0; 450 return 0;
451 } 451 }
452 spin_unlock_irqrestore(&priv->lock, flags); 452 spin_unlock_irqrestore(&priv->lock, flags);
@@ -503,13 +503,9 @@ static void visor_read_bulk_callback(struct urb *urb)
503 if (urb->actual_length) { 503 if (urb->actual_length) {
504 tty = tty_port_tty_get(&port->port); 504 tty = tty_port_tty_get(&port->port);
505 if (tty) { 505 if (tty) {
506 available_room = tty_buffer_request_room(tty, 506 tty_insert_flip_string(tty, data,
507 urb->actual_length); 507 urb->actual_length);
508 if (available_room) { 508 tty_flip_buffer_push(tty);
509 tty_insert_flip_string(tty, data,
510 available_room);
511 tty_flip_buffer_push(tty);
512 }
513 tty_kref_put(tty); 509 tty_kref_put(tty);
514 } 510 }
515 spin_lock(&priv->lock); 511 spin_lock(&priv->lock);
@@ -807,10 +803,14 @@ static int clie_3_5_startup(struct usb_serial *serial)
807{ 803{
808 struct device *dev = &serial->dev->dev; 804 struct device *dev = &serial->dev->dev;
809 int result; 805 int result;
810 u8 data; 806 u8 *data;
811 807
812 dbg("%s", __func__); 808 dbg("%s", __func__);
813 809
810 data = kmalloc(1, GFP_KERNEL);
811 if (!data)
812 return -ENOMEM;
813
814 /* 814 /*
815 * Note that PEG-300 series devices expect the following two calls. 815 * Note that PEG-300 series devices expect the following two calls.
816 */ 816 */
@@ -818,36 +818,42 @@ static int clie_3_5_startup(struct usb_serial *serial)
818 /* get the config number */ 818 /* get the config number */
819 result = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0), 819 result = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
820 USB_REQ_GET_CONFIGURATION, USB_DIR_IN, 820 USB_REQ_GET_CONFIGURATION, USB_DIR_IN,
821 0, 0, &data, 1, 3000); 821 0, 0, data, 1, 3000);
822 if (result < 0) { 822 if (result < 0) {
823 dev_err(dev, "%s: get config number failed: %d\n", 823 dev_err(dev, "%s: get config number failed: %d\n",
824 __func__, result); 824 __func__, result);
825 return result; 825 goto out;
826 } 826 }
827 if (result != 1) { 827 if (result != 1) {
828 dev_err(dev, "%s: get config number bad return length: %d\n", 828 dev_err(dev, "%s: get config number bad return length: %d\n",
829 __func__, result); 829 __func__, result);
830 return -EIO; 830 result = -EIO;
831 goto out;
831 } 832 }
832 833
833 /* get the interface number */ 834 /* get the interface number */
834 result = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0), 835 result = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
835 USB_REQ_GET_INTERFACE, 836 USB_REQ_GET_INTERFACE,
836 USB_DIR_IN | USB_RECIP_INTERFACE, 837 USB_DIR_IN | USB_RECIP_INTERFACE,
837 0, 0, &data, 1, 3000); 838 0, 0, data, 1, 3000);
838 if (result < 0) { 839 if (result < 0) {
839 dev_err(dev, "%s: get interface number failed: %d\n", 840 dev_err(dev, "%s: get interface number failed: %d\n",
840 __func__, result); 841 __func__, result);
841 return result; 842 goto out;
842 } 843 }
843 if (result != 1) { 844 if (result != 1) {
844 dev_err(dev, 845 dev_err(dev,
845 "%s: get interface number bad return length: %d\n", 846 "%s: get interface number bad return length: %d\n",
846 __func__, result); 847 __func__, result);
847 return -EIO; 848 result = -EIO;
849 goto out;
848 } 850 }
849 851
850 return generic_startup(serial); 852 result = generic_startup(serial);
853out:
854 kfree(data);
855
856 return result;
851} 857}
852 858
853static int treo_attach(struct usb_serial *serial) 859static int treo_attach(struct usb_serial *serial)
diff --git a/drivers/usb/serial/vivopay-serial.c b/drivers/usb/serial/vivopay-serial.c
new file mode 100644
index 000000000000..f719d00972fc
--- /dev/null
+++ b/drivers/usb/serial/vivopay-serial.c
@@ -0,0 +1,76 @@
1/*
2 * Copyright (C) 2001-2005 Greg Kroah-Hartman (greg@kroah.com)
3 * Copyright (C) 2009 Outpost Embedded, LLC
4 */
5
6#include <linux/kernel.h>
7#include <linux/init.h>
8#include <linux/tty.h>
9#include <linux/module.h>
10#include <linux/usb.h>
11#include <linux/usb/serial.h>
12
13
14#define DRIVER_VERSION "v1.0"
15#define DRIVER_DESC "ViVOpay USB Serial Driver"
16
17#define VIVOPAY_VENDOR_ID 0x1d5f
18
19
20static struct usb_device_id id_table [] = {
21 /* ViVOpay 8800 */
22 { USB_DEVICE(VIVOPAY_VENDOR_ID, 0x1004) },
23 { },
24};
25
26MODULE_DEVICE_TABLE(usb, id_table);
27
28static struct usb_driver vivopay_serial_driver = {
29 .name = "vivopay-serial",
30 .probe = usb_serial_probe,
31 .disconnect = usb_serial_disconnect,
32 .id_table = id_table,
33 .no_dynamic_id = 1,
34};
35
36static struct usb_serial_driver vivopay_serial_device = {
37 .driver = {
38 .owner = THIS_MODULE,
39 .name = "vivopay-serial",
40 },
41 .id_table = id_table,
42 .usb_driver = &vivopay_serial_driver,
43 .num_ports = 1,
44};
45
46static int __init vivopay_serial_init(void)
47{
48 int retval;
49 retval = usb_serial_register(&vivopay_serial_device);
50 if (retval)
51 goto failed_usb_serial_register;
52 retval = usb_register(&vivopay_serial_driver);
53 if (retval)
54 goto failed_usb_register;
55 printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
56 DRIVER_DESC "\n");
57 return 0;
58failed_usb_register:
59 usb_serial_deregister(&vivopay_serial_device);
60failed_usb_serial_register:
61 return retval;
62}
63
64static void __exit vivopay_serial_exit(void)
65{
66 usb_deregister(&vivopay_serial_driver);
67 usb_serial_deregister(&vivopay_serial_device);
68}
69
70module_init(vivopay_serial_init);
71module_exit(vivopay_serial_exit);
72
73MODULE_AUTHOR("Forest Bond <forest.bond@outpostembedded.com>");
74MODULE_DESCRIPTION(DRIVER_DESC);
75MODULE_VERSION(DRIVER_VERSION);
76MODULE_LICENSE("GPL");
diff --git a/drivers/usb/serial/whiteheat.c b/drivers/usb/serial/whiteheat.c
index 1093d2eb046a..12ed8209ca72 100644
--- a/drivers/usb/serial/whiteheat.c
+++ b/drivers/usb/serial/whiteheat.c
@@ -111,17 +111,17 @@ static int debug;
111 separate ID tables, and then a third table that combines them 111 separate ID tables, and then a third table that combines them
112 just for the purpose of exporting the autoloading information. 112 just for the purpose of exporting the autoloading information.
113*/ 113*/
114static struct usb_device_id id_table_std [] = { 114static const struct usb_device_id id_table_std[] = {
115 { USB_DEVICE(CONNECT_TECH_VENDOR_ID, CONNECT_TECH_WHITE_HEAT_ID) }, 115 { USB_DEVICE(CONNECT_TECH_VENDOR_ID, CONNECT_TECH_WHITE_HEAT_ID) },
116 { } /* Terminating entry */ 116 { } /* Terminating entry */
117}; 117};
118 118
119static struct usb_device_id id_table_prerenumeration [] = { 119static const struct usb_device_id id_table_prerenumeration[] = {
120 { USB_DEVICE(CONNECT_TECH_VENDOR_ID, CONNECT_TECH_FAKE_WHITE_HEAT_ID) }, 120 { USB_DEVICE(CONNECT_TECH_VENDOR_ID, CONNECT_TECH_FAKE_WHITE_HEAT_ID) },
121 { } /* Terminating entry */ 121 { } /* Terminating entry */
122}; 122};
123 123
124static struct usb_device_id id_table_combined [] = { 124static const struct usb_device_id id_table_combined[] = {
125 { USB_DEVICE(CONNECT_TECH_VENDOR_ID, CONNECT_TECH_WHITE_HEAT_ID) }, 125 { USB_DEVICE(CONNECT_TECH_VENDOR_ID, CONNECT_TECH_WHITE_HEAT_ID) },
126 { USB_DEVICE(CONNECT_TECH_VENDOR_ID, CONNECT_TECH_FAKE_WHITE_HEAT_ID) }, 126 { USB_DEVICE(CONNECT_TECH_VENDOR_ID, CONNECT_TECH_FAKE_WHITE_HEAT_ID) },
127 { } /* Terminating entry */ 127 { } /* Terminating entry */
@@ -1492,21 +1492,9 @@ static void rx_data_softint(struct work_struct *work)
1492 wrap = list_entry(tmp, struct whiteheat_urb_wrap, list); 1492 wrap = list_entry(tmp, struct whiteheat_urb_wrap, list);
1493 urb = wrap->urb; 1493 urb = wrap->urb;
1494 1494
1495 if (tty && urb->actual_length) { 1495 if (tty && urb->actual_length)
1496 int len = tty_buffer_request_room(tty, 1496 sent += tty_insert_flip_string(tty,
1497 urb->actual_length); 1497 urb->transfer_buffer, urb->actual_length);
1498 /* This stuff can go away now I suspect */
1499 if (unlikely(len < urb->actual_length)) {
1500 spin_lock_irqsave(&info->lock, flags);
1501 list_add(tmp, &info->rx_urb_q);
1502 spin_unlock_irqrestore(&info->lock, flags);
1503 tty_flip_buffer_push(tty);
1504 schedule_work(&info->rx_work);
1505 goto out;
1506 }
1507 tty_insert_flip_string(tty, urb->transfer_buffer, len);
1508 sent += len;
1509 }
1510 1498
1511 urb->dev = port->serial->dev; 1499 urb->dev = port->serial->dev;
1512 result = usb_submit_urb(urb, GFP_ATOMIC); 1500 result = usb_submit_urb(urb, GFP_ATOMIC);
diff --git a/drivers/usb/storage/onetouch.c b/drivers/usb/storage/onetouch.c
index 80e65f29921c..198bb3ed95b2 100644
--- a/drivers/usb/storage/onetouch.c
+++ b/drivers/usb/storage/onetouch.c
@@ -202,7 +202,7 @@ static int onetouch_connect_input(struct us_data *ss)
202 goto fail1; 202 goto fail1;
203 203
204 onetouch->data = usb_buffer_alloc(udev, ONETOUCH_PKT_LEN, 204 onetouch->data = usb_buffer_alloc(udev, ONETOUCH_PKT_LEN,
205 GFP_ATOMIC, &onetouch->data_dma); 205 GFP_KERNEL, &onetouch->data_dma);
206 if (!onetouch->data) 206 if (!onetouch->data)
207 goto fail1; 207 goto fail1;
208 208
diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c
index aadc16b5eed7..4cc035562cc2 100644
--- a/drivers/usb/storage/scsiglue.c
+++ b/drivers/usb/storage/scsiglue.c
@@ -133,7 +133,7 @@ static int slave_configure(struct scsi_device *sdev)
133 133
134 if (us->fflags & US_FL_MAX_SECTORS_MIN) 134 if (us->fflags & US_FL_MAX_SECTORS_MIN)
135 max_sectors = PAGE_CACHE_SIZE >> 9; 135 max_sectors = PAGE_CACHE_SIZE >> 9;
136 if (queue_max_sectors(sdev->request_queue) > max_sectors) 136 if (queue_max_hw_sectors(sdev->request_queue) > max_sectors)
137 blk_queue_max_hw_sectors(sdev->request_queue, 137 blk_queue_max_hw_sectors(sdev->request_queue,
138 max_sectors); 138 max_sectors);
139 } else if (sdev->type == TYPE_TAPE) { 139 } else if (sdev->type == TYPE_TAPE) {
@@ -484,7 +484,7 @@ static ssize_t show_max_sectors(struct device *dev, struct device_attribute *att
484{ 484{
485 struct scsi_device *sdev = to_scsi_device(dev); 485 struct scsi_device *sdev = to_scsi_device(dev);
486 486
487 return sprintf(buf, "%u\n", queue_max_sectors(sdev->request_queue)); 487 return sprintf(buf, "%u\n", queue_max_hw_sectors(sdev->request_queue));
488} 488}
489 489
490/* Input routine for the sysfs max_sectors file */ 490/* Input routine for the sysfs max_sectors file */
@@ -494,9 +494,9 @@ static ssize_t store_max_sectors(struct device *dev, struct device_attribute *at
494 struct scsi_device *sdev = to_scsi_device(dev); 494 struct scsi_device *sdev = to_scsi_device(dev);
495 unsigned short ms; 495 unsigned short ms;
496 496
497 if (sscanf(buf, "%hu", &ms) > 0 && ms <= SCSI_DEFAULT_MAX_SECTORS) { 497 if (sscanf(buf, "%hu", &ms) > 0) {
498 blk_queue_max_hw_sectors(sdev->request_queue, ms); 498 blk_queue_max_hw_sectors(sdev->request_queue, ms);
499 return strlen(buf); 499 return count;
500 } 500 }
501 return -EINVAL; 501 return -EINVAL;
502} 502}
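Two fixes share this hunk. Returning count instead of strlen(buf) follows the sysfs convention: a store handler that claims fewer bytes than were written makes the VFS retry the unconsumed tail, re-invoking the handler. And dropping the SCSI_DEFAULT_MAX_SECTORS comparison lets users raise max_sectors past the old arbitrary cap. A conforming store handler, schematically:

	static ssize_t sample_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf, size_t count)
	{
		unsigned short ms;

		if (sscanf(buf, "%hu", &ms) != 1)
			return -EINVAL;

		blk_queue_max_hw_sectors(to_scsi_device(dev)->request_queue, ms);
		return count;	/* claim the whole write; no VFS retry */
	}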
@@ -539,7 +539,7 @@ struct scsi_host_template usb_stor_host_template = {
539 .slave_configure = slave_configure, 539 .slave_configure = slave_configure,
540 540
541 /* lots of sg segments can be handled */ 541 /* lots of sg segments can be handled */
542 .sg_tablesize = SG_ALL, 542 .sg_tablesize = SCSI_MAX_SG_CHAIN_SEGMENTS,
543 543
544 /* limit the total size of a transfer to 120 KB */ 544 /* limit the total size of a transfer to 120 KB */
545 .max_sectors = 240, 545 .max_sectors = 240,
diff --git a/drivers/usb/storage/shuttle_usbat.c b/drivers/usb/storage/shuttle_usbat.c
index b62a28814ebe..bd3f415893d8 100644
--- a/drivers/usb/storage/shuttle_usbat.c
+++ b/drivers/usb/storage/shuttle_usbat.c
@@ -1628,10 +1628,10 @@ static int usbat_hp8200e_transport(struct scsi_cmnd *srb, struct us_data *us)
1628 return USB_STOR_TRANSPORT_ERROR; 1628 return USB_STOR_TRANSPORT_ERROR;
1629 } 1629 }
1630 1630
1631 if ( (result = usbat_multiple_write(us, 1631 result = usbat_multiple_write(us, registers, data, 7);
1632 registers, data, 7)) != USB_STOR_TRANSPORT_GOOD) { 1632
1633 if (result != USB_STOR_TRANSPORT_GOOD)
1633 return result; 1634 return result;
1634 }
1635 1635
1636 /* 1636 /*
1637 * Write the 12-byte command header. 1637 * Write the 12-byte command header.
@@ -1643,12 +1643,11 @@ static int usbat_hp8200e_transport(struct scsi_cmnd *srb, struct us_data *us)
1643 * AT SPEED 4 IS UNRELIABLE!!! 1643 * AT SPEED 4 IS UNRELIABLE!!!
1644 */ 1644 */
1645 1645
1646 if ((result = usbat_write_block(us, 1646 result = usbat_write_block(us, USBAT_ATA, srb->cmnd, 12,
1647 USBAT_ATA, srb->cmnd, 12, 1647 srb->cmnd[0] == GPCMD_BLANK ? 75 : 10, 0);
1648 (srb->cmnd[0]==GPCMD_BLANK ? 75 : 10), 0) != 1648
1649 USB_STOR_TRANSPORT_GOOD)) { 1649 if (result != USB_STOR_TRANSPORT_GOOD)
1650 return result; 1650 return result;
1651 }
1652 1651
1653 /* If there is response data to be read in then do it here. */ 1652 /* If there is response data to be read in then do it here. */
1654 1653
diff --git a/drivers/usb/storage/transport.c b/drivers/usb/storage/transport.c
index cc313d16d727..468038126e5e 100644
--- a/drivers/usb/storage/transport.c
+++ b/drivers/usb/storage/transport.c
@@ -47,6 +47,8 @@
47#include <linux/errno.h> 47#include <linux/errno.h>
48#include <linux/slab.h> 48#include <linux/slab.h>
49 49
50#include <linux/usb/quirks.h>
51
50#include <scsi/scsi.h> 52#include <scsi/scsi.h>
51#include <scsi/scsi_eh.h> 53#include <scsi/scsi_eh.h>
52#include <scsi/scsi_device.h> 54#include <scsi/scsi_device.h>
@@ -1297,6 +1299,10 @@ int usb_stor_port_reset(struct us_data *us)
1297{ 1299{
1298 int result; 1300 int result;
1299 1301
1302 /* for these devices we must use the class-specific method */
1303 if (us->pusb_dev->quirks & USB_QUIRK_RESET_MORPHS)
1304 return -EPERM;
1305
1300 result = usb_lock_device_for_reset(us->pusb_dev, us->pusb_intf); 1306 result = usb_lock_device_for_reset(us->pusb_dev, us->pusb_intf);
1301 if (result < 0) 1307 if (result < 0)
1302 US_DEBUGP("unable to lock device for reset: %d\n", result); 1308 US_DEBUGP("unable to lock device for reset: %d\n", result);
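USB_QUIRK_RESET_MORPHS marks devices that change identity after a port reset (these Option modems re-enumerate as different devices), so a hard reset would leave the bound driver talking to the wrong hardware; returning -EPERM steers recovery to class-specific methods instead. The quirk is declared per device in usbcore's quirks table, along these lines (entry illustrative):

	/* drivers/usb/core/quirks.c */
	{ USB_DEVICE(0x0af0, 0x7401),	/* illustrative Option device */
	  .driver_info = USB_QUIRK_RESET_MORPHS },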
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index 49575fba3756..98b549b1cab2 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -1147,8 +1147,8 @@ UNUSUAL_DEV( 0x0af0, 0x7401, 0x0000, 0x0000,
1147 0 ), 1147 0 ),
1148 1148
1149/* Reported by Jan Dumon <j.dumon@option.com> 1149/* Reported by Jan Dumon <j.dumon@option.com>
1150 * This device (wrongly) has a vendor-specific device descriptor. 1150 * These devices (wrongly) have a vendor-specific device descriptor.
1151 * The entry is needed so usb-storage can bind to it's mass-storage 1151 * These entries are needed so usb-storage can bind to their mass-storage
1152 * interface as an interface driver */ 1152 * interface as an interface driver */
1153UNUSUAL_DEV( 0x0af0, 0x7501, 0x0000, 0x0000, 1153UNUSUAL_DEV( 0x0af0, 0x7501, 0x0000, 0x0000,
1154 "Option", 1154 "Option",
@@ -1156,6 +1156,90 @@ UNUSUAL_DEV( 0x0af0, 0x7501, 0x0000, 0x0000,
1156 US_SC_DEVICE, US_PR_DEVICE, NULL, 1156 US_SC_DEVICE, US_PR_DEVICE, NULL,
1157 0 ), 1157 0 ),
1158 1158
1159UNUSUAL_DEV( 0x0af0, 0x7701, 0x0000, 0x0000,
1160 "Option",
1161 "GI 0451 SD-Card",
1162 US_SC_DEVICE, US_PR_DEVICE, NULL,
1163 0 ),
1164
1165UNUSUAL_DEV( 0x0af0, 0x7706, 0x0000, 0x0000,
1166 "Option",
1167 "GI 0451 SD-Card",
1168 US_SC_DEVICE, US_PR_DEVICE, NULL,
1169 0 ),
1170
1171UNUSUAL_DEV( 0x0af0, 0x7901, 0x0000, 0x0000,
1172 "Option",
1173 "GI 0452 SD-Card",
1174 US_SC_DEVICE, US_PR_DEVICE, NULL,
1175 0 ),
1176
1177UNUSUAL_DEV( 0x0af0, 0x7A01, 0x0000, 0x0000,
1178 "Option",
1179 "GI 0461 SD-Card",
1180 US_SC_DEVICE, US_PR_DEVICE, NULL,
1181 0 ),
1182
1183UNUSUAL_DEV( 0x0af0, 0x7A05, 0x0000, 0x0000,
1184 "Option",
1185 "GI 0461 SD-Card",
1186 US_SC_DEVICE, US_PR_DEVICE, NULL,
1187 0 ),
1188
1189UNUSUAL_DEV( 0x0af0, 0x8300, 0x0000, 0x0000,
1190 "Option",
1191 "GI 033x SD-Card",
1192 US_SC_DEVICE, US_PR_DEVICE, NULL,
1193 0 ),
1194
1195UNUSUAL_DEV( 0x0af0, 0x8302, 0x0000, 0x0000,
1196 "Option",
1197 "GI 033x SD-Card",
1198 US_SC_DEVICE, US_PR_DEVICE, NULL,
1199 0 ),
1200
1201UNUSUAL_DEV( 0x0af0, 0x8304, 0x0000, 0x0000,
1202 "Option",
1203 "GI 033x SD-Card",
1204 US_SC_DEVICE, US_PR_DEVICE, NULL,
1205 0 ),
1206
1207UNUSUAL_DEV( 0x0af0, 0xc100, 0x0000, 0x0000,
1208 "Option",
1209 "GI 070x SD-Card",
1210 US_SC_DEVICE, US_PR_DEVICE, NULL,
1211 0 ),
1212
1213UNUSUAL_DEV( 0x0af0, 0xd057, 0x0000, 0x0000,
1214 "Option",
1215 "GI 1505 SD-Card",
1216 US_SC_DEVICE, US_PR_DEVICE, NULL,
1217 0 ),
1218
1219UNUSUAL_DEV( 0x0af0, 0xd058, 0x0000, 0x0000,
1220 "Option",
1221 "GI 1509 SD-Card",
1222 US_SC_DEVICE, US_PR_DEVICE, NULL,
1223 0 ),
1224
1225UNUSUAL_DEV( 0x0af0, 0xd157, 0x0000, 0x0000,
1226 "Option",
1227 "GI 1515 SD-Card",
1228 US_SC_DEVICE, US_PR_DEVICE, NULL,
1229 0 ),
1230
1231UNUSUAL_DEV( 0x0af0, 0xd257, 0x0000, 0x0000,
1232 "Option",
1233 "GI 1215 SD-Card",
1234 US_SC_DEVICE, US_PR_DEVICE, NULL,
1235 0 ),
1236
1237UNUSUAL_DEV( 0x0af0, 0xd357, 0x0000, 0x0000,
1238 "Option",
1239 "GI 1505 SD-Card",
1240 US_SC_DEVICE, US_PR_DEVICE, NULL,
1241 0 ),
1242
1159/* Reported by Ben Efros <ben@pc-doctor.com> */ 1243/* Reported by Ben Efros <ben@pc-doctor.com> */
1160UNUSUAL_DEV( 0x0bc2, 0x3010, 0x0000, 0x0000, 1244UNUSUAL_DEV( 0x0bc2, 0x3010, 0x0000, 0x0000,
1161 "Seagate", 1245 "Seagate",
diff --git a/drivers/usb/usb-skeleton.c b/drivers/usb/usb-skeleton.c
index b1e579c5c97c..61522787f39c 100644
--- a/drivers/usb/usb-skeleton.c
+++ b/drivers/usb/usb-skeleton.c
@@ -28,7 +28,7 @@
28#define USB_SKEL_PRODUCT_ID 0xfff0 28#define USB_SKEL_PRODUCT_ID 0xfff0
29 29
30/* table of devices that work with this driver */ 30/* table of devices that work with this driver */
31static struct usb_device_id skel_table[] = { 31static const struct usb_device_id skel_table[] = {
32 { USB_DEVICE(USB_SKEL_VENDOR_ID, USB_SKEL_PRODUCT_ID) }, 32 { USB_DEVICE(USB_SKEL_VENDOR_ID, USB_SKEL_PRODUCT_ID) },
33 { } /* Terminating entry */ 33 { } /* Terminating entry */
34}; 34};
diff --git a/drivers/usb/wusbcore/cbaf.c b/drivers/usb/wusbcore/cbaf.c
index 25eae405f622..51a8e0d5789d 100644
--- a/drivers/usb/wusbcore/cbaf.c
+++ b/drivers/usb/wusbcore/cbaf.c
@@ -641,7 +641,7 @@ static void cbaf_disconnect(struct usb_interface *iface)
641 kzfree(cbaf); 641 kzfree(cbaf);
642} 642}
643 643
644static struct usb_device_id cbaf_id_table[] = { 644static const struct usb_device_id cbaf_id_table[] = {
645 { USB_INTERFACE_INFO(0xef, 0x03, 0x01), }, 645 { USB_INTERFACE_INFO(0xef, 0x03, 0x01), },
646 { }, 646 { },
647}; 647};
diff --git a/drivers/usb/wusbcore/devconnect.c b/drivers/usb/wusbcore/devconnect.c
index dced419f7aba..1c918286159c 100644
--- a/drivers/usb/wusbcore/devconnect.c
+++ b/drivers/usb/wusbcore/devconnect.c
@@ -868,7 +868,7 @@ static struct usb_wireless_cap_descriptor wusb_cap_descr_default = {
868 * reference that we'll drop. 868 * reference that we'll drop.
869 * 869 *
870 * First we need to determine if the device is a WUSB device (else we 870 * First we need to determine if the device is a WUSB device (else we
871 * ignore it). For that we use the speed setting (USB_SPEED_VARIABLE) 871 * ignore it). For that we use the speed setting (USB_SPEED_WIRELESS)
872 * [FIXME: maybe we'd need something more definitive]. If so, we track 872 * [FIXME: maybe we'd need something more definitive]. If so, we track
873 * its usb_bus and from there, the WUSB HC. 873 * its usb_bus and from there, the WUSB HC.
874 * 874 *
diff --git a/drivers/usb/wusbcore/mmc.c b/drivers/usb/wusbcore/mmc.c
index 3b52161e6e9c..2d827397e30b 100644
--- a/drivers/usb/wusbcore/mmc.c
+++ b/drivers/usb/wusbcore/mmc.c
@@ -263,7 +263,7 @@ int wusbhc_chid_set(struct wusbhc *wusbhc, const struct wusb_ckhdid *chid)
263{ 263{
264 int result = 0; 264 int result = 0;
265 265
266 if (memcmp(chid, &wusb_ckhdid_zero, sizeof(chid)) == 0) 266 if (memcmp(chid, &wusb_ckhdid_zero, sizeof(*chid)) == 0)
267 chid = NULL; 267 chid = NULL;
268 268
269 mutex_lock(&wusbhc->mutex); 269 mutex_lock(&wusbhc->mutex);
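The one-character change above closes a classic sizeof-on-a-pointer bug: chid is a pointer, so sizeof(chid) is 4 or 8 and the memcmp() compared only the first word of the 16-byte CHID, while sizeof(*chid) measures the struct itself. Reduced to essentials:

	struct wusb_ckhdid { u8 data[16]; };	/* real definition lives in the UWB headers */

	static bool chid_is_zero(const struct wusb_ckhdid *chid,
				 const struct wusb_ckhdid *zero)
	{
		/* sizeof(chid)  == sizeof(void *): 8 bytes on 64-bit, wrong */
		/* sizeof(*chid) == 16: the whole identifier, right */
		return memcmp(chid, zero, sizeof(*chid)) == 0;
	}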
diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
index 3681c6a88212..b0a3fa00706d 100644
--- a/drivers/video/console/fbcon.c
+++ b/drivers/video/console/fbcon.c
@@ -3025,6 +3025,20 @@ static int fbcon_fb_unregistered(struct fb_info *info)
3025 return 0; 3025 return 0;
3026} 3026}
3027 3027
3028static void fbcon_remap_all(int idx)
3029{
3030 int i;
3031 for (i = first_fb_vc; i <= last_fb_vc; i++)
3032 set_con2fb_map(i, idx, 0);
3033
3034 if (con_is_bound(&fb_con)) {
3035 printk(KERN_INFO "fbcon: Remapping primary device, "
3036 "fb%i, to tty %i-%i\n", idx,
3037 first_fb_vc + 1, last_fb_vc + 1);
3038 info_idx = idx;
3039 }
3040}
3041
3028#ifdef CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY 3042#ifdef CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY
3029static void fbcon_select_primary(struct fb_info *info) 3043static void fbcon_select_primary(struct fb_info *info)
3030{ 3044{
@@ -3225,6 +3239,10 @@ static int fbcon_event_notify(struct notifier_block *self,
3225 caps = event->data; 3239 caps = event->data;
3226 fbcon_get_requirement(info, caps); 3240 fbcon_get_requirement(info, caps);
3227 break; 3241 break;
3242 case FB_EVENT_REMAP_ALL_CONSOLE:
3243 idx = info->node;
3244 fbcon_remap_all(idx);
3245 break;
3228 } 3246 }
3229done: 3247done:
3230 return ret; 3248 return ret;
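
The new FB_EVENT_REMAP_ALL_CONSOLE case lets a framebuffer driver ask fbcon to rebind every virtual console in the first_fb_vc..last_fb_vc range to a single device in one call, instead of userspace issuing a per-console con2fb ioctl. A hedged sketch of how a sender might broadcast the event, following the fb_event notifier pattern used in fbmem.c (the wrapper name is illustrative, not from this patch):

    #include <linux/fb.h>

    /* Illustrative only: broadcast "remap all consoles to this fb". */
    static int example_remap_all_console(struct fb_info *info)
    {
            struct fb_event event;

            event.info = info;      /* fbcon reads info->node as the idx */
            return fb_notifier_call_chain(FB_EVENT_REMAP_ALL_CONSOLE, &event);
    }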
diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
index 99bbd282ce63..a15b44e9c003 100644
--- a/drivers/video/fbmem.c
+++ b/drivers/video/fbmem.c
@@ -1513,7 +1513,6 @@ register_framebuffer(struct fb_info *fb_info)
1513 fb_info->fix.id, 1513 fb_info->fix.id,
1514 registered_fb[i]->fix.id); 1514 registered_fb[i]->fix.id);
1515 unregister_framebuffer(registered_fb[i]); 1515 unregister_framebuffer(registered_fb[i]);
1516 break;
1517 } 1516 }
1518 } 1517 }
1519 } 1518 }
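
Dropping the break changes the conflict pass in register_framebuffer() from "evict the first clashing driver" to "evict every clashing driver", which matters when a generic driver such as vesafb and more than one native driver claim overlapping apertures. Schematically (the overlap test below is a stand-in for the real aperture comparison, and the helper name is assumed):

    /* Paraphrase of the conflict pass after this change: no early exit. */
    static bool apertures_overlap(struct fb_info *a, struct fb_info *b);

    static void evict_conflicting_fbs(struct fb_info *fb_info)
    {
            int i;

            for (i = 0; i < FB_MAX; i++) {
                    if (!registered_fb[i])
                            continue;
                    if (apertures_overlap(registered_fb[i], fb_info))
                            /* no break: every conflicting fb goes */
                            unregister_framebuffer(registered_fb[i]);
            }
    }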
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index ce602dd09bc1..2f8413794d05 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -649,9 +649,13 @@ void xen_evtchn_do_upcall(struct pt_regs *regs)
649 int bit_idx = __ffs(pending_bits); 649 int bit_idx = __ffs(pending_bits);
650 int port = (word_idx * BITS_PER_LONG) + bit_idx; 650 int port = (word_idx * BITS_PER_LONG) + bit_idx;
651 int irq = evtchn_to_irq[port]; 651 int irq = evtchn_to_irq[port];
652 struct irq_desc *desc;
652 653
653 if (irq != -1) 654 if (irq != -1) {
654 handle_irq(irq, regs); 655 desc = irq_to_desc(irq);
656 if (desc)
657 generic_handle_irq_desc(irq, desc);
658 }
655 } 659 }
656 } 660 }
657 661
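
The xen upcall previously forwarded each pending port to the arch handle_irq() entry point; the new code looks up the descriptor itself and dispatches with generic_handle_irq_desc(), and the NULL check on desc means an event arriving for a port whose IRQ mapping is stale or unallocated is dropped instead of dereferencing a missing descriptor. Condensed from the hunk above:

    #include <linux/irq.h>

    /* Condensed sketch: dispatch one event-channel port. */
    static void dispatch_port(int irq)
    {
            struct irq_desc *desc;

            if (irq == -1)
                    return;                 /* port has no IRQ bound */

            desc = irq_to_desc(irq);        /* may be NULL for stale IRQs */
            if (desc)
                    generic_handle_irq_desc(irq, desc);
    }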
diff --git a/fs/adfs/adfs.h b/fs/adfs/adfs.h
index 9cc18775b832..2ff622f6f547 100644
--- a/fs/adfs/adfs.h
+++ b/fs/adfs/adfs.h
@@ -121,7 +121,7 @@ struct adfs_discmap {
121 121
122/* Inode stuff */ 122/* Inode stuff */
123struct inode *adfs_iget(struct super_block *sb, struct object_info *obj); 123struct inode *adfs_iget(struct super_block *sb, struct object_info *obj);
124int adfs_write_inode(struct inode *inode,int unused); 124int adfs_write_inode(struct inode *inode, struct writeback_control *wbc);
125int adfs_notify_change(struct dentry *dentry, struct iattr *attr); 125int adfs_notify_change(struct dentry *dentry, struct iattr *attr);
126 126
127/* map.c */ 127/* map.c */
diff --git a/fs/adfs/inode.c b/fs/adfs/inode.c
index 3f57ce4bee5d..0f5e30978135 100644
--- a/fs/adfs/inode.c
+++ b/fs/adfs/inode.c
@@ -9,6 +9,7 @@
9 */ 9 */
10#include <linux/smp_lock.h> 10#include <linux/smp_lock.h>
11#include <linux/buffer_head.h> 11#include <linux/buffer_head.h>
12#include <linux/writeback.h>
12#include "adfs.h" 13#include "adfs.h"
13 14
14/* 15/*
@@ -360,7 +361,7 @@ out:
360 * The adfs-specific inode data has already been updated by 361 * The adfs-specific inode data has already been updated by
361 * adfs_notify_change() 362 * adfs_notify_change()
362 */ 363 */
363int adfs_write_inode(struct inode *inode, int wait) 364int adfs_write_inode(struct inode *inode, struct writeback_control *wbc)
364{ 365{
365 struct super_block *sb = inode->i_sb; 366 struct super_block *sb = inode->i_sb;
366 struct object_info obj; 367 struct object_info obj;
@@ -375,7 +376,7 @@ int adfs_write_inode(struct inode *inode, int wait)
375 obj.attr = ADFS_I(inode)->attr; 376 obj.attr = ADFS_I(inode)->attr;
376 obj.size = inode->i_size; 377 obj.size = inode->i_size;
377 378
378 ret = adfs_dir_update(sb, &obj, wait); 379 ret = adfs_dir_update(sb, &obj, wbc->sync_mode == WB_SYNC_ALL);
379 unlock_kernel(); 380 unlock_kernel();
380 return ret; 381 return ret;
381} 382}
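
This adfs change is one instance of an interface change that recurs throughout this series: ->write_inode() now takes the struct writeback_control instead of a pre-digested "int wait", and each filesystem derives synchronicity from wbc->sync_mode. The conversion recipe, as a sketch (the directory-update helper is hypothetical):

    #include <linux/fs.h>
    #include <linux/writeback.h>

    static int example_dir_update(struct super_block *sb, int wait);

    static int example_write_inode(struct inode *inode,
                                   struct writeback_control *wbc)
    {
            /* WB_SYNC_ALL is what the old "wait != 0" used to mean. */
            int wait = wbc->sync_mode == WB_SYNC_ALL;

            return example_dir_update(inode->i_sb, wait);
    }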
diff --git a/fs/affs/affs.h b/fs/affs/affs.h
index 0e40caaba456..861dae68ac12 100644
--- a/fs/affs/affs.h
+++ b/fs/affs/affs.h
@@ -175,7 +175,8 @@ extern void affs_delete_inode(struct inode *inode);
175extern void affs_clear_inode(struct inode *inode); 175extern void affs_clear_inode(struct inode *inode);
176extern struct inode *affs_iget(struct super_block *sb, 176extern struct inode *affs_iget(struct super_block *sb,
177 unsigned long ino); 177 unsigned long ino);
178extern int affs_write_inode(struct inode *inode, int); 178extern int affs_write_inode(struct inode *inode,
179 struct writeback_control *wbc);
179extern int affs_add_entry(struct inode *dir, struct inode *inode, struct dentry *dentry, s32 type); 180extern int affs_add_entry(struct inode *dir, struct inode *inode, struct dentry *dentry, s32 type);
180 181
181/* file.c */ 182/* file.c */
diff --git a/fs/affs/inode.c b/fs/affs/inode.c
index 3c4ec7d864c4..c9744d771d98 100644
--- a/fs/affs/inode.c
+++ b/fs/affs/inode.c
@@ -166,7 +166,7 @@ bad_inode:
166} 166}
167 167
168int 168int
169affs_write_inode(struct inode *inode, int unused) 169affs_write_inode(struct inode *inode, struct writeback_control *wbc)
170{ 170{
171 struct super_block *sb = inode->i_sb; 171 struct super_block *sb = inode->i_sb;
172 struct buffer_head *bh; 172 struct buffer_head *bh;
diff --git a/fs/afs/internal.h b/fs/afs/internal.h
index 6ece2a13bf71..c54dad4e6063 100644
--- a/fs/afs/internal.h
+++ b/fs/afs/internal.h
@@ -733,7 +733,6 @@ extern int afs_write_end(struct file *file, struct address_space *mapping,
733 struct page *page, void *fsdata); 733 struct page *page, void *fsdata);
734extern int afs_writepage(struct page *, struct writeback_control *); 734extern int afs_writepage(struct page *, struct writeback_control *);
735extern int afs_writepages(struct address_space *, struct writeback_control *); 735extern int afs_writepages(struct address_space *, struct writeback_control *);
736extern int afs_write_inode(struct inode *, int);
737extern void afs_pages_written_back(struct afs_vnode *, struct afs_call *); 736extern void afs_pages_written_back(struct afs_vnode *, struct afs_call *);
738extern ssize_t afs_file_write(struct kiocb *, const struct iovec *, 737extern ssize_t afs_file_write(struct kiocb *, const struct iovec *,
739 unsigned long, loff_t); 738 unsigned long, loff_t);
diff --git a/fs/afs/super.c b/fs/afs/super.c
index e1ea1c240b6a..14f6431598ad 100644
--- a/fs/afs/super.c
+++ b/fs/afs/super.c
@@ -48,7 +48,6 @@ struct file_system_type afs_fs_type = {
48static const struct super_operations afs_super_ops = { 48static const struct super_operations afs_super_ops = {
49 .statfs = afs_statfs, 49 .statfs = afs_statfs,
50 .alloc_inode = afs_alloc_inode, 50 .alloc_inode = afs_alloc_inode,
51 .write_inode = afs_write_inode,
52 .destroy_inode = afs_destroy_inode, 51 .destroy_inode = afs_destroy_inode,
53 .clear_inode = afs_clear_inode, 52 .clear_inode = afs_clear_inode,
54 .put_super = afs_put_super, 53 .put_super = afs_put_super,
diff --git a/fs/afs/write.c b/fs/afs/write.c
index 5e15a21dbf9f..3bed54a294d4 100644
--- a/fs/afs/write.c
+++ b/fs/afs/write.c
@@ -585,27 +585,6 @@ int afs_writepages(struct address_space *mapping,
585} 585}
586 586
587/* 587/*
588 * write an inode back
589 */
590int afs_write_inode(struct inode *inode, int sync)
591{
592 struct afs_vnode *vnode = AFS_FS_I(inode);
593 int ret;
594
595 _enter("{%x:%u},", vnode->fid.vid, vnode->fid.vnode);
596
597 ret = 0;
598 if (sync) {
599 ret = filemap_fdatawait(inode->i_mapping);
600 if (ret < 0)
601 __mark_inode_dirty(inode, I_DIRTY_DATASYNC);
602 }
603
604 _leave(" = %d", ret);
605 return ret;
606}
607
608/*
609 * completion of write to server 588 * completion of write to server
610 */ 589 */
611void afs_pages_written_back(struct afs_vnode *vnode, struct afs_call *call) 590void afs_pages_written_back(struct afs_vnode *vnode, struct afs_call *call)
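
Rather than converting afs_write_inode() to the new prototype, the afs hunks delete it outright: all it did was wait on data writeback, which the sync paths already arrange, so the operation carried no inode metadata work. A NULL ->write_inode is legal; the VFS helper that invokes it simply skips absent implementations, roughly (paraphrased from fs/fs-writeback.c of this era):

    /* Paraphrase: how the VFS treats a missing ->write_inode. */
    static int write_inode(struct inode *inode, struct writeback_control *wbc)
    {
            if (inode->i_sb->s_op->write_inode && !is_bad_inode(inode))
                    return inode->i_sb->s_op->write_inode(inode, wbc);
            return 0;
    }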
diff --git a/fs/autofs4/autofs_i.h b/fs/autofs4/autofs_i.h
index 0118d67221b2..3d283abf67d7 100644
--- a/fs/autofs4/autofs_i.h
+++ b/fs/autofs4/autofs_i.h
@@ -60,11 +60,6 @@ do { \
60 current->pid, __func__, ##args); \ 60 current->pid, __func__, ##args); \
61} while (0) 61} while (0)
62 62
63struct rehash_entry {
64 struct task_struct *task;
65 struct list_head list;
66};
67
68/* Unified info structure. This is pointed to by both the dentry and 63/* Unified info structure. This is pointed to by both the dentry and
69 inode structures. Each file in the filesystem has an instance of this 64 inode structures. Each file in the filesystem has an instance of this
70 structure. It holds a reference to the dentry, so dentries are never 65 structure. It holds a reference to the dentry, so dentries are never
@@ -81,7 +76,6 @@ struct autofs_info {
81 76
82 struct list_head active; 77 struct list_head active;
83 int active_count; 78 int active_count;
84 struct list_head rehash_list;
85 79
86 struct list_head expiring; 80 struct list_head expiring;
87 81
@@ -104,7 +98,6 @@ struct autofs_info {
104#define AUTOFS_INF_EXPIRING (1<<0) /* dentry is in the process of expiring */ 98#define AUTOFS_INF_EXPIRING (1<<0) /* dentry is in the process of expiring */
105#define AUTOFS_INF_MOUNTPOINT (1<<1) /* mountpoint status for direct expire */ 99#define AUTOFS_INF_MOUNTPOINT (1<<1) /* mountpoint status for direct expire */
106#define AUTOFS_INF_PENDING (1<<2) /* dentry pending mount */ 100#define AUTOFS_INF_PENDING (1<<2) /* dentry pending mount */
107#define AUTOFS_INF_REHASH (1<<3) /* dentry in transit to ->lookup() */
108 101
109struct autofs_wait_queue { 102struct autofs_wait_queue {
110 wait_queue_head_t queue; 103 wait_queue_head_t queue;
diff --git a/fs/autofs4/dev-ioctl.c b/fs/autofs4/dev-ioctl.c
index 00bf8fcb245f..c8a80dffb455 100644
--- a/fs/autofs4/dev-ioctl.c
+++ b/fs/autofs4/dev-ioctl.c
@@ -544,10 +544,9 @@ static int autofs_dev_ioctl_ismountpoint(struct file *fp,
544 goto out; 544 goto out;
545 devid = new_encode_dev(path.mnt->mnt_sb->s_dev); 545 devid = new_encode_dev(path.mnt->mnt_sb->s_dev);
546 err = 0; 546 err = 0;
547 if (path.dentry->d_inode && 547 if (path.mnt->mnt_root == path.dentry) {
548 path.mnt->mnt_root == path.dentry) {
549 err = 1; 548 err = 1;
550 magic = path.dentry->d_inode->i_sb->s_magic; 549 magic = path.mnt->mnt_sb->s_magic;
551 } 550 }
552 } else { 551 } else {
553 dev_t dev = sbi->sb->s_dev; 552 dev_t dev = sbi->sb->s_dev;
@@ -560,10 +559,8 @@ static int autofs_dev_ioctl_ismountpoint(struct file *fp,
560 559
561 err = have_submounts(path.dentry); 560 err = have_submounts(path.dentry);
562 561
563 if (path.mnt->mnt_mountpoint != path.mnt->mnt_root) { 562 if (follow_down(&path))
564 if (follow_down(&path)) 563 magic = path.mnt->mnt_sb->s_magic;
565 magic = path.mnt->mnt_sb->s_magic;
566 }
567 } 564 }
568 565
569 param->ismountpoint.out.devid = devid; 566 param->ismountpoint.out.devid = devid;
diff --git a/fs/autofs4/expire.c b/fs/autofs4/expire.c
index 74bc9aa6df31..a796c9417fb1 100644
--- a/fs/autofs4/expire.c
+++ b/fs/autofs4/expire.c
@@ -279,7 +279,6 @@ struct dentry *autofs4_expire_direct(struct super_block *sb,
279 root->d_mounted--; 279 root->d_mounted--;
280 } 280 }
281 ino->flags |= AUTOFS_INF_EXPIRING; 281 ino->flags |= AUTOFS_INF_EXPIRING;
282 autofs4_add_expiring(root);
283 init_completion(&ino->expire_complete); 282 init_completion(&ino->expire_complete);
284 spin_unlock(&sbi->fs_lock); 283 spin_unlock(&sbi->fs_lock);
285 return root; 284 return root;
@@ -407,7 +406,6 @@ found:
407 expired, (int)expired->d_name.len, expired->d_name.name); 406 expired, (int)expired->d_name.len, expired->d_name.name);
408 ino = autofs4_dentry_ino(expired); 407 ino = autofs4_dentry_ino(expired);
409 ino->flags |= AUTOFS_INF_EXPIRING; 408 ino->flags |= AUTOFS_INF_EXPIRING;
410 autofs4_add_expiring(expired);
411 init_completion(&ino->expire_complete); 409 init_completion(&ino->expire_complete);
412 spin_unlock(&sbi->fs_lock); 410 spin_unlock(&sbi->fs_lock);
413 spin_lock(&dcache_lock); 411 spin_lock(&dcache_lock);
@@ -435,7 +433,7 @@ int autofs4_expire_wait(struct dentry *dentry)
435 433
436 DPRINTK("expire done status=%d", status); 434 DPRINTK("expire done status=%d", status);
437 435
438 if (d_unhashed(dentry) && IS_DEADDIR(dentry->d_inode)) 436 if (d_unhashed(dentry))
439 return -EAGAIN; 437 return -EAGAIN;
440 438
441 return status; 439 return status;
@@ -475,7 +473,6 @@ int autofs4_expire_run(struct super_block *sb,
475 spin_lock(&sbi->fs_lock); 473 spin_lock(&sbi->fs_lock);
476 ino = autofs4_dentry_ino(dentry); 474 ino = autofs4_dentry_ino(dentry);
477 ino->flags &= ~AUTOFS_INF_EXPIRING; 475 ino->flags &= ~AUTOFS_INF_EXPIRING;
478 autofs4_del_expiring(dentry);
479 complete_all(&ino->expire_complete); 476 complete_all(&ino->expire_complete);
480 spin_unlock(&sbi->fs_lock); 477 spin_unlock(&sbi->fs_lock);
481 478
@@ -506,7 +503,6 @@ int autofs4_do_expire_multi(struct super_block *sb, struct vfsmount *mnt,
506 ino->flags &= ~AUTOFS_INF_MOUNTPOINT; 503 ino->flags &= ~AUTOFS_INF_MOUNTPOINT;
507 } 504 }
508 ino->flags &= ~AUTOFS_INF_EXPIRING; 505 ino->flags &= ~AUTOFS_INF_EXPIRING;
509 autofs4_del_expiring(dentry);
510 complete_all(&ino->expire_complete); 506 complete_all(&ino->expire_complete);
511 spin_unlock(&sbi->fs_lock); 507 spin_unlock(&sbi->fs_lock);
512 dput(dentry); 508 dput(dentry);
diff --git a/fs/autofs4/inode.c b/fs/autofs4/inode.c
index d0a3de247458..821b2b955dac 100644
--- a/fs/autofs4/inode.c
+++ b/fs/autofs4/inode.c
@@ -49,7 +49,6 @@ struct autofs_info *autofs4_init_ino(struct autofs_info *ino,
49 ino->dentry = NULL; 49 ino->dentry = NULL;
50 ino->size = 0; 50 ino->size = 0;
51 INIT_LIST_HEAD(&ino->active); 51 INIT_LIST_HEAD(&ino->active);
52 INIT_LIST_HEAD(&ino->rehash_list);
53 ino->active_count = 0; 52 ino->active_count = 0;
54 INIT_LIST_HEAD(&ino->expiring); 53 INIT_LIST_HEAD(&ino->expiring);
55 atomic_set(&ino->count, 0); 54 atomic_set(&ino->count, 0);
@@ -97,63 +96,6 @@ void autofs4_free_ino(struct autofs_info *ino)
97 kfree(ino); 96 kfree(ino);
98} 97}
99 98
100/*
101 * Deal with the infamous "Busy inodes after umount ..." message.
102 *
103 * Clean up the dentry tree. This happens with autofs if the user
104 * space program goes away due to a SIGKILL, SIGSEGV etc.
105 */
106static void autofs4_force_release(struct autofs_sb_info *sbi)
107{
108 struct dentry *this_parent = sbi->sb->s_root;
109 struct list_head *next;
110
111 if (!sbi->sb->s_root)
112 return;
113
114 spin_lock(&dcache_lock);
115repeat:
116 next = this_parent->d_subdirs.next;
117resume:
118 while (next != &this_parent->d_subdirs) {
119 struct dentry *dentry = list_entry(next, struct dentry, d_u.d_child);
120
121 /* Negative dentry - don`t care */
122 if (!simple_positive(dentry)) {
123 next = next->next;
124 continue;
125 }
126
127 if (!list_empty(&dentry->d_subdirs)) {
128 this_parent = dentry;
129 goto repeat;
130 }
131
132 next = next->next;
133 spin_unlock(&dcache_lock);
134
135 DPRINTK("dentry %p %.*s",
136 dentry, (int)dentry->d_name.len, dentry->d_name.name);
137
138 dput(dentry);
139 spin_lock(&dcache_lock);
140 }
141
142 if (this_parent != sbi->sb->s_root) {
143 struct dentry *dentry = this_parent;
144
145 next = this_parent->d_u.d_child.next;
146 this_parent = this_parent->d_parent;
147 spin_unlock(&dcache_lock);
148 DPRINTK("parent dentry %p %.*s",
149 dentry, (int)dentry->d_name.len, dentry->d_name.name);
150 dput(dentry);
151 spin_lock(&dcache_lock);
152 goto resume;
153 }
154 spin_unlock(&dcache_lock);
155}
156
157void autofs4_kill_sb(struct super_block *sb) 99void autofs4_kill_sb(struct super_block *sb)
158{ 100{
159 struct autofs_sb_info *sbi = autofs4_sbi(sb); 101 struct autofs_sb_info *sbi = autofs4_sbi(sb);
@@ -170,15 +112,12 @@ void autofs4_kill_sb(struct super_block *sb)
170 /* Free wait queues, close pipe */ 112 /* Free wait queues, close pipe */
171 autofs4_catatonic_mode(sbi); 113 autofs4_catatonic_mode(sbi);
172 114
173 /* Clean up and release dangling references */
174 autofs4_force_release(sbi);
175
176 sb->s_fs_info = NULL; 115 sb->s_fs_info = NULL;
177 kfree(sbi); 116 kfree(sbi);
178 117
179out_kill_sb: 118out_kill_sb:
180 DPRINTK("shutting down"); 119 DPRINTK("shutting down");
181 kill_anon_super(sb); 120 kill_litter_super(sb);
182} 121}
183 122
184static int autofs4_show_options(struct seq_file *m, struct vfsmount *mnt) 123static int autofs4_show_options(struct seq_file *m, struct vfsmount *mnt)
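
With the open-coded autofs4_force_release() gone, switching the final teardown from kill_anon_super() to kill_litter_super() covers the same job: the litter variant first prunes the dentry tree that autofs "litters" into its superblock, then performs the anonymous-super teardown. Its upstream definition is essentially:

    /* fs/super.c (for reference) */
    void kill_litter_super(struct super_block *sb)
    {
            if (sb->s_root)
                    d_genocide(sb->s_root);
            kill_anon_super(sb);
    }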
diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c
index 30cc9ddf4b70..a015b49891df 100644
--- a/fs/autofs4/root.c
+++ b/fs/autofs4/root.c
@@ -104,99 +104,6 @@ static void autofs4_del_active(struct dentry *dentry)
104 return; 104 return;
105} 105}
106 106
107static void autofs4_add_rehash_entry(struct autofs_info *ino,
108 struct rehash_entry *entry)
109{
110 entry->task = current;
111 INIT_LIST_HEAD(&entry->list);
112 list_add(&entry->list, &ino->rehash_list);
113 return;
114}
115
116static void autofs4_remove_rehash_entry(struct autofs_info *ino)
117{
118 struct list_head *head = &ino->rehash_list;
119 struct rehash_entry *entry;
120 list_for_each_entry(entry, head, list) {
121 if (entry->task == current) {
122 list_del(&entry->list);
123 kfree(entry);
124 break;
125 }
126 }
127 return;
128}
129
130static void autofs4_remove_rehash_entrys(struct autofs_info *ino)
131{
132 struct autofs_sb_info *sbi = ino->sbi;
133 struct rehash_entry *entry, *next;
134 struct list_head *head;
135
136 spin_lock(&sbi->fs_lock);
137 spin_lock(&sbi->lookup_lock);
138 if (!(ino->flags & AUTOFS_INF_REHASH)) {
139 spin_unlock(&sbi->lookup_lock);
140 spin_unlock(&sbi->fs_lock);
141 return;
142 }
143 ino->flags &= ~AUTOFS_INF_REHASH;
144 head = &ino->rehash_list;
145 list_for_each_entry_safe(entry, next, head, list) {
146 list_del(&entry->list);
147 kfree(entry);
148 }
149 spin_unlock(&sbi->lookup_lock);
150 spin_unlock(&sbi->fs_lock);
151 dput(ino->dentry);
152
153 return;
154}
155
156static void autofs4_revalidate_drop(struct dentry *dentry,
157 struct rehash_entry *entry)
158{
159 struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);
160 struct autofs_info *ino = autofs4_dentry_ino(dentry);
161 /*
162 * Add to the active list so we can pick this up in
163 * ->lookup(). Also add an entry to a rehash list so
164 * we know when there are no dentrys in flight so we
165 * know when we can rehash the dentry.
166 */
167 spin_lock(&sbi->lookup_lock);
168 if (list_empty(&ino->active))
169 list_add(&ino->active, &sbi->active_list);
170 autofs4_add_rehash_entry(ino, entry);
171 spin_unlock(&sbi->lookup_lock);
172 if (!(ino->flags & AUTOFS_INF_REHASH)) {
173 ino->flags |= AUTOFS_INF_REHASH;
174 dget(dentry);
175 spin_lock(&dentry->d_lock);
176 __d_drop(dentry);
177 spin_unlock(&dentry->d_lock);
178 }
179 return;
180}
181
182static void autofs4_revalidate_rehash(struct dentry *dentry)
183{
184 struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);
185 struct autofs_info *ino = autofs4_dentry_ino(dentry);
186 if (ino->flags & AUTOFS_INF_REHASH) {
187 spin_lock(&sbi->lookup_lock);
188 autofs4_remove_rehash_entry(ino);
189 if (list_empty(&ino->rehash_list)) {
190 spin_unlock(&sbi->lookup_lock);
191 ino->flags &= ~AUTOFS_INF_REHASH;
192 d_rehash(dentry);
193 dput(ino->dentry);
194 } else
195 spin_unlock(&sbi->lookup_lock);
196 }
197 return;
198}
199
200static unsigned int autofs4_need_mount(unsigned int flags) 107static unsigned int autofs4_need_mount(unsigned int flags)
201{ 108{
202 unsigned int res = 0; 109 unsigned int res = 0;
@@ -236,7 +143,7 @@ out:
236 return dcache_dir_open(inode, file); 143 return dcache_dir_open(inode, file);
237} 144}
238 145
239static int try_to_fill_dentry(struct dentry *dentry) 146static int try_to_fill_dentry(struct dentry *dentry, int flags)
240{ 147{
241 struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb); 148 struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);
242 struct autofs_info *ino = autofs4_dentry_ino(dentry); 149 struct autofs_info *ino = autofs4_dentry_ino(dentry);
@@ -249,17 +156,55 @@ static int try_to_fill_dentry(struct dentry *dentry)
249 * Wait for a pending mount, triggering one if there 156 * Wait for a pending mount, triggering one if there
250 * isn't one already 157 * isn't one already
251 */ 158 */
252 DPRINTK("waiting for mount name=%.*s", 159 if (dentry->d_inode == NULL) {
253 dentry->d_name.len, dentry->d_name.name); 160 DPRINTK("waiting for mount name=%.*s",
161 dentry->d_name.len, dentry->d_name.name);
254 162
255 status = autofs4_wait(sbi, dentry, NFY_MOUNT); 163 status = autofs4_wait(sbi, dentry, NFY_MOUNT);
256 164
257 DPRINTK("mount done status=%d", status); 165 DPRINTK("mount done status=%d", status);
258 166
259 /* Update expiry counter */ 167 /* Turn this into a real negative dentry? */
260 ino->last_used = jiffies; 168 if (status == -ENOENT) {
169 spin_lock(&sbi->fs_lock);
170 ino->flags &= ~AUTOFS_INF_PENDING;
171 spin_unlock(&sbi->fs_lock);
172 return status;
173 } else if (status) {
174 /* Return a negative dentry, but leave it "pending" */
175 return status;
176 }
177 /* Trigger mount for path component or follow link */
178 } else if (ino->flags & AUTOFS_INF_PENDING ||
179 autofs4_need_mount(flags) ||
180 current->link_count) {
181 DPRINTK("waiting for mount name=%.*s",
182 dentry->d_name.len, dentry->d_name.name);
261 183
262 return status; 184 spin_lock(&sbi->fs_lock);
185 ino->flags |= AUTOFS_INF_PENDING;
186 spin_unlock(&sbi->fs_lock);
187 status = autofs4_wait(sbi, dentry, NFY_MOUNT);
188
189 DPRINTK("mount done status=%d", status);
190
191 if (status) {
192 spin_lock(&sbi->fs_lock);
193 ino->flags &= ~AUTOFS_INF_PENDING;
194 spin_unlock(&sbi->fs_lock);
195 return status;
196 }
197 }
198
199 /* Initialize expiry counter after successful mount */
200 if (ino)
201 ino->last_used = jiffies;
202
203 spin_lock(&sbi->fs_lock);
204 ino->flags &= ~AUTOFS_INF_PENDING;
205 spin_unlock(&sbi->fs_lock);
206
207 return 0;
263} 208}
264 209
265/* For autofs direct mounts the follow link triggers the mount */ 210/* For autofs direct mounts the follow link triggers the mount */
@@ -313,16 +258,10 @@ static void *autofs4_follow_link(struct dentry *dentry, struct nameidata *nd)
313 */ 258 */
314 if (ino->flags & AUTOFS_INF_PENDING || 259 if (ino->flags & AUTOFS_INF_PENDING ||
315 (!d_mountpoint(dentry) && list_empty(&dentry->d_subdirs))) { 260 (!d_mountpoint(dentry) && list_empty(&dentry->d_subdirs))) {
316 ino->flags |= AUTOFS_INF_PENDING;
317 spin_unlock(&dcache_lock); 261 spin_unlock(&dcache_lock);
318 spin_unlock(&sbi->fs_lock); 262 spin_unlock(&sbi->fs_lock);
319 263
320 status = try_to_fill_dentry(dentry); 264 status = try_to_fill_dentry(dentry, 0);
321
322 spin_lock(&sbi->fs_lock);
323 ino->flags &= ~AUTOFS_INF_PENDING;
324 spin_unlock(&sbi->fs_lock);
325
326 if (status) 265 if (status)
327 goto out_error; 266 goto out_error;
328 267
@@ -361,47 +300,18 @@ static int autofs4_revalidate(struct dentry *dentry, struct nameidata *nd)
361{ 300{
362 struct inode *dir = dentry->d_parent->d_inode; 301 struct inode *dir = dentry->d_parent->d_inode;
363 struct autofs_sb_info *sbi = autofs4_sbi(dir->i_sb); 302 struct autofs_sb_info *sbi = autofs4_sbi(dir->i_sb);
364 struct autofs_info *ino = autofs4_dentry_ino(dentry); 303 int oz_mode = autofs4_oz_mode(sbi);
365 struct rehash_entry *entry;
366 int flags = nd ? nd->flags : 0; 304 int flags = nd ? nd->flags : 0;
367 unsigned int mutex_aquired; 305 int status = 1;
368 306
369 DPRINTK("name = %.*s oz_mode = %d",
370 dentry->d_name.len, dentry->d_name.name, oz_mode);
371
372 /* Daemon never causes a mount to trigger */
373 if (autofs4_oz_mode(sbi))
374 return 1;
375
376 entry = kmalloc(sizeof(struct rehash_entry), GFP_KERNEL);
377 if (!entry)
378 return -ENOMEM;
379
380 mutex_aquired = mutex_trylock(&dir->i_mutex);
381
382 spin_lock(&sbi->fs_lock);
383 spin_lock(&dcache_lock);
384 /* Pending dentry */ 307 /* Pending dentry */
308 spin_lock(&sbi->fs_lock);
385 if (autofs4_ispending(dentry)) { 309 if (autofs4_ispending(dentry)) {
386 int status; 310 /* The daemon never causes a mount to trigger */
387
388 /*
389 * We can only unhash and send this to ->lookup() if
390 * the directory mutex is held over d_revalidate() and
391 * ->lookup(). This prevents the VFS from incorrectly
392 * seeing the dentry as non-existent.
393 */
394 ino->flags |= AUTOFS_INF_PENDING;
395 if (!mutex_aquired) {
396 autofs4_revalidate_drop(dentry, entry);
397 spin_unlock(&dcache_lock);
398 spin_unlock(&sbi->fs_lock);
399 return 0;
400 }
401 spin_unlock(&dcache_lock);
402 spin_unlock(&sbi->fs_lock); 311 spin_unlock(&sbi->fs_lock);
403 mutex_unlock(&dir->i_mutex); 312
404 kfree(entry); 313 if (oz_mode)
314 return 1;
405 315
406 /* 316 /*
407 * If the directory has gone away due to an expire 317 * If the directory has gone away due to an expire
@@ -415,82 +325,45 @@ static int autofs4_revalidate(struct dentry *dentry, struct nameidata *nd)
415 * A zero status is success otherwise we have a 325 * A zero status is success otherwise we have a
416 * negative error code. 326 * negative error code.
417 */ 327 */
418 status = try_to_fill_dentry(dentry); 328 status = try_to_fill_dentry(dentry, flags);
419
420 spin_lock(&sbi->fs_lock);
421 ino->flags &= ~AUTOFS_INF_PENDING;
422 spin_unlock(&sbi->fs_lock);
423
424 if (status == 0) 329 if (status == 0)
425 return 1; 330 return 1;
426 331
427 return status; 332 return status;
428 } 333 }
334 spin_unlock(&sbi->fs_lock);
335
336 /* Negative dentry.. invalidate if "old" */
337 if (dentry->d_inode == NULL)
338 return 0;
429 339
430 /* Check for a non-mountpoint directory with no contents */ 340 /* Check for a non-mountpoint directory with no contents */
341 spin_lock(&dcache_lock);
431 if (S_ISDIR(dentry->d_inode->i_mode) && 342 if (S_ISDIR(dentry->d_inode->i_mode) &&
432 !d_mountpoint(dentry) && list_empty(&dentry->d_subdirs)) { 343 !d_mountpoint(dentry) && list_empty(&dentry->d_subdirs)) {
433 DPRINTK("dentry=%p %.*s, emptydir", 344 DPRINTK("dentry=%p %.*s, emptydir",
434 dentry, dentry->d_name.len, dentry->d_name.name); 345 dentry, dentry->d_name.len, dentry->d_name.name);
346 spin_unlock(&dcache_lock);
435 347
436 if (autofs4_need_mount(flags) || current->link_count) { 348 /* The daemon never causes a mount to trigger */
437 int status; 349 if (oz_mode)
438 350 return 1;
439 /*
440 * We can only unhash and send this to ->lookup() if
441 * the directory mutex is held over d_revalidate() and
442 * ->lookup(). This prevents the VFS from incorrectly
443 * seeing the dentry as non-existent.
444 */
445 ino->flags |= AUTOFS_INF_PENDING;
446 if (!mutex_aquired) {
447 autofs4_revalidate_drop(dentry, entry);
448 spin_unlock(&dcache_lock);
449 spin_unlock(&sbi->fs_lock);
450 return 0;
451 }
452 spin_unlock(&dcache_lock);
453 spin_unlock(&sbi->fs_lock);
454 mutex_unlock(&dir->i_mutex);
455 kfree(entry);
456
457 /*
458 * A zero status is success otherwise we have a
459 * negative error code.
460 */
461 status = try_to_fill_dentry(dentry);
462
463 spin_lock(&sbi->fs_lock);
464 ino->flags &= ~AUTOFS_INF_PENDING;
465 spin_unlock(&sbi->fs_lock);
466 351
467 if (status == 0) 352 /*
468 return 1; 353 * A zero status is success otherwise we have a
354 * negative error code.
355 */
356 status = try_to_fill_dentry(dentry, flags);
357 if (status == 0)
358 return 1;
469 359
470 return status; 360 return status;
471 }
472 } 361 }
473 spin_unlock(&dcache_lock); 362 spin_unlock(&dcache_lock);
474 spin_unlock(&sbi->fs_lock);
475
476 if (mutex_aquired)
477 mutex_unlock(&dir->i_mutex);
478
479 kfree(entry);
480 363
481 return 1; 364 return 1;
482} 365}
483 366
484static void autofs4_free_rehash_entrys(struct autofs_info *inf)
485{
486 struct list_head *head = &inf->rehash_list;
487 struct rehash_entry *entry, *next;
488 list_for_each_entry_safe(entry, next, head, list) {
489 list_del(&entry->list);
490 kfree(entry);
491 }
492}
493
494void autofs4_dentry_release(struct dentry *de) 367void autofs4_dentry_release(struct dentry *de)
495{ 368{
496 struct autofs_info *inf; 369 struct autofs_info *inf;
@@ -509,8 +382,6 @@ void autofs4_dentry_release(struct dentry *de)
509 list_del(&inf->active); 382 list_del(&inf->active);
510 if (!list_empty(&inf->expiring)) 383 if (!list_empty(&inf->expiring))
511 list_del(&inf->expiring); 384 list_del(&inf->expiring);
512 if (!list_empty(&inf->rehash_list))
513 autofs4_free_rehash_entrys(inf);
514 spin_unlock(&sbi->lookup_lock); 385 spin_unlock(&sbi->lookup_lock);
515 } 386 }
516 387
@@ -543,7 +414,6 @@ static struct dentry *autofs4_lookup_active(struct dentry *dentry)
543 const unsigned char *str = name->name; 414 const unsigned char *str = name->name;
544 struct list_head *p, *head; 415 struct list_head *p, *head;
545 416
546restart:
547 spin_lock(&dcache_lock); 417 spin_lock(&dcache_lock);
548 spin_lock(&sbi->lookup_lock); 418 spin_lock(&sbi->lookup_lock);
549 head = &sbi->active_list; 419 head = &sbi->active_list;
@@ -561,19 +431,6 @@ restart:
561 if (atomic_read(&active->d_count) == 0) 431 if (atomic_read(&active->d_count) == 0)
562 goto next; 432 goto next;
563 433
564 if (active->d_inode && IS_DEADDIR(active->d_inode)) {
565 if (!list_empty(&ino->rehash_list)) {
566 dget(active);
567 spin_unlock(&active->d_lock);
568 spin_unlock(&sbi->lookup_lock);
569 spin_unlock(&dcache_lock);
570 autofs4_remove_rehash_entrys(ino);
571 dput(active);
572 goto restart;
573 }
574 goto next;
575 }
576
577 qstr = &active->d_name; 434 qstr = &active->d_name;
578 435
579 if (active->d_name.hash != hash) 436 if (active->d_name.hash != hash)
@@ -586,11 +443,13 @@ restart:
586 if (memcmp(qstr->name, str, len)) 443 if (memcmp(qstr->name, str, len))
587 goto next; 444 goto next;
588 445
589 dget(active); 446 if (d_unhashed(active)) {
590 spin_unlock(&active->d_lock); 447 dget(active);
591 spin_unlock(&sbi->lookup_lock); 448 spin_unlock(&active->d_lock);
592 spin_unlock(&dcache_lock); 449 spin_unlock(&sbi->lookup_lock);
593 return active; 450 spin_unlock(&dcache_lock);
451 return active;
452 }
594next: 453next:
595 spin_unlock(&active->d_lock); 454 spin_unlock(&active->d_lock);
596 } 455 }
@@ -639,11 +498,13 @@ static struct dentry *autofs4_lookup_expiring(struct dentry *dentry)
639 if (memcmp(qstr->name, str, len)) 498 if (memcmp(qstr->name, str, len))
640 goto next; 499 goto next;
641 500
642 dget(expiring); 501 if (d_unhashed(expiring)) {
643 spin_unlock(&expiring->d_lock); 502 dget(expiring);
644 spin_unlock(&sbi->lookup_lock); 503 spin_unlock(&expiring->d_lock);
645 spin_unlock(&dcache_lock); 504 spin_unlock(&sbi->lookup_lock);
646 return expiring; 505 spin_unlock(&dcache_lock);
506 return expiring;
507 }
647next: 508next:
648 spin_unlock(&expiring->d_lock); 509 spin_unlock(&expiring->d_lock);
649 } 510 }
@@ -653,48 +514,6 @@ next:
653 return NULL; 514 return NULL;
654} 515}
655 516
656static struct autofs_info *init_new_dentry(struct autofs_sb_info *sbi,
657 struct dentry *dentry, int oz_mode)
658{
659 struct autofs_info *ino;
660
661 /*
662 * Mark the dentry incomplete but don't hash it. We do this
663 * to serialize our inode creation operations (symlink and
664 * mkdir) which prevents deadlock during the callback to
665 * the daemon. Subsequent user space lookups for the same
666 * dentry are placed on the wait queue while the daemon
667 * itself is allowed passage unrestricted so the create
668 * operation itself can then hash the dentry. Finally,
669 * we check for the hashed dentry and return the newly
670 * hashed dentry.
671 */
672 dentry->d_op = &autofs4_root_dentry_operations;
673
674 /*
675 * And we need to ensure that the same dentry is used for
676 * all following lookup calls until it is hashed so that
677 * the dentry flags are persistent throughout the request.
678 */
679 ino = autofs4_init_ino(NULL, sbi, 0555);
680 if (!ino)
681 return ERR_PTR(-ENOMEM);
682
683 dentry->d_fsdata = ino;
684 ino->dentry = dentry;
685
686 /*
687 * Only set the mount pending flag for new dentrys not created
688 * by the daemon.
689 */
690 if (!oz_mode)
691 ino->flags |= AUTOFS_INF_PENDING;
692
693 d_instantiate(dentry, NULL);
694
695 return ino;
696}
697
698/* Lookups in the root directory */ 517/* Lookups in the root directory */
699static struct dentry *autofs4_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd) 518static struct dentry *autofs4_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
700{ 519{
@@ -702,7 +521,6 @@ static struct dentry *autofs4_lookup(struct inode *dir, struct dentry *dentry, s
702 struct autofs_info *ino; 521 struct autofs_info *ino;
703 struct dentry *expiring, *active; 522 struct dentry *expiring, *active;
704 int oz_mode; 523 int oz_mode;
705 int status = 0;
706 524
707 DPRINTK("name = %.*s", 525 DPRINTK("name = %.*s",
708 dentry->d_name.len, dentry->d_name.name); 526 dentry->d_name.len, dentry->d_name.name);
@@ -717,26 +535,44 @@ static struct dentry *autofs4_lookup(struct inode *dir, struct dentry *dentry, s
717 DPRINTK("pid = %u, pgrp = %u, catatonic = %d, oz_mode = %d", 535 DPRINTK("pid = %u, pgrp = %u, catatonic = %d, oz_mode = %d",
718 current->pid, task_pgrp_nr(current), sbi->catatonic, oz_mode); 536 current->pid, task_pgrp_nr(current), sbi->catatonic, oz_mode);
719 537
720 spin_lock(&sbi->fs_lock);
721 active = autofs4_lookup_active(dentry); 538 active = autofs4_lookup_active(dentry);
722 if (active) { 539 if (active) {
723 dentry = active; 540 dentry = active;
724 ino = autofs4_dentry_ino(dentry); 541 ino = autofs4_dentry_ino(dentry);
725 /* If this came from revalidate, rehash it */
726 autofs4_revalidate_rehash(dentry);
727 spin_unlock(&sbi->fs_lock);
728 } else { 542 } else {
729 spin_unlock(&sbi->fs_lock); 543 /*
730 ino = init_new_dentry(sbi, dentry, oz_mode); 544 * Mark the dentry incomplete but don't hash it. We do this
731 if (IS_ERR(ino)) 545 * to serialize our inode creation operations (symlink and
732 return (struct dentry *) ino; 546 * mkdir) which prevents deadlock during the callback to
733 } 547 * the daemon. Subsequent user space lookups for the same
548 * dentry are placed on the wait queue while the daemon
549 * itself is allowed passage unrestricted so the create
550 * operation itself can then hash the dentry. Finally,
551 * we check for the hashed dentry and return the newly
552 * hashed dentry.
553 */
554 dentry->d_op = &autofs4_root_dentry_operations;
555
556 /*
557 * And we need to ensure that the same dentry is used for
558 * all following lookup calls until it is hashed so that
559 * the dentry flags are persistent throughout the request.
560 */
561 ino = autofs4_init_ino(NULL, sbi, 0555);
562 if (!ino)
563 return ERR_PTR(-ENOMEM);
734 564
735 autofs4_add_active(dentry); 565 dentry->d_fsdata = ino;
566 ino->dentry = dentry;
567
568 autofs4_add_active(dentry);
569
570 d_instantiate(dentry, NULL);
571 }
736 572
737 if (!oz_mode) { 573 if (!oz_mode) {
738 expiring = autofs4_lookup_expiring(dentry);
739 mutex_unlock(&dir->i_mutex); 574 mutex_unlock(&dir->i_mutex);
575 expiring = autofs4_lookup_expiring(dentry);
740 if (expiring) { 576 if (expiring) {
741 /* 577 /*
742 * If we are racing with expire the request might not 578 * If we are racing with expire the request might not
@@ -744,22 +580,23 @@ static struct dentry *autofs4_lookup(struct inode *dir, struct dentry *dentry, s
744 * so it must have been successful, so just wait for it. 580 * so it must have been successful, so just wait for it.
745 */ 581 */
746 autofs4_expire_wait(expiring); 582 autofs4_expire_wait(expiring);
583 autofs4_del_expiring(expiring);
747 dput(expiring); 584 dput(expiring);
748 } 585 }
749 status = try_to_fill_dentry(dentry); 586
750 mutex_lock(&dir->i_mutex);
751 spin_lock(&sbi->fs_lock); 587 spin_lock(&sbi->fs_lock);
752 ino->flags &= ~AUTOFS_INF_PENDING; 588 ino->flags |= AUTOFS_INF_PENDING;
753 spin_unlock(&sbi->fs_lock); 589 spin_unlock(&sbi->fs_lock);
590 if (dentry->d_op && dentry->d_op->d_revalidate)
591 (dentry->d_op->d_revalidate)(dentry, nd);
592 mutex_lock(&dir->i_mutex);
754 } 593 }
755 594
756 autofs4_del_active(dentry);
757
758 /* 595 /*
759 * If we had a mount fail, check if we had to handle 596 * If we are still pending, check if we had to handle
760 * a signal. If so we can force a restart.. 597 * a signal. If so we can force a restart..
761 */ 598 */
762 if (status) { 599 if (ino->flags & AUTOFS_INF_PENDING) {
763 /* See if we were interrupted */ 600 /* See if we were interrupted */
764 if (signal_pending(current)) { 601 if (signal_pending(current)) {
765 sigset_t *sigset = &current->pending.signal; 602 sigset_t *sigset = &current->pending.signal;
@@ -771,46 +608,43 @@ static struct dentry *autofs4_lookup(struct inode *dir, struct dentry *dentry, s
771 return ERR_PTR(-ERESTARTNOINTR); 608 return ERR_PTR(-ERESTARTNOINTR);
772 } 609 }
773 } 610 }
774 } 611 if (!oz_mode) {
775 612 spin_lock(&sbi->fs_lock);
776 /* 613 ino->flags &= ~AUTOFS_INF_PENDING;
777 * User space can (and has done in the past) remove and re-create 614 spin_unlock(&sbi->fs_lock);
778 * this directory during the callback. This can leave us with an
779 * unhashed dentry, but a successful mount! So we need to
780 * perform another cached lookup in case the dentry now exists.
781 */
782 if (!oz_mode && !have_submounts(dentry)) {
783 struct dentry *new;
784 new = d_lookup(dentry->d_parent, &dentry->d_name);
785 if (new) {
786 if (active)
787 dput(active);
788 return new;
789 } else {
790 if (!status)
791 status = -ENOENT;
792 } 615 }
793 } 616 }
794 617
795 /* 618 /*
796 * If we had a mount failure, return status to user space. 619 * If this dentry is unhashed, then we shouldn't honour this
797 * If the mount succeeded and we used a dentry from the active queue 620 * lookup. Returning ENOENT here doesn't do the right thing
798 * return it. 621 * for all system calls, but it should be OK for the operations
622 * we permit from an autofs.
799 */ 623 */
800 if (status) { 624 if (!oz_mode && d_unhashed(dentry)) {
801 dentry = ERR_PTR(status);
802 if (active)
803 dput(active);
804 return dentry;
805 } else {
806 /* 625 /*
807 * Valid successful mount, return active dentry or NULL 626 * A user space application can (and has done in the past)
808 * for a new dentry. 627 * remove and re-create this directory during the callback.
628 * This can leave us with an unhashed dentry, but a
629 * successful mount! So we need to perform another
630 * cached lookup in case the dentry now exists.
809 */ 631 */
632 struct dentry *parent = dentry->d_parent;
633 struct dentry *new = d_lookup(parent, &dentry->d_name);
634 if (new != NULL)
635 dentry = new;
636 else
637 dentry = ERR_PTR(-ENOENT);
638
810 if (active) 639 if (active)
811 return active; 640 dput(active);
641
642 return dentry;
812 } 643 }
813 644
645 if (active)
646 return active;
647
814 return NULL; 648 return NULL;
815} 649}
816 650
@@ -834,6 +668,8 @@ static int autofs4_dir_symlink(struct inode *dir,
834 if (!ino) 668 if (!ino)
835 return -ENOMEM; 669 return -ENOMEM;
836 670
671 autofs4_del_active(dentry);
672
837 ino->size = strlen(symname); 673 ino->size = strlen(symname);
838 cp = kmalloc(ino->size + 1, GFP_KERNEL); 674 cp = kmalloc(ino->size + 1, GFP_KERNEL);
839 if (!cp) { 675 if (!cp) {
@@ -910,6 +746,7 @@ static int autofs4_dir_unlink(struct inode *dir, struct dentry *dentry)
910 dir->i_mtime = CURRENT_TIME; 746 dir->i_mtime = CURRENT_TIME;
911 747
912 spin_lock(&dcache_lock); 748 spin_lock(&dcache_lock);
749 autofs4_add_expiring(dentry);
913 spin_lock(&dentry->d_lock); 750 spin_lock(&dentry->d_lock);
914 __d_drop(dentry); 751 __d_drop(dentry);
915 spin_unlock(&dentry->d_lock); 752 spin_unlock(&dentry->d_lock);
@@ -935,6 +772,7 @@ static int autofs4_dir_rmdir(struct inode *dir, struct dentry *dentry)
935 spin_unlock(&dcache_lock); 772 spin_unlock(&dcache_lock);
936 return -ENOTEMPTY; 773 return -ENOTEMPTY;
937 } 774 }
775 autofs4_add_expiring(dentry);
938 spin_lock(&dentry->d_lock); 776 spin_lock(&dentry->d_lock);
939 __d_drop(dentry); 777 __d_drop(dentry);
940 spin_unlock(&dentry->d_lock); 778 spin_unlock(&dentry->d_lock);
@@ -972,6 +810,8 @@ static int autofs4_dir_mkdir(struct inode *dir, struct dentry *dentry, int mode)
972 if (!ino) 810 if (!ino)
973 return -ENOMEM; 811 return -ENOMEM;
974 812
813 autofs4_del_active(dentry);
814
975 inode = autofs4_get_inode(dir->i_sb, ino); 815 inode = autofs4_get_inode(dir->i_sb, ino);
976 if (!inode) { 816 if (!inode) {
977 if (!dentry->d_fsdata) 817 if (!dentry->d_fsdata)
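
Taken together, the root.c changes move autofs4 away from unhashing dentries in ->d_revalidate() (which required the rehash bookkeeping deleted above) to keeping them hashed and tracking in-flight ones on the active and expiring lists; ->lookup() then only adopts a list entry when it is actually unhashed, and mount triggering happens by re-running d_revalidate from lookup. The lookup-side pairing for expiring dentries condenses to the following sketch (locking elided; the functions are the autofs4 internals shown in this diff):

    /* Condensed from autofs4_lookup() above; locking elided. */
    static void example_wait_for_expire(struct dentry *dentry)
    {
            struct dentry *expiring = autofs4_lookup_expiring(dentry);

            if (expiring) {
                    autofs4_expire_wait(expiring);  /* let a racing expire finish */
                    autofs4_del_expiring(expiring); /* then drop the list entry */
                    dput(expiring);
            }
    }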
diff --git a/fs/bfs/inode.c b/fs/bfs/inode.c
index 8f3d9fd89604..f22a7d3dc362 100644
--- a/fs/bfs/inode.c
+++ b/fs/bfs/inode.c
@@ -15,6 +15,7 @@
15#include <linux/smp_lock.h> 15#include <linux/smp_lock.h>
16#include <linux/buffer_head.h> 16#include <linux/buffer_head.h>
17#include <linux/vfs.h> 17#include <linux/vfs.h>
18#include <linux/writeback.h>
18#include <asm/uaccess.h> 19#include <asm/uaccess.h>
19#include "bfs.h" 20#include "bfs.h"
20 21
@@ -98,7 +99,7 @@ error:
98 return ERR_PTR(-EIO); 99 return ERR_PTR(-EIO);
99} 100}
100 101
101static int bfs_write_inode(struct inode *inode, int wait) 102static int bfs_write_inode(struct inode *inode, struct writeback_control *wbc)
102{ 103{
103 struct bfs_sb_info *info = BFS_SB(inode->i_sb); 104 struct bfs_sb_info *info = BFS_SB(inode->i_sb);
104 unsigned int ino = (u16)inode->i_ino; 105 unsigned int ino = (u16)inode->i_ino;
@@ -147,7 +148,7 @@ static int bfs_write_inode(struct inode *inode, int wait)
147 di->i_eoffset = cpu_to_le32(i_sblock * BFS_BSIZE + inode->i_size - 1); 148 di->i_eoffset = cpu_to_le32(i_sblock * BFS_BSIZE + inode->i_size - 1);
148 149
149 mark_buffer_dirty(bh); 150 mark_buffer_dirty(bh);
150 if (wait) { 151 if (wbc->sync_mode == WB_SYNC_ALL) {
151 sync_dirty_buffer(bh); 152 sync_dirty_buffer(bh);
152 if (buffer_req(bh) && !buffer_uptodate(bh)) 153 if (buffer_req(bh) && !buffer_uptodate(bh))
153 err = -EIO; 154 err = -EIO;
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 2aa8ec6a0981..8b5cfdd4bfc1 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -2326,7 +2326,7 @@ int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf);
2326int btrfs_readpage(struct file *file, struct page *page); 2326int btrfs_readpage(struct file *file, struct page *page);
2327void btrfs_delete_inode(struct inode *inode); 2327void btrfs_delete_inode(struct inode *inode);
2328void btrfs_put_inode(struct inode *inode); 2328void btrfs_put_inode(struct inode *inode);
2329int btrfs_write_inode(struct inode *inode, int wait); 2329int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc);
2330void btrfs_dirty_inode(struct inode *inode); 2330void btrfs_dirty_inode(struct inode *inode);
2331struct inode *btrfs_alloc_inode(struct super_block *sb); 2331struct inode *btrfs_alloc_inode(struct super_block *sb);
2332void btrfs_destroy_inode(struct inode *inode); 2332void btrfs_destroy_inode(struct inode *inode);
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 4deb280f8969..c41db6d45ab6 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -3968,7 +3968,7 @@ err:
3968 return ret; 3968 return ret;
3969} 3969}
3970 3970
3971int btrfs_write_inode(struct inode *inode, int wait) 3971int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc)
3972{ 3972{
3973 struct btrfs_root *root = BTRFS_I(inode)->root; 3973 struct btrfs_root *root = BTRFS_I(inode)->root;
3974 struct btrfs_trans_handle *trans; 3974 struct btrfs_trans_handle *trans;
@@ -3977,7 +3977,7 @@ int btrfs_write_inode(struct inode *inode, int wait)
3977 if (root->fs_info->btree_inode == inode) 3977 if (root->fs_info->btree_inode == inode)
3978 return 0; 3978 return 0;
3979 3979
3980 if (wait) { 3980 if (wbc->sync_mode == WB_SYNC_ALL) {
3981 trans = btrfs_join_transaction(root, 1); 3981 trans = btrfs_join_transaction(root, 1);
3982 btrfs_set_trans_block_group(trans, inode); 3982 btrfs_set_trans_block_group(trans, inode);
3983 ret = btrfs_commit_transaction(trans, root); 3983 ret = btrfs_commit_transaction(trans, root);
diff --git a/fs/cifs/CHANGES b/fs/cifs/CHANGES
index 49503d2edc7e..bc0025cdd1c9 100644
--- a/fs/cifs/CHANGES
+++ b/fs/cifs/CHANGES
@@ -1,6 +1,7 @@
1Version 1.62 1Version 1.62
2------------ 2------------
3Add sockopt=TCP_NODELAY mount option. 3Add sockopt=TCP_NODELAY mount option. EA (xattr) routines hardened
4to more strictly handle corrupt frames.
4 5
5Version 1.61 6Version 1.61
6------------ 7------------
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index ed751bb657db..a1c817eb291a 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -205,7 +205,7 @@ struct cifsUidInfo {
205struct cifsSesInfo { 205struct cifsSesInfo {
206 struct list_head smb_ses_list; 206 struct list_head smb_ses_list;
207 struct list_head tcon_list; 207 struct list_head tcon_list;
208 struct semaphore sesSem; 208 struct mutex session_mutex;
209#if 0 209#if 0
210 struct cifsUidInfo *uidInfo; /* pointer to user info */ 210 struct cifsUidInfo *uidInfo; /* pointer to user info */
211#endif 211#endif
diff --git a/fs/cifs/cifspdu.h b/fs/cifs/cifspdu.h
index 3877737f96a6..14d036d8db11 100644
--- a/fs/cifs/cifspdu.h
+++ b/fs/cifs/cifspdu.h
@@ -415,10 +415,10 @@ struct smb_hdr {
415 __u8 WordCount; 415 __u8 WordCount;
416} __attribute__((packed)); 416} __attribute__((packed));
417/* given a pointer to an smb_hdr retrieve the value of byte count */ 417/* given a pointer to an smb_hdr retrieve the value of byte count */
418#define BCC(smb_var) (*(__u16 *)((char *)smb_var + sizeof(struct smb_hdr) + (2 * smb_var->WordCount))) 418#define BCC(smb_var) (*(__u16 *)((char *)(smb_var) + sizeof(struct smb_hdr) + (2 * (smb_var)->WordCount)))
419#define BCC_LE(smb_var) (*(__le16 *)((char *)smb_var + sizeof(struct smb_hdr) + (2 * smb_var->WordCount))) 419#define BCC_LE(smb_var) (*(__le16 *)((char *)(smb_var) + sizeof(struct smb_hdr) + (2 * (smb_var)->WordCount)))
420/* given a pointer to an smb_hdr retrieve the pointer to the byte area */ 420/* given a pointer to an smb_hdr retrieve the pointer to the byte area */
421#define pByteArea(smb_var) ((unsigned char *)smb_var + sizeof(struct smb_hdr) + (2 * smb_var->WordCount) + 2) 421#define pByteArea(smb_var) ((unsigned char *)(smb_var) + sizeof(struct smb_hdr) + (2 * (smb_var)->WordCount) + 2)
422 422
423/* 423/*
424 * Computer Name Length (since Netbios name was length 16 with last byte 0x20) 424 * Computer Name Length (since Netbios name was length 16 with last byte 0x20)
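
The BCC/BCC_LE/pByteArea change above is pure macro hygiene: every use of the parameter is parenthesized so that passing an expression cannot re-associate with the surrounding operators. Without it, the cast binds to only part of the argument, as a reduced illustration shows:

    struct smb_hdr;     /* opaque for the illustration */

    #define BAD_AREA(smb)  ((char *)smb + sizeof(struct smb_hdr))
    #define GOOD_AREA(smb) ((char *)(smb) + sizeof(struct smb_hdr))

    /*
     * Given "struct smb_hdr *hdr":
     *   BAD_AREA(hdr + 1)  expands to (char *)hdr + 1 + sizeof(...)
     *                      -- the cast binds to hdr alone, so "+ 1"
     *                      advances one byte, not one whole header;
     *   GOOD_AREA(hdr + 1) expands to (char *)(hdr + 1) + sizeof(...)
     *                      -- one whole header, as the caller intended.
     */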
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index 5646727e33f5..88e2bc44ac58 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -363,13 +363,10 @@ extern int CIFSSMBNotify(const int xid, struct cifsTconInfo *tcon,
363 __u32 filter, struct file *file, int multishot, 363 __u32 filter, struct file *file, int multishot,
364 const struct nls_table *nls_codepage); 364 const struct nls_table *nls_codepage);
365extern ssize_t CIFSSMBQAllEAs(const int xid, struct cifsTconInfo *tcon, 365extern ssize_t CIFSSMBQAllEAs(const int xid, struct cifsTconInfo *tcon,
366 const unsigned char *searchName, char *EAData, 366 const unsigned char *searchName,
367 const unsigned char *ea_name, char *EAData,
367 size_t bufsize, const struct nls_table *nls_codepage, 368 size_t bufsize, const struct nls_table *nls_codepage,
368 int remap_special_chars); 369 int remap_special_chars);
369extern ssize_t CIFSSMBQueryEA(const int xid, struct cifsTconInfo *tcon,
370 const unsigned char *searchName, const unsigned char *ea_name,
371 unsigned char *ea_value, size_t buf_size,
372 const struct nls_table *nls_codepage, int remap_special_chars);
373extern int CIFSSMBSetEA(const int xid, struct cifsTconInfo *tcon, 370extern int CIFSSMBSetEA(const int xid, struct cifsTconInfo *tcon,
374 const char *fileName, const char *ea_name, 371 const char *fileName, const char *ea_name,
375 const void *ea_value, const __u16 ea_value_len, 372 const void *ea_value, const __u16 ea_value_len,
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 941441d3e386..9d17df3e0768 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -170,19 +170,19 @@ cifs_reconnect_tcon(struct cifsTconInfo *tcon, int smb_command)
170 * need to prevent multiple threads trying to simultaneously 170 * need to prevent multiple threads trying to simultaneously
171 * reconnect the same SMB session 171 * reconnect the same SMB session
172 */ 172 */
173 down(&ses->sesSem); 173 mutex_lock(&ses->session_mutex);
174 if (ses->need_reconnect) 174 if (ses->need_reconnect)
175 rc = cifs_setup_session(0, ses, nls_codepage); 175 rc = cifs_setup_session(0, ses, nls_codepage);
176 176
177 /* do we need to reconnect tcon? */ 177 /* do we need to reconnect tcon? */
178 if (rc || !tcon->need_reconnect) { 178 if (rc || !tcon->need_reconnect) {
179 up(&ses->sesSem); 179 mutex_unlock(&ses->session_mutex);
180 goto out; 180 goto out;
181 } 181 }
182 182
183 mark_open_files_invalid(tcon); 183 mark_open_files_invalid(tcon);
184 rc = CIFSTCon(0, ses, tcon->treeName, tcon, nls_codepage); 184 rc = CIFSTCon(0, ses, tcon->treeName, tcon, nls_codepage);
185 up(&ses->sesSem); 185 mutex_unlock(&ses->session_mutex);
186 cFYI(1, ("reconnect tcon rc = %d", rc)); 186 cFYI(1, ("reconnect tcon rc = %d", rc));
187 187
188 if (rc) 188 if (rc)
@@ -700,13 +700,13 @@ CIFSSMBLogoff(const int xid, struct cifsSesInfo *ses)
700 if (!ses || !ses->server) 700 if (!ses || !ses->server)
701 return -EIO; 701 return -EIO;
702 702
703 down(&ses->sesSem); 703 mutex_lock(&ses->session_mutex);
704 if (ses->need_reconnect) 704 if (ses->need_reconnect)
705 goto session_already_dead; /* no need to send SMBlogoff if uid 705 goto session_already_dead; /* no need to send SMBlogoff if uid
706 already closed due to reconnect */ 706 already closed due to reconnect */
707 rc = small_smb_init(SMB_COM_LOGOFF_ANDX, 2, NULL, (void **)&pSMB); 707 rc = small_smb_init(SMB_COM_LOGOFF_ANDX, 2, NULL, (void **)&pSMB);
708 if (rc) { 708 if (rc) {
709 up(&ses->sesSem); 709 mutex_unlock(&ses->session_mutex);
710 return rc; 710 return rc;
711 } 711 }
712 712
@@ -721,7 +721,7 @@ CIFSSMBLogoff(const int xid, struct cifsSesInfo *ses)
721 pSMB->AndXCommand = 0xFF; 721 pSMB->AndXCommand = 0xFF;
722 rc = SendReceiveNoRsp(xid, ses, (struct smb_hdr *) pSMB, 0); 722 rc = SendReceiveNoRsp(xid, ses, (struct smb_hdr *) pSMB, 0);
723session_already_dead: 723session_already_dead:
724 up(&ses->sesSem); 724 mutex_unlock(&ses->session_mutex);
725 725
726 /* if session dead then we do not need to do ulogoff, 726 /* if session dead then we do not need to do ulogoff,
727 since server closed smb session, no sense reporting 727 since server closed smb session, no sense reporting
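
The sesSem to session_mutex hunks are a mechanical semaphore-to-mutex conversion: the field was only ever used as a binary lock, with down()/up() pairs around session and tcon reconnect, and a struct mutex gives the same exclusion plus lockdep validation and strict owner semantics. The pattern in isolation (the init site is not shown in this diff and is assumed to move from init_MUTEX() to mutex_init()):

    #include <linux/mutex.h>

    struct example_ses {
            struct mutex session_mutex;     /* was: struct semaphore sesSem */
    };

    static void example_reconnect(struct example_ses *ses)
    {
            mutex_lock(&ses->session_mutex);   /* was: down(&ses->sesSem) */
            /* ... re-establish session, then tree connection ... */
            mutex_unlock(&ses->session_mutex); /* was: up(&ses->sesSem) */
    }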
@@ -5269,22 +5269,34 @@ int CIFSSMBNotify(const int xid, struct cifsTconInfo *tcon,
5269 cifs_buf_release(pSMB); 5269 cifs_buf_release(pSMB);
5270 return rc; 5270 return rc;
5271} 5271}
5272
5272#ifdef CONFIG_CIFS_XATTR 5273#ifdef CONFIG_CIFS_XATTR
5274/*
5275 * Do a path-based QUERY_ALL_EAS call and parse the result. This is a common
5276 * function used by listxattr and getxattr type calls. When ea_name is set,
5277 * it looks for that attribute name and stuffs that value into the EAData
5278 * buffer. When ea_name is NULL, it stuffs a list of attribute names into the
5279 * buffer. In both cases, the return value is either the length of the
5280 * resulting data or a negative error code. If EAData is a NULL pointer then
5281 * the data isn't copied to it, but the length is returned.
5282 */
5273ssize_t 5283ssize_t
5274CIFSSMBQAllEAs(const int xid, struct cifsTconInfo *tcon, 5284CIFSSMBQAllEAs(const int xid, struct cifsTconInfo *tcon,
5275 const unsigned char *searchName, 5285 const unsigned char *searchName, const unsigned char *ea_name,
5276 char *EAData, size_t buf_size, 5286 char *EAData, size_t buf_size,
5277 const struct nls_table *nls_codepage, int remap) 5287 const struct nls_table *nls_codepage, int remap)
5278{ 5288{
5279 /* BB assumes one setup word */ 5289 /* BB assumes one setup word */
5280 TRANSACTION2_QPI_REQ *pSMB = NULL; 5290 TRANSACTION2_QPI_REQ *pSMB = NULL;
5281 TRANSACTION2_QPI_RSP *pSMBr = NULL; 5291 TRANSACTION2_QPI_RSP *pSMBr = NULL;
5282 int rc = 0; 5292 int rc = 0;
5283 int bytes_returned; 5293 int bytes_returned;
5284 int name_len; 5294 int list_len;
5295 struct fealist *ea_response_data;
5285 struct fea *temp_fea; 5296 struct fea *temp_fea;
5286 char *temp_ptr; 5297 char *temp_ptr;
5287 __u16 params, byte_count; 5298 char *end_of_smb;
5299 __u16 params, byte_count, data_offset;
5288 5300
5289 cFYI(1, ("In Query All EAs path %s", searchName)); 5301 cFYI(1, ("In Query All EAs path %s", searchName));
5290QAllEAsRetry: 5302QAllEAsRetry:
@@ -5294,22 +5306,22 @@ QAllEAsRetry:
5294 return rc; 5306 return rc;
5295 5307
5296 if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { 5308 if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
5297 name_len = 5309 list_len =
5298 cifsConvertToUCS((__le16 *) pSMB->FileName, searchName, 5310 cifsConvertToUCS((__le16 *) pSMB->FileName, searchName,
5299 PATH_MAX, nls_codepage, remap); 5311 PATH_MAX, nls_codepage, remap);
5300 name_len++; /* trailing null */ 5312 list_len++; /* trailing null */
5301 name_len *= 2; 5313 list_len *= 2;
5302 } else { /* BB improve the check for buffer overruns BB */ 5314 } else { /* BB improve the check for buffer overruns BB */
5303 name_len = strnlen(searchName, PATH_MAX); 5315 list_len = strnlen(searchName, PATH_MAX);
5304 name_len++; /* trailing null */ 5316 list_len++; /* trailing null */
5305 strncpy(pSMB->FileName, searchName, name_len); 5317 strncpy(pSMB->FileName, searchName, list_len);
5306 } 5318 }
5307 5319
5308 params = 2 /* level */ + 4 /* reserved */ + name_len /* includes NUL */; 5320 params = 2 /* level */ + 4 /* reserved */ + list_len /* includes NUL */;
5309 pSMB->TotalDataCount = 0; 5321 pSMB->TotalDataCount = 0;
5310 pSMB->MaxParameterCount = cpu_to_le16(2); 5322 pSMB->MaxParameterCount = cpu_to_le16(2);
5311 /* BB find exact max SMB PDU from sess structure BB */ 5323 /* BB find exact max SMB PDU from sess structure BB */
5312 pSMB->MaxDataCount = cpu_to_le16(4000); 5324 pSMB->MaxDataCount = cpu_to_le16(CIFSMaxBufSize);
5313 pSMB->MaxSetupCount = 0; 5325 pSMB->MaxSetupCount = 0;
5314 pSMB->Reserved = 0; 5326 pSMB->Reserved = 0;
5315 pSMB->Flags = 0; 5327 pSMB->Flags = 0;
@@ -5334,237 +5346,117 @@ QAllEAsRetry:
5334 (struct smb_hdr *) pSMBr, &bytes_returned, 0); 5346 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
5335 if (rc) { 5347 if (rc) {
5336 cFYI(1, ("Send error in QueryAllEAs = %d", rc)); 5348 cFYI(1, ("Send error in QueryAllEAs = %d", rc));
5337 } else { /* decode response */ 5349 goto QAllEAsOut;
5338 rc = validate_t2((struct smb_t2_rsp *)pSMBr); 5350 }
5339 5351
5340 /* BB also check enough total bytes returned */ 5352
5341 /* BB we need to improve the validity checking 5353 /* BB also check enough total bytes returned */
5342 of these trans2 responses */ 5354 /* BB we need to improve the validity checking
5343 if (rc || (pSMBr->ByteCount < 4)) 5355 of these trans2 responses */
5344 rc = -EIO; /* bad smb */ 5356
5345 /* else if (pFindData){ 5357 rc = validate_t2((struct smb_t2_rsp *)pSMBr);
5346 memcpy((char *) pFindData, 5358 if (rc || (pSMBr->ByteCount < 4)) {
5347 (char *) &pSMBr->hdr.Protocol + 5359 rc = -EIO; /* bad smb */
5348 data_offset, kl); 5360 goto QAllEAsOut;
5349 }*/ else {
5350 /* check that length of list is not more than bcc */
5351 /* check that each entry does not go beyond length
5352 of list */
5353 /* check that each element of each entry does not
5354 go beyond end of list */
5355 __u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset);
5356 struct fealist *ea_response_data;
5357 rc = 0;
5358 /* validate_trans2_offsets() */
5359 /* BB check if start of smb + data_offset > &bcc+ bcc */
5360 ea_response_data = (struct fealist *)
5361 (((char *) &pSMBr->hdr.Protocol) +
5362 data_offset);
5363 name_len = le32_to_cpu(ea_response_data->list_len);
5364 cFYI(1, ("ea length %d", name_len));
5365 if (name_len <= 8) {
5366 /* returned EA size zeroed at top of function */
5367 cFYI(1, ("empty EA list returned from server"));
5368 } else {
5369 /* account for ea list len */
5370 name_len -= 4;
5371 temp_fea = ea_response_data->list;
5372 temp_ptr = (char *)temp_fea;
5373 while (name_len > 0) {
5374 __u16 value_len;
5375 name_len -= 4;
5376 temp_ptr += 4;
5377 rc += temp_fea->name_len;
5378 /* account for prefix user. and trailing null */
5379 rc = rc + 5 + 1;
5380 if (rc < (int)buf_size) {
5381 memcpy(EAData, "user.", 5);
5382 EAData += 5;
5383 memcpy(EAData, temp_ptr,
5384 temp_fea->name_len);
5385 EAData += temp_fea->name_len;
5386 /* null terminate name */
5387 *EAData = 0;
5388 EAData = EAData + 1;
5389 } else if (buf_size == 0) {
5390 /* skip copy - calc size only */
5391 } else {
5392 /* stop before overrun buffer */
5393 rc = -ERANGE;
5394 break;
5395 }
5396 name_len -= temp_fea->name_len;
5397 temp_ptr += temp_fea->name_len;
5398 /* account for trailing null */
5399 name_len--;
5400 temp_ptr++;
5401 value_len =
5402 le16_to_cpu(temp_fea->value_len);
5403 name_len -= value_len;
5404 temp_ptr += value_len;
5405 /* BB check that temp_ptr is still
5406 within the SMB BB*/
5407
5408 /* no trailing null to account for
5409 in value len */
5410 /* go on to next EA */
5411 temp_fea = (struct fea *)temp_ptr;
5412 }
5413 }
5414 }
5415 } 5361 }
5416 cifs_buf_release(pSMB);
5417 if (rc == -EAGAIN)
5418 goto QAllEAsRetry;
5419 5362
5420 return (ssize_t)rc; 5363 /* check that length of list is not more than bcc */
5421} 5364 /* check that each entry does not go beyond length
5365 of list */
5366 /* check that each element of each entry does not
5367 go beyond end of list */
5368 /* validate_trans2_offsets() */
5369 /* BB check if start of smb + data_offset > &bcc+ bcc */
5422 5370
5423ssize_t CIFSSMBQueryEA(const int xid, struct cifsTconInfo *tcon, 5371 data_offset = le16_to_cpu(pSMBr->t2.DataOffset);
5424 const unsigned char *searchName, const unsigned char *ea_name, 5372 ea_response_data = (struct fealist *)
5425 unsigned char *ea_value, size_t buf_size, 5373 (((char *) &pSMBr->hdr.Protocol) + data_offset);
5426 const struct nls_table *nls_codepage, int remap)
5427{
5428 TRANSACTION2_QPI_REQ *pSMB = NULL;
5429 TRANSACTION2_QPI_RSP *pSMBr = NULL;
5430 int rc = 0;
5431 int bytes_returned;
5432 int name_len;
5433 struct fea *temp_fea;
5434 char *temp_ptr;
5435 __u16 params, byte_count;
5436 5374
5437 cFYI(1, ("In Query EA path %s", searchName)); 5375 list_len = le32_to_cpu(ea_response_data->list_len);
5438QEARetry: 5376 cFYI(1, ("ea length %d", list_len));
5439 rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB, 5377 if (list_len <= 8) {
5440 (void **) &pSMBr); 5378 cFYI(1, ("empty EA list returned from server"));
5441 if (rc) 5379 goto QAllEAsOut;
5442 return rc; 5380 }
5443 5381
5444 if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { 5382 /* make sure list_len doesn't go past end of SMB */
5445 name_len = 5383 end_of_smb = (char *)pByteArea(&pSMBr->hdr) + BCC(&pSMBr->hdr);
5446 cifsConvertToUCS((__le16 *) pSMB->FileName, searchName, 5384 if ((char *)ea_response_data + list_len > end_of_smb) {
5447 PATH_MAX, nls_codepage, remap); 5385 cFYI(1, ("EA list appears to go beyond SMB"));
5448 name_len++; /* trailing null */ 5386 rc = -EIO;
5449 name_len *= 2; 5387 goto QAllEAsOut;
5450 } else { /* BB improve the check for buffer overruns BB */
5451 name_len = strnlen(searchName, PATH_MAX);
5452 name_len++; /* trailing null */
5453 strncpy(pSMB->FileName, searchName, name_len);
5454 } 5388 }
5455 5389
5456 params = 2 /* level */ + 4 /* reserved */ + name_len /* includes NUL */; 5390 /* account for ea list len */
5457 pSMB->TotalDataCount = 0; 5391 list_len -= 4;
5458 pSMB->MaxParameterCount = cpu_to_le16(2); 5392 temp_fea = ea_response_data->list;
5459 /* BB find exact max SMB PDU from sess structure BB */ 5393 temp_ptr = (char *)temp_fea;
5460 pSMB->MaxDataCount = cpu_to_le16(4000); 5394 while (list_len > 0) {
5461 pSMB->MaxSetupCount = 0; 5395 unsigned int name_len;
5462 pSMB->Reserved = 0; 5396 __u16 value_len;
5463 pSMB->Flags = 0; 5397
5464 pSMB->Timeout = 0; 5398 list_len -= 4;
5465 pSMB->Reserved2 = 0; 5399 temp_ptr += 4;
5466 pSMB->ParameterOffset = cpu_to_le16(offsetof( 5400 /* make sure we can read name_len and value_len */
5467 struct smb_com_transaction2_qpi_req, InformationLevel) - 4); 5401 if (list_len < 0) {
5468 pSMB->DataCount = 0; 5402 cFYI(1, ("EA entry goes beyond length of list"));
5469 pSMB->DataOffset = 0; 5403 rc = -EIO;
5470 pSMB->SetupCount = 1; 5404 goto QAllEAsOut;
5471 pSMB->Reserved3 = 0; 5405 }
5472 pSMB->SubCommand = cpu_to_le16(TRANS2_QUERY_PATH_INFORMATION);
5473 byte_count = params + 1 /* pad */ ;
5474 pSMB->TotalParameterCount = cpu_to_le16(params);
5475 pSMB->ParameterCount = pSMB->TotalParameterCount;
5476 pSMB->InformationLevel = cpu_to_le16(SMB_INFO_QUERY_ALL_EAS);
5477 pSMB->Reserved4 = 0;
5478 pSMB->hdr.smb_buf_length += byte_count;
5479 pSMB->ByteCount = cpu_to_le16(byte_count);
5480 5406
5481 rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, 5407 name_len = temp_fea->name_len;
5482 (struct smb_hdr *) pSMBr, &bytes_returned, 0); 5408 value_len = le16_to_cpu(temp_fea->value_len);
5483 if (rc) { 5409 list_len -= name_len + 1 + value_len;
5484 cFYI(1, ("Send error in Query EA = %d", rc)); 5410 if (list_len < 0) {
5485 } else { /* decode response */ 5411 cFYI(1, ("EA entry goes beyond length of list"));
5486 rc = validate_t2((struct smb_t2_rsp *)pSMBr); 5412 rc = -EIO;
5413 goto QAllEAsOut;
5414 }
5487 5415
5488 /* BB also check enough total bytes returned */ 5416 if (ea_name) {
5489 /* BB we need to improve the validity checking 5417 if (strncmp(ea_name, temp_ptr, name_len) == 0) {
5490 of these trans2 responses */ 5418 temp_ptr += name_len + 1;
5491 if (rc || (pSMBr->ByteCount < 4)) 5419 rc = value_len;
5492 rc = -EIO; /* bad smb */ 5420 if (buf_size == 0)
5493 /* else if (pFindData){ 5421 goto QAllEAsOut;
5494 memcpy((char *) pFindData, 5422 if ((size_t)value_len > buf_size) {
5495 (char *) &pSMBr->hdr.Protocol + 5423 rc = -ERANGE;
5496 data_offset, kl); 5424 goto QAllEAsOut;
5497 }*/ else {
5498 /* check that length of list is not more than bcc */
5499 /* check that each entry does not go beyond length
5500 of list */
5501 /* check that each element of each entry does not
5502 go beyond end of list */
5503 __u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset);
5504 struct fealist *ea_response_data;
5505 rc = -ENODATA;
5506 /* validate_trans2_offsets() */
5507 /* BB check if start of smb + data_offset > &bcc+ bcc*/
5508 ea_response_data = (struct fealist *)
5509 (((char *) &pSMBr->hdr.Protocol) +
5510 data_offset);
5511 name_len = le32_to_cpu(ea_response_data->list_len);
5512 cFYI(1, ("ea length %d", name_len));
5513 if (name_len <= 8) {
5514 /* returned EA size zeroed at top of function */
5515 cFYI(1, ("empty EA list returned from server"));
5516 } else {
5517 /* account for ea list len */
5518 name_len -= 4;
5519 temp_fea = ea_response_data->list;
5520 temp_ptr = (char *)temp_fea;
5521 /* loop through checking if we have a matching
5522 name and then return the associated value */
5523 while (name_len > 0) {
5524 __u16 value_len;
5525 name_len -= 4;
5526 temp_ptr += 4;
5527 value_len =
5528 le16_to_cpu(temp_fea->value_len);
5529 /* BB validate that value_len falls within SMB,
5530 even though maximum for name_len is 255 */
5531 if (memcmp(temp_fea->name, ea_name,
5532 temp_fea->name_len) == 0) {
5533 /* found a match */
5534 rc = value_len;
5535 /* account for prefix user. and trailing null */
5536 if (rc <= (int)buf_size) {
5537 memcpy(ea_value,
5538 temp_fea->name+temp_fea->name_len+1,
5539 rc);
5540 /* ea values, unlike ea
5541 names, are not null
5542 terminated */
5543 } else if (buf_size == 0) {
5544 /* skip copy - calc size only */
5545 } else {
5546 /* stop before overrun buffer */
5547 rc = -ERANGE;
5548 }
5549 break;
5550 }
5551 name_len -= temp_fea->name_len;
5552 temp_ptr += temp_fea->name_len;
5553 /* account for trailing null */
5554 name_len--;
5555 temp_ptr++;
5556 name_len -= value_len;
5557 temp_ptr += value_len;
5558 /* No trailing null to account for in
5559 value_len. Go on to next EA */
5560 temp_fea = (struct fea *)temp_ptr;
5561 } 5425 }
5426 memcpy(EAData, temp_ptr, value_len);
5427 goto QAllEAsOut;
5428 }
5429 } else {
5430 /* account for prefix user. and trailing null */
5431 rc += (5 + 1 + name_len);
5432 if (rc < (int) buf_size) {
5433 memcpy(EAData, "user.", 5);
5434 EAData += 5;
5435 memcpy(EAData, temp_ptr, name_len);
5436 EAData += name_len;
5437 /* null terminate name */
5438 *EAData = 0;
5439 ++EAData;
5440 } else if (buf_size == 0) {
5441 /* skip copy - calc size only */
5442 } else {
5443 /* stop before overrun buffer */
5444 rc = -ERANGE;
5445 break;
5562 } 5446 }
5563 } 5447 }
5448 temp_ptr += name_len + 1 + value_len;
5449 temp_fea = (struct fea *)temp_ptr;
5564 } 5450 }
5451
5452 /* didn't find the named attribute */
5453 if (ea_name)
5454 rc = -ENODATA;
5455
5456QAllEAsOut:
5565 cifs_buf_release(pSMB); 5457 cifs_buf_release(pSMB);
5566 if (rc == -EAGAIN) 5458 if (rc == -EAGAIN)
5567 goto QEARetry; 5459 goto QAllEAsRetry;
5568 5460
5569 return (ssize_t)rc; 5461 return (ssize_t)rc;
5570} 5462}
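
[note] The rewrite above flattens CIFSSMBQAllEAs' nested decode branches into straight-line code with a single QAllEAsOut exit, and adds the bounds checks the old BB comments only promised: list_len is capped at the end of the SMB, and each entry's name_len/value_len must fit inside the remaining list before it is dereferenced. A minimal user-space sketch of the same walk (the struct layout and "user." prefix mirror the diff; this is an illustration, not the kernel code):

    #include <stdint.h>
    #include <stdio.h>

    struct fea {
        uint8_t  ea_flags;
        uint8_t  name_len;
        uint16_t value_len;   /* little-endian on the wire; native here */
        char     name[];      /* name bytes, a NUL, then the value */
    };

    /* Returns 0, or -1 as soon as an entry would overrun list_end. */
    static int walk_ea_list(const char *p, const char *list_end)
    {
        while (p < list_end) {
            const struct fea *fea = (const struct fea *)p;
            size_t entry_len = 4 + fea->name_len + 1 + fea->value_len;

            if ((size_t)(list_end - p) < entry_len)
                return -1;    /* entry goes beyond length of list */
            printf("user.%.*s\n", fea->name_len, fea->name);
            p += entry_len;
        }
        return 0;
    }

The 4-byte header plus name_len + 1 + value_len accounting matches the two list_len decrements in the kernel loop above.
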
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 2e9e09ca0e30..45eb6cba793f 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -2388,13 +2388,13 @@ try_mount_again:
2388 */ 2388 */
2389 cifs_put_tcp_session(srvTcp); 2389 cifs_put_tcp_session(srvTcp);
2390 2390
2391 down(&pSesInfo->sesSem); 2391 mutex_lock(&pSesInfo->session_mutex);
2392 if (pSesInfo->need_reconnect) { 2392 if (pSesInfo->need_reconnect) {
2393 cFYI(1, ("Session needs reconnect")); 2393 cFYI(1, ("Session needs reconnect"));
2394 rc = cifs_setup_session(xid, pSesInfo, 2394 rc = cifs_setup_session(xid, pSesInfo,
2395 cifs_sb->local_nls); 2395 cifs_sb->local_nls);
2396 } 2396 }
2397 up(&pSesInfo->sesSem); 2397 mutex_unlock(&pSesInfo->session_mutex);
2398 } else if (!rc) { 2398 } else if (!rc) {
2399 cFYI(1, ("Existing smb sess not found")); 2399 cFYI(1, ("Existing smb sess not found"));
2400 pSesInfo = sesInfoAlloc(); 2400 pSesInfo = sesInfoAlloc();
@@ -2437,12 +2437,12 @@ try_mount_again:
2437 } 2437 }
2438 pSesInfo->linux_uid = volume_info->linux_uid; 2438 pSesInfo->linux_uid = volume_info->linux_uid;
2439 pSesInfo->overrideSecFlg = volume_info->secFlg; 2439 pSesInfo->overrideSecFlg = volume_info->secFlg;
2440 down(&pSesInfo->sesSem); 2440 mutex_lock(&pSesInfo->session_mutex);
2441 2441
2442 /* BB FIXME need to pass vol->secFlgs BB */ 2442 /* BB FIXME need to pass vol->secFlgs BB */
2443 rc = cifs_setup_session(xid, pSesInfo, 2443 rc = cifs_setup_session(xid, pSesInfo,
2444 cifs_sb->local_nls); 2444 cifs_sb->local_nls);
2445 up(&pSesInfo->sesSem); 2445 mutex_unlock(&pSesInfo->session_mutex);
2446 } 2446 }
2447 2447
2448 /* search for existing tcon to this server share */ 2448 /* search for existing tcon to this server share */
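
[note] sesSem was a semaphore used strictly as a lock, so the conversion to session_mutex is mechanical (down/up become mutex_lock/mutex_unlock, and init_MUTEX becomes mutex_init in misc.c below) and buys owner tracking plus lockdep coverage. A user-space analogue of the pattern, sketched with pthreads:

    #include <pthread.h>

    static pthread_mutex_t session_mutex = PTHREAD_MUTEX_INITIALIZER;

    static void reconnect_session(void)
    {
        pthread_mutex_lock(&session_mutex);
        /* re-negotiate the SMB session while holding the lock */
        pthread_mutex_unlock(&session_mutex);
    }
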
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 057e1dae12ab..3d8f8a96f5a3 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -2289,9 +2289,9 @@ cifs_oplock_break(struct slow_work *work)
2289 if (inode && S_ISREG(inode->i_mode)) { 2289 if (inode && S_ISREG(inode->i_mode)) {
2290#ifdef CONFIG_CIFS_EXPERIMENTAL 2290#ifdef CONFIG_CIFS_EXPERIMENTAL
2291 if (cinode->clientCanCacheAll == 0) 2291 if (cinode->clientCanCacheAll == 0)
2292 break_lease(inode, FMODE_READ); 2292 break_lease(inode, O_RDONLY);
2293 else if (cinode->clientCanCacheRead == 0) 2293 else if (cinode->clientCanCacheRead == 0)
2294 break_lease(inode, FMODE_WRITE); 2294 break_lease(inode, O_WRONLY);
2295#endif 2295#endif
2296 rc = filemap_fdatawrite(inode->i_mapping); 2296 rc = filemap_fdatawrite(inode->i_mapping);
2297 if (cinode->clientCanCacheRead == 0) { 2297 if (cinode->clientCanCacheRead == 0) {
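
[note] The two flag families here are not interchangeable: FMODE_* bits describe an already-open struct file, while break_lease() evidently takes O_* open-intent flags after the lease API change this hunk tracks. Numerically, FMODE_READ is 0x1, which is O_WRONLY's encoding, so the old call asked for the wrong kind of break. A quick illustrative check of the O_* values:

    #include <fcntl.h>
    #include <stdio.h>

    int main(void)
    {
        /* O_RDONLY is 0; kernel-only FMODE_READ is 0x1, colliding
         * with O_WRONLY -- hence the fix above. */
        printf("O_RDONLY=%#x O_WRONLY=%#x O_RDWR=%#x\n",
               O_RDONLY, O_WRONLY, O_RDWR);
        return 0;
    }
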
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index e3fda978f481..8bdbc818164c 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -111,6 +111,7 @@ cifs_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr)
111 111
112 cifs_i->delete_pending = fattr->cf_flags & CIFS_FATTR_DELETE_PENDING; 112 cifs_i->delete_pending = fattr->cf_flags & CIFS_FATTR_DELETE_PENDING;
113 113
114 cifs_i->server_eof = fattr->cf_eof;
114 /* 115 /*
115 * Can't safely change the file size here if the client is writing to 116 * Can't safely change the file size here if the client is writing to
116 * it due to potential races. 117 * it due to potential races.
@@ -366,7 +367,7 @@ static int cifs_sfu_mode(struct cifs_fattr *fattr, const unsigned char *path,
366 char ea_value[4]; 367 char ea_value[4];
367 __u32 mode; 368 __u32 mode;
368 369
369 rc = CIFSSMBQueryEA(xid, cifs_sb->tcon, path, "SETFILEBITS", 370 rc = CIFSSMBQAllEAs(xid, cifs_sb->tcon, path, "SETFILEBITS",
370 ea_value, 4 /* size of buf */, cifs_sb->local_nls, 371 ea_value, 4 /* size of buf */, cifs_sb->local_nls,
371 cifs_sb->mnt_cifs_flags & 372 cifs_sb->mnt_cifs_flags &
372 CIFS_MOUNT_MAP_SPECIAL_CHR); 373 CIFS_MOUNT_MAP_SPECIAL_CHR);
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
index d27d4ec6579b..d1474996a812 100644
--- a/fs/cifs/misc.c
+++ b/fs/cifs/misc.c
@@ -79,7 +79,7 @@ sesInfoAlloc(void)
79 ++ret_buf->ses_count; 79 ++ret_buf->ses_count;
80 INIT_LIST_HEAD(&ret_buf->smb_ses_list); 80 INIT_LIST_HEAD(&ret_buf->smb_ses_list);
81 INIT_LIST_HEAD(&ret_buf->tcon_list); 81 INIT_LIST_HEAD(&ret_buf->tcon_list);
82 init_MUTEX(&ret_buf->sesSem); 82 mutex_init(&ret_buf->session_mutex);
83 } 83 }
84 return ret_buf; 84 return ret_buf;
85} 85}
diff --git a/fs/cifs/xattr.c b/fs/cifs/xattr.c
index a75afa3dd9e1..3e2ef0de1209 100644
--- a/fs/cifs/xattr.c
+++ b/fs/cifs/xattr.c
@@ -244,7 +244,7 @@ ssize_t cifs_getxattr(struct dentry *direntry, const char *ea_name,
244 /* revalidate/getattr then populate from inode */ 244 /* revalidate/getattr then populate from inode */
245 } /* BB add else when above is implemented */ 245 } /* BB add else when above is implemented */
246 ea_name += 5; /* skip past user. prefix */ 246 ea_name += 5; /* skip past user. prefix */
247 rc = CIFSSMBQueryEA(xid, pTcon, full_path, ea_name, ea_value, 247 rc = CIFSSMBQAllEAs(xid, pTcon, full_path, ea_name, ea_value,
248 buf_size, cifs_sb->local_nls, 248 buf_size, cifs_sb->local_nls,
249 cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR); 249 cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
250 } else if (strncmp(ea_name, CIFS_XATTR_OS2_PREFIX, 4) == 0) { 250 } else if (strncmp(ea_name, CIFS_XATTR_OS2_PREFIX, 4) == 0) {
@@ -252,7 +252,7 @@ ssize_t cifs_getxattr(struct dentry *direntry, const char *ea_name,
252 goto get_ea_exit; 252 goto get_ea_exit;
253 253
254 ea_name += 4; /* skip past os2. prefix */ 254 ea_name += 4; /* skip past os2. prefix */
255 rc = CIFSSMBQueryEA(xid, pTcon, full_path, ea_name, ea_value, 255 rc = CIFSSMBQAllEAs(xid, pTcon, full_path, ea_name, ea_value,
256 buf_size, cifs_sb->local_nls, 256 buf_size, cifs_sb->local_nls,
257 cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR); 257 cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
258 } else if (strncmp(ea_name, POSIX_ACL_XATTR_ACCESS, 258 } else if (strncmp(ea_name, POSIX_ACL_XATTR_ACCESS,
@@ -364,8 +364,8 @@ ssize_t cifs_listxattr(struct dentry *direntry, char *data, size_t buf_size)
364 /* if proc/fs/cifs/streamstoxattr is set then 364 /* if proc/fs/cifs/streamstoxattr is set then
365 search server for EAs or streams to 365 search server for EAs or streams to
366 returns as xattrs */ 366 returns as xattrs */
367 rc = CIFSSMBQAllEAs(xid, pTcon, full_path, data, buf_size, 367 rc = CIFSSMBQAllEAs(xid, pTcon, full_path, NULL, data,
368 cifs_sb->local_nls, 368 buf_size, cifs_sb->local_nls,
369 cifs_sb->mnt_cifs_flags & 369 cifs_sb->mnt_cifs_flags &
370 CIFS_MOUNT_MAP_SPECIAL_CHR); 370 CIFS_MOUNT_MAP_SPECIAL_CHR);
371 371
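
[note] With the single-EA lookup folded into CIFSSMBQAllEAs, both xattr entry points now share one wire request: cifs_getxattr passes the attribute name, cifs_listxattr passes NULL and receives the "user."-prefixed name list. A toy model of that dual-mode convention (shapes only; the real function also takes the tcon, path, codepage and remap arguments):

    #include <stdio.h>
    #include <string.h>

    static int query_eas(const char *name, char *buf, size_t buf_size)
    {
        const char *known_name = "SETFILEBITS", *known_val = "0755";

        if (name == NULL)                 /* list mode */
            return snprintf(buf, buf_size, "user.%s", known_name) + 1;
        if (strcmp(name, known_name) != 0)
            return -1;                    /* -ENODATA in the kernel */
        if (buf_size == 0)                /* size-only probe */
            return (int)strlen(known_val);
        if (strlen(known_val) > buf_size)
            return -2;                    /* -ERANGE in the kernel */
        memcpy(buf, known_val, strlen(known_val));
        return (int)strlen(known_val);
    }
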
diff --git a/fs/dcache.c b/fs/dcache.c
index 953173a293a9..f1358e5c3a59 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -257,6 +257,7 @@ kill_it:
257 if (dentry) 257 if (dentry)
258 goto repeat; 258 goto repeat;
259} 259}
260EXPORT_SYMBOL(dput);
260 261
261/** 262/**
262 * d_invalidate - invalidate a dentry 263 * d_invalidate - invalidate a dentry
@@ -314,6 +315,7 @@ int d_invalidate(struct dentry * dentry)
314 spin_unlock(&dcache_lock); 315 spin_unlock(&dcache_lock);
315 return 0; 316 return 0;
316} 317}
318EXPORT_SYMBOL(d_invalidate);
317 319
318/* This should be called _only_ with dcache_lock held */ 320/* This should be called _only_ with dcache_lock held */
319 321
@@ -328,6 +330,7 @@ struct dentry * dget_locked(struct dentry *dentry)
328{ 330{
329 return __dget_locked(dentry); 331 return __dget_locked(dentry);
330} 332}
333EXPORT_SYMBOL(dget_locked);
331 334
332/** 335/**
333 * d_find_alias - grab a hashed alias of inode 336 * d_find_alias - grab a hashed alias of inode
@@ -384,6 +387,7 @@ struct dentry * d_find_alias(struct inode *inode)
384 } 387 }
385 return de; 388 return de;
386} 389}
390EXPORT_SYMBOL(d_find_alias);
387 391
388/* 392/*
389 * Try to kill dentries associated with this inode. 393 * Try to kill dentries associated with this inode.
@@ -408,6 +412,7 @@ restart:
408 } 412 }
409 spin_unlock(&dcache_lock); 413 spin_unlock(&dcache_lock);
410} 414}
415EXPORT_SYMBOL(d_prune_aliases);
411 416
412/* 417/*
413 * Throw away a dentry - free the inode, dput the parent. This requires that 418 * Throw away a dentry - free the inode, dput the parent. This requires that
@@ -610,6 +615,7 @@ void shrink_dcache_sb(struct super_block * sb)
610{ 615{
611 __shrink_dcache_sb(sb, NULL, 0); 616 __shrink_dcache_sb(sb, NULL, 0);
612} 617}
618EXPORT_SYMBOL(shrink_dcache_sb);
613 619
614/* 620/*
615 * destroy a single subtree of dentries for unmount 621 * destroy a single subtree of dentries for unmount
@@ -792,6 +798,7 @@ positive:
792 spin_unlock(&dcache_lock); 798 spin_unlock(&dcache_lock);
793 return 1; 799 return 1;
794} 800}
801EXPORT_SYMBOL(have_submounts);
795 802
796/* 803/*
797 * Search the dentry child list for the specified parent, 804 * Search the dentry child list for the specified parent,
@@ -876,6 +883,7 @@ void shrink_dcache_parent(struct dentry * parent)
876 while ((found = select_parent(parent)) != 0) 883 while ((found = select_parent(parent)) != 0)
877 __shrink_dcache_sb(sb, &found, 0); 884 __shrink_dcache_sb(sb, &found, 0);
878} 885}
886EXPORT_SYMBOL(shrink_dcache_parent);
879 887
880/* 888/*
881 * Scan `nr' dentries and return the number which remain. 889 * Scan `nr' dentries and return the number which remain.
@@ -968,6 +976,7 @@ struct dentry *d_alloc(struct dentry * parent, const struct qstr *name)
968 976
969 return dentry; 977 return dentry;
970} 978}
979EXPORT_SYMBOL(d_alloc);
971 980
972struct dentry *d_alloc_name(struct dentry *parent, const char *name) 981struct dentry *d_alloc_name(struct dentry *parent, const char *name)
973{ 982{
@@ -1012,6 +1021,7 @@ void d_instantiate(struct dentry *entry, struct inode * inode)
1012 spin_unlock(&dcache_lock); 1021 spin_unlock(&dcache_lock);
1013 security_d_instantiate(entry, inode); 1022 security_d_instantiate(entry, inode);
1014} 1023}
1024EXPORT_SYMBOL(d_instantiate);
1015 1025
1016/** 1026/**
1017 * d_instantiate_unique - instantiate a non-aliased dentry 1027 * d_instantiate_unique - instantiate a non-aliased dentry
@@ -1108,6 +1118,7 @@ struct dentry * d_alloc_root(struct inode * root_inode)
1108 } 1118 }
1109 return res; 1119 return res;
1110} 1120}
1121EXPORT_SYMBOL(d_alloc_root);
1111 1122
1112static inline struct hlist_head *d_hash(struct dentry *parent, 1123static inline struct hlist_head *d_hash(struct dentry *parent,
1113 unsigned long hash) 1124 unsigned long hash)
@@ -1211,7 +1222,6 @@ struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry)
1211 BUG_ON(!(new->d_flags & DCACHE_DISCONNECTED)); 1222 BUG_ON(!(new->d_flags & DCACHE_DISCONNECTED));
1212 spin_unlock(&dcache_lock); 1223 spin_unlock(&dcache_lock);
1213 security_d_instantiate(new, inode); 1224 security_d_instantiate(new, inode);
1214 d_rehash(dentry);
1215 d_move(new, dentry); 1225 d_move(new, dentry);
1216 iput(inode); 1226 iput(inode);
1217 } else { 1227 } else {
@@ -1225,6 +1235,7 @@ struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry)
1225 d_add(dentry, inode); 1235 d_add(dentry, inode);
1226 return new; 1236 return new;
1227} 1237}
1238EXPORT_SYMBOL(d_splice_alias);
1228 1239
1229/** 1240/**
1230 * d_add_ci - lookup or allocate new dentry with case-exact name 1241 * d_add_ci - lookup or allocate new dentry with case-exact name
@@ -1314,6 +1325,7 @@ err_out:
1314 iput(inode); 1325 iput(inode);
1315 return ERR_PTR(error); 1326 return ERR_PTR(error);
1316} 1327}
1328EXPORT_SYMBOL(d_add_ci);
1317 1329
1318/** 1330/**
1319 * d_lookup - search for a dentry 1331 * d_lookup - search for a dentry
@@ -1357,6 +1369,7 @@ struct dentry * d_lookup(struct dentry * parent, struct qstr * name)
1357 } while (read_seqretry(&rename_lock, seq)); 1369 } while (read_seqretry(&rename_lock, seq));
1358 return dentry; 1370 return dentry;
1359} 1371}
1372EXPORT_SYMBOL(d_lookup);
1360 1373
1361struct dentry * __d_lookup(struct dentry * parent, struct qstr * name) 1374struct dentry * __d_lookup(struct dentry * parent, struct qstr * name)
1362{ 1375{
@@ -1483,6 +1496,7 @@ int d_validate(struct dentry *dentry, struct dentry *dparent)
1483out: 1496out:
1484 return 0; 1497 return 0;
1485} 1498}
1499EXPORT_SYMBOL(d_validate);
1486 1500
1487/* 1501/*
1488 * When a file is deleted, we have two options: 1502 * When a file is deleted, we have two options:
@@ -1528,6 +1542,7 @@ void d_delete(struct dentry * dentry)
1528 1542
1529 fsnotify_nameremove(dentry, isdir); 1543 fsnotify_nameremove(dentry, isdir);
1530} 1544}
1545EXPORT_SYMBOL(d_delete);
1531 1546
1532static void __d_rehash(struct dentry * entry, struct hlist_head *list) 1547static void __d_rehash(struct dentry * entry, struct hlist_head *list)
1533{ 1548{
@@ -1556,6 +1571,7 @@ void d_rehash(struct dentry * entry)
1556 spin_unlock(&entry->d_lock); 1571 spin_unlock(&entry->d_lock);
1557 spin_unlock(&dcache_lock); 1572 spin_unlock(&dcache_lock);
1558} 1573}
1574EXPORT_SYMBOL(d_rehash);
1559 1575
1560/* 1576/*
1561 * When switching names, the actual string doesn't strictly have to 1577 * When switching names, the actual string doesn't strictly have to
@@ -1702,6 +1718,7 @@ void d_move(struct dentry * dentry, struct dentry * target)
1702 d_move_locked(dentry, target); 1718 d_move_locked(dentry, target);
1703 spin_unlock(&dcache_lock); 1719 spin_unlock(&dcache_lock);
1704} 1720}
1721EXPORT_SYMBOL(d_move);
1705 1722
1706/** 1723/**
1707 * d_ancestor - search for an ancestor 1724 * d_ancestor - search for an ancestor
@@ -1868,6 +1885,7 @@ shouldnt_be_hashed:
1868 spin_unlock(&dcache_lock); 1885 spin_unlock(&dcache_lock);
1869 BUG(); 1886 BUG();
1870} 1887}
1888EXPORT_SYMBOL_GPL(d_materialise_unique);
1871 1889
1872static int prepend(char **buffer, int *buflen, const char *str, int namelen) 1890static int prepend(char **buffer, int *buflen, const char *str, int namelen)
1873{ 1891{
@@ -2005,6 +2023,7 @@ char *d_path(const struct path *path, char *buf, int buflen)
2005 path_put(&root); 2023 path_put(&root);
2006 return res; 2024 return res;
2007} 2025}
2026EXPORT_SYMBOL(d_path);
2008 2027
2009/* 2028/*
2010 * Helper function for dentry_operations.d_dname() members 2029 * Helper function for dentry_operations.d_dname() members
@@ -2171,6 +2190,30 @@ int is_subdir(struct dentry *new_dentry, struct dentry *old_dentry)
2171 return result; 2190 return result;
2172} 2191}
2173 2192
2193int path_is_under(struct path *path1, struct path *path2)
2194{
2195 struct vfsmount *mnt = path1->mnt;
2196 struct dentry *dentry = path1->dentry;
2197 int res;
2198 spin_lock(&vfsmount_lock);
2199 if (mnt != path2->mnt) {
2200 for (;;) {
2201 if (mnt->mnt_parent == mnt) {
2202 spin_unlock(&vfsmount_lock);
2203 return 0;
2204 }
2205 if (mnt->mnt_parent == path2->mnt)
2206 break;
2207 mnt = mnt->mnt_parent;
2208 }
2209 dentry = mnt->mnt_mountpoint;
2210 }
2211 res = is_subdir(dentry, path2->dentry);
2212 spin_unlock(&vfsmount_lock);
2213 return res;
2214}
2215EXPORT_SYMBOL(path_is_under);
2216
2174void d_genocide(struct dentry *root) 2217void d_genocide(struct dentry *root)
2175{ 2218{
2176 struct dentry *this_parent = root; 2219 struct dentry *this_parent = root;
@@ -2228,6 +2271,7 @@ ino_t find_inode_number(struct dentry *dir, struct qstr *name)
2228 } 2271 }
2229 return ino; 2272 return ino;
2230} 2273}
2274EXPORT_SYMBOL(find_inode_number);
2231 2275
2232static __initdata unsigned long dhash_entries; 2276static __initdata unsigned long dhash_entries;
2233static int __init set_dhash_entries(char *str) 2277static int __init set_dhash_entries(char *str)
@@ -2297,6 +2341,7 @@ static void __init dcache_init(void)
2297 2341
2298/* SLAB cache for __getname() consumers */ 2342/* SLAB cache for __getname() consumers */
2299struct kmem_cache *names_cachep __read_mostly; 2343struct kmem_cache *names_cachep __read_mostly;
2344EXPORT_SYMBOL(names_cachep);
2300 2345
2301EXPORT_SYMBOL(d_genocide); 2346EXPORT_SYMBOL(d_genocide);
2302 2347
@@ -2326,26 +2371,3 @@ void __init vfs_caches_init(unsigned long mempages)
2326 bdev_cache_init(); 2371 bdev_cache_init();
2327 chrdev_init(); 2372 chrdev_init();
2328} 2373}
2329
2330EXPORT_SYMBOL(d_alloc);
2331EXPORT_SYMBOL(d_alloc_root);
2332EXPORT_SYMBOL(d_delete);
2333EXPORT_SYMBOL(d_find_alias);
2334EXPORT_SYMBOL(d_instantiate);
2335EXPORT_SYMBOL(d_invalidate);
2336EXPORT_SYMBOL(d_lookup);
2337EXPORT_SYMBOL(d_move);
2338EXPORT_SYMBOL_GPL(d_materialise_unique);
2339EXPORT_SYMBOL(d_path);
2340EXPORT_SYMBOL(d_prune_aliases);
2341EXPORT_SYMBOL(d_rehash);
2342EXPORT_SYMBOL(d_splice_alias);
2343EXPORT_SYMBOL(d_add_ci);
2344EXPORT_SYMBOL(d_validate);
2345EXPORT_SYMBOL(dget_locked);
2346EXPORT_SYMBOL(dput);
2347EXPORT_SYMBOL(find_inode_number);
2348EXPORT_SYMBOL(have_submounts);
2349EXPORT_SYMBOL(names_cachep);
2350EXPORT_SYMBOL(shrink_dcache_parent);
2351EXPORT_SYMBOL(shrink_dcache_sb);
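
[note] Besides moving each EXPORT_SYMBOL next to the function it exports (and dropping the stale d_rehash() call in d_splice_alias()), this hunk adds path_is_under(): climb path1's mount chain under vfsmount_lock until it either hits the root mount (not under) or reaches path2's mount, then finish with an is_subdir() dentry walk from the mountpoint. A self-contained toy of that climb, with ints standing in for dentries:

    #include <stdbool.h>

    struct tmnt {
        struct tmnt *mnt_parent;   /* the root mount points to itself */
        int          mnt_mountpoint;
    };

    /* stand-in for is_subdir(): equality keeps the sketch runnable */
    static bool toy_is_subdir(int child, int ancestor)
    {
        return child == ancestor;
    }

    static bool toy_path_is_under(struct tmnt *mnt, int dentry,
                                  struct tmnt *mnt2, int dentry2)
    {
        if (mnt != mnt2) {
            for (;;) {
                if (mnt->mnt_parent == mnt)
                    return false;          /* hit the root: not under */
                if (mnt->mnt_parent == mnt2)
                    break;
                mnt = mnt->mnt_parent;
            }
            dentry = mnt->mnt_mountpoint;  /* compare from the mountpoint */
        }
        return toy_is_subdir(dentry, dentry2);
    }
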
diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
index 274ac865bae8..049d6c36da09 100644
--- a/fs/debugfs/inode.c
+++ b/fs/debugfs/inode.c
@@ -496,7 +496,7 @@ struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry,
496 } 496 }
497 d_move(old_dentry, dentry); 497 d_move(old_dentry, dentry);
498 fsnotify_move(old_dir->d_inode, new_dir->d_inode, old_name, 498 fsnotify_move(old_dir->d_inode, new_dir->d_inode, old_name,
499 old_dentry->d_name.name, S_ISDIR(old_dentry->d_inode->i_mode), 499 S_ISDIR(old_dentry->d_inode->i_mode),
500 NULL, old_dentry); 500 NULL, old_dentry);
501 fsnotify_oldname_free(old_name); 501 fsnotify_oldname_free(old_name);
502 unlock_rename(new_dir, old_dir); 502 unlock_rename(new_dir, old_dir);
diff --git a/fs/exofs/common.h b/fs/exofs/common.h
index b1b178e61718..f0d520312d8b 100644
--- a/fs/exofs/common.h
+++ b/fs/exofs/common.h
@@ -55,6 +55,8 @@
55/* exofs Application specific page/attribute */ 55/* exofs Application specific page/attribute */
56# define EXOFS_APAGE_FS_DATA (OSD_APAGE_APP_DEFINED_FIRST + 3) 56# define EXOFS_APAGE_FS_DATA (OSD_APAGE_APP_DEFINED_FIRST + 3)
57# define EXOFS_ATTR_INODE_DATA 1 57# define EXOFS_ATTR_INODE_DATA 1
58# define EXOFS_ATTR_INODE_FILE_LAYOUT 2
59# define EXOFS_ATTR_INODE_DIR_LAYOUT 3
58 60
59/* 61/*
60 * The maximum number of files we can have is limited by the size of the 62 * The maximum number of files we can have is limited by the size of the
@@ -206,4 +208,41 @@ enum {
206 (((name_len) + offsetof(struct exofs_dir_entry, name) + \ 208 (((name_len) + offsetof(struct exofs_dir_entry, name) + \
207 EXOFS_DIR_ROUND) & ~EXOFS_DIR_ROUND) 209 EXOFS_DIR_ROUND) & ~EXOFS_DIR_ROUND)
208 210
211/*
212 * The on-disk (optional) layout structure.
213 * sits in an EXOFS_ATTR_INODE_FILE_LAYOUT or EXOFS_ATTR_INODE_DIR_LAYOUT
214 * attribute, attached to any inode, usually to a directory.
215 */
216
217enum exofs_inode_layout_gen_functions {
218 LAYOUT_MOVING_WINDOW = 0,
219 LAYOUT_IMPLICT = 1,
220};
221
222struct exofs_on_disk_inode_layout {
223 __le16 gen_func; /* One of enum exofs_inode_layout_gen_functions */
224 __le16 pad;
225 union {
226 /* gen_func == LAYOUT_MOVING_WINDOW (default) */
227 struct exofs_layout_sliding_window {
228 __le32 num_devices; /* first n devices in global-table*/
229 } sliding_window __packed;
230
231 /* gen_func == LAYOUT_IMPLICT */
232 struct exofs_layout_implict_list {
233 struct exofs_dt_data_map data_map;
234 /* Variable array of size data_map.cb_num_comps. These
235 * are device indexes of the devices in the global table
236 */
237 __le32 dev_indexes[];
238 } implict __packed;
239 };
240} __packed;
241
242static inline size_t exofs_on_disk_inode_layout_size(unsigned max_devs)
243{
244 return sizeof(struct exofs_on_disk_inode_layout) +
245 max_devs * sizeof(__le32);
246}
247
209#endif /*ifndef __EXOFS_COM_H__*/ 248#endif /*ifndef __EXOFS_COM_H__*/
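
[note] The new layout attribute is variable-length: a fixed header followed by one __le32 device index per component, which is what exofs_on_disk_inode_layout_size() accounts for. A user-space mock of the arithmetic for the sliding-window arm only (fixed-width ints in place of __le16/__le32, __packed dropped; sizes are illustrative, not the kernel's exact figures):

    #include <stdint.h>
    #include <stdio.h>

    struct layout_hdr {            /* models exofs_on_disk_inode_layout */
        uint16_t gen_func, pad;
        uint32_t num_devices;      /* sliding-window arm of the union */
    };

    static size_t layout_attr_size(unsigned max_devs)
    {
        return sizeof(struct layout_hdr) + max_devs * sizeof(uint32_t);
    }

    int main(void)
    {
        printf("%zu\n", layout_attr_size(4));   /* 8 + 4*4 = 24 bytes */
        return 0;
    }
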
diff --git a/fs/exofs/exofs.h b/fs/exofs/exofs.h
index c35fd4623986..8442e353309f 100644
--- a/fs/exofs/exofs.h
+++ b/fs/exofs/exofs.h
@@ -55,12 +55,28 @@
55/* u64 has problems with printk this will cast it to unsigned long long */ 55/* u64 has problems with printk this will cast it to unsigned long long */
56#define _LLU(x) (unsigned long long)(x) 56#define _LLU(x) (unsigned long long)(x)
57 57
58struct exofs_layout {
59 osd_id s_pid; /* partition ID of file system*/
60
61 /* Our way of looking at the data_map */
62 unsigned stripe_unit;
63 unsigned mirrors_p1;
64
65 unsigned group_width;
66 u64 group_depth;
67 unsigned group_count;
68
69 enum exofs_inode_layout_gen_functions lay_func;
70
71 unsigned s_numdevs; /* Num of devices in array */
72 struct osd_dev *s_ods[0]; /* Variable length */
73};
74
58/* 75/*
59 * our extension to the in-memory superblock 76 * our extension to the in-memory superblock
60 */ 77 */
61struct exofs_sb_info { 78struct exofs_sb_info {
62 struct exofs_fscb s_fscb; /* Written often, pre-allocate*/ 79 struct exofs_fscb s_fscb; /* Written often, pre-allocate*/
63 osd_id s_pid; /* partition ID of file system*/
64 int s_timeout; /* timeout for OSD operations */ 80 int s_timeout; /* timeout for OSD operations */
65 uint64_t s_nextid; /* highest object ID used */ 81 uint64_t s_nextid; /* highest object ID used */
66 uint32_t s_numfiles; /* number of files on fs */ 82 uint32_t s_numfiles; /* number of files on fs */
@@ -69,22 +85,27 @@ struct exofs_sb_info {
69 atomic_t s_curr_pending; /* number of pending commands */ 85 atomic_t s_curr_pending; /* number of pending commands */
70 uint8_t s_cred[OSD_CAP_LEN]; /* credential for the fscb */ 86 uint8_t s_cred[OSD_CAP_LEN]; /* credential for the fscb */
71 87
72 struct pnfs_osd_data_map data_map; /* Default raid to use */ 88 struct pnfs_osd_data_map data_map; /* Default raid to use
73 unsigned s_numdevs; /* Num of devices in array */ 89 * FIXME: Needed ?
74 struct osd_dev *s_ods[1]; /* Variable length, minimum 1 */ 90 */
91/* struct exofs_layout dir_layout;*/ /* Default dir layout */
92 struct exofs_layout layout; /* Default files layout,
93 * contains the variable osd_dev
94 * array. Keep last */
95 struct osd_dev *_min_one_dev[1]; /* Place holder for one dev */
75}; 96};
76 97
77/* 98/*
78 * our extension to the in-memory inode 99 * our extension to the in-memory inode
79 */ 100 */
80struct exofs_i_info { 101struct exofs_i_info {
102 struct inode vfs_inode; /* normal in-memory inode */
103 wait_queue_head_t i_wq; /* wait queue for inode */
81 unsigned long i_flags; /* various atomic flags */ 104 unsigned long i_flags; /* various atomic flags */
82 uint32_t i_data[EXOFS_IDATA];/*short symlink names and device #s*/ 105 uint32_t i_data[EXOFS_IDATA];/*short symlink names and device #s*/
83 uint32_t i_dir_start_lookup; /* which page to start lookup */ 106 uint32_t i_dir_start_lookup; /* which page to start lookup */
84 wait_queue_head_t i_wq; /* wait queue for inode */
85 uint64_t i_commit_size; /* the object's written length */ 107 uint64_t i_commit_size; /* the object's written length */
86 uint8_t i_cred[OSD_CAP_LEN];/* all-powerful credential */ 108 uint8_t i_cred[OSD_CAP_LEN];/* all-powerful credential */
87 struct inode vfs_inode; /* normal in-memory inode */
88}; 109};
89 110
90static inline osd_id exofs_oi_objno(struct exofs_i_info *oi) 111static inline osd_id exofs_oi_objno(struct exofs_i_info *oi)
@@ -101,7 +122,7 @@ struct exofs_io_state {
101 void *private; 122 void *private;
102 exofs_io_done_fn done; 123 exofs_io_done_fn done;
103 124
104 struct exofs_sb_info *sbi; 125 struct exofs_layout *layout;
105 struct osd_obj_id obj; 126 struct osd_obj_id obj;
106 u8 *cred; 127 u8 *cred;
107 128
@@ -109,7 +130,11 @@ struct exofs_io_state {
109 loff_t offset; 130 loff_t offset;
110 unsigned long length; 131 unsigned long length;
111 void *kern_buff; 132 void *kern_buff;
112 struct bio *bio; 133
134 struct page **pages;
135 unsigned nr_pages;
136 unsigned pgbase;
137 unsigned pages_consumed;
113 138
114 /* Attributes */ 139 /* Attributes */
115 unsigned in_attr_len; 140 unsigned in_attr_len;
@@ -122,6 +147,9 @@ struct exofs_io_state {
122 struct exofs_per_dev_state { 147 struct exofs_per_dev_state {
123 struct osd_request *or; 148 struct osd_request *or;
124 struct bio *bio; 149 struct bio *bio;
150 loff_t offset;
151 unsigned length;
152 unsigned dev;
125 } per_dev[]; 153 } per_dev[];
126}; 154};
127 155
@@ -175,6 +203,12 @@ static inline struct exofs_i_info *exofs_i(struct inode *inode)
175} 203}
176 204
177/* 205/*
206 * Given a layout, object_number and stripe_index return the associated global
207 * dev_index
208 */
209unsigned exofs_layout_od_id(struct exofs_layout *layout,
210 osd_id obj_no, unsigned layout_index);
211/*
178 * Maximum count of links to a file 212 * Maximum count of links to a file
179 */ 213 */
180#define EXOFS_LINK_MAX 32000 214#define EXOFS_LINK_MAX 32000
@@ -189,7 +223,8 @@ void exofs_make_credential(u8 cred_a[OSD_CAP_LEN],
189int exofs_read_kern(struct osd_dev *od, u8 *cred, struct osd_obj_id *obj, 223int exofs_read_kern(struct osd_dev *od, u8 *cred, struct osd_obj_id *obj,
190 u64 offset, void *p, unsigned length); 224 u64 offset, void *p, unsigned length);
191 225
192int exofs_get_io_state(struct exofs_sb_info *sbi, struct exofs_io_state** ios); 226int exofs_get_io_state(struct exofs_layout *layout,
227 struct exofs_io_state **ios);
193void exofs_put_io_state(struct exofs_io_state *ios); 228void exofs_put_io_state(struct exofs_io_state *ios);
194 229
195int exofs_check_io(struct exofs_io_state *ios, u64 *resid); 230int exofs_check_io(struct exofs_io_state *ios, u64 *resid);
@@ -226,7 +261,7 @@ int exofs_write_begin(struct file *file, struct address_space *mapping,
226 struct page **pagep, void **fsdata); 261 struct page **pagep, void **fsdata);
227extern struct inode *exofs_iget(struct super_block *, unsigned long); 262extern struct inode *exofs_iget(struct super_block *, unsigned long);
228struct inode *exofs_new_inode(struct inode *, int); 263struct inode *exofs_new_inode(struct inode *, int);
229extern int exofs_write_inode(struct inode *, int); 264extern int exofs_write_inode(struct inode *, struct writeback_control *wbc);
230extern void exofs_delete_inode(struct inode *); 265extern void exofs_delete_inode(struct inode *);
231 266
232/* dir.c: */ 267/* dir.c: */
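
[note] Pulling the layout fields out of exofs_sb_info into struct exofs_layout, with the variable osd_dev array at its tail, is why the comment insists the member stays last: the superblock info and the device table come from one allocation sized at mount time (the kernel spells the tail as s_ods[0]; a C99 flexible array member is the user-space equivalent). Sketch of the idiom with simplified types:

    #include <stdlib.h>

    struct layout { unsigned s_numdevs; void *s_ods[]; };  /* tail array */

    static struct layout *alloc_layout(unsigned numdevs)
    {
        struct layout *l = calloc(1, sizeof(*l) + numdevs * sizeof(void *));

        if (l)
            l->s_numdevs = numdevs;
        return l;
    }
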
diff --git a/fs/exofs/inode.c b/fs/exofs/inode.c
index 2afbcebeda71..a17e4b733e35 100644
--- a/fs/exofs/inode.c
+++ b/fs/exofs/inode.c
@@ -41,16 +41,18 @@
41 41
42enum { BIO_MAX_PAGES_KMALLOC = 42enum { BIO_MAX_PAGES_KMALLOC =
43 (PAGE_SIZE - sizeof(struct bio)) / sizeof(struct bio_vec), 43 (PAGE_SIZE - sizeof(struct bio)) / sizeof(struct bio_vec),
44 MAX_PAGES_KMALLOC =
45 PAGE_SIZE / sizeof(struct page *),
44}; 46};
45 47
46struct page_collect { 48struct page_collect {
47 struct exofs_sb_info *sbi; 49 struct exofs_sb_info *sbi;
48 struct request_queue *req_q;
49 struct inode *inode; 50 struct inode *inode;
50 unsigned expected_pages; 51 unsigned expected_pages;
51 struct exofs_io_state *ios; 52 struct exofs_io_state *ios;
52 53
53 struct bio *bio; 54 struct page **pages;
55 unsigned alloc_pages;
54 unsigned nr_pages; 56 unsigned nr_pages;
55 unsigned long length; 57 unsigned long length;
56 loff_t pg_first; /* keep 64bit also in 32-arches */ 58 loff_t pg_first; /* keep 64bit also in 32-arches */
@@ -62,15 +64,12 @@ static void _pcol_init(struct page_collect *pcol, unsigned expected_pages,
62 struct exofs_sb_info *sbi = inode->i_sb->s_fs_info; 64 struct exofs_sb_info *sbi = inode->i_sb->s_fs_info;
63 65
64 pcol->sbi = sbi; 66 pcol->sbi = sbi;
65 /* Create master bios on first Q, later on cloning, each clone will be
66 * allocated on it's destination Q
67 */
68 pcol->req_q = osd_request_queue(sbi->s_ods[0]);
69 pcol->inode = inode; 67 pcol->inode = inode;
70 pcol->expected_pages = expected_pages; 68 pcol->expected_pages = expected_pages;
71 69
72 pcol->ios = NULL; 70 pcol->ios = NULL;
73 pcol->bio = NULL; 71 pcol->pages = NULL;
72 pcol->alloc_pages = 0;
74 pcol->nr_pages = 0; 73 pcol->nr_pages = 0;
75 pcol->length = 0; 74 pcol->length = 0;
76 pcol->pg_first = -1; 75 pcol->pg_first = -1;
@@ -80,7 +79,8 @@ static void _pcol_reset(struct page_collect *pcol)
80{ 79{
81 pcol->expected_pages -= min(pcol->nr_pages, pcol->expected_pages); 80 pcol->expected_pages -= min(pcol->nr_pages, pcol->expected_pages);
82 81
83 pcol->bio = NULL; 82 pcol->pages = NULL;
83 pcol->alloc_pages = 0;
84 pcol->nr_pages = 0; 84 pcol->nr_pages = 0;
85 pcol->length = 0; 85 pcol->length = 0;
86 pcol->pg_first = -1; 86 pcol->pg_first = -1;
@@ -90,38 +90,43 @@ static void _pcol_reset(struct page_collect *pcol)
90 * it might not end here. don't be left with nothing 90 * it might not end here. don't be left with nothing
91 */ 91 */
92 if (!pcol->expected_pages) 92 if (!pcol->expected_pages)
93 pcol->expected_pages = BIO_MAX_PAGES_KMALLOC; 93 pcol->expected_pages = MAX_PAGES_KMALLOC;
94} 94}
95 95
96static int pcol_try_alloc(struct page_collect *pcol) 96static int pcol_try_alloc(struct page_collect *pcol)
97{ 97{
98 int pages = min_t(unsigned, pcol->expected_pages, 98 unsigned pages = min_t(unsigned, pcol->expected_pages,
99 BIO_MAX_PAGES_KMALLOC); 99 MAX_PAGES_KMALLOC);
100 100
101 if (!pcol->ios) { /* First time allocate io_state */ 101 if (!pcol->ios) { /* First time allocate io_state */
102 int ret = exofs_get_io_state(pcol->sbi, &pcol->ios); 102 int ret = exofs_get_io_state(&pcol->sbi->layout, &pcol->ios);
103 103
104 if (ret) 104 if (ret)
105 return ret; 105 return ret;
106 } 106 }
107 107
108 /* TODO: easily support bio chaining */
109 pages = min_t(unsigned, pages,
110 pcol->sbi->layout.group_width * BIO_MAX_PAGES_KMALLOC);
111
108 for (; pages; pages >>= 1) { 112 for (; pages; pages >>= 1) {
109 pcol->bio = bio_kmalloc(GFP_KERNEL, pages); 113 pcol->pages = kmalloc(pages * sizeof(struct page *),
110 if (likely(pcol->bio)) 114 GFP_KERNEL);
115 if (likely(pcol->pages)) {
116 pcol->alloc_pages = pages;
111 return 0; 117 return 0;
118 }
112 } 119 }
113 120
114 EXOFS_ERR("Failed to bio_kmalloc expected_pages=%u\n", 121 EXOFS_ERR("Failed to kmalloc expected_pages=%u\n",
115 pcol->expected_pages); 122 pcol->expected_pages);
116 return -ENOMEM; 123 return -ENOMEM;
117} 124}
118 125
119static void pcol_free(struct page_collect *pcol) 126static void pcol_free(struct page_collect *pcol)
120{ 127{
121 if (pcol->bio) { 128 kfree(pcol->pages);
122 bio_put(pcol->bio); 129 pcol->pages = NULL;
123 pcol->bio = NULL;
124 }
125 130
126 if (pcol->ios) { 131 if (pcol->ios) {
127 exofs_put_io_state(pcol->ios); 132 exofs_put_io_state(pcol->ios);
@@ -132,11 +137,10 @@ static void pcol_free(struct page_collect *pcol)
132static int pcol_add_page(struct page_collect *pcol, struct page *page, 137static int pcol_add_page(struct page_collect *pcol, struct page *page,
133 unsigned len) 138 unsigned len)
134{ 139{
135 int added_len = bio_add_pc_page(pcol->req_q, pcol->bio, page, len, 0); 140 if (unlikely(pcol->nr_pages >= pcol->alloc_pages))
136 if (unlikely(len != added_len))
137 return -ENOMEM; 141 return -ENOMEM;
138 142
139 ++pcol->nr_pages; 143 pcol->pages[pcol->nr_pages++] = page;
140 pcol->length += len; 144 pcol->length += len;
141 return 0; 145 return 0;
142} 146}
@@ -181,7 +185,6 @@ static void update_write_page(struct page *page, int ret)
181 */ 185 */
182static int __readpages_done(struct page_collect *pcol, bool do_unlock) 186static int __readpages_done(struct page_collect *pcol, bool do_unlock)
183{ 187{
184 struct bio_vec *bvec;
185 int i; 188 int i;
186 u64 resid; 189 u64 resid;
187 u64 good_bytes; 190 u64 good_bytes;
@@ -193,13 +196,13 @@ static int __readpages_done(struct page_collect *pcol, bool do_unlock)
193 else 196 else
194 good_bytes = pcol->length - resid; 197 good_bytes = pcol->length - resid;
195 198
196 EXOFS_DBGMSG("readpages_done(0x%lx) good_bytes=0x%llx" 199 EXOFS_DBGMSG2("readpages_done(0x%lx) good_bytes=0x%llx"
197 " length=0x%lx nr_pages=%u\n", 200 " length=0x%lx nr_pages=%u\n",
198 pcol->inode->i_ino, _LLU(good_bytes), pcol->length, 201 pcol->inode->i_ino, _LLU(good_bytes), pcol->length,
199 pcol->nr_pages); 202 pcol->nr_pages);
200 203
201 __bio_for_each_segment(bvec, pcol->bio, i, 0) { 204 for (i = 0; i < pcol->nr_pages; i++) {
202 struct page *page = bvec->bv_page; 205 struct page *page = pcol->pages[i];
203 struct inode *inode = page->mapping->host; 206 struct inode *inode = page->mapping->host;
204 int page_stat; 207 int page_stat;
205 208
@@ -218,11 +221,11 @@ static int __readpages_done(struct page_collect *pcol, bool do_unlock)
218 ret = update_read_page(page, page_stat); 221 ret = update_read_page(page, page_stat);
219 if (do_unlock) 222 if (do_unlock)
220 unlock_page(page); 223 unlock_page(page);
221 length += bvec->bv_len; 224 length += PAGE_SIZE;
222 } 225 }
223 226
224 pcol_free(pcol); 227 pcol_free(pcol);
225 EXOFS_DBGMSG("readpages_done END\n"); 228 EXOFS_DBGMSG2("readpages_done END\n");
226 return ret; 229 return ret;
227} 230}
228 231
@@ -238,11 +241,10 @@ static void readpages_done(struct exofs_io_state *ios, void *p)
238 241
239static void _unlock_pcol_pages(struct page_collect *pcol, int ret, int rw) 242static void _unlock_pcol_pages(struct page_collect *pcol, int ret, int rw)
240{ 243{
241 struct bio_vec *bvec;
242 int i; 244 int i;
243 245
244 __bio_for_each_segment(bvec, pcol->bio, i, 0) { 246 for (i = 0; i < pcol->nr_pages; i++) {
245 struct page *page = bvec->bv_page; 247 struct page *page = pcol->pages[i];
246 248
247 if (rw == READ) 249 if (rw == READ)
248 update_read_page(page, ret); 250 update_read_page(page, ret);
@@ -260,13 +262,14 @@ static int read_exec(struct page_collect *pcol, bool is_sync)
260 struct page_collect *pcol_copy = NULL; 262 struct page_collect *pcol_copy = NULL;
261 int ret; 263 int ret;
262 264
263 if (!pcol->bio) 265 if (!pcol->pages)
264 return 0; 266 return 0;
265 267
266 /* see comment in _readpage() about sync reads */ 268 /* see comment in _readpage() about sync reads */
267 WARN_ON(is_sync && (pcol->nr_pages != 1)); 269 WARN_ON(is_sync && (pcol->nr_pages != 1));
268 270
269 ios->bio = pcol->bio; 271 ios->pages = pcol->pages;
272 ios->nr_pages = pcol->nr_pages;
270 ios->length = pcol->length; 273 ios->length = pcol->length;
271 ios->offset = pcol->pg_first << PAGE_CACHE_SHIFT; 274 ios->offset = pcol->pg_first << PAGE_CACHE_SHIFT;
272 275
@@ -290,7 +293,7 @@ static int read_exec(struct page_collect *pcol, bool is_sync)
290 293
291 atomic_inc(&pcol->sbi->s_curr_pending); 294 atomic_inc(&pcol->sbi->s_curr_pending);
292 295
293 EXOFS_DBGMSG("read_exec obj=0x%llx start=0x%llx length=0x%lx\n", 296 EXOFS_DBGMSG2("read_exec obj=0x%llx start=0x%llx length=0x%lx\n",
294 ios->obj.id, _LLU(ios->offset), pcol->length); 297 ios->obj.id, _LLU(ios->offset), pcol->length);
295 298
296 /* pages ownership was passed to pcol_copy */ 299 /* pages ownership was passed to pcol_copy */
@@ -366,7 +369,7 @@ try_again:
366 goto try_again; 369 goto try_again;
367 } 370 }
368 371
369 if (!pcol->bio) { 372 if (!pcol->pages) {
370 ret = pcol_try_alloc(pcol); 373 ret = pcol_try_alloc(pcol);
371 if (unlikely(ret)) 374 if (unlikely(ret))
372 goto fail; 375 goto fail;
@@ -448,7 +451,6 @@ static int exofs_readpage(struct file *file, struct page *page)
448static void writepages_done(struct exofs_io_state *ios, void *p) 451static void writepages_done(struct exofs_io_state *ios, void *p)
449{ 452{
450 struct page_collect *pcol = p; 453 struct page_collect *pcol = p;
451 struct bio_vec *bvec;
452 int i; 454 int i;
453 u64 resid; 455 u64 resid;
454 u64 good_bytes; 456 u64 good_bytes;
@@ -462,13 +464,13 @@ static void writepages_done(struct exofs_io_state *ios, void *p)
462 else 464 else
463 good_bytes = pcol->length - resid; 465 good_bytes = pcol->length - resid;
464 466
465 EXOFS_DBGMSG("writepages_done(0x%lx) good_bytes=0x%llx" 467 EXOFS_DBGMSG2("writepages_done(0x%lx) good_bytes=0x%llx"
466 " length=0x%lx nr_pages=%u\n", 468 " length=0x%lx nr_pages=%u\n",
467 pcol->inode->i_ino, _LLU(good_bytes), pcol->length, 469 pcol->inode->i_ino, _LLU(good_bytes), pcol->length,
468 pcol->nr_pages); 470 pcol->nr_pages);
469 471
470 __bio_for_each_segment(bvec, pcol->bio, i, 0) { 472 for (i = 0; i < pcol->nr_pages; i++) {
471 struct page *page = bvec->bv_page; 473 struct page *page = pcol->pages[i];
472 struct inode *inode = page->mapping->host; 474 struct inode *inode = page->mapping->host;
473 int page_stat; 475 int page_stat;
474 476
@@ -485,12 +487,12 @@ static void writepages_done(struct exofs_io_state *ios, void *p)
485 EXOFS_DBGMSG2(" writepages_done(0x%lx, 0x%lx) status=%d\n", 487 EXOFS_DBGMSG2(" writepages_done(0x%lx, 0x%lx) status=%d\n",
486 inode->i_ino, page->index, page_stat); 488 inode->i_ino, page->index, page_stat);
487 489
488 length += bvec->bv_len; 490 length += PAGE_SIZE;
489 } 491 }
490 492
491 pcol_free(pcol); 493 pcol_free(pcol);
492 kfree(pcol); 494 kfree(pcol);
493 EXOFS_DBGMSG("writepages_done END\n"); 495 EXOFS_DBGMSG2("writepages_done END\n");
494} 496}
495 497
496static int write_exec(struct page_collect *pcol) 498static int write_exec(struct page_collect *pcol)
@@ -500,7 +502,7 @@ static int write_exec(struct page_collect *pcol)
500 struct page_collect *pcol_copy = NULL; 502 struct page_collect *pcol_copy = NULL;
501 int ret; 503 int ret;
502 504
503 if (!pcol->bio) 505 if (!pcol->pages)
504 return 0; 506 return 0;
505 507
506 pcol_copy = kmalloc(sizeof(*pcol_copy), GFP_KERNEL); 508 pcol_copy = kmalloc(sizeof(*pcol_copy), GFP_KERNEL);
@@ -512,9 +514,8 @@ static int write_exec(struct page_collect *pcol)
512 514
513 *pcol_copy = *pcol; 515 *pcol_copy = *pcol;
514 516
515 pcol_copy->bio->bi_rw |= (1 << BIO_RW); /* FIXME: bio_set_dir() */ 517 ios->pages = pcol_copy->pages;
516 518 ios->nr_pages = pcol_copy->nr_pages;
517 ios->bio = pcol_copy->bio;
518 ios->offset = pcol_copy->pg_first << PAGE_CACHE_SHIFT; 519 ios->offset = pcol_copy->pg_first << PAGE_CACHE_SHIFT;
519 ios->length = pcol_copy->length; 520 ios->length = pcol_copy->length;
520 ios->done = writepages_done; 521 ios->done = writepages_done;
@@ -527,7 +528,7 @@ static int write_exec(struct page_collect *pcol)
527 } 528 }
528 529
529 atomic_inc(&pcol->sbi->s_curr_pending); 530 atomic_inc(&pcol->sbi->s_curr_pending);
530 EXOFS_DBGMSG("write_exec(0x%lx, 0x%llx) start=0x%llx length=0x%lx\n", 531 EXOFS_DBGMSG2("write_exec(0x%lx, 0x%llx) start=0x%llx length=0x%lx\n",
531 pcol->inode->i_ino, pcol->pg_first, _LLU(ios->offset), 532 pcol->inode->i_ino, pcol->pg_first, _LLU(ios->offset),
532 pcol->length); 533 pcol->length);
533 /* pages ownership was passed to pcol_copy */ 534 /* pages ownership was passed to pcol_copy */
@@ -605,7 +606,7 @@ try_again:
605 goto try_again; 606 goto try_again;
606 } 607 }
607 608
608 if (!pcol->bio) { 609 if (!pcol->pages) {
609 ret = pcol_try_alloc(pcol); 610 ret = pcol_try_alloc(pcol);
610 if (unlikely(ret)) 611 if (unlikely(ret))
611 goto fail; 612 goto fail;
@@ -616,7 +617,7 @@ try_again:
616 617
617 ret = pcol_add_page(pcol, page, len); 618 ret = pcol_add_page(pcol, page, len);
618 if (unlikely(ret)) { 619 if (unlikely(ret)) {
619 EXOFS_DBGMSG("Failed pcol_add_page " 620 EXOFS_DBGMSG2("Failed pcol_add_page "
620 "nr_pages=%u total_length=0x%lx\n", 621 "nr_pages=%u total_length=0x%lx\n",
621 pcol->nr_pages, pcol->length); 622 pcol->nr_pages, pcol->length);
622 623
@@ -663,7 +664,7 @@ static int exofs_writepages(struct address_space *mapping,
663 if (expected_pages < 32L) 664 if (expected_pages < 32L)
664 expected_pages = 32L; 665 expected_pages = 32L;
665 666
666 EXOFS_DBGMSG("inode(0x%lx) wbc->start=0x%llx wbc->end=0x%llx " 667 EXOFS_DBGMSG2("inode(0x%lx) wbc->start=0x%llx wbc->end=0x%llx "
667 "nrpages=%lu start=0x%lx end=0x%lx expected_pages=%ld\n", 668 "nrpages=%lu start=0x%lx end=0x%lx expected_pages=%ld\n",
668 mapping->host->i_ino, wbc->range_start, wbc->range_end, 669 mapping->host->i_ino, wbc->range_start, wbc->range_end,
669 mapping->nrpages, start, end, expected_pages); 670 mapping->nrpages, start, end, expected_pages);
@@ -859,20 +860,33 @@ int exofs_setattr(struct dentry *dentry, struct iattr *iattr)
859 return error; 860 return error;
860} 861}
861 862
863static const struct osd_attr g_attr_inode_file_layout = ATTR_DEF(
864 EXOFS_APAGE_FS_DATA,
865 EXOFS_ATTR_INODE_FILE_LAYOUT,
866 0);
867static const struct osd_attr g_attr_inode_dir_layout = ATTR_DEF(
868 EXOFS_APAGE_FS_DATA,
869 EXOFS_ATTR_INODE_DIR_LAYOUT,
870 0);
871
862/* 872/*
863 * Read an inode from the OSD, and return it as is. We also return the size 873 * Read the Linux inode info from the OSD, and return it as is. In exofs the
864 * attribute in the 'obj_size' argument. 874 * inode info is in an application specific page/attribute of the osd-object.
865 */ 875 */
866static int exofs_get_inode(struct super_block *sb, struct exofs_i_info *oi, 876static int exofs_get_inode(struct super_block *sb, struct exofs_i_info *oi,
867 struct exofs_fcb *inode, uint64_t *obj_size) 877 struct exofs_fcb *inode)
868{ 878{
869 struct exofs_sb_info *sbi = sb->s_fs_info; 879 struct exofs_sb_info *sbi = sb->s_fs_info;
870 struct osd_attr attrs[2]; 880 struct osd_attr attrs[] = {
881 [0] = g_attr_inode_data,
882 [1] = g_attr_inode_file_layout,
883 [2] = g_attr_inode_dir_layout,
884 };
871 struct exofs_io_state *ios; 885 struct exofs_io_state *ios;
886 struct exofs_on_disk_inode_layout *layout;
872 int ret; 887 int ret;
873 888
874 *obj_size = ~0; 889 ret = exofs_get_io_state(&sbi->layout, &ios);
875 ret = exofs_get_io_state(sbi, &ios);
876 if (unlikely(ret)) { 890 if (unlikely(ret)) {
877 EXOFS_ERR("%s: exofs_get_io_state failed.\n", __func__); 891 EXOFS_ERR("%s: exofs_get_io_state failed.\n", __func__);
878 return ret; 892 return ret;
@@ -882,14 +896,25 @@ static int exofs_get_inode(struct super_block *sb, struct exofs_i_info *oi,
882 exofs_make_credential(oi->i_cred, &ios->obj); 896 exofs_make_credential(oi->i_cred, &ios->obj);
883 ios->cred = oi->i_cred; 897 ios->cred = oi->i_cred;
884 898
885 attrs[0] = g_attr_inode_data; 899 attrs[1].len = exofs_on_disk_inode_layout_size(sbi->layout.s_numdevs);
886 attrs[1] = g_attr_logical_length; 900 attrs[2].len = exofs_on_disk_inode_layout_size(sbi->layout.s_numdevs);
901
887 ios->in_attr = attrs; 902 ios->in_attr = attrs;
888 ios->in_attr_len = ARRAY_SIZE(attrs); 903 ios->in_attr_len = ARRAY_SIZE(attrs);
889 904
890 ret = exofs_sbi_read(ios); 905 ret = exofs_sbi_read(ios);
891 if (ret) 906 if (unlikely(ret)) {
907 EXOFS_ERR("object(0x%llx) corrupted, return empty file=>%d\n",
908 _LLU(ios->obj.id), ret);
909 memset(inode, 0, sizeof(*inode));
910 inode->i_mode = 0040000 | (0777 & ~022);
911 /* If object is lost on target we might as well enable it's
912 * delete.
913 */
914 if ((ret == -ENOENT) || (ret == -EINVAL))
915 ret = 0;
892 goto out; 916 goto out;
917 }
893 918
894 ret = extract_attr_from_ios(ios, &attrs[0]); 919 ret = extract_attr_from_ios(ios, &attrs[0]);
895 if (ret) { 920 if (ret) {
@@ -901,11 +926,33 @@ static int exofs_get_inode(struct super_block *sb, struct exofs_i_info *oi,
901 926
902 ret = extract_attr_from_ios(ios, &attrs[1]); 927 ret = extract_attr_from_ios(ios, &attrs[1]);
903 if (ret) { 928 if (ret) {
904 EXOFS_ERR("%s: extract_attr of logical_length failed\n", 929 EXOFS_ERR("%s: extract_attr of inode_data failed\n", __func__);
905 __func__); 930 goto out;
931 }
932 if (attrs[1].len) {
933 layout = attrs[1].val_ptr;
934 if (layout->gen_func != cpu_to_le16(LAYOUT_MOVING_WINDOW)) {
935 EXOFS_ERR("%s: unsupported files layout %d\n",
936 __func__, layout->gen_func);
937 ret = -ENOTSUPP;
938 goto out;
939 }
940 }
941
942 ret = extract_attr_from_ios(ios, &attrs[2]);
943 if (ret) {
944 EXOFS_ERR("%s: extract_attr of inode_data failed\n", __func__);
906 goto out; 945 goto out;
907 } 946 }
908 *obj_size = get_unaligned_be64(attrs[1].val_ptr); 947 if (attrs[2].len) {
948 layout = attrs[2].val_ptr;
949 if (layout->gen_func != cpu_to_le16(LAYOUT_MOVING_WINDOW)) {
950 EXOFS_ERR("%s: unsupported meta-data layout %d\n",
951 __func__, layout->gen_func);
952 ret = -ENOTSUPP;
953 goto out;
954 }
955 }
909 956
910out: 957out:
911 exofs_put_io_state(ios); 958 exofs_put_io_state(ios);
@@ -925,7 +972,6 @@ struct inode *exofs_iget(struct super_block *sb, unsigned long ino)
925 struct exofs_i_info *oi; 972 struct exofs_i_info *oi;
926 struct exofs_fcb fcb; 973 struct exofs_fcb fcb;
927 struct inode *inode; 974 struct inode *inode;
928 uint64_t obj_size;
929 int ret; 975 int ret;
930 976
931 inode = iget_locked(sb, ino); 977 inode = iget_locked(sb, ino);
@@ -937,7 +983,7 @@ struct inode *exofs_iget(struct super_block *sb, unsigned long ino)
937 __oi_init(oi); 983 __oi_init(oi);
938 984
939 /* read the inode from the osd */ 985 /* read the inode from the osd */
940 ret = exofs_get_inode(sb, oi, &fcb, &obj_size); 986 ret = exofs_get_inode(sb, oi, &fcb);
941 if (ret) 987 if (ret)
942 goto bad_inode; 988 goto bad_inode;
943 989
@@ -958,13 +1004,6 @@ struct inode *exofs_iget(struct super_block *sb, unsigned long ino)
958 inode->i_blkbits = EXOFS_BLKSHIFT; 1004 inode->i_blkbits = EXOFS_BLKSHIFT;
959 inode->i_generation = le32_to_cpu(fcb.i_generation); 1005 inode->i_generation = le32_to_cpu(fcb.i_generation);
960 1006
961 if ((inode->i_size != obj_size) &&
962 (!exofs_inode_is_fast_symlink(inode))) {
963 EXOFS_ERR("WARNING: Size of inode=%llu != object=%llu\n",
964 inode->i_size, _LLU(obj_size));
965 /* FIXME: call exofs_inode_recovery() */
966 }
967
968 oi->i_dir_start_lookup = 0; 1007 oi->i_dir_start_lookup = 0;
969 1008
970 if ((inode->i_nlink == 0) && (inode->i_mode == 0)) { 1009 if ((inode->i_nlink == 0) && (inode->i_mode == 0)) {
@@ -1043,7 +1082,7 @@ static void create_done(struct exofs_io_state *ios, void *p)
1043 1082
1044 if (unlikely(ret)) { 1083 if (unlikely(ret)) {
1045 EXOFS_ERR("object=0x%llx creation faild in pid=0x%llx", 1084 EXOFS_ERR("object=0x%llx creation faild in pid=0x%llx",
1046 _LLU(exofs_oi_objno(oi)), _LLU(sbi->s_pid)); 1085 _LLU(exofs_oi_objno(oi)), _LLU(sbi->layout.s_pid));
1047 /*TODO: When FS is corrupted creation can fail, object already 1086 /*TODO: When FS is corrupted creation can fail, object already
1048 * exist. Get rid of this asynchronous creation, if exist 1087 * exist. Get rid of this asynchronous creation, if exist
1049 * increment the obj counter and try the next object. Until we 1088 * increment the obj counter and try the next object. Until we
@@ -1104,7 +1143,7 @@ struct inode *exofs_new_inode(struct inode *dir, int mode)
1104 1143
1105 mark_inode_dirty(inode); 1144 mark_inode_dirty(inode);
1106 1145
1107 ret = exofs_get_io_state(sbi, &ios); 1146 ret = exofs_get_io_state(&sbi->layout, &ios);
1108 if (unlikely(ret)) { 1147 if (unlikely(ret)) {
1109 EXOFS_ERR("exofs_new_inode: exofs_get_io_state failed\n"); 1148 EXOFS_ERR("exofs_new_inode: exofs_get_io_state failed\n");
1110 return ERR_PTR(ret); 1149 return ERR_PTR(ret);
@@ -1170,8 +1209,10 @@ static int exofs_update_inode(struct inode *inode, int do_sync)
1170 int ret; 1209 int ret;
1171 1210
1172 args = kzalloc(sizeof(*args), GFP_KERNEL); 1211 args = kzalloc(sizeof(*args), GFP_KERNEL);
1173 if (!args) 1212 if (!args) {
1213 EXOFS_DBGMSG("Faild kzalloc of args\n");
1174 return -ENOMEM; 1214 return -ENOMEM;
1215 }
1175 1216
1176 fcb = &args->fcb; 1217 fcb = &args->fcb;
1177 1218
@@ -1200,7 +1241,7 @@ static int exofs_update_inode(struct inode *inode, int do_sync)
1200 } else 1241 } else
1201 memcpy(fcb->i_data, oi->i_data, sizeof(fcb->i_data)); 1242 memcpy(fcb->i_data, oi->i_data, sizeof(fcb->i_data));
1202 1243
1203 ret = exofs_get_io_state(sbi, &ios); 1244 ret = exofs_get_io_state(&sbi->layout, &ios);
1204 if (unlikely(ret)) { 1245 if (unlikely(ret)) {
1205 EXOFS_ERR("%s: exofs_get_io_state failed.\n", __func__); 1246 EXOFS_ERR("%s: exofs_get_io_state failed.\n", __func__);
1206 goto free_args; 1247 goto free_args;
@@ -1234,13 +1275,14 @@ static int exofs_update_inode(struct inode *inode, int do_sync)
1234free_args: 1275free_args:
1235 kfree(args); 1276 kfree(args);
1236out: 1277out:
1237 EXOFS_DBGMSG("ret=>%d\n", ret); 1278 EXOFS_DBGMSG("(0x%lx) do_sync=%d ret=>%d\n",
1279 inode->i_ino, do_sync, ret);
1238 return ret; 1280 return ret;
1239} 1281}
1240 1282
1241int exofs_write_inode(struct inode *inode, int wait) 1283int exofs_write_inode(struct inode *inode, struct writeback_control *wbc)
1242{ 1284{
1243 return exofs_update_inode(inode, wait); 1285 return exofs_update_inode(inode, wbc->sync_mode == WB_SYNC_ALL);
1244} 1286}
1245 1287
1246/* 1288/*
@@ -1283,7 +1325,7 @@ void exofs_delete_inode(struct inode *inode)
1283 1325
1284 clear_inode(inode); 1326 clear_inode(inode);
1285 1327
1286 ret = exofs_get_io_state(sbi, &ios); 1328 ret = exofs_get_io_state(&sbi->layout, &ios);
1287 if (unlikely(ret)) { 1329 if (unlikely(ret)) {
1288 EXOFS_ERR("%s: exofs_get_io_state failed\n", __func__); 1330 EXOFS_ERR("%s: exofs_get_io_state failed\n", __func__);
1289 return; 1331 return;
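
[note] The pages[]-array conversion in this file drops the bio plumbing from page_collect so the striping code in ios.c can slice pages per device itself. One detail worth modeling is pcol_try_alloc()'s fallback, which halves the requested pointer count on allocation failure rather than failing the whole read/write outright (sketch, simplified types):

    #include <stdlib.h>

    /* Returns an array of up to 'want' page pointers, halving the
     * request on failure; *got reports how many slots were obtained. */
    static void **alloc_page_array(unsigned want, unsigned *got)
    {
        unsigned pages;

        for (pages = want; pages; pages >>= 1) {
            void **arr = malloc(pages * sizeof(*arr));

            if (arr) {
                *got = pages;
                return arr;
            }
        }
        *got = 0;
        return NULL;
    }
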
diff --git a/fs/exofs/ios.c b/fs/exofs/ios.c
index 5bad01fa1f9f..5293bc411d17 100644
--- a/fs/exofs/ios.c
+++ b/fs/exofs/ios.c
@@ -23,9 +23,13 @@
23 */ 23 */
24 24
25#include <scsi/scsi_device.h> 25#include <scsi/scsi_device.h>
26#include <asm/div64.h>
26 27
27#include "exofs.h" 28#include "exofs.h"
28 29
30#define EXOFS_DBGMSG2(M...) do {} while (0)
31/* #define EXOFS_DBGMSG2 EXOFS_DBGMSG */
32
29void exofs_make_credential(u8 cred_a[OSD_CAP_LEN], const struct osd_obj_id *obj) 33void exofs_make_credential(u8 cred_a[OSD_CAP_LEN], const struct osd_obj_id *obj)
30{ 34{
31 osd_sec_init_nosec_doall_caps(cred_a, obj, false, true); 35 osd_sec_init_nosec_doall_caps(cred_a, obj, false, true);
@@ -64,21 +68,24 @@ out:
64 return ret; 68 return ret;
65} 69}
66 70
67int exofs_get_io_state(struct exofs_sb_info *sbi, struct exofs_io_state** pios) 71int exofs_get_io_state(struct exofs_layout *layout,
72 struct exofs_io_state **pios)
68{ 73{
69 struct exofs_io_state *ios; 74 struct exofs_io_state *ios;
70 75
71 /*TODO: Maybe use kmem_cache per sbi of size 76 /*TODO: Maybe use kmem_cache per sbi of size
72 * exofs_io_state_size(sbi->s_numdevs) 77 * exofs_io_state_size(layout->s_numdevs)
73 */ 78 */
74 ios = kzalloc(exofs_io_state_size(sbi->s_numdevs), GFP_KERNEL); 79 ios = kzalloc(exofs_io_state_size(layout->s_numdevs), GFP_KERNEL);
75 if (unlikely(!ios)) { 80 if (unlikely(!ios)) {
 81 EXOFS_DBGMSG("Failed kzalloc bytes=%d\n",
82 exofs_io_state_size(layout->s_numdevs));
76 *pios = NULL; 83 *pios = NULL;
77 return -ENOMEM; 84 return -ENOMEM;
78 } 85 }
79 86
80 ios->sbi = sbi; 87 ios->layout = layout;
81 ios->obj.partition = sbi->s_pid; 88 ios->obj.partition = layout->s_pid;
82 *pios = ios; 89 *pios = ios;
83 return 0; 90 return 0;
84} 91}
@@ -101,6 +108,29 @@ void exofs_put_io_state(struct exofs_io_state *ios)
101 } 108 }
102} 109}
103 110
111unsigned exofs_layout_od_id(struct exofs_layout *layout,
112 osd_id obj_no, unsigned layout_index)
113{
114/* switch (layout->lay_func) {
115 case LAYOUT_MOVING_WINDOW:
116 {*/
117 unsigned dev_mod = obj_no;
118
119 return (layout_index + dev_mod * layout->mirrors_p1) %
120 layout->s_numdevs;
121/* }
122 case LAYOUT_FUNC_IMPLICT:
123 return layout->devs[layout_index];
124 }*/
125}
126
127static inline struct osd_dev *exofs_ios_od(struct exofs_io_state *ios,
128 unsigned layout_index)
129{
130 return ios->layout->s_ods[
131 exofs_layout_od_id(ios->layout, ios->obj.id, layout_index)];
132}
133
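exofs_layout_od_id() spreads objects across the device table by rotating the
starting component with the object id; exofs_ios_od() then resolves a logical
component index to an osd_dev. A worked example with hypothetical values
(s_numdevs = 6, mirrors_p1 = 2, not taken from this patch):

    /* obj_no = 5, layout_index = 0 (first data component):
     *         (0 + 5 * 2) % 6 = 4   ->  s_ods[4]
     * obj_no = 5, layout_index = 1 (its mirror):
     *         (1 + 5 * 2) % 6 = 5   ->  s_ods[5]
     * mirrors of a stripe unit always land on adjacent table slots.
     */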
104static void _sync_done(struct exofs_io_state *ios, void *p) 134static void _sync_done(struct exofs_io_state *ios, void *p)
105{ 135{
106 struct completion *waiting = p; 136 struct completion *waiting = p;
@@ -168,6 +198,21 @@ static int exofs_io_execute(struct exofs_io_state *ios)
168 return ret; 198 return ret;
169} 199}
170 200
201static void _clear_bio(struct bio *bio)
202{
203 struct bio_vec *bv;
204 unsigned i;
205
206 __bio_for_each_segment(bv, bio, i, 0) {
207 unsigned this_count = bv->bv_len;
208
209 if (likely(PAGE_SIZE == this_count))
210 clear_highpage(bv->bv_page);
211 else
212 zero_user(bv->bv_page, bv->bv_offset, this_count);
213 }
214}
215
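_clear_bio() backs the OSD_ERR_PRI_CLEAR_PAGES recovery in exofs_check_io()
below: a read that starts past the object's end carries no data, so the
destination pages are zero-filled and the read is treated as successful,
matching read-past-EOF semantics. For instance (hypothetical sizes):

    /* object holds 1MiB, read requested at offset 2MiB:
     * osi.osd_err_pri == OSD_ERR_PRI_CLEAR_PAGES, the bio's pages are
     * zeroed by _clear_bio() and the device is skipped, not failed.
     */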
171int exofs_check_io(struct exofs_io_state *ios, u64 *resid) 216int exofs_check_io(struct exofs_io_state *ios, u64 *resid)
172{ 217{
173 enum osd_err_priority acumulated_osd_err = 0; 218 enum osd_err_priority acumulated_osd_err = 0;
@@ -176,16 +221,25 @@ int exofs_check_io(struct exofs_io_state *ios, u64 *resid)
176 221
177 for (i = 0; i < ios->numdevs; i++) { 222 for (i = 0; i < ios->numdevs; i++) {
178 struct osd_sense_info osi; 223 struct osd_sense_info osi;
179 int ret = osd_req_decode_sense(ios->per_dev[i].or, &osi); 224 struct osd_request *or = ios->per_dev[i].or;
225 int ret;
226
227 if (unlikely(!or))
228 continue;
180 229
230 ret = osd_req_decode_sense(or, &osi);
181 if (likely(!ret)) 231 if (likely(!ret))
182 continue; 232 continue;
183 233
184 if (unlikely(ret == -EFAULT)) { 234 if (OSD_ERR_PRI_CLEAR_PAGES == osi.osd_err_pri) {
 185 EXOFS_DBGMSG("%s: EFAULT Need page clear\n", __func__); 235 /* start read offset past end of file */
186 /*FIXME: All the pages in this device range should: 236 _clear_bio(ios->per_dev[i].bio);
 187 * clear_highpage(page); 237 EXOFS_DBGMSG("start read offset past end of file "
188 */ 238 "offset=0x%llx, length=0x%llx\n",
239 _LLU(ios->per_dev[i].offset),
240 _LLU(ios->per_dev[i].length));
241
242 continue; /* we recovered */
189 } 243 }
190 244
191 if (osi.osd_err_pri >= acumulated_osd_err) { 245 if (osi.osd_err_pri >= acumulated_osd_err) {
@@ -205,14 +259,259 @@ int exofs_check_io(struct exofs_io_state *ios, u64 *resid)
205 return acumulated_lin_err; 259 return acumulated_lin_err;
206} 260}
207 261
262/*
263 * L - logical offset into the file
264 *
265 * U - The number of bytes in a stripe within a group
266 *
267 * U = stripe_unit * group_width
268 *
269 * T - The number of bytes striped within a group of component objects
270 * (before advancing to the next group)
271 *
272 * T = stripe_unit * group_width * group_depth
273 *
274 * S - The number of bytes striped across all component objects
275 * before the pattern repeats
276 *
277 * S = stripe_unit * group_width * group_depth * group_count
278 *
279 * M - The "major" (i.e., across all components) stripe number
280 *
281 * M = L / S
282 *
283 * G - Counts the groups from the beginning of the major stripe
284 *
285 * G = (L - (M * S)) / T [or (L % S) / T]
286 *
287 * H - The byte offset within the group
288 *
289 * H = (L - (M * S)) % T [or (L % S) % T]
290 *
291 * N - The "minor" (i.e., across the group) stripe number
292 *
293 * N = H / U
294 *
 295 * C - The component index corresponding to L
296 *
297 * C = (H - (N * U)) / stripe_unit + G * group_width
298 * [or (L % U) / stripe_unit + G * group_width]
299 *
 300 * O - The component offset corresponding to L
301 *
302 * O = L % stripe_unit + N * stripe_unit + M * group_depth * stripe_unit
303 */
304struct _striping_info {
305 u64 obj_offset;
306 u64 group_length;
307 u64 total_group_length;
308 u64 Major;
309 unsigned dev;
310 unsigned unit_off;
311};
312
313static void _calc_stripe_info(struct exofs_io_state *ios, u64 file_offset,
314 struct _striping_info *si)
315{
316 u32 stripe_unit = ios->layout->stripe_unit;
317 u32 group_width = ios->layout->group_width;
318 u64 group_depth = ios->layout->group_depth;
319
320 u32 U = stripe_unit * group_width;
321 u64 T = U * group_depth;
322 u64 S = T * ios->layout->group_count;
323 u64 M = div64_u64(file_offset, S);
324
325 /*
326 G = (L - (M * S)) / T
327 H = (L - (M * S)) % T
328 */
329 u64 LmodS = file_offset - M * S;
330 u32 G = div64_u64(LmodS, T);
331 u64 H = LmodS - G * T;
332
333 u32 N = div_u64(H, U);
334
335 /* "H - (N * U)" is just "H % U" so it's bound to u32 */
336 si->dev = (u32)(H - (N * U)) / stripe_unit + G * group_width;
337 si->dev *= ios->layout->mirrors_p1;
338
339 div_u64_rem(file_offset, stripe_unit, &si->unit_off);
340
341 si->obj_offset = si->unit_off + (N * stripe_unit) +
342 (M * group_depth * stripe_unit);
343
344 si->group_length = T - H;
345 si->total_group_length = T;
346 si->Major = M;
347}
348
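Since the mapping is pure integer arithmetic it can be sanity-checked in user
space. Below is a minimal sketch modeled on _calc_stripe_info(), using plain
64-bit division in place of div64_u64()/div_u64() and hypothetical layout
values; it is not kernel code:

    #include <stdint.h>
    #include <stdio.h>

    struct layout {
            uint64_t stripe_unit, group_width, group_depth,
                     group_count, mirrors_p1;
    };

    static void calc(const struct layout *l, uint64_t L)
    {
            uint64_t U = l->stripe_unit * l->group_width;
            uint64_t T = U * l->group_depth;
            uint64_t S = T * l->group_count;
            uint64_t M = L / S;
            uint64_t G = (L - M * S) / T;     /* group within major stripe */
            uint64_t H = (L - M * S) % T;     /* byte offset within group  */
            uint64_t N = H / U;               /* minor stripe number       */
            uint64_t dev = ((H - N * U) / l->stripe_unit +
                            G * l->group_width) * l->mirrors_p1;
            uint64_t unit_off = L % l->stripe_unit;
            uint64_t obj_off = unit_off + N * l->stripe_unit +
                               M * l->group_depth * l->stripe_unit;

            printf("L=%llu dev=%llu obj_offset=%llu group_length=%llu\n",
                   (unsigned long long)L, (unsigned long long)dev,
                   (unsigned long long)obj_off,
                   (unsigned long long)(T - H));
    }

    int main(void)
    {
            /* 64KiB units, 4 devices wide, 16 deep, 2 groups, no mirrors */
            struct layout l = { 65536, 4, 16, 2, 1 };

            calc(&l, 9ULL << 20);  /* prints dev=0 obj_offset=1310720 ... */
            return 0;
    }

At 9MiB the major stripe number M is 1, so the offset lands back on device 0
but one full group_depth * stripe_unit (1MiB) deeper into the component object.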
349static int _add_stripe_unit(struct exofs_io_state *ios, unsigned *cur_pg,
350 unsigned pgbase, struct exofs_per_dev_state *per_dev,
351 int cur_len)
352{
353 unsigned pg = *cur_pg;
354 struct request_queue *q =
355 osd_request_queue(exofs_ios_od(ios, per_dev->dev));
356
357 per_dev->length += cur_len;
358
359 if (per_dev->bio == NULL) {
360 unsigned pages_in_stripe = ios->layout->group_width *
361 (ios->layout->stripe_unit / PAGE_SIZE);
362 unsigned bio_size = (ios->nr_pages + pages_in_stripe) /
363 ios->layout->group_width;
364
365 per_dev->bio = bio_kmalloc(GFP_KERNEL, bio_size);
366 if (unlikely(!per_dev->bio)) {
367 EXOFS_DBGMSG("Faild to allocate BIO size=%u\n",
368 bio_size);
369 return -ENOMEM;
370 }
371 }
372
373 while (cur_len > 0) {
374 unsigned pglen = min_t(unsigned, PAGE_SIZE - pgbase, cur_len);
375 unsigned added_len;
376
377 BUG_ON(ios->nr_pages <= pg);
378 cur_len -= pglen;
379
380 added_len = bio_add_pc_page(q, per_dev->bio, ios->pages[pg],
381 pglen, pgbase);
382 if (unlikely(pglen != added_len))
383 return -ENOMEM;
384 pgbase = 0;
385 ++pg;
386 }
387 BUG_ON(cur_len);
388
389 *cur_pg = pg;
390 return 0;
391}
392
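The bio is sized for one device's share of the request, padded by a full
stripe so an I/O that does not start on a stripe boundary still fits without
reallocation. With hypothetical numbers:

    /* nr_pages = 100, group_width = 4, stripe_unit = 64KiB (16 pages):
     * pages_in_stripe = 4 * 16 = 64
     * bio_size = (100 + 64) / 4 = 41 page slots per device bio
     */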
393static int _prepare_one_group(struct exofs_io_state *ios, u64 length,
394 struct _striping_info *si, unsigned first_comp)
395{
396 unsigned stripe_unit = ios->layout->stripe_unit;
397 unsigned mirrors_p1 = ios->layout->mirrors_p1;
398 unsigned devs_in_group = ios->layout->group_width * mirrors_p1;
399 unsigned dev = si->dev;
400 unsigned first_dev = dev - (dev % devs_in_group);
401 unsigned comp = first_comp + (dev - first_dev);
402 unsigned max_comp = ios->numdevs ? ios->numdevs - mirrors_p1 : 0;
403 unsigned cur_pg = ios->pages_consumed;
404 int ret = 0;
405
406 while (length) {
407 struct exofs_per_dev_state *per_dev = &ios->per_dev[comp];
408 unsigned cur_len, page_off = 0;
409
410 if (!per_dev->length) {
411 per_dev->dev = dev;
412 if (dev < si->dev) {
413 per_dev->offset = si->obj_offset + stripe_unit -
414 si->unit_off;
415 cur_len = stripe_unit;
416 } else if (dev == si->dev) {
417 per_dev->offset = si->obj_offset;
418 cur_len = stripe_unit - si->unit_off;
419 page_off = si->unit_off & ~PAGE_MASK;
420 BUG_ON(page_off && (page_off != ios->pgbase));
421 } else { /* dev > si->dev */
422 per_dev->offset = si->obj_offset - si->unit_off;
423 cur_len = stripe_unit;
424 }
425
426 if (max_comp < comp)
427 max_comp = comp;
428
429 dev += mirrors_p1;
430 dev = (dev % devs_in_group) + first_dev;
431 } else {
432 cur_len = stripe_unit;
433 }
434 if (cur_len >= length)
435 cur_len = length;
436
 437 ret = _add_stripe_unit(ios, &cur_pg, page_off, per_dev,
438 cur_len);
439 if (unlikely(ret))
440 goto out;
441
442 comp += mirrors_p1;
443 comp = (comp % devs_in_group) + first_comp;
444
445 length -= cur_len;
446 }
447out:
448 ios->numdevs = max_comp + mirrors_p1;
449 ios->pages_consumed = cur_pg;
450 return ret;
451}
452
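The three offset cases account for the si->unit_off bytes already consumed
inside the current stripe unit. Relative to the device si->dev where the I/O
starts (a summary in the code's own notation):

    /* dev <  si->dev: already holds a full unit of this minor stripe;
     *                 next data goes at obj_offset + stripe_unit - unit_off
     * dev == si->dev: I/O starts here; first unit shortened to
     *                 stripe_unit - unit_off, at obj_offset
     * dev >  si->dev: this minor stripe not reached yet; the unit starts
     *                 at obj_offset - unit_off
     */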
453static int _prepare_for_striping(struct exofs_io_state *ios)
454{
455 u64 length = ios->length;
456 struct _striping_info si;
457 unsigned devs_in_group = ios->layout->group_width *
458 ios->layout->mirrors_p1;
459 unsigned first_comp = 0;
460 int ret = 0;
461
462 _calc_stripe_info(ios, ios->offset, &si);
463
464 if (!ios->pages) {
465 if (ios->kern_buff) {
466 struct exofs_per_dev_state *per_dev = &ios->per_dev[0];
467
468 per_dev->offset = si.obj_offset;
469 per_dev->dev = si.dev;
470
471 /* no cross device without page array */
472 BUG_ON((ios->layout->group_width > 1) &&
473 (si.unit_off + ios->length >
474 ios->layout->stripe_unit));
475 }
476 ios->numdevs = ios->layout->mirrors_p1;
477 return 0;
478 }
479
480 while (length) {
481 if (length < si.group_length)
482 si.group_length = length;
483
484 ret = _prepare_one_group(ios, si.group_length, &si, first_comp);
485 if (unlikely(ret))
486 goto out;
487
488 length -= si.group_length;
489
490 si.group_length = si.total_group_length;
491 si.unit_off = 0;
492 ++si.Major;
493 si.obj_offset = si.Major * ios->layout->stripe_unit *
494 ios->layout->group_depth;
495
496 si.dev = (si.dev - (si.dev % devs_in_group)) + devs_in_group;
497 si.dev %= ios->layout->s_numdevs;
498
499 first_comp += devs_in_group;
500 first_comp %= ios->layout->s_numdevs;
501 }
502
503out:
504 return ret;
505}
506
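Each loop iteration maps one group's worth of the request, then advances the
striping state. Continuing the hypothetical layout from the sketch above and
assuming s_numdevs = 8 (so devs_in_group = 4):

    /* after the first (possibly partial) group:
     *   si.group_length = T (full groups from now on)
     *   si.unit_off     = 0 (groups start stripe-aligned)
     *   si.dev: 1 -> floor to 0, +4 -> 4; next pass 8 % 8 -> 0 (wraps)
     *   first_comp advances by 4 so per_dev[] slots do not collide
     */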
208int exofs_sbi_create(struct exofs_io_state *ios) 507int exofs_sbi_create(struct exofs_io_state *ios)
209{ 508{
210 int i, ret; 509 int i, ret;
211 510
212 for (i = 0; i < ios->sbi->s_numdevs; i++) { 511 for (i = 0; i < ios->layout->s_numdevs; i++) {
213 struct osd_request *or; 512 struct osd_request *or;
214 513
215 or = osd_start_request(ios->sbi->s_ods[i], GFP_KERNEL); 514 or = osd_start_request(exofs_ios_od(ios, i), GFP_KERNEL);
216 if (unlikely(!or)) { 515 if (unlikely(!or)) {
217 EXOFS_ERR("%s: osd_start_request failed\n", __func__); 516 EXOFS_ERR("%s: osd_start_request failed\n", __func__);
218 ret = -ENOMEM; 517 ret = -ENOMEM;
@@ -233,10 +532,10 @@ int exofs_sbi_remove(struct exofs_io_state *ios)
233{ 532{
234 int i, ret; 533 int i, ret;
235 534
236 for (i = 0; i < ios->sbi->s_numdevs; i++) { 535 for (i = 0; i < ios->layout->s_numdevs; i++) {
237 struct osd_request *or; 536 struct osd_request *or;
238 537
239 or = osd_start_request(ios->sbi->s_ods[i], GFP_KERNEL); 538 or = osd_start_request(exofs_ios_od(ios, i), GFP_KERNEL);
240 if (unlikely(!or)) { 539 if (unlikely(!or)) {
241 EXOFS_ERR("%s: osd_start_request failed\n", __func__); 540 EXOFS_ERR("%s: osd_start_request failed\n", __func__);
242 ret = -ENOMEM; 541 ret = -ENOMEM;
@@ -253,51 +552,74 @@ out:
253 return ret; 552 return ret;
254} 553}
255 554
256int exofs_sbi_write(struct exofs_io_state *ios) 555static int _sbi_write_mirror(struct exofs_io_state *ios, int cur_comp)
257{ 556{
258 int i, ret; 557 struct exofs_per_dev_state *master_dev = &ios->per_dev[cur_comp];
558 unsigned dev = ios->per_dev[cur_comp].dev;
559 unsigned last_comp = cur_comp + ios->layout->mirrors_p1;
560 int ret = 0;
259 561
260 for (i = 0; i < ios->sbi->s_numdevs; i++) { 562 if (ios->pages && !master_dev->length)
563 return 0; /* Just an empty slot */
564
565 for (; cur_comp < last_comp; ++cur_comp, ++dev) {
566 struct exofs_per_dev_state *per_dev = &ios->per_dev[cur_comp];
261 struct osd_request *or; 567 struct osd_request *or;
262 568
263 or = osd_start_request(ios->sbi->s_ods[i], GFP_KERNEL); 569 or = osd_start_request(exofs_ios_od(ios, dev), GFP_KERNEL);
264 if (unlikely(!or)) { 570 if (unlikely(!or)) {
265 EXOFS_ERR("%s: osd_start_request failed\n", __func__); 571 EXOFS_ERR("%s: osd_start_request failed\n", __func__);
266 ret = -ENOMEM; 572 ret = -ENOMEM;
267 goto out; 573 goto out;
268 } 574 }
269 ios->per_dev[i].or = or; 575 per_dev->or = or;
270 ios->numdevs++; 576 per_dev->offset = master_dev->offset;
271 577
272 if (ios->bio) { 578 if (ios->pages) {
273 struct bio *bio; 579 struct bio *bio;
274 580
275 if (i != 0) { 581 if (per_dev != master_dev) {
276 bio = bio_kmalloc(GFP_KERNEL, 582 bio = bio_kmalloc(GFP_KERNEL,
277 ios->bio->bi_max_vecs); 583 master_dev->bio->bi_max_vecs);
278 if (unlikely(!bio)) { 584 if (unlikely(!bio)) {
585 EXOFS_DBGMSG(
586 "Faild to allocate BIO size=%u\n",
587 master_dev->bio->bi_max_vecs);
279 ret = -ENOMEM; 588 ret = -ENOMEM;
280 goto out; 589 goto out;
281 } 590 }
282 591
283 __bio_clone(bio, ios->bio); 592 __bio_clone(bio, master_dev->bio);
284 bio->bi_bdev = NULL; 593 bio->bi_bdev = NULL;
285 bio->bi_next = NULL; 594 bio->bi_next = NULL;
286 ios->per_dev[i].bio = bio; 595 per_dev->length = master_dev->length;
596 per_dev->bio = bio;
597 per_dev->dev = dev;
287 } else { 598 } else {
288 bio = ios->bio; 599 bio = master_dev->bio;
600 /* FIXME: bio_set_dir() */
601 bio->bi_rw |= (1 << BIO_RW);
289 } 602 }
290 603
291 osd_req_write(or, &ios->obj, ios->offset, bio, 604 osd_req_write(or, &ios->obj, per_dev->offset, bio,
292 ios->length); 605 per_dev->length);
293/* EXOFS_DBGMSG("write sync=%d\n", sync);*/ 606 EXOFS_DBGMSG("write(0x%llx) offset=0x%llx "
607 "length=0x%llx dev=%d\n",
608 _LLU(ios->obj.id), _LLU(per_dev->offset),
609 _LLU(per_dev->length), dev);
294 } else if (ios->kern_buff) { 610 } else if (ios->kern_buff) {
295 osd_req_write_kern(or, &ios->obj, ios->offset, 611 ret = osd_req_write_kern(or, &ios->obj, per_dev->offset,
296 ios->kern_buff, ios->length); 612 ios->kern_buff, ios->length);
297/* EXOFS_DBGMSG("write_kern sync=%d\n", sync);*/ 613 if (unlikely(ret))
614 goto out;
615 EXOFS_DBGMSG2("write_kern(0x%llx) offset=0x%llx "
616 "length=0x%llx dev=%d\n",
617 _LLU(ios->obj.id), _LLU(per_dev->offset),
618 _LLU(ios->length), dev);
298 } else { 619 } else {
299 osd_req_set_attributes(or, &ios->obj); 620 osd_req_set_attributes(or, &ios->obj);
300/* EXOFS_DBGMSG("set_attributes sync=%d\n", sync);*/ 621 EXOFS_DBGMSG2("obj(0x%llx) set_attributes=%d dev=%d\n",
622 _LLU(ios->obj.id), ios->out_attr_len, dev);
301 } 623 }
302 624
303 if (ios->out_attr) 625 if (ios->out_attr)
@@ -308,54 +630,93 @@ int exofs_sbi_write(struct exofs_io_state *ios)
308 osd_req_add_get_attr_list(or, ios->in_attr, 630 osd_req_add_get_attr_list(or, ios->in_attr,
309 ios->in_attr_len); 631 ios->in_attr_len);
310 } 632 }
311 ret = exofs_io_execute(ios);
312 633
313out: 634out:
314 return ret; 635 return ret;
315} 636}
316 637
317int exofs_sbi_read(struct exofs_io_state *ios) 638int exofs_sbi_write(struct exofs_io_state *ios)
318{ 639{
319 int i, ret; 640 int i;
641 int ret;
320 642
321 for (i = 0; i < 1; i++) { 643 ret = _prepare_for_striping(ios);
322 struct osd_request *or; 644 if (unlikely(ret))
323 unsigned first_dev = (unsigned)ios->obj.id; 645 return ret;
324 646
325 first_dev %= ios->sbi->s_numdevs; 647 for (i = 0; i < ios->numdevs; i += ios->layout->mirrors_p1) {
326 or = osd_start_request(ios->sbi->s_ods[first_dev], GFP_KERNEL); 648 ret = _sbi_write_mirror(ios, i);
327 if (unlikely(!or)) { 649 if (unlikely(ret))
328 EXOFS_ERR("%s: osd_start_request failed\n", __func__); 650 return ret;
329 ret = -ENOMEM; 651 }
330 goto out;
331 }
332 ios->per_dev[i].or = or;
333 ios->numdevs++;
334 652
335 if (ios->bio) { 653 ret = exofs_io_execute(ios);
336 osd_req_read(or, &ios->obj, ios->offset, ios->bio, 654 return ret;
337 ios->length); 655}
338/* EXOFS_DBGMSG("read sync=%d\n", sync);*/
339 } else if (ios->kern_buff) {
340 osd_req_read_kern(or, &ios->obj, ios->offset,
341 ios->kern_buff, ios->length);
342/* EXOFS_DBGMSG("read_kern sync=%d\n", sync);*/
343 } else {
344 osd_req_get_attributes(or, &ios->obj);
345/* EXOFS_DBGMSG("get_attributes sync=%d\n", sync);*/
346 }
347 656
348 if (ios->out_attr) 657static int _sbi_read_mirror(struct exofs_io_state *ios, unsigned cur_comp)
349 osd_req_add_set_attr_list(or, ios->out_attr, 658{
350 ios->out_attr_len); 659 struct osd_request *or;
660 struct exofs_per_dev_state *per_dev = &ios->per_dev[cur_comp];
661 unsigned first_dev = (unsigned)ios->obj.id;
351 662
352 if (ios->in_attr) 663 if (ios->pages && !per_dev->length)
353 osd_req_add_get_attr_list(or, ios->in_attr, 664 return 0; /* Just an empty slot */
354 ios->in_attr_len); 665
666 first_dev = per_dev->dev + first_dev % ios->layout->mirrors_p1;
667 or = osd_start_request(exofs_ios_od(ios, first_dev), GFP_KERNEL);
668 if (unlikely(!or)) {
669 EXOFS_ERR("%s: osd_start_request failed\n", __func__);
670 return -ENOMEM;
355 } 671 }
356 ret = exofs_io_execute(ios); 672 per_dev->or = or;
673
674 if (ios->pages) {
675 osd_req_read(or, &ios->obj, per_dev->offset,
676 per_dev->bio, per_dev->length);
677 EXOFS_DBGMSG("read(0x%llx) offset=0x%llx length=0x%llx"
678 " dev=%d\n", _LLU(ios->obj.id),
679 _LLU(per_dev->offset), _LLU(per_dev->length),
680 first_dev);
681 } else if (ios->kern_buff) {
682 int ret = osd_req_read_kern(or, &ios->obj, per_dev->offset,
683 ios->kern_buff, ios->length);
684 EXOFS_DBGMSG2("read_kern(0x%llx) offset=0x%llx "
685 "length=0x%llx dev=%d ret=>%d\n",
686 _LLU(ios->obj.id), _LLU(per_dev->offset),
687 _LLU(ios->length), first_dev, ret);
688 if (unlikely(ret))
689 return ret;
690 } else {
691 osd_req_get_attributes(or, &ios->obj);
692 EXOFS_DBGMSG2("obj(0x%llx) get_attributes=%d dev=%d\n",
693 _LLU(ios->obj.id), ios->in_attr_len, first_dev);
694 }
695 if (ios->out_attr)
696 osd_req_add_set_attr_list(or, ios->out_attr, ios->out_attr_len);
357 697
358out: 698 if (ios->in_attr)
699 osd_req_add_get_attr_list(or, ios->in_attr, ios->in_attr_len);
700
701 return 0;
702}
703
704int exofs_sbi_read(struct exofs_io_state *ios)
705{
706 int i;
707 int ret;
708
709 ret = _prepare_for_striping(ios);
710 if (unlikely(ret))
711 return ret;
712
713 for (i = 0; i < ios->numdevs; i += ios->layout->mirrors_p1) {
714 ret = _sbi_read_mirror(ios, i);
715 if (unlikely(ret))
716 return ret;
717 }
718
719 ret = exofs_io_execute(ios);
359 return ret; 720 return ret;
360} 721}
361 722
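Both paths now share one shape: _prepare_for_striping() fills per_dev[], one
request per mirror group is built, and everything is submitted by a single
exofs_io_execute(). Writes clone the master bio to every mirror; reads use
only one mirror, chosen by object id so load spreads across replicas. For
example, with hypothetical values mirrors_p1 = 2, per_dev->dev = 4,
obj.id = 7:

    /* first_dev = 4 + 7 % 2 = 5  ->  read the second mirror of component 4 */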
@@ -380,42 +741,82 @@ int extract_attr_from_ios(struct exofs_io_state *ios, struct osd_attr *attr)
380 return -EIO; 741 return -EIO;
381} 742}
382 743
744static int _truncate_mirrors(struct exofs_io_state *ios, unsigned cur_comp,
745 struct osd_attr *attr)
746{
747 int last_comp = cur_comp + ios->layout->mirrors_p1;
748
749 for (; cur_comp < last_comp; ++cur_comp) {
750 struct exofs_per_dev_state *per_dev = &ios->per_dev[cur_comp];
751 struct osd_request *or;
752
753 or = osd_start_request(exofs_ios_od(ios, cur_comp), GFP_KERNEL);
754 if (unlikely(!or)) {
755 EXOFS_ERR("%s: osd_start_request failed\n", __func__);
756 return -ENOMEM;
757 }
758 per_dev->or = or;
759
760 osd_req_set_attributes(or, &ios->obj);
761 osd_req_add_set_attr_list(or, attr, 1);
762 }
763
764 return 0;
765}
766
383int exofs_oi_truncate(struct exofs_i_info *oi, u64 size) 767int exofs_oi_truncate(struct exofs_i_info *oi, u64 size)
384{ 768{
385 struct exofs_sb_info *sbi = oi->vfs_inode.i_sb->s_fs_info; 769 struct exofs_sb_info *sbi = oi->vfs_inode.i_sb->s_fs_info;
386 struct exofs_io_state *ios; 770 struct exofs_io_state *ios;
387 struct osd_attr attr; 771 struct exofs_trunc_attr {
388 __be64 newsize; 772 struct osd_attr attr;
773 __be64 newsize;
774 } *size_attrs;
775 struct _striping_info si;
389 int i, ret; 776 int i, ret;
390 777
391 if (exofs_get_io_state(sbi, &ios)) 778 ret = exofs_get_io_state(&sbi->layout, &ios);
392 return -ENOMEM; 779 if (unlikely(ret))
780 return ret;
781
782 size_attrs = kcalloc(ios->layout->group_width, sizeof(*size_attrs),
783 GFP_KERNEL);
784 if (unlikely(!size_attrs)) {
785 ret = -ENOMEM;
786 goto out;
787 }
393 788
394 ios->obj.id = exofs_oi_objno(oi); 789 ios->obj.id = exofs_oi_objno(oi);
395 ios->cred = oi->i_cred; 790 ios->cred = oi->i_cred;
396 791
397 newsize = cpu_to_be64(size); 792 ios->numdevs = ios->layout->s_numdevs;
398 attr = g_attr_logical_length; 793 _calc_stripe_info(ios, size, &si);
399 attr.val_ptr = &newsize;
400 794
401 for (i = 0; i < sbi->s_numdevs; i++) { 795 for (i = 0; i < ios->layout->group_width; ++i) {
402 struct osd_request *or; 796 struct exofs_trunc_attr *size_attr = &size_attrs[i];
797 u64 obj_size;
403 798
404 or = osd_start_request(sbi->s_ods[i], GFP_KERNEL); 799 if (i < si.dev)
405 if (unlikely(!or)) { 800 obj_size = si.obj_offset +
406 EXOFS_ERR("%s: osd_start_request failed\n", __func__); 801 ios->layout->stripe_unit - si.unit_off;
407 ret = -ENOMEM; 802 else if (i == si.dev)
408 goto out; 803 obj_size = si.obj_offset;
409 } 804 else /* i > si.dev */
410 ios->per_dev[i].or = or; 805 obj_size = si.obj_offset - si.unit_off;
411 ios->numdevs++;
412 806
413 osd_req_set_attributes(or, &ios->obj); 807 size_attr->newsize = cpu_to_be64(obj_size);
414 osd_req_add_set_attr_list(or, &attr, 1); 808 size_attr->attr = g_attr_logical_length;
809 size_attr->attr.val_ptr = &size_attr->newsize;
810
811 ret = _truncate_mirrors(ios, i * ios->layout->mirrors_p1,
812 &size_attr->attr);
813 if (unlikely(ret))
814 goto out;
415 } 815 }
416 ret = exofs_io_execute(ios); 816 ret = exofs_io_execute(ios);
417 817
418out: 818out:
819 kfree(size_attrs);
419 exofs_put_io_state(ios); 820 exofs_put_io_state(ios);
420 return ret; 821 return ret;
421} 822}
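Truncate must set a different logical length on each component object because
the cut point falls mid-stripe. Reusing the hypothetical layout from the
user-space sketch earlier (64KiB units, width 4) with size = 9MiB + 96KiB,
_calc_stripe_info() yields si.dev = 1, si.unit_off = 32768 and
si.obj_offset = 1343488, so the per-component sizes become:

    /* i = 0 (< si.dev):  1343488 + 65536 - 32768 = 1376256
     * i = 1 (== si.dev): 1343488
     * i = 2, 3 (>):      1343488 - 32768 = 1310720
     * each size is then applied to that component's mirrors_p1 replicas
     * by _truncate_mirrors().
     */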
diff --git a/fs/exofs/super.c b/fs/exofs/super.c
index a1d1e77b12eb..6cf5e4e84d61 100644
--- a/fs/exofs/super.c
+++ b/fs/exofs/super.c
@@ -210,7 +210,7 @@ int exofs_sync_fs(struct super_block *sb, int wait)
210 sbi = sb->s_fs_info; 210 sbi = sb->s_fs_info;
211 fscb = &sbi->s_fscb; 211 fscb = &sbi->s_fscb;
212 212
213 ret = exofs_get_io_state(sbi, &ios); 213 ret = exofs_get_io_state(&sbi->layout, &ios);
214 if (ret) 214 if (ret)
215 goto out; 215 goto out;
216 216
@@ -264,12 +264,12 @@ static void _exofs_print_device(const char *msg, const char *dev_path,
264 264
265void exofs_free_sbi(struct exofs_sb_info *sbi) 265void exofs_free_sbi(struct exofs_sb_info *sbi)
266{ 266{
267 while (sbi->s_numdevs) { 267 while (sbi->layout.s_numdevs) {
268 int i = --sbi->s_numdevs; 268 int i = --sbi->layout.s_numdevs;
269 struct osd_dev *od = sbi->s_ods[i]; 269 struct osd_dev *od = sbi->layout.s_ods[i];
270 270
271 if (od) { 271 if (od) {
272 sbi->s_ods[i] = NULL; 272 sbi->layout.s_ods[i] = NULL;
273 osduld_put_device(od); 273 osduld_put_device(od);
274 } 274 }
275 } 275 }
@@ -298,7 +298,8 @@ static void exofs_put_super(struct super_block *sb)
298 msecs_to_jiffies(100)); 298 msecs_to_jiffies(100));
299 } 299 }
300 300
301 _exofs_print_device("Unmounting", NULL, sbi->s_ods[0], sbi->s_pid); 301 _exofs_print_device("Unmounting", NULL, sbi->layout.s_ods[0],
302 sbi->layout.s_pid);
302 303
303 exofs_free_sbi(sbi); 304 exofs_free_sbi(sbi);
304 sb->s_fs_info = NULL; 305 sb->s_fs_info = NULL;
@@ -307,6 +308,8 @@ static void exofs_put_super(struct super_block *sb)
307static int _read_and_match_data_map(struct exofs_sb_info *sbi, unsigned numdevs, 308static int _read_and_match_data_map(struct exofs_sb_info *sbi, unsigned numdevs,
308 struct exofs_device_table *dt) 309 struct exofs_device_table *dt)
309{ 310{
311 u64 stripe_length;
312
310 sbi->data_map.odm_num_comps = 313 sbi->data_map.odm_num_comps =
311 le32_to_cpu(dt->dt_data_map.cb_num_comps); 314 le32_to_cpu(dt->dt_data_map.cb_num_comps);
312 sbi->data_map.odm_stripe_unit = 315 sbi->data_map.odm_stripe_unit =
@@ -320,14 +323,63 @@ static int _read_and_match_data_map(struct exofs_sb_info *sbi, unsigned numdevs,
320 sbi->data_map.odm_raid_algorithm = 323 sbi->data_map.odm_raid_algorithm =
321 le32_to_cpu(dt->dt_data_map.cb_raid_algorithm); 324 le32_to_cpu(dt->dt_data_map.cb_raid_algorithm);
322 325
323/* FIXME: Hard coded mirror only for now. if not so do not mount */ 326/* FIXME: Only raid0 for now. If not, do not mount */
324 if ((sbi->data_map.odm_num_comps != numdevs) || 327 if (sbi->data_map.odm_num_comps != numdevs) {
325 (sbi->data_map.odm_stripe_unit != EXOFS_BLKSIZE) || 328 EXOFS_ERR("odm_num_comps(%u) != numdevs(%u)\n",
326 (sbi->data_map.odm_raid_algorithm != PNFS_OSD_RAID_0) || 329 sbi->data_map.odm_num_comps, numdevs);
327 (sbi->data_map.odm_mirror_cnt != (numdevs - 1)))
328 return -EINVAL; 330 return -EINVAL;
329 else 331 }
330 return 0; 332 if (sbi->data_map.odm_raid_algorithm != PNFS_OSD_RAID_0) {
333 EXOFS_ERR("Only RAID_0 for now\n");
334 return -EINVAL;
335 }
336 if (0 != (numdevs % (sbi->data_map.odm_mirror_cnt + 1))) {
337 EXOFS_ERR("Data Map wrong, numdevs=%d mirrors=%d\n",
338 numdevs, sbi->data_map.odm_mirror_cnt);
339 return -EINVAL;
340 }
341
342 if (0 != (sbi->data_map.odm_stripe_unit & ~PAGE_MASK)) {
343 EXOFS_ERR("Stripe Unit(0x%llx)"
344 " must be Multples of PAGE_SIZE(0x%lx)\n",
345 _LLU(sbi->data_map.odm_stripe_unit), PAGE_SIZE);
346 return -EINVAL;
347 }
348
349 sbi->layout.stripe_unit = sbi->data_map.odm_stripe_unit;
350 sbi->layout.mirrors_p1 = sbi->data_map.odm_mirror_cnt + 1;
351
352 if (sbi->data_map.odm_group_width) {
353 sbi->layout.group_width = sbi->data_map.odm_group_width;
354 sbi->layout.group_depth = sbi->data_map.odm_group_depth;
355 if (!sbi->layout.group_depth) {
356 EXOFS_ERR("group_depth == 0 && group_width != 0\n");
357 return -EINVAL;
358 }
359 sbi->layout.group_count = sbi->data_map.odm_num_comps /
360 sbi->layout.mirrors_p1 /
361 sbi->data_map.odm_group_width;
362 } else {
363 if (sbi->data_map.odm_group_depth) {
364 printk(KERN_NOTICE "Warning: group_depth ignored "
365 "group_width == 0 && group_depth == %d\n",
366 sbi->data_map.odm_group_depth);
367 sbi->data_map.odm_group_depth = 0;
368 }
369 sbi->layout.group_width = sbi->data_map.odm_num_comps /
370 sbi->layout.mirrors_p1;
371 sbi->layout.group_depth = -1;
372 sbi->layout.group_count = 1;
373 }
374
375 stripe_length = (u64)sbi->layout.group_width * sbi->layout.stripe_unit;
376 if (stripe_length >= (1ULL << 32)) {
377 EXOFS_ERR("Total Stripe length(0x%llx)"
378 " >= 32bit is not supported\n", _LLU(stripe_length));
379 return -EINVAL;
380 }
381
382 return 0;
331} 383}
332 384
333/* @odi is valid only as long as @fscb_dev is valid */ 385/* @odi is valid only as long as @fscb_dev is valid */
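The derived layout fields follow directly from the on-disk data map. Two
hypothetical device tables as examples:

    /* numdevs = 8, mirror_cnt = 1, group_width = 2, group_depth = d:
     *   mirrors_p1 = 2, group_count = 8 / 2 / 2 = 2
     * numdevs = 8, mirror_cnt = 1, group_width = 0:
     *   one group spans the table: group_width = 8 / 2 = 4,
     *   group_depth = -1 (unbounded), group_count = 1
     */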
@@ -361,7 +413,7 @@ static int exofs_read_lookup_dev_table(struct exofs_sb_info **psbi,
361{ 413{
362 struct exofs_sb_info *sbi = *psbi; 414 struct exofs_sb_info *sbi = *psbi;
363 struct osd_dev *fscb_od; 415 struct osd_dev *fscb_od;
364 struct osd_obj_id obj = {.partition = sbi->s_pid, 416 struct osd_obj_id obj = {.partition = sbi->layout.s_pid,
365 .id = EXOFS_DEVTABLE_ID}; 417 .id = EXOFS_DEVTABLE_ID};
366 struct exofs_device_table *dt; 418 struct exofs_device_table *dt;
367 unsigned table_bytes = table_count * sizeof(dt->dt_dev_table[0]) + 419 unsigned table_bytes = table_count * sizeof(dt->dt_dev_table[0]) +
@@ -376,9 +428,9 @@ static int exofs_read_lookup_dev_table(struct exofs_sb_info **psbi,
376 return -ENOMEM; 428 return -ENOMEM;
377 } 429 }
378 430
379 fscb_od = sbi->s_ods[0]; 431 fscb_od = sbi->layout.s_ods[0];
380 sbi->s_ods[0] = NULL; 432 sbi->layout.s_ods[0] = NULL;
381 sbi->s_numdevs = 0; 433 sbi->layout.s_numdevs = 0;
382 ret = exofs_read_kern(fscb_od, sbi->s_cred, &obj, 0, dt, table_bytes); 434 ret = exofs_read_kern(fscb_od, sbi->s_cred, &obj, 0, dt, table_bytes);
383 if (unlikely(ret)) { 435 if (unlikely(ret)) {
384 EXOFS_ERR("ERROR: reading device table\n"); 436 EXOFS_ERR("ERROR: reading device table\n");
@@ -397,14 +449,15 @@ static int exofs_read_lookup_dev_table(struct exofs_sb_info **psbi,
397 goto out; 449 goto out;
398 450
399 if (likely(numdevs > 1)) { 451 if (likely(numdevs > 1)) {
400 unsigned size = numdevs * sizeof(sbi->s_ods[0]); 452 unsigned size = numdevs * sizeof(sbi->layout.s_ods[0]);
401 453
402 sbi = krealloc(sbi, sizeof(*sbi) + size, GFP_KERNEL); 454 sbi = krealloc(sbi, sizeof(*sbi) + size, GFP_KERNEL);
403 if (unlikely(!sbi)) { 455 if (unlikely(!sbi)) {
404 ret = -ENOMEM; 456 ret = -ENOMEM;
405 goto out; 457 goto out;
406 } 458 }
407 memset(&sbi->s_ods[1], 0, size - sizeof(sbi->s_ods[0])); 459 memset(&sbi->layout.s_ods[1], 0,
460 size - sizeof(sbi->layout.s_ods[0]));
408 *psbi = sbi; 461 *psbi = sbi;
409 } 462 }
410 463
@@ -427,8 +480,8 @@ static int exofs_read_lookup_dev_table(struct exofs_sb_info **psbi,
427 * line. We always keep them in device-table order. 480 * line. We always keep them in device-table order.
428 */ 481 */
429 if (fscb_od && osduld_device_same(fscb_od, &odi)) { 482 if (fscb_od && osduld_device_same(fscb_od, &odi)) {
430 sbi->s_ods[i] = fscb_od; 483 sbi->layout.s_ods[i] = fscb_od;
431 ++sbi->s_numdevs; 484 ++sbi->layout.s_numdevs;
432 fscb_od = NULL; 485 fscb_od = NULL;
433 continue; 486 continue;
434 } 487 }
@@ -441,8 +494,8 @@ static int exofs_read_lookup_dev_table(struct exofs_sb_info **psbi,
441 goto out; 494 goto out;
442 } 495 }
443 496
444 sbi->s_ods[i] = od; 497 sbi->layout.s_ods[i] = od;
445 ++sbi->s_numdevs; 498 ++sbi->layout.s_numdevs;
446 499
447 /* Read the fscb of the other devices to make sure the FS 500 /* Read the fscb of the other devices to make sure the FS
448 * partition is there. 501 * partition is there.
@@ -499,9 +552,15 @@ static int exofs_fill_super(struct super_block *sb, void *data, int silent)
499 goto free_sbi; 552 goto free_sbi;
500 } 553 }
501 554
502 sbi->s_ods[0] = od; 555 /* Default layout in case we do not have a device-table */
503 sbi->s_numdevs = 1; 556 sbi->layout.stripe_unit = PAGE_SIZE;
504 sbi->s_pid = opts->pid; 557 sbi->layout.mirrors_p1 = 1;
558 sbi->layout.group_width = 1;
559 sbi->layout.group_depth = -1;
560 sbi->layout.group_count = 1;
561 sbi->layout.s_ods[0] = od;
562 sbi->layout.s_numdevs = 1;
563 sbi->layout.s_pid = opts->pid;
505 sbi->s_timeout = opts->timeout; 564 sbi->s_timeout = opts->timeout;
506 565
507 /* fill in some other data by hand */ 566 /* fill in some other data by hand */
@@ -514,7 +573,7 @@ static int exofs_fill_super(struct super_block *sb, void *data, int silent)
514 sb->s_bdev = NULL; 573 sb->s_bdev = NULL;
515 sb->s_dev = 0; 574 sb->s_dev = 0;
516 575
517 obj.partition = sbi->s_pid; 576 obj.partition = sbi->layout.s_pid;
518 obj.id = EXOFS_SUPER_ID; 577 obj.id = EXOFS_SUPER_ID;
519 exofs_make_credential(sbi->s_cred, &obj); 578 exofs_make_credential(sbi->s_cred, &obj);
520 579
@@ -578,13 +637,13 @@ static int exofs_fill_super(struct super_block *sb, void *data, int silent)
578 goto free_sbi; 637 goto free_sbi;
579 } 638 }
580 639
581 _exofs_print_device("Mounting", opts->dev_name, sbi->s_ods[0], 640 _exofs_print_device("Mounting", opts->dev_name, sbi->layout.s_ods[0],
582 sbi->s_pid); 641 sbi->layout.s_pid);
583 return 0; 642 return 0;
584 643
585free_sbi: 644free_sbi:
586 EXOFS_ERR("Unable to mount exofs on %s pid=0x%llx err=%d\n", 645 EXOFS_ERR("Unable to mount exofs on %s pid=0x%llx err=%d\n",
587 opts->dev_name, sbi->s_pid, ret); 646 opts->dev_name, sbi->layout.s_pid, ret);
588 exofs_free_sbi(sbi); 647 exofs_free_sbi(sbi);
589 return ret; 648 return ret;
590} 649}
@@ -627,7 +686,7 @@ static int exofs_statfs(struct dentry *dentry, struct kstatfs *buf)
627 uint8_t cred_a[OSD_CAP_LEN]; 686 uint8_t cred_a[OSD_CAP_LEN];
628 int ret; 687 int ret;
629 688
630 ret = exofs_get_io_state(sbi, &ios); 689 ret = exofs_get_io_state(&sbi->layout, &ios);
631 if (ret) { 690 if (ret) {
632 EXOFS_DBGMSG("exofs_get_io_state failed.\n"); 691 EXOFS_DBGMSG("exofs_get_io_state failed.\n");
633 return ret; 692 return ret;
diff --git a/fs/ext2/ext2.h b/fs/ext2/ext2.h
index 061914add3cf..0b038e47ad2f 100644
--- a/fs/ext2/ext2.h
+++ b/fs/ext2/ext2.h
@@ -118,7 +118,7 @@ extern unsigned long ext2_count_free (struct buffer_head *, unsigned);
118 118
119/* inode.c */ 119/* inode.c */
120extern struct inode *ext2_iget (struct super_block *, unsigned long); 120extern struct inode *ext2_iget (struct super_block *, unsigned long);
121extern int ext2_write_inode (struct inode *, int); 121extern int ext2_write_inode (struct inode *, struct writeback_control *);
122extern void ext2_delete_inode (struct inode *); 122extern void ext2_delete_inode (struct inode *);
123extern int ext2_sync_inode (struct inode *); 123extern int ext2_sync_inode (struct inode *);
124extern int ext2_get_block(struct inode *, sector_t, struct buffer_head *, int); 124extern int ext2_get_block(struct inode *, sector_t, struct buffer_head *, int);
diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c
index 45ff49f0a4b5..fc13cc119aad 100644
--- a/fs/ext2/inode.c
+++ b/fs/ext2/inode.c
@@ -41,6 +41,8 @@ MODULE_AUTHOR("Remy Card and others");
41MODULE_DESCRIPTION("Second Extended Filesystem"); 41MODULE_DESCRIPTION("Second Extended Filesystem");
42MODULE_LICENSE("GPL"); 42MODULE_LICENSE("GPL");
43 43
44static int __ext2_write_inode(struct inode *inode, int do_sync);
45
44/* 46/*
45 * Test whether an inode is a fast symlink. 47 * Test whether an inode is a fast symlink.
46 */ 48 */
@@ -66,7 +68,7 @@ void ext2_delete_inode (struct inode * inode)
66 goto no_delete; 68 goto no_delete;
67 EXT2_I(inode)->i_dtime = get_seconds(); 69 EXT2_I(inode)->i_dtime = get_seconds();
68 mark_inode_dirty(inode); 70 mark_inode_dirty(inode);
69 ext2_write_inode(inode, inode_needs_sync(inode)); 71 __ext2_write_inode(inode, inode_needs_sync(inode));
70 72
71 inode->i_size = 0; 73 inode->i_size = 0;
72 if (inode->i_blocks) 74 if (inode->i_blocks)
@@ -1337,7 +1339,7 @@ bad_inode:
1337 return ERR_PTR(ret); 1339 return ERR_PTR(ret);
1338} 1340}
1339 1341
1340int ext2_write_inode(struct inode *inode, int do_sync) 1342static int __ext2_write_inode(struct inode *inode, int do_sync)
1341{ 1343{
1342 struct ext2_inode_info *ei = EXT2_I(inode); 1344 struct ext2_inode_info *ei = EXT2_I(inode);
1343 struct super_block *sb = inode->i_sb; 1345 struct super_block *sb = inode->i_sb;
@@ -1442,6 +1444,11 @@ int ext2_write_inode(struct inode *inode, int do_sync)
1442 return err; 1444 return err;
1443} 1445}
1444 1446
1447int ext2_write_inode(struct inode *inode, struct writeback_control *wbc)
1448{
1449 return __ext2_write_inode(inode, wbc->sync_mode == WB_SYNC_ALL);
1450}
1451
1445int ext2_sync_inode(struct inode *inode) 1452int ext2_sync_inode(struct inode *inode)
1446{ 1453{
1447 struct writeback_control wbc = { 1454 struct writeback_control wbc = {
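The ->write_inode() signature change repeats across every filesystem touched
by this series: the old int wait flag becomes a test of wbc->sync_mode. A
minimal sketch of the conversion for a hypothetical filesystem (foofs and
__foofs_write_inode are placeholder names, not from the patch):

    static int foofs_write_inode(struct inode *inode,
                                 struct writeback_control *wbc)
    {
            /* the old "wait" semantics map onto WB_SYNC_ALL */
            int do_sync = wbc->sync_mode == WB_SYNC_ALL;

            return __foofs_write_inode(inode, do_sync);
    }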
diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c
index ffbbc65e3f68..7f920b7263a4 100644
--- a/fs/ext3/inode.c
+++ b/fs/ext3/inode.c
@@ -3107,7 +3107,7 @@ out_brelse:
3107 * `stuff()' is running, and the new i_size will be lost. Plus the inode 3107 * `stuff()' is running, and the new i_size will be lost. Plus the inode
3108 * will no longer be on the superblock's dirty inode list. 3108 * will no longer be on the superblock's dirty inode list.
3109 */ 3109 */
3110int ext3_write_inode(struct inode *inode, int wait) 3110int ext3_write_inode(struct inode *inode, struct writeback_control *wbc)
3111{ 3111{
3112 if (current->flags & PF_MEMALLOC) 3112 if (current->flags & PF_MEMALLOC)
3113 return 0; 3113 return 0;
@@ -3118,7 +3118,7 @@ int ext3_write_inode(struct inode *inode, int wait)
3118 return -EIO; 3118 return -EIO;
3119 } 3119 }
3120 3120
3121 if (!wait) 3121 if (wbc->sync_mode != WB_SYNC_ALL)
3122 return 0; 3122 return 0;
3123 3123
3124 return ext3_force_commit(inode->i_sb); 3124 return ext3_force_commit(inode->i_sb);
diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
index 22bc7435d913..d2f37a5516c7 100644
--- a/fs/ext4/balloc.c
+++ b/fs/ext4/balloc.c
@@ -97,8 +97,8 @@ unsigned ext4_init_block_bitmap(struct super_block *sb, struct buffer_head *bh,
97 /* If checksum is bad mark all blocks used to prevent allocation 97 /* If checksum is bad mark all blocks used to prevent allocation
98 * essentially implementing a per-group read-only flag. */ 98 * essentially implementing a per-group read-only flag. */
99 if (!ext4_group_desc_csum_verify(sbi, block_group, gdp)) { 99 if (!ext4_group_desc_csum_verify(sbi, block_group, gdp)) {
100 ext4_error(sb, __func__, 100 ext4_error(sb, "Checksum bad for group %u",
101 "Checksum bad for group %u", block_group); 101 block_group);
102 ext4_free_blks_set(sb, gdp, 0); 102 ext4_free_blks_set(sb, gdp, 0);
103 ext4_free_inodes_set(sb, gdp, 0); 103 ext4_free_inodes_set(sb, gdp, 0);
104 ext4_itable_unused_set(sb, gdp, 0); 104 ext4_itable_unused_set(sb, gdp, 0);
@@ -130,8 +130,7 @@ unsigned ext4_init_block_bitmap(struct super_block *sb, struct buffer_head *bh,
130 * to make sure we calculate the right free blocks 130 * to make sure we calculate the right free blocks
131 */ 131 */
132 group_blocks = ext4_blocks_count(sbi->s_es) - 132 group_blocks = ext4_blocks_count(sbi->s_es) -
133 le32_to_cpu(sbi->s_es->s_first_data_block) - 133 ext4_group_first_block_no(sb, ngroups - 1);
134 (EXT4_BLOCKS_PER_GROUP(sb) * (ngroups - 1));
135 } else { 134 } else {
136 group_blocks = EXT4_BLOCKS_PER_GROUP(sb); 135 group_blocks = EXT4_BLOCKS_PER_GROUP(sb);
137 } 136 }
@@ -189,9 +188,6 @@ unsigned ext4_init_block_bitmap(struct super_block *sb, struct buffer_head *bh,
189 * when a file system is mounted (see ext4_fill_super). 188 * when a file system is mounted (see ext4_fill_super).
190 */ 189 */
191 190
192
193#define in_range(b, first, len) ((b) >= (first) && (b) <= (first) + (len) - 1)
194
195/** 191/**
196 * ext4_get_group_desc() -- load group descriptor from disk 192 * ext4_get_group_desc() -- load group descriptor from disk
197 * @sb: super block 193 * @sb: super block
@@ -210,10 +206,8 @@ struct ext4_group_desc * ext4_get_group_desc(struct super_block *sb,
210 struct ext4_sb_info *sbi = EXT4_SB(sb); 206 struct ext4_sb_info *sbi = EXT4_SB(sb);
211 207
212 if (block_group >= ngroups) { 208 if (block_group >= ngroups) {
213 ext4_error(sb, "ext4_get_group_desc", 209 ext4_error(sb, "block_group >= groups_count - block_group = %u,"
214 "block_group >= groups_count - " 210 " groups_count = %u", block_group, ngroups);
215 "block_group = %u, groups_count = %u",
216 block_group, ngroups);
217 211
218 return NULL; 212 return NULL;
219 } 213 }
@@ -221,8 +215,7 @@ struct ext4_group_desc * ext4_get_group_desc(struct super_block *sb,
221 group_desc = block_group >> EXT4_DESC_PER_BLOCK_BITS(sb); 215 group_desc = block_group >> EXT4_DESC_PER_BLOCK_BITS(sb);
222 offset = block_group & (EXT4_DESC_PER_BLOCK(sb) - 1); 216 offset = block_group & (EXT4_DESC_PER_BLOCK(sb) - 1);
223 if (!sbi->s_group_desc[group_desc]) { 217 if (!sbi->s_group_desc[group_desc]) {
224 ext4_error(sb, "ext4_get_group_desc", 218 ext4_error(sb, "Group descriptor not loaded - "
225 "Group descriptor not loaded - "
226 "block_group = %u, group_desc = %u, desc = %u", 219 "block_group = %u, group_desc = %u, desc = %u",
227 block_group, group_desc, offset); 220 block_group, group_desc, offset);
228 return NULL; 221 return NULL;
@@ -282,9 +275,7 @@ static int ext4_valid_block_bitmap(struct super_block *sb,
282 return 1; 275 return 1;
283 276
284err_out: 277err_out:
285 ext4_error(sb, __func__, 278 ext4_error(sb, "Invalid block bitmap - block_group = %d, block = %llu",
286 "Invalid block bitmap - "
287 "block_group = %d, block = %llu",
288 block_group, bitmap_blk); 279 block_group, bitmap_blk);
289 return 0; 280 return 0;
290} 281}
@@ -311,8 +302,7 @@ ext4_read_block_bitmap(struct super_block *sb, ext4_group_t block_group)
311 bitmap_blk = ext4_block_bitmap(sb, desc); 302 bitmap_blk = ext4_block_bitmap(sb, desc);
312 bh = sb_getblk(sb, bitmap_blk); 303 bh = sb_getblk(sb, bitmap_blk);
313 if (unlikely(!bh)) { 304 if (unlikely(!bh)) {
314 ext4_error(sb, __func__, 305 ext4_error(sb, "Cannot read block bitmap - "
315 "Cannot read block bitmap - "
316 "block_group = %u, block_bitmap = %llu", 306 "block_group = %u, block_bitmap = %llu",
317 block_group, bitmap_blk); 307 block_group, bitmap_blk);
318 return NULL; 308 return NULL;
@@ -354,8 +344,7 @@ ext4_read_block_bitmap(struct super_block *sb, ext4_group_t block_group)
354 set_bitmap_uptodate(bh); 344 set_bitmap_uptodate(bh);
355 if (bh_submit_read(bh) < 0) { 345 if (bh_submit_read(bh) < 0) {
356 put_bh(bh); 346 put_bh(bh);
357 ext4_error(sb, __func__, 347 ext4_error(sb, "Cannot read block bitmap - "
358 "Cannot read block bitmap - "
359 "block_group = %u, block_bitmap = %llu", 348 "block_group = %u, block_bitmap = %llu",
360 block_group, bitmap_blk); 349 block_group, bitmap_blk);
361 return NULL; 350 return NULL;
@@ -419,8 +408,7 @@ void ext4_add_groupblocks(handle_t *handle, struct super_block *sb,
419 in_range(block, ext4_inode_table(sb, desc), sbi->s_itb_per_group) || 408 in_range(block, ext4_inode_table(sb, desc), sbi->s_itb_per_group) ||
420 in_range(block + count - 1, ext4_inode_table(sb, desc), 409 in_range(block + count - 1, ext4_inode_table(sb, desc),
421 sbi->s_itb_per_group)) { 410 sbi->s_itb_per_group)) {
422 ext4_error(sb, __func__, 411 ext4_error(sb, "Adding blocks in system zones - "
423 "Adding blocks in system zones - "
424 "Block = %llu, count = %lu", 412 "Block = %llu, count = %lu",
425 block, count); 413 block, count);
426 goto error_return; 414 goto error_return;
@@ -453,8 +441,7 @@ void ext4_add_groupblocks(handle_t *handle, struct super_block *sb,
453 BUFFER_TRACE(bitmap_bh, "clear bit"); 441 BUFFER_TRACE(bitmap_bh, "clear bit");
454 if (!ext4_clear_bit_atomic(ext4_group_lock_ptr(sb, block_group), 442 if (!ext4_clear_bit_atomic(ext4_group_lock_ptr(sb, block_group),
455 bit + i, bitmap_bh->b_data)) { 443 bit + i, bitmap_bh->b_data)) {
456 ext4_error(sb, __func__, 444 ext4_error(sb, "bit already cleared for block %llu",
457 "bit already cleared for block %llu",
458 (ext4_fsblk_t)(block + i)); 445 (ext4_fsblk_t)(block + i));
459 BUFFER_TRACE(bitmap_bh, "bit already cleared"); 446 BUFFER_TRACE(bitmap_bh, "bit already cleared");
460 } else { 447 } else {
diff --git a/fs/ext4/block_validity.c b/fs/ext4/block_validity.c
index a60ab9aad57d..983f0e127493 100644
--- a/fs/ext4/block_validity.c
+++ b/fs/ext4/block_validity.c
@@ -205,14 +205,14 @@ void ext4_release_system_zone(struct super_block *sb)
205 entry = rb_entry(n, struct ext4_system_zone, node); 205 entry = rb_entry(n, struct ext4_system_zone, node);
206 kmem_cache_free(ext4_system_zone_cachep, entry); 206 kmem_cache_free(ext4_system_zone_cachep, entry);
207 if (!parent) 207 if (!parent)
208 EXT4_SB(sb)->system_blks.rb_node = NULL; 208 EXT4_SB(sb)->system_blks = RB_ROOT;
209 else if (parent->rb_left == n) 209 else if (parent->rb_left == n)
210 parent->rb_left = NULL; 210 parent->rb_left = NULL;
211 else if (parent->rb_right == n) 211 else if (parent->rb_right == n)
212 parent->rb_right = NULL; 212 parent->rb_right = NULL;
213 n = parent; 213 n = parent;
214 } 214 }
215 EXT4_SB(sb)->system_blks.rb_node = NULL; 215 EXT4_SB(sb)->system_blks = RB_ROOT;
216} 216}
217 217
218/* 218/*
diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c
index 9dc93168e262..86cb6d86a048 100644
--- a/fs/ext4/dir.c
+++ b/fs/ext4/dir.c
@@ -83,10 +83,12 @@ int ext4_check_dir_entry(const char *function, struct inode *dir,
83 error_msg = "inode out of bounds"; 83 error_msg = "inode out of bounds";
84 84
85 if (error_msg != NULL) 85 if (error_msg != NULL)
86 ext4_error(dir->i_sb, function, 86 __ext4_error(dir->i_sb, function,
87 "bad entry in directory #%lu: %s - " 87 "bad entry in directory #%lu: %s - block=%llu"
88 "offset=%u, inode=%u, rec_len=%d, name_len=%d", 88 "offset=%u(%u), inode=%u, rec_len=%d, name_len=%d",
89 dir->i_ino, error_msg, offset, 89 dir->i_ino, error_msg,
90 (unsigned long long) bh->b_blocknr,
91 (unsigned) (offset%bh->b_size), offset,
90 le32_to_cpu(de->inode), 92 le32_to_cpu(de->inode),
91 rlen, de->name_len); 93 rlen, de->name_len);
92 return error_msg == NULL ? 1 : 0; 94 return error_msg == NULL ? 1 : 0;
@@ -150,7 +152,7 @@ static int ext4_readdir(struct file *filp,
150 */ 152 */
151 if (!bh) { 153 if (!bh) {
152 if (!dir_has_error) { 154 if (!dir_has_error) {
153 ext4_error(sb, __func__, "directory #%lu " 155 ext4_error(sb, "directory #%lu "
154 "contains a hole at offset %Lu", 156 "contains a hole at offset %Lu",
155 inode->i_ino, 157 inode->i_ino,
156 (unsigned long long) filp->f_pos); 158 (unsigned long long) filp->f_pos);
@@ -303,7 +305,7 @@ static void free_rb_tree_fname(struct rb_root *root)
303 kfree(old); 305 kfree(old);
304 } 306 }
305 if (!parent) 307 if (!parent)
306 root->rb_node = NULL; 308 *root = RB_ROOT;
307 else if (parent->rb_left == n) 309 else if (parent->rb_left == n)
308 parent->rb_left = NULL; 310 parent->rb_left = NULL;
309 else if (parent->rb_right == n) 311 else if (parent->rb_right == n)
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 874d169a193e..bf938cf7c5f0 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -53,6 +53,12 @@
53#define ext4_debug(f, a...) do {} while (0) 53#define ext4_debug(f, a...) do {} while (0)
54#endif 54#endif
55 55
56#define EXT4_ERROR_INODE(inode, fmt, a...) \
57 ext4_error_inode(__func__, (inode), (fmt), ## a);
58
59#define EXT4_ERROR_FILE(file, fmt, a...) \
60 ext4_error_file(__func__, (file), (fmt), ## a);
61
56/* data type for block offset of block group */ 62/* data type for block offset of block group */
57typedef int ext4_grpblk_t; 63typedef int ext4_grpblk_t;
58 64
@@ -133,14 +139,14 @@ struct mpage_da_data {
133 int pages_written; 139 int pages_written;
134 int retval; 140 int retval;
135}; 141};
136#define DIO_AIO_UNWRITTEN 0x1 142#define EXT4_IO_UNWRITTEN 0x1
137typedef struct ext4_io_end { 143typedef struct ext4_io_end {
138 struct list_head list; /* per-file finished AIO list */ 144 struct list_head list; /* per-file finished AIO list */
139 struct inode *inode; /* file being written to */ 145 struct inode *inode; /* file being written to */
140 unsigned int flag; /* unwritten or not */ 146 unsigned int flag; /* unwritten or not */
141 int error; /* I/O error code */ 147 struct page *page; /* page struct for buffer write */
142 ext4_lblk_t offset; /* offset in the file */ 148 loff_t offset; /* offset in the file */
143 size_t size; /* size of the extent */ 149 ssize_t size; /* size of the extent */
144 struct work_struct work; /* data work queue */ 150 struct work_struct work; /* data work queue */
145} ext4_io_end_t; 151} ext4_io_end_t;
146 152
@@ -284,10 +290,12 @@ struct flex_groups {
284#define EXT4_TOPDIR_FL 0x00020000 /* Top of directory hierarchies*/ 290#define EXT4_TOPDIR_FL 0x00020000 /* Top of directory hierarchies*/
285#define EXT4_HUGE_FILE_FL 0x00040000 /* Set to each huge file */ 291#define EXT4_HUGE_FILE_FL 0x00040000 /* Set to each huge file */
286#define EXT4_EXTENTS_FL 0x00080000 /* Inode uses extents */ 292#define EXT4_EXTENTS_FL 0x00080000 /* Inode uses extents */
293#define EXT4_EA_INODE_FL 0x00200000 /* Inode used for large EA */
294#define EXT4_EOFBLOCKS_FL 0x00400000 /* Blocks allocated beyond EOF */
287#define EXT4_RESERVED_FL 0x80000000 /* reserved for ext4 lib */ 295#define EXT4_RESERVED_FL 0x80000000 /* reserved for ext4 lib */
288 296
289#define EXT4_FL_USER_VISIBLE 0x000BDFFF /* User visible flags */ 297#define EXT4_FL_USER_VISIBLE 0x004BDFFF /* User visible flags */
290#define EXT4_FL_USER_MODIFIABLE 0x000B80FF /* User modifiable flags */ 298#define EXT4_FL_USER_MODIFIABLE 0x004B80FF /* User modifiable flags */
291 299
292/* Flags that should be inherited by new inodes from their parent. */ 300/* Flags that should be inherited by new inodes from their parent. */
293#define EXT4_FL_INHERITED (EXT4_SECRM_FL | EXT4_UNRM_FL | EXT4_COMPR_FL |\ 301#define EXT4_FL_INHERITED (EXT4_SECRM_FL | EXT4_UNRM_FL | EXT4_COMPR_FL |\
@@ -313,17 +321,6 @@ static inline __u32 ext4_mask_flags(umode_t mode, __u32 flags)
313 return flags & EXT4_OTHER_FLMASK; 321 return flags & EXT4_OTHER_FLMASK;
314} 322}
315 323
316/*
317 * Inode dynamic state flags
318 */
319#define EXT4_STATE_JDATA 0x00000001 /* journaled data exists */
320#define EXT4_STATE_NEW 0x00000002 /* inode is newly created */
321#define EXT4_STATE_XATTR 0x00000004 /* has in-inode xattrs */
322#define EXT4_STATE_NO_EXPAND 0x00000008 /* No space for expansion */
323#define EXT4_STATE_DA_ALLOC_CLOSE 0x00000010 /* Alloc DA blks on close */
324#define EXT4_STATE_EXT_MIGRATE 0x00000020 /* Inode is migrating */
325#define EXT4_STATE_DIO_UNWRITTEN 0x00000040 /* need convert on dio done*/
326
327/* Used to pass group descriptor data when online resize is done */ 324/* Used to pass group descriptor data when online resize is done */
328struct ext4_new_group_input { 325struct ext4_new_group_input {
329 __u32 group; /* Group number for this data */ 326 __u32 group; /* Group number for this data */
@@ -364,19 +361,20 @@ struct ext4_new_group_data {
 364 /* caller is from the direct IO path, requests creation of 361 /* caller is from the direct IO path, requests creation of
 365 uninitialized extents if not allocated; split the uninitialized 362 uninitialized extents if not allocated; split the uninitialized
 366 extent if blocks have been preallocated already */ 363 extent if blocks have been preallocated already */
367#define EXT4_GET_BLOCKS_DIO 0x0008 364#define EXT4_GET_BLOCKS_PRE_IO 0x0008
368#define EXT4_GET_BLOCKS_CONVERT 0x0010 365#define EXT4_GET_BLOCKS_CONVERT 0x0010
369#define EXT4_GET_BLOCKS_DIO_CREATE_EXT (EXT4_GET_BLOCKS_DIO|\ 366#define EXT4_GET_BLOCKS_IO_CREATE_EXT (EXT4_GET_BLOCKS_PRE_IO|\
367 EXT4_GET_BLOCKS_CREATE_UNINIT_EXT)
368 /* Convert extent to initialized after IO complete */
369#define EXT4_GET_BLOCKS_IO_CONVERT_EXT (EXT4_GET_BLOCKS_CONVERT|\
370 EXT4_GET_BLOCKS_CREATE_UNINIT_EXT) 370 EXT4_GET_BLOCKS_CREATE_UNINIT_EXT)
371 /* Convert extent to initialized after direct IO complete */
372#define EXT4_GET_BLOCKS_DIO_CONVERT_EXT (EXT4_GET_BLOCKS_CONVERT|\
373 EXT4_GET_BLOCKS_DIO_CREATE_EXT)
374 371
375/* 372/*
376 * Flags used by ext4_free_blocks 373 * Flags used by ext4_free_blocks
377 */ 374 */
378#define EXT4_FREE_BLOCKS_METADATA 0x0001 375#define EXT4_FREE_BLOCKS_METADATA 0x0001
379#define EXT4_FREE_BLOCKS_FORGET 0x0002 376#define EXT4_FREE_BLOCKS_FORGET 0x0002
377#define EXT4_FREE_BLOCKS_VALIDATED 0x0004
380 378
381/* 379/*
382 * ioctl commands 380 * ioctl commands
@@ -630,7 +628,7 @@ struct ext4_inode_info {
630 * near to their parent directory's inode. 628 * near to their parent directory's inode.
631 */ 629 */
632 ext4_group_t i_block_group; 630 ext4_group_t i_block_group;
633 __u32 i_state; /* Dynamic state flags for ext4 */ 631 unsigned long i_state_flags; /* Dynamic state flags */
634 632
635 ext4_lblk_t i_dir_start_lookup; 633 ext4_lblk_t i_dir_start_lookup;
636#ifdef CONFIG_EXT4_FS_XATTR 634#ifdef CONFIG_EXT4_FS_XATTR
@@ -708,8 +706,9 @@ struct ext4_inode_info {
708 qsize_t i_reserved_quota; 706 qsize_t i_reserved_quota;
709#endif 707#endif
710 708
711 /* completed async DIOs that might need unwritten extents handling */ 709 /* completed IOs that might need unwritten extents handling */
712 struct list_head i_aio_dio_complete_list; 710 struct list_head i_completed_io_list;
711 spinlock_t i_completed_io_lock;
713 /* current io_end structure for async DIO write*/ 712 /* current io_end structure for async DIO write*/
714 ext4_io_end_t *cur_aio_dio; 713 ext4_io_end_t *cur_aio_dio;
715 714
@@ -760,6 +759,7 @@ struct ext4_inode_info {
760#define EXT4_MOUNT_QUOTA 0x80000 /* Some quota option set */ 759#define EXT4_MOUNT_QUOTA 0x80000 /* Some quota option set */
761#define EXT4_MOUNT_USRQUOTA 0x100000 /* "old" user quota */ 760#define EXT4_MOUNT_USRQUOTA 0x100000 /* "old" user quota */
762#define EXT4_MOUNT_GRPQUOTA 0x200000 /* "old" group quota */ 761#define EXT4_MOUNT_GRPQUOTA 0x200000 /* "old" group quota */
762#define EXT4_MOUNT_DIOREAD_NOLOCK 0x400000 /* Enable support for dio read nolocking */
763#define EXT4_MOUNT_JOURNAL_CHECKSUM 0x800000 /* Journal checksums */ 763#define EXT4_MOUNT_JOURNAL_CHECKSUM 0x800000 /* Journal checksums */
764#define EXT4_MOUNT_JOURNAL_ASYNC_COMMIT 0x1000000 /* Journal Async Commit */ 764#define EXT4_MOUNT_JOURNAL_ASYNC_COMMIT 0x1000000 /* Journal Async Commit */
765#define EXT4_MOUNT_I_VERSION 0x2000000 /* i_version support */ 765#define EXT4_MOUNT_I_VERSION 0x2000000 /* i_version support */
@@ -1014,7 +1014,7 @@ struct ext4_sb_info {
1014 atomic_t s_lock_busy; 1014 atomic_t s_lock_busy;
1015 1015
1016 /* locality groups */ 1016 /* locality groups */
1017 struct ext4_locality_group *s_locality_groups; 1017 struct ext4_locality_group __percpu *s_locality_groups;
1018 1018
1019 /* for write statistics */ 1019 /* for write statistics */
1020 unsigned long s_sectors_written_start; 1020 unsigned long s_sectors_written_start;
@@ -1050,6 +1050,34 @@ static inline int ext4_valid_inum(struct super_block *sb, unsigned long ino)
1050 (ino >= EXT4_FIRST_INO(sb) && 1050 (ino >= EXT4_FIRST_INO(sb) &&
1051 ino <= le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count)); 1051 ino <= le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count));
1052} 1052}
1053
1054/*
1055 * Inode dynamic state flags
1056 */
1057enum {
1058 EXT4_STATE_JDATA, /* journaled data exists */
1059 EXT4_STATE_NEW, /* inode is newly created */
1060 EXT4_STATE_XATTR, /* has in-inode xattrs */
1061 EXT4_STATE_NO_EXPAND, /* No space for expansion */
1062 EXT4_STATE_DA_ALLOC_CLOSE, /* Alloc DA blks on close */
1063 EXT4_STATE_EXT_MIGRATE, /* Inode is migrating */
1064 EXT4_STATE_DIO_UNWRITTEN, /* need convert on dio done*/
1065};
1066
1067static inline int ext4_test_inode_state(struct inode *inode, int bit)
1068{
1069 return test_bit(bit, &EXT4_I(inode)->i_state_flags);
1070}
1071
1072static inline void ext4_set_inode_state(struct inode *inode, int bit)
1073{
1074 set_bit(bit, &EXT4_I(inode)->i_state_flags);
1075}
1076
1077static inline void ext4_clear_inode_state(struct inode *inode, int bit)
1078{
1079 clear_bit(bit, &EXT4_I(inode)->i_state_flags);
1080}
1053#else 1081#else
1054/* Assume that user mode programs are passing in an ext4fs superblock, not 1082/* Assume that user mode programs are passing in an ext4fs superblock, not
1055 * a kernel struct super_block. This will allow us to call the feature-test 1083 * a kernel struct super_block. This will allow us to call the feature-test
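Call sites convert from open-coded mask operations on the old i_state word to
these helpers, which use atomic bitops on i_state_flags and so need no extra
locking. A before/after sketch (assumed call sites, not shown in this hunk):

    /* old: EXT4_I(inode)->i_state |= EXT4_STATE_NEW;
     * new: ext4_set_inode_state(inode, EXT4_STATE_NEW);
     *
     * old: if (EXT4_I(inode)->i_state & EXT4_STATE_JDATA)
     * new: if (ext4_test_inode_state(inode, EXT4_STATE_JDATA))
     */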
@@ -1126,6 +1154,8 @@ static inline int ext4_valid_inum(struct super_block *sb, unsigned long ino)
1126#define EXT4_FEATURE_INCOMPAT_64BIT 0x0080 1154#define EXT4_FEATURE_INCOMPAT_64BIT 0x0080
1127#define EXT4_FEATURE_INCOMPAT_MMP 0x0100 1155#define EXT4_FEATURE_INCOMPAT_MMP 0x0100
1128#define EXT4_FEATURE_INCOMPAT_FLEX_BG 0x0200 1156#define EXT4_FEATURE_INCOMPAT_FLEX_BG 0x0200
1157#define EXT4_FEATURE_INCOMPAT_EA_INODE 0x0400 /* EA in inode */
1158#define EXT4_FEATURE_INCOMPAT_DIRDATA 0x1000 /* data in dirent */
1129 1159
1130#define EXT4_FEATURE_COMPAT_SUPP EXT2_FEATURE_COMPAT_EXT_ATTR 1160#define EXT4_FEATURE_COMPAT_SUPP EXT2_FEATURE_COMPAT_EXT_ATTR
1131#define EXT4_FEATURE_INCOMPAT_SUPP (EXT4_FEATURE_INCOMPAT_FILETYPE| \ 1161#define EXT4_FEATURE_INCOMPAT_SUPP (EXT4_FEATURE_INCOMPAT_FILETYPE| \
@@ -1416,7 +1446,7 @@ int ext4_get_block(struct inode *inode, sector_t iblock,
1416 struct buffer_head *bh_result, int create); 1446 struct buffer_head *bh_result, int create);
1417 1447
1418extern struct inode *ext4_iget(struct super_block *, unsigned long); 1448extern struct inode *ext4_iget(struct super_block *, unsigned long);
1419extern int ext4_write_inode(struct inode *, int); 1449extern int ext4_write_inode(struct inode *, struct writeback_control *);
1420extern int ext4_setattr(struct dentry *, struct iattr *); 1450extern int ext4_setattr(struct dentry *, struct iattr *);
1421extern int ext4_getattr(struct vfsmount *mnt, struct dentry *dentry, 1451extern int ext4_getattr(struct vfsmount *mnt, struct dentry *dentry,
1422 struct kstat *stat); 1452 struct kstat *stat);
@@ -1439,7 +1469,7 @@ extern int ext4_block_truncate_page(handle_t *handle,
1439 struct address_space *mapping, loff_t from); 1469 struct address_space *mapping, loff_t from);
1440extern int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf); 1470extern int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf);
1441extern qsize_t *ext4_get_reserved_space(struct inode *inode); 1471extern qsize_t *ext4_get_reserved_space(struct inode *inode);
1442extern int flush_aio_dio_completed_IO(struct inode *inode); 1472extern int flush_completed_IO(struct inode *inode);
1443extern void ext4_da_update_reserve_space(struct inode *inode, 1473extern void ext4_da_update_reserve_space(struct inode *inode,
1444 int used, int quota_claim); 1474 int used, int quota_claim);
1445/* ioctl.c */ 1475/* ioctl.c */
@@ -1465,13 +1495,20 @@ extern int ext4_group_extend(struct super_block *sb,
1465 ext4_fsblk_t n_blocks_count); 1495 ext4_fsblk_t n_blocks_count);
1466 1496
1467/* super.c */ 1497/* super.c */
1468extern void ext4_error(struct super_block *, const char *, const char *, ...) 1498extern void __ext4_error(struct super_block *, const char *, const char *, ...)
1499 __attribute__ ((format (printf, 3, 4)));
1500#define ext4_error(sb, message...) __ext4_error(sb, __func__, ## message)
1501extern void ext4_error_inode(const char *, struct inode *, const char *, ...)
1502 __attribute__ ((format (printf, 3, 4)));
1503extern void ext4_error_file(const char *, struct file *, const char *, ...)
1469 __attribute__ ((format (printf, 3, 4))); 1504 __attribute__ ((format (printf, 3, 4)));
1470extern void __ext4_std_error(struct super_block *, const char *, int); 1505extern void __ext4_std_error(struct super_block *, const char *, int);
1471extern void ext4_abort(struct super_block *, const char *, const char *, ...) 1506extern void ext4_abort(struct super_block *, const char *, const char *, ...)
1472 __attribute__ ((format (printf, 3, 4))); 1507 __attribute__ ((format (printf, 3, 4)));
1473extern void ext4_warning(struct super_block *, const char *, const char *, ...) 1508extern void __ext4_warning(struct super_block *, const char *,
1509 const char *, ...)
1474 __attribute__ ((format (printf, 3, 4))); 1510 __attribute__ ((format (printf, 3, 4)));
1511#define ext4_warning(sb, message...) __ext4_warning(sb, __func__, ## message)
1475extern void ext4_msg(struct super_block *, const char *, const char *, ...) 1512extern void ext4_msg(struct super_block *, const char *, const char *, ...)
1476 __attribute__ ((format (printf, 3, 4))); 1513 __attribute__ ((format (printf, 3, 4)));
1477extern void ext4_grp_locked_error(struct super_block *, ext4_group_t, 1514extern void ext4_grp_locked_error(struct super_block *, ext4_group_t,
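The ext4_error()/ext4_warning() rework above moves the __func__ argument into a macro so every call site below (ialloc.c, inode.c, extents.c) can drop it. A compilable user-space sketch of the mechanics — my_error and __my_error are stand-ins, and the superblock argument is omitted for brevity:

#include <stdio.h>
#include <stdarg.h>

static void __my_error(const char *func, const char *fmt, ...)
{
	va_list ap;

	fprintf(stderr, "error in %s: ", func);
	va_start(ap, fmt);
	vfprintf(stderr, fmt, ap);
	va_end(ap);
	fputc('\n', stderr);
}

/* Call sites no longer pass __func__ by hand; the macro pastes it in. */
#define my_error(message...) __my_error(__func__, ## message)

int main(void)
{
	my_error("Checksum bad for group %u", 42u);  /* "error in main: ..." */
	return 0;
}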
@@ -1744,7 +1781,7 @@ extern void ext4_ext_release(struct super_block *);
1744extern long ext4_fallocate(struct inode *inode, int mode, loff_t offset, 1781extern long ext4_fallocate(struct inode *inode, int mode, loff_t offset,
1745 loff_t len); 1782 loff_t len);
1746extern int ext4_convert_unwritten_extents(struct inode *inode, loff_t offset, 1783extern int ext4_convert_unwritten_extents(struct inode *inode, loff_t offset,
1747 loff_t len); 1784 ssize_t len);
1748extern int ext4_get_blocks(handle_t *handle, struct inode *inode, 1785extern int ext4_get_blocks(handle_t *handle, struct inode *inode,
1749 sector_t block, unsigned int max_blocks, 1786 sector_t block, unsigned int max_blocks,
1750 struct buffer_head *bh, int flags); 1787 struct buffer_head *bh, int flags);
@@ -1756,6 +1793,15 @@ extern int ext4_move_extents(struct file *o_filp, struct file *d_filp,
1756 __u64 len, __u64 *moved_len); 1793 __u64 len, __u64 *moved_len);
1757 1794
1758 1795
1797enum ext4_state_bits {
1798 BH_Uninit /* blocks are allocated but uninitialized on disk */
1799 = BH_JBDPrivateStart,
1800};
1801
1802BUFFER_FNS(Uninit, uninit)
1803TAS_BUFFER_FNS(Uninit, uninit)
1804
1759/* 1805/*
 1760 * Add new method to test whether block and inode bitmaps are properly 1806 * Add new method to test whether block and inode bitmaps are properly
1761 * initialized. With uninit_bg reading the block from disk is not enough 1807 * initialized. With uninit_bg reading the block from disk is not enough
@@ -1773,6 +1819,8 @@ static inline void set_bitmap_uptodate(struct buffer_head *bh)
1773 set_bit(BH_BITMAP_UPTODATE, &(bh)->b_state); 1819 set_bit(BH_BITMAP_UPTODATE, &(bh)->b_state);
1774} 1820}
1775 1821
1822#define in_range(b, first, len) ((b) >= (first) && (b) <= (first) + (len) - 1)
1823
1776#endif /* __KERNEL__ */ 1824#endif /* __KERNEL__ */
1777 1825
1778#endif /* _EXT4_H */ 1826#endif /* _EXT4_H */
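The in_range() macro added above tests membership in the closed interval [first, first + len - 1]; the extents.c hunks below use it to replace two open-coded comparisons in ext4_ext_in_cache() and ext4_ext_get_blocks(). A tiny user-space demo, with the macro body copied verbatim from the hunk:

#include <assert.h>

#define in_range(b, first, len) ((b) >= (first) && (b) <= (first) + (len) - 1)

int main(void)
{
	/* an extent starting at logical block 100, 8 blocks long: 100..107 */
	assert(in_range(100, 100, 8));
	assert(in_range(107, 100, 8));
	assert(!in_range(108, 100, 8));
	return 0;
}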
diff --git a/fs/ext4/ext4_jbd2.c b/fs/ext4/ext4_jbd2.c
index b57e5c711b6d..53d2764d71ca 100644
--- a/fs/ext4/ext4_jbd2.c
+++ b/fs/ext4/ext4_jbd2.c
@@ -125,14 +125,14 @@ int __ext4_handle_dirty_metadata(const char *where, handle_t *handle,
125 ext4_journal_abort_handle(where, __func__, bh, 125 ext4_journal_abort_handle(where, __func__, bh,
126 handle, err); 126 handle, err);
127 } else { 127 } else {
128 if (inode && bh) 128 if (inode)
129 mark_buffer_dirty_inode(bh, inode); 129 mark_buffer_dirty_inode(bh, inode);
130 else 130 else
131 mark_buffer_dirty(bh); 131 mark_buffer_dirty(bh);
132 if (inode && inode_needs_sync(inode)) { 132 if (inode && inode_needs_sync(inode)) {
133 sync_dirty_buffer(bh); 133 sync_dirty_buffer(bh);
134 if (buffer_req(bh) && !buffer_uptodate(bh)) { 134 if (buffer_req(bh) && !buffer_uptodate(bh)) {
135 ext4_error(inode->i_sb, __func__, 135 ext4_error(inode->i_sb,
136 "IO error syncing inode, " 136 "IO error syncing inode, "
137 "inode=%lu, block=%llu", 137 "inode=%lu, block=%llu",
138 inode->i_ino, 138 inode->i_ino,
diff --git a/fs/ext4/ext4_jbd2.h b/fs/ext4/ext4_jbd2.h
index 05eca817d704..b79ad5126468 100644
--- a/fs/ext4/ext4_jbd2.h
+++ b/fs/ext4/ext4_jbd2.h
@@ -304,4 +304,28 @@ static inline int ext4_should_writeback_data(struct inode *inode)
304 return 0; 304 return 0;
305} 305}
306 306
307/*
308 * This function controls whether or not we should try to go down the
309 * dioread_nolock code paths, which makes it safe to avoid taking
310 * i_mutex for direct I/O reads. This only works for extent-based
311 * files, and it doesn't work for nobh or if data journaling is
312 * enabled, since the dioread_nolock code uses b_private to pass
313 * information back to the I/O completion handler, and this conflicts
314 * with the jbd's use of b_private.
315 */
316static inline int ext4_should_dioread_nolock(struct inode *inode)
317{
318 if (!test_opt(inode->i_sb, DIOREAD_NOLOCK))
319 return 0;
320 if (test_opt(inode->i_sb, NOBH))
321 return 0;
322 if (!S_ISREG(inode->i_mode))
323 return 0;
324 if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL))
325 return 0;
326 if (ext4_should_journal_data(inode))
327 return 0;
328 return 1;
329}
330
307#endif /* _EXT4_JBD2_H */ 331#endif /* _EXT4_JBD2_H */
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 765a4826b118..94c8ee81f5e1 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -195,8 +195,7 @@ static ext4_fsblk_t ext4_ext_find_goal(struct inode *inode,
195 if (S_ISREG(inode->i_mode)) 195 if (S_ISREG(inode->i_mode))
196 block_group++; 196 block_group++;
197 } 197 }
198 bg_start = (block_group * EXT4_BLOCKS_PER_GROUP(inode->i_sb)) + 198 bg_start = ext4_group_first_block_no(inode->i_sb, block_group);
199 le32_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_first_data_block);
200 last_block = ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es) - 1; 199 last_block = ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es) - 1;
201 200
202 /* 201 /*
@@ -440,7 +439,7 @@ static int __ext4_ext_check(const char *function, struct inode *inode,
440 return 0; 439 return 0;
441 440
442corrupted: 441corrupted:
443 ext4_error(inode->i_sb, function, 442 __ext4_error(inode->i_sb, function,
444 "bad header/extent in inode #%lu: %s - magic %x, " 443 "bad header/extent in inode #%lu: %s - magic %x, "
445 "entries %u, max %u(%u), depth %u(%u)", 444 "entries %u, max %u(%u), depth %u(%u)",
446 inode->i_ino, error_msg, le16_to_cpu(eh->eh_magic), 445 inode->i_ino, error_msg, le16_to_cpu(eh->eh_magic),
@@ -703,7 +702,12 @@ ext4_ext_find_extent(struct inode *inode, ext4_lblk_t block,
703 } 702 }
704 eh = ext_block_hdr(bh); 703 eh = ext_block_hdr(bh);
705 ppos++; 704 ppos++;
706 BUG_ON(ppos > depth); 705 if (unlikely(ppos > depth)) {
706 put_bh(bh);
707 EXT4_ERROR_INODE(inode,
708 "ppos %d > depth %d", ppos, depth);
709 goto err;
710 }
707 path[ppos].p_bh = bh; 711 path[ppos].p_bh = bh;
708 path[ppos].p_hdr = eh; 712 path[ppos].p_hdr = eh;
709 i--; 713 i--;
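This hunk sets the pattern for the rest of extents.c: a tree inconsistency that previously took the whole machine down via BUG_ON is reported through EXT4_ERROR_INODE and propagated as -EIO instead, with any held buffer reference dropped first. A minimal user-space sketch of that shape, with EXT4_ERROR_INODE stubbed to fprintf (the real macro routes through the ext4_error_inode() declared in the ext4.h hunk above):

#include <stdio.h>
#include <errno.h>

/* stub: the kernel macro also logs the reporting function's name */
#define EXT4_ERROR_INODE(inode, fmt, ...) \
	fprintf(stderr, "EXT4-fs error (inode %lu): " fmt "\n", \
		(inode)->i_ino, ##__VA_ARGS__)

struct toy_inode { unsigned long i_ino; };

static int check_depth(struct toy_inode *inode, int ppos, int depth)
{
	/* before: BUG_ON(ppos > depth) -- corruption panicked the box */
	if (ppos > depth) {
		EXT4_ERROR_INODE(inode, "ppos %d > depth %d", ppos, depth);
		return -EIO;	/* after: caller unwinds, fs marked in error */
	}
	return 0;
}

int main(void)
{
	struct toy_inode ino = { .i_ino = 12 };

	return check_depth(&ino, 3, 2) ? 1 : 0;
}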
@@ -749,7 +753,12 @@ int ext4_ext_insert_index(handle_t *handle, struct inode *inode,
749 if (err) 753 if (err)
750 return err; 754 return err;
751 755
752 BUG_ON(logical == le32_to_cpu(curp->p_idx->ei_block)); 756 if (unlikely(logical == le32_to_cpu(curp->p_idx->ei_block))) {
757 EXT4_ERROR_INODE(inode,
758 "logical %d == ei_block %d!",
759 logical, le32_to_cpu(curp->p_idx->ei_block));
760 return -EIO;
761 }
753 len = EXT_MAX_INDEX(curp->p_hdr) - curp->p_idx; 762 len = EXT_MAX_INDEX(curp->p_hdr) - curp->p_idx;
754 if (logical > le32_to_cpu(curp->p_idx->ei_block)) { 763 if (logical > le32_to_cpu(curp->p_idx->ei_block)) {
755 /* insert after */ 764 /* insert after */
@@ -779,9 +788,17 @@ int ext4_ext_insert_index(handle_t *handle, struct inode *inode,
779 ext4_idx_store_pblock(ix, ptr); 788 ext4_idx_store_pblock(ix, ptr);
780 le16_add_cpu(&curp->p_hdr->eh_entries, 1); 789 le16_add_cpu(&curp->p_hdr->eh_entries, 1);
781 790
782 BUG_ON(le16_to_cpu(curp->p_hdr->eh_entries) 791 if (unlikely(le16_to_cpu(curp->p_hdr->eh_entries)
783 > le16_to_cpu(curp->p_hdr->eh_max)); 792 > le16_to_cpu(curp->p_hdr->eh_max))) {
784 BUG_ON(ix > EXT_LAST_INDEX(curp->p_hdr)); 793 EXT4_ERROR_INODE(inode,
794 "logical %d == ei_block %d!",
795 logical, le32_to_cpu(curp->p_idx->ei_block));
796 return -EIO;
797 }
798 if (unlikely(ix > EXT_LAST_INDEX(curp->p_hdr))) {
799 EXT4_ERROR_INODE(inode, "ix > EXT_LAST_INDEX!");
800 return -EIO;
801 }
785 802
786 err = ext4_ext_dirty(handle, inode, curp); 803 err = ext4_ext_dirty(handle, inode, curp);
787 ext4_std_error(inode->i_sb, err); 804 ext4_std_error(inode->i_sb, err);
@@ -819,7 +836,10 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
819 836
820 /* if current leaf will be split, then we should use 837 /* if current leaf will be split, then we should use
821 * border from split point */ 838 * border from split point */
822 BUG_ON(path[depth].p_ext > EXT_MAX_EXTENT(path[depth].p_hdr)); 839 if (unlikely(path[depth].p_ext > EXT_MAX_EXTENT(path[depth].p_hdr))) {
840 EXT4_ERROR_INODE(inode, "p_ext > EXT_MAX_EXTENT!");
841 return -EIO;
842 }
823 if (path[depth].p_ext != EXT_MAX_EXTENT(path[depth].p_hdr)) { 843 if (path[depth].p_ext != EXT_MAX_EXTENT(path[depth].p_hdr)) {
824 border = path[depth].p_ext[1].ee_block; 844 border = path[depth].p_ext[1].ee_block;
825 ext_debug("leaf will be split." 845 ext_debug("leaf will be split."
@@ -860,7 +880,11 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
860 880
861 /* initialize new leaf */ 881 /* initialize new leaf */
862 newblock = ablocks[--a]; 882 newblock = ablocks[--a];
863 BUG_ON(newblock == 0); 883 if (unlikely(newblock == 0)) {
884 EXT4_ERROR_INODE(inode, "newblock == 0!");
885 err = -EIO;
886 goto cleanup;
887 }
864 bh = sb_getblk(inode->i_sb, newblock); 888 bh = sb_getblk(inode->i_sb, newblock);
865 if (!bh) { 889 if (!bh) {
866 err = -EIO; 890 err = -EIO;
@@ -880,7 +904,14 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
880 ex = EXT_FIRST_EXTENT(neh); 904 ex = EXT_FIRST_EXTENT(neh);
881 905
882 /* move remainder of path[depth] to the new leaf */ 906 /* move remainder of path[depth] to the new leaf */
883 BUG_ON(path[depth].p_hdr->eh_entries != path[depth].p_hdr->eh_max); 907 if (unlikely(path[depth].p_hdr->eh_entries !=
908 path[depth].p_hdr->eh_max)) {
909 EXT4_ERROR_INODE(inode, "eh_entries %d != eh_max %d!",
 910 le16_to_cpu(path[depth].p_hdr->eh_entries),
 911 le16_to_cpu(path[depth].p_hdr->eh_max));
912 err = -EIO;
913 goto cleanup;
914 }
884 /* start copy from next extent */ 915 /* start copy from next extent */
885 /* TODO: we could do it by single memmove */ 916 /* TODO: we could do it by single memmove */
886 m = 0; 917 m = 0;
@@ -927,7 +958,11 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
927 958
928 /* create intermediate indexes */ 959 /* create intermediate indexes */
929 k = depth - at - 1; 960 k = depth - at - 1;
930 BUG_ON(k < 0); 961 if (unlikely(k < 0)) {
962 EXT4_ERROR_INODE(inode, "k %d < 0!", k);
963 err = -EIO;
964 goto cleanup;
965 }
931 if (k) 966 if (k)
932 ext_debug("create %d intermediate indices\n", k); 967 ext_debug("create %d intermediate indices\n", k);
933 /* insert new index into current index block */ 968 /* insert new index into current index block */
@@ -964,8 +999,14 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
964 999
965 ext_debug("cur 0x%p, last 0x%p\n", path[i].p_idx, 1000 ext_debug("cur 0x%p, last 0x%p\n", path[i].p_idx,
966 EXT_MAX_INDEX(path[i].p_hdr)); 1001 EXT_MAX_INDEX(path[i].p_hdr));
967 BUG_ON(EXT_MAX_INDEX(path[i].p_hdr) != 1002 if (unlikely(EXT_MAX_INDEX(path[i].p_hdr) !=
968 EXT_LAST_INDEX(path[i].p_hdr)); 1003 EXT_LAST_INDEX(path[i].p_hdr))) {
1004 EXT4_ERROR_INODE(inode,
1005 "EXT_MAX_INDEX != EXT_LAST_INDEX ee_block %d!",
1006 le32_to_cpu(path[i].p_ext->ee_block));
1007 err = -EIO;
1008 goto cleanup;
1009 }
969 while (path[i].p_idx <= EXT_MAX_INDEX(path[i].p_hdr)) { 1010 while (path[i].p_idx <= EXT_MAX_INDEX(path[i].p_hdr)) {
970 ext_debug("%d: move %d:%llu in new index %llu\n", i, 1011 ext_debug("%d: move %d:%llu in new index %llu\n", i,
971 le32_to_cpu(path[i].p_idx->ei_block), 1012 le32_to_cpu(path[i].p_idx->ei_block),
@@ -1203,7 +1244,10 @@ ext4_ext_search_left(struct inode *inode, struct ext4_ext_path *path,
1203 struct ext4_extent *ex; 1244 struct ext4_extent *ex;
1204 int depth, ee_len; 1245 int depth, ee_len;
1205 1246
1206 BUG_ON(path == NULL); 1247 if (unlikely(path == NULL)) {
1248 EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical);
1249 return -EIO;
1250 }
1207 depth = path->p_depth; 1251 depth = path->p_depth;
1208 *phys = 0; 1252 *phys = 0;
1209 1253
@@ -1217,15 +1261,33 @@ ext4_ext_search_left(struct inode *inode, struct ext4_ext_path *path,
1217 ex = path[depth].p_ext; 1261 ex = path[depth].p_ext;
1218 ee_len = ext4_ext_get_actual_len(ex); 1262 ee_len = ext4_ext_get_actual_len(ex);
1219 if (*logical < le32_to_cpu(ex->ee_block)) { 1263 if (*logical < le32_to_cpu(ex->ee_block)) {
1220 BUG_ON(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex); 1264 if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) {
1265 EXT4_ERROR_INODE(inode,
1266 "EXT_FIRST_EXTENT != ex *logical %d ee_block %d!",
1267 *logical, le32_to_cpu(ex->ee_block));
1268 return -EIO;
1269 }
1221 while (--depth >= 0) { 1270 while (--depth >= 0) {
1222 ix = path[depth].p_idx; 1271 ix = path[depth].p_idx;
1223 BUG_ON(ix != EXT_FIRST_INDEX(path[depth].p_hdr)); 1272 if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) {
1273 EXT4_ERROR_INODE(inode,
1274 "ix (%d) != EXT_FIRST_INDEX (%d) (depth %d)!",
 1275 ix != NULL ? le32_to_cpu(ix->ei_block) : 0,
 1276 EXT_FIRST_INDEX(path[depth].p_hdr) != NULL ?
 1277 le32_to_cpu(EXT_FIRST_INDEX(path[depth].p_hdr)->ei_block) : 0,
1278 depth);
1279 return -EIO;
1280 }
1224 } 1281 }
1225 return 0; 1282 return 0;
1226 } 1283 }
1227 1284
1228 BUG_ON(*logical < (le32_to_cpu(ex->ee_block) + ee_len)); 1285 if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) {
1286 EXT4_ERROR_INODE(inode,
1287 "logical %d < ee_block %d + ee_len %d!",
1288 *logical, le32_to_cpu(ex->ee_block), ee_len);
1289 return -EIO;
1290 }
1229 1291
1230 *logical = le32_to_cpu(ex->ee_block) + ee_len - 1; 1292 *logical = le32_to_cpu(ex->ee_block) + ee_len - 1;
1231 *phys = ext_pblock(ex) + ee_len - 1; 1293 *phys = ext_pblock(ex) + ee_len - 1;
@@ -1251,7 +1313,10 @@ ext4_ext_search_right(struct inode *inode, struct ext4_ext_path *path,
1251 int depth; /* Note, NOT eh_depth; depth from top of tree */ 1313 int depth; /* Note, NOT eh_depth; depth from top of tree */
1252 int ee_len; 1314 int ee_len;
1253 1315
1254 BUG_ON(path == NULL); 1316 if (unlikely(path == NULL)) {
1317 EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical);
1318 return -EIO;
1319 }
1255 depth = path->p_depth; 1320 depth = path->p_depth;
1256 *phys = 0; 1321 *phys = 0;
1257 1322
@@ -1265,17 +1330,32 @@ ext4_ext_search_right(struct inode *inode, struct ext4_ext_path *path,
1265 ex = path[depth].p_ext; 1330 ex = path[depth].p_ext;
1266 ee_len = ext4_ext_get_actual_len(ex); 1331 ee_len = ext4_ext_get_actual_len(ex);
1267 if (*logical < le32_to_cpu(ex->ee_block)) { 1332 if (*logical < le32_to_cpu(ex->ee_block)) {
1268 BUG_ON(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex); 1333 if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) {
1334 EXT4_ERROR_INODE(inode,
1335 "first_extent(path[%d].p_hdr) != ex",
1336 depth);
1337 return -EIO;
1338 }
1269 while (--depth >= 0) { 1339 while (--depth >= 0) {
1270 ix = path[depth].p_idx; 1340 ix = path[depth].p_idx;
1271 BUG_ON(ix != EXT_FIRST_INDEX(path[depth].p_hdr)); 1341 if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) {
1342 EXT4_ERROR_INODE(inode,
1343 "ix != EXT_FIRST_INDEX *logical %d!",
1344 *logical);
1345 return -EIO;
1346 }
1272 } 1347 }
1273 *logical = le32_to_cpu(ex->ee_block); 1348 *logical = le32_to_cpu(ex->ee_block);
1274 *phys = ext_pblock(ex); 1349 *phys = ext_pblock(ex);
1275 return 0; 1350 return 0;
1276 } 1351 }
1277 1352
1278 BUG_ON(*logical < (le32_to_cpu(ex->ee_block) + ee_len)); 1353 if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) {
1354 EXT4_ERROR_INODE(inode,
1355 "logical %d < ee_block %d + ee_len %d!",
1356 *logical, le32_to_cpu(ex->ee_block), ee_len);
1357 return -EIO;
1358 }
1279 1359
1280 if (ex != EXT_LAST_EXTENT(path[depth].p_hdr)) { 1360 if (ex != EXT_LAST_EXTENT(path[depth].p_hdr)) {
1281 /* next allocated block in this leaf */ 1361 /* next allocated block in this leaf */
@@ -1414,8 +1494,12 @@ static int ext4_ext_correct_indexes(handle_t *handle, struct inode *inode,
1414 1494
1415 eh = path[depth].p_hdr; 1495 eh = path[depth].p_hdr;
1416 ex = path[depth].p_ext; 1496 ex = path[depth].p_ext;
1417 BUG_ON(ex == NULL); 1497
1418 BUG_ON(eh == NULL); 1498 if (unlikely(ex == NULL || eh == NULL)) {
1499 EXT4_ERROR_INODE(inode,
1500 "ex %p == NULL or eh %p == NULL", ex, eh);
1501 return -EIO;
1502 }
1419 1503
1420 if (depth == 0) { 1504 if (depth == 0) {
1421 /* there is no tree at all */ 1505 /* there is no tree at all */
@@ -1538,8 +1622,9 @@ int ext4_ext_try_to_merge(struct inode *inode,
1538 merge_done = 1; 1622 merge_done = 1;
1539 WARN_ON(eh->eh_entries == 0); 1623 WARN_ON(eh->eh_entries == 0);
1540 if (!eh->eh_entries) 1624 if (!eh->eh_entries)
1541 ext4_error(inode->i_sb, "ext4_ext_try_to_merge", 1625 ext4_error(inode->i_sb,
1542 "inode#%lu, eh->eh_entries = 0!", inode->i_ino); 1626 "inode#%lu, eh->eh_entries = 0!",
1627 inode->i_ino);
1543 } 1628 }
1544 1629
1545 return merge_done; 1630 return merge_done;
@@ -1612,13 +1697,19 @@ int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
1612 ext4_lblk_t next; 1697 ext4_lblk_t next;
1613 unsigned uninitialized = 0; 1698 unsigned uninitialized = 0;
1614 1699
1615 BUG_ON(ext4_ext_get_actual_len(newext) == 0); 1700 if (unlikely(ext4_ext_get_actual_len(newext) == 0)) {
1701 EXT4_ERROR_INODE(inode, "ext4_ext_get_actual_len(newext) == 0");
1702 return -EIO;
1703 }
1616 depth = ext_depth(inode); 1704 depth = ext_depth(inode);
1617 ex = path[depth].p_ext; 1705 ex = path[depth].p_ext;
1618 BUG_ON(path[depth].p_hdr == NULL); 1706 if (unlikely(path[depth].p_hdr == NULL)) {
1707 EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
1708 return -EIO;
1709 }
1619 1710
1620 /* try to insert block into found extent and return */ 1711 /* try to insert block into found extent and return */
1621 if (ex && (flag != EXT4_GET_BLOCKS_DIO_CREATE_EXT) 1712 if (ex && !(flag & EXT4_GET_BLOCKS_PRE_IO)
1622 && ext4_can_extents_be_merged(inode, ex, newext)) { 1713 && ext4_can_extents_be_merged(inode, ex, newext)) {
1623 ext_debug("append [%d]%d block to %d:[%d]%d (from %llu)\n", 1714 ext_debug("append [%d]%d block to %d:[%d]%d (from %llu)\n",
1624 ext4_ext_is_uninitialized(newext), 1715 ext4_ext_is_uninitialized(newext),
@@ -1739,7 +1830,7 @@ has_space:
1739 1830
1740merge: 1831merge:
1741 /* try to merge extents to the right */ 1832 /* try to merge extents to the right */
1742 if (flag != EXT4_GET_BLOCKS_DIO_CREATE_EXT) 1833 if (!(flag & EXT4_GET_BLOCKS_PRE_IO))
1743 ext4_ext_try_to_merge(inode, path, nearex); 1834 ext4_ext_try_to_merge(inode, path, nearex);
1744 1835
1745 /* try to merge extents to the left */ 1836 /* try to merge extents to the left */
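Both conversions in this function — at the insert fast path and at the merge step — swap an equality test on the flags word for a bitmask test. That matters because the renamed EXT4_GET_BLOCKS_PRE_IO is now combined with other get_blocks flags (mpage_da_map_blocks below ORs EXT4_GET_BLOCKS_IO_CREATE_EXT into EXT4_GET_BLOCKS_CREATE), so an exact-match check would silently miss it. A two-assert demo with illustrative flag values, not the real EXT4_GET_BLOCKS_* constants:

#include <assert.h>

#define GB_CREATE 0x01	/* illustrative values only */
#define GB_PRE_IO 0x08

int main(void)
{
	int flags = GB_CREATE | GB_PRE_IO;

	assert(flags != GB_PRE_IO);	/* old test: misses combined flags */
	assert(flags & GB_PRE_IO);	/* new test: still sees PRE_IO     */
	return 0;
}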
@@ -1787,7 +1878,11 @@ int ext4_ext_walk_space(struct inode *inode, ext4_lblk_t block,
1787 } 1878 }
1788 1879
1789 depth = ext_depth(inode); 1880 depth = ext_depth(inode);
1790 BUG_ON(path[depth].p_hdr == NULL); 1881 if (unlikely(path[depth].p_hdr == NULL)) {
1882 EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
1883 err = -EIO;
1884 break;
1885 }
1791 ex = path[depth].p_ext; 1886 ex = path[depth].p_ext;
1792 next = ext4_ext_next_allocated_block(path); 1887 next = ext4_ext_next_allocated_block(path);
1793 1888
@@ -1838,7 +1933,11 @@ int ext4_ext_walk_space(struct inode *inode, ext4_lblk_t block,
1838 cbex.ec_type = EXT4_EXT_CACHE_EXTENT; 1933 cbex.ec_type = EXT4_EXT_CACHE_EXTENT;
1839 } 1934 }
1840 1935
1841 BUG_ON(cbex.ec_len == 0); 1936 if (unlikely(cbex.ec_len == 0)) {
1937 EXT4_ERROR_INODE(inode, "cbex.ec_len == 0");
1938 err = -EIO;
1939 break;
1940 }
1842 err = func(inode, path, &cbex, ex, cbdata); 1941 err = func(inode, path, &cbex, ex, cbdata);
1843 ext4_ext_drop_refs(path); 1942 ext4_ext_drop_refs(path);
1844 1943
@@ -1952,7 +2051,7 @@ ext4_ext_in_cache(struct inode *inode, ext4_lblk_t block,
1952 2051
1953 BUG_ON(cex->ec_type != EXT4_EXT_CACHE_GAP && 2052 BUG_ON(cex->ec_type != EXT4_EXT_CACHE_GAP &&
1954 cex->ec_type != EXT4_EXT_CACHE_EXTENT); 2053 cex->ec_type != EXT4_EXT_CACHE_EXTENT);
1955 if (block >= cex->ec_block && block < cex->ec_block + cex->ec_len) { 2054 if (in_range(block, cex->ec_block, cex->ec_len)) {
1956 ex->ee_block = cpu_to_le32(cex->ec_block); 2055 ex->ee_block = cpu_to_le32(cex->ec_block);
1957 ext4_ext_store_pblock(ex, cex->ec_start); 2056 ext4_ext_store_pblock(ex, cex->ec_start);
1958 ex->ee_len = cpu_to_le16(cex->ec_len); 2057 ex->ee_len = cpu_to_le16(cex->ec_len);
@@ -1981,7 +2080,10 @@ static int ext4_ext_rm_idx(handle_t *handle, struct inode *inode,
1981 /* free index block */ 2080 /* free index block */
1982 path--; 2081 path--;
1983 leaf = idx_pblock(path->p_idx); 2082 leaf = idx_pblock(path->p_idx);
1984 BUG_ON(path->p_hdr->eh_entries == 0); 2083 if (unlikely(path->p_hdr->eh_entries == 0)) {
2084 EXT4_ERROR_INODE(inode, "path->p_hdr->eh_entries == 0");
2085 return -EIO;
2086 }
1985 err = ext4_ext_get_access(handle, inode, path); 2087 err = ext4_ext_get_access(handle, inode, path);
1986 if (err) 2088 if (err)
1987 return err; 2089 return err;
@@ -2119,8 +2221,10 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
2119 if (!path[depth].p_hdr) 2221 if (!path[depth].p_hdr)
2120 path[depth].p_hdr = ext_block_hdr(path[depth].p_bh); 2222 path[depth].p_hdr = ext_block_hdr(path[depth].p_bh);
2121 eh = path[depth].p_hdr; 2223 eh = path[depth].p_hdr;
2122 BUG_ON(eh == NULL); 2224 if (unlikely(path[depth].p_hdr == NULL)) {
2123 2225 EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
2226 return -EIO;
2227 }
2124 /* find where to start removing */ 2228 /* find where to start removing */
2125 ex = EXT_LAST_EXTENT(eh); 2229 ex = EXT_LAST_EXTENT(eh);
2126 2230
@@ -2983,7 +3087,7 @@ fix_extent_len:
2983 ext4_ext_dirty(handle, inode, path + depth); 3087 ext4_ext_dirty(handle, inode, path + depth);
2984 return err; 3088 return err;
2985} 3089}
2986static int ext4_convert_unwritten_extents_dio(handle_t *handle, 3090static int ext4_convert_unwritten_extents_endio(handle_t *handle,
2987 struct inode *inode, 3091 struct inode *inode,
2988 struct ext4_ext_path *path) 3092 struct ext4_ext_path *path)
2989{ 3093{
@@ -3063,8 +3167,8 @@ ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode,
3063 flags, allocated); 3167 flags, allocated);
3064 ext4_ext_show_leaf(inode, path); 3168 ext4_ext_show_leaf(inode, path);
3065 3169
3066 /* DIO get_block() before submit the IO, split the extent */ 3170 /* get_block() before submit the IO, split the extent */
3067 if (flags == EXT4_GET_BLOCKS_DIO_CREATE_EXT) { 3171 if ((flags & EXT4_GET_BLOCKS_PRE_IO)) {
3068 ret = ext4_split_unwritten_extents(handle, 3172 ret = ext4_split_unwritten_extents(handle,
3069 inode, path, iblock, 3173 inode, path, iblock,
3070 max_blocks, flags); 3174 max_blocks, flags);
@@ -3074,14 +3178,16 @@ ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode,
3074 * completed 3178 * completed
3075 */ 3179 */
3076 if (io) 3180 if (io)
3077 io->flag = DIO_AIO_UNWRITTEN; 3181 io->flag = EXT4_IO_UNWRITTEN;
3078 else 3182 else
3079 EXT4_I(inode)->i_state |= EXT4_STATE_DIO_UNWRITTEN; 3183 ext4_set_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN);
3184 if (ext4_should_dioread_nolock(inode))
3185 set_buffer_uninit(bh_result);
3080 goto out; 3186 goto out;
3081 } 3187 }
3082 /* async DIO end_io complete, convert the filled extent to written */ 3188 /* IO end_io complete, convert the filled extent to written */
3083 if (flags == EXT4_GET_BLOCKS_DIO_CONVERT_EXT) { 3189 if ((flags & EXT4_GET_BLOCKS_CONVERT)) {
3084 ret = ext4_convert_unwritten_extents_dio(handle, inode, 3190 ret = ext4_convert_unwritten_extents_endio(handle, inode,
3085 path); 3191 path);
3086 if (ret >= 0) 3192 if (ret >= 0)
3087 ext4_update_inode_fsync_trans(handle, inode, 1); 3193 ext4_update_inode_fsync_trans(handle, inode, 1);
@@ -3185,7 +3291,7 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
3185{ 3291{
3186 struct ext4_ext_path *path = NULL; 3292 struct ext4_ext_path *path = NULL;
3187 struct ext4_extent_header *eh; 3293 struct ext4_extent_header *eh;
3188 struct ext4_extent newex, *ex; 3294 struct ext4_extent newex, *ex, *last_ex;
3189 ext4_fsblk_t newblock; 3295 ext4_fsblk_t newblock;
3190 int err = 0, depth, ret, cache_type; 3296 int err = 0, depth, ret, cache_type;
3191 unsigned int allocated = 0; 3297 unsigned int allocated = 0;
@@ -3237,10 +3343,10 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
3237 * this situation is possible, though, _during_ tree modification; 3343 * this situation is possible, though, _during_ tree modification;
3238 * this is why assert can't be put in ext4_ext_find_extent() 3344 * this is why assert can't be put in ext4_ext_find_extent()
3239 */ 3345 */
3240 if (path[depth].p_ext == NULL && depth != 0) { 3346 if (unlikely(path[depth].p_ext == NULL && depth != 0)) {
3241 ext4_error(inode->i_sb, __func__, "bad extent address " 3347 EXT4_ERROR_INODE(inode, "bad extent address "
3242 "inode: %lu, iblock: %d, depth: %d", 3348 "iblock: %d, depth: %d pblock %lld",
3243 inode->i_ino, iblock, depth); 3349 iblock, depth, path[depth].p_block);
3244 err = -EIO; 3350 err = -EIO;
3245 goto out2; 3351 goto out2;
3246 } 3352 }
@@ -3258,7 +3364,7 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
3258 */ 3364 */
3259 ee_len = ext4_ext_get_actual_len(ex); 3365 ee_len = ext4_ext_get_actual_len(ex);
3260 /* if found extent covers block, simply return it */ 3366 /* if found extent covers block, simply return it */
3261 if (iblock >= ee_block && iblock < ee_block + ee_len) { 3367 if (in_range(iblock, ee_block, ee_len)) {
3262 newblock = iblock - ee_block + ee_start; 3368 newblock = iblock - ee_block + ee_start;
3263 /* number of remaining blocks in the extent */ 3369 /* number of remaining blocks in the extent */
3264 allocated = ee_len - (iblock - ee_block); 3370 allocated = ee_len - (iblock - ee_block);
@@ -3350,21 +3456,35 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
3350 if (flags & EXT4_GET_BLOCKS_UNINIT_EXT){ 3456 if (flags & EXT4_GET_BLOCKS_UNINIT_EXT){
3351 ext4_ext_mark_uninitialized(&newex); 3457 ext4_ext_mark_uninitialized(&newex);
3352 /* 3458 /*
3353 * io_end structure was created for every async 3459 * io_end structure was created for every IO write to an
 3354 * direct IO write to the middle of the file. 3460 * uninitialized extent. To avoid unnecessary conversion,
3355 * To avoid unecessary convertion for every aio dio rewrite 3461 * here we flag the IO that really needs the conversion.
3356 * to the mid of file, here we flag the IO that is really
3357 * need the convertion.
 3358 * For the non-async direct IO case, flag the inode state 3462 * For the non-async direct IO case, flag the inode state
 3359 * that we need to perform conversion when IO is done. 3463 * that we need to perform conversion when IO is done.
3360 */ 3464 */
3361 if (flags == EXT4_GET_BLOCKS_DIO_CREATE_EXT) { 3465 if ((flags & EXT4_GET_BLOCKS_PRE_IO)) {
3362 if (io) 3466 if (io)
3363 io->flag = DIO_AIO_UNWRITTEN; 3467 io->flag = EXT4_IO_UNWRITTEN;
3364 else 3468 else
3365 EXT4_I(inode)->i_state |= 3469 ext4_set_inode_state(inode,
3366 EXT4_STATE_DIO_UNWRITTEN;; 3470 EXT4_STATE_DIO_UNWRITTEN);
3471 }
3472 if (ext4_should_dioread_nolock(inode))
3473 set_buffer_uninit(bh_result);
3474 }
3475
3476 if (unlikely(EXT4_I(inode)->i_flags & EXT4_EOFBLOCKS_FL)) {
3477 if (unlikely(!eh->eh_entries)) {
3478 EXT4_ERROR_INODE(inode,
3479 "eh->eh_entries == 0 ee_block %d",
 3480 le32_to_cpu(ex->ee_block));
3481 err = -EIO;
3482 goto out2;
3367 } 3483 }
3484 last_ex = EXT_LAST_EXTENT(eh);
3485 if (iblock + ar.len > le32_to_cpu(last_ex->ee_block)
3486 + ext4_ext_get_actual_len(last_ex))
3487 EXT4_I(inode)->i_flags &= ~EXT4_EOFBLOCKS_FL;
3368 } 3488 }
3369 err = ext4_ext_insert_extent(handle, inode, path, &newex, flags); 3489 err = ext4_ext_insert_extent(handle, inode, path, &newex, flags);
3370 if (err) { 3490 if (err) {
@@ -3499,6 +3619,13 @@ static void ext4_falloc_update_inode(struct inode *inode,
3499 i_size_write(inode, new_size); 3619 i_size_write(inode, new_size);
3500 if (new_size > EXT4_I(inode)->i_disksize) 3620 if (new_size > EXT4_I(inode)->i_disksize)
3501 ext4_update_i_disksize(inode, new_size); 3621 ext4_update_i_disksize(inode, new_size);
3622 } else {
3623 /*
3624 * Mark that we allocate beyond EOF so the subsequent truncate
3625 * can proceed even if the new size is the same as i_size.
3626 */
3627 if (new_size > i_size_read(inode))
3628 EXT4_I(inode)->i_flags |= EXT4_EOFBLOCKS_FL;
3502 } 3629 }
3503 3630
3504} 3631}
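Taken together with the EOFBLOCKS_FL hunk in ext4_ext_get_blocks() above, this gives the flag a simple life cycle: set when fallocate leaves allocated blocks past i_size (the KEEP_SIZE case), cleared once an allocation reaches past the last extent. A toy user-space state machine under that reading — the flag value and field names are illustrative, not the kernel's:

#include <stdio.h>

#define EOFBLOCKS_FL 0x1	/* illustrative */

struct toy_inode { unsigned flags; long long i_size, alloc_end; };

/* fallocate(KEEP_SIZE) past EOF: blocks exist beyond i_size, flag it */
static void falloc_keep_size(struct toy_inode *i, long long new_end)
{
	i->alloc_end = new_end;
	if (new_end > i->i_size)
		i->flags |= EOFBLOCKS_FL;
}

/* an allocation reaching past the last extent clears the flag again */
static void write_past_last_extent(struct toy_inode *i, long long end)
{
	i->i_size = end;
	if (end > i->alloc_end)
		i->flags &= ~EOFBLOCKS_FL;
}

int main(void)
{
	struct toy_inode ino = { 0, 4096, 4096 };

	falloc_keep_size(&ino, 1048576);
	printf("after fallocate: flags=%x\n", ino.flags);	/* 1 */
	write_past_last_extent(&ino, 2097152);
	printf("after write:     flags=%x\n", ino.flags);	/* 0 */
	return 0;
}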
@@ -3603,7 +3730,7 @@ retry:
3603 * Returns 0 on success. 3730 * Returns 0 on success.
3604 */ 3731 */
3605int ext4_convert_unwritten_extents(struct inode *inode, loff_t offset, 3732int ext4_convert_unwritten_extents(struct inode *inode, loff_t offset,
3606 loff_t len) 3733 ssize_t len)
3607{ 3734{
3608 handle_t *handle; 3735 handle_t *handle;
3609 ext4_lblk_t block; 3736 ext4_lblk_t block;
@@ -3635,7 +3762,7 @@ int ext4_convert_unwritten_extents(struct inode *inode, loff_t offset,
3635 map_bh.b_state = 0; 3762 map_bh.b_state = 0;
3636 ret = ext4_get_blocks(handle, inode, block, 3763 ret = ext4_get_blocks(handle, inode, block,
3637 max_blocks, &map_bh, 3764 max_blocks, &map_bh,
3638 EXT4_GET_BLOCKS_DIO_CONVERT_EXT); 3765 EXT4_GET_BLOCKS_IO_CONVERT_EXT);
3639 if (ret <= 0) { 3766 if (ret <= 0) {
3640 WARN_ON(ret <= 0); 3767 WARN_ON(ret <= 0);
3641 printk(KERN_ERR "%s: ext4_ext_get_blocks " 3768 printk(KERN_ERR "%s: ext4_ext_get_blocks "
@@ -3739,7 +3866,7 @@ static int ext4_xattr_fiemap(struct inode *inode,
3739 int error = 0; 3866 int error = 0;
3740 3867
3741 /* in-inode? */ 3868 /* in-inode? */
3742 if (EXT4_I(inode)->i_state & EXT4_STATE_XATTR) { 3869 if (ext4_test_inode_state(inode, EXT4_STATE_XATTR)) {
3743 struct ext4_iloc iloc; 3870 struct ext4_iloc iloc;
3744 int offset; /* offset of xattr in inode */ 3871 int offset; /* offset of xattr in inode */
3745 3872
@@ -3767,7 +3894,6 @@ int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
3767 __u64 start, __u64 len) 3894 __u64 start, __u64 len)
3768{ 3895{
3769 ext4_lblk_t start_blk; 3896 ext4_lblk_t start_blk;
3770 ext4_lblk_t len_blks;
3771 int error = 0; 3897 int error = 0;
3772 3898
3773 /* fallback to generic here if not in extents fmt */ 3899 /* fallback to generic here if not in extents fmt */
@@ -3781,8 +3907,14 @@ int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
3781 if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) { 3907 if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) {
3782 error = ext4_xattr_fiemap(inode, fieinfo); 3908 error = ext4_xattr_fiemap(inode, fieinfo);
3783 } else { 3909 } else {
3910 ext4_lblk_t len_blks;
3911 __u64 last_blk;
3912
3784 start_blk = start >> inode->i_sb->s_blocksize_bits; 3913 start_blk = start >> inode->i_sb->s_blocksize_bits;
3785 len_blks = len >> inode->i_sb->s_blocksize_bits; 3914 last_blk = (start + len - 1) >> inode->i_sb->s_blocksize_bits;
3915 if (last_blk >= EXT_MAX_BLOCK)
3916 last_blk = EXT_MAX_BLOCK-1;
3917 len_blks = ((ext4_lblk_t) last_blk) - start_blk + 1;
3786 3918
3787 /* 3919 /*
3788 * Walk the extent tree gathering extent information. 3920 * Walk the extent tree gathering extent information.
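The rewritten length math above computes the last byte's logical block and clamps it below EXT_MAX_BLOCK before deriving len_blks, so a huge userspace len (fiemap is commonly called with a range covering the whole file) can no longer overflow the 32-bit ext4_lblk_t. A worked user-space example with 4 KiB blocks; EXT_MAX_BLOCK here stands in for ext4's logical-block limit:

#include <stdio.h>
#include <stdint.h>

#define EXT_MAX_BLOCK 0xffffffffU	/* stand-in for ext4's limit */

int main(void)
{
	unsigned blkbits = 12;			/* 4096-byte blocks */
	uint64_t start = 8192;			/* byte offset: block 2 */
	uint64_t len = 1ULL << 50;		/* a petabyte-scale request */

	uint32_t start_blk = start >> blkbits;
	uint64_t last_blk = (start + len - 1) >> blkbits;

	if (last_blk >= EXT_MAX_BLOCK)
		last_blk = EXT_MAX_BLOCK - 1;	/* clamp before narrowing */

	uint32_t len_blks = (uint32_t)last_blk - start_blk + 1;

	printf("blocks %u..%llu -> len_blks %u\n",
	       start_blk, (unsigned long long)last_blk, len_blks);
	return 0;
}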
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index a08a12998c49..d0776e410f34 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -36,9 +36,9 @@
36 */ 36 */
37static int ext4_release_file(struct inode *inode, struct file *filp) 37static int ext4_release_file(struct inode *inode, struct file *filp)
38{ 38{
39 if (EXT4_I(inode)->i_state & EXT4_STATE_DA_ALLOC_CLOSE) { 39 if (ext4_test_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE)) {
40 ext4_alloc_da_blocks(inode); 40 ext4_alloc_da_blocks(inode);
41 EXT4_I(inode)->i_state &= ~EXT4_STATE_DA_ALLOC_CLOSE; 41 ext4_clear_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE);
42 } 42 }
43 /* if we are the last writer on the inode, drop the block reservation */ 43 /* if we are the last writer on the inode, drop the block reservation */
44 if ((filp->f_mode & FMODE_WRITE) && 44 if ((filp->f_mode & FMODE_WRITE) &&
@@ -117,11 +117,9 @@ static int ext4_file_open(struct inode * inode, struct file * filp)
117 * devices or filesystem images. 117 * devices or filesystem images.
118 */ 118 */
119 memset(buf, 0, sizeof(buf)); 119 memset(buf, 0, sizeof(buf));
120 path.mnt = mnt->mnt_parent; 120 path.mnt = mnt;
121 path.dentry = mnt->mnt_mountpoint; 121 path.dentry = mnt->mnt_root;
122 path_get(&path);
123 cp = d_path(&path, buf, sizeof(buf)); 122 cp = d_path(&path, buf, sizeof(buf));
124 path_put(&path);
125 if (!IS_ERR(cp)) { 123 if (!IS_ERR(cp)) {
126 memcpy(sbi->s_es->s_last_mounted, cp, 124 memcpy(sbi->s_es->s_last_mounted, cp,
127 sizeof(sbi->s_es->s_last_mounted)); 125 sizeof(sbi->s_es->s_last_mounted));
diff --git a/fs/ext4/fsync.c b/fs/ext4/fsync.c
index 98bd140aad01..0d0c3239c1cd 100644
--- a/fs/ext4/fsync.c
+++ b/fs/ext4/fsync.c
@@ -63,7 +63,7 @@ int ext4_sync_file(struct file *file, struct dentry *dentry, int datasync)
63 if (inode->i_sb->s_flags & MS_RDONLY) 63 if (inode->i_sb->s_flags & MS_RDONLY)
64 return 0; 64 return 0;
65 65
66 ret = flush_aio_dio_completed_IO(inode); 66 ret = flush_completed_IO(inode);
67 if (ret < 0) 67 if (ret < 0)
68 return ret; 68 return ret;
69 69
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index 9bb2bb9f67ad..361c0b9962a8 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -76,8 +76,7 @@ unsigned ext4_init_inode_bitmap(struct super_block *sb, struct buffer_head *bh,
76 /* If checksum is bad mark all blocks and inodes use to prevent 76 /* If checksum is bad mark all blocks and inodes use to prevent
77 * allocation, essentially implementing a per-group read-only flag. */ 77 * allocation, essentially implementing a per-group read-only flag. */
78 if (!ext4_group_desc_csum_verify(sbi, block_group, gdp)) { 78 if (!ext4_group_desc_csum_verify(sbi, block_group, gdp)) {
79 ext4_error(sb, __func__, "Checksum bad for group %u", 79 ext4_error(sb, "Checksum bad for group %u", block_group);
80 block_group);
81 ext4_free_blks_set(sb, gdp, 0); 80 ext4_free_blks_set(sb, gdp, 0);
82 ext4_free_inodes_set(sb, gdp, 0); 81 ext4_free_inodes_set(sb, gdp, 0);
83 ext4_itable_unused_set(sb, gdp, 0); 82 ext4_itable_unused_set(sb, gdp, 0);
@@ -111,8 +110,7 @@ ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group)
111 bitmap_blk = ext4_inode_bitmap(sb, desc); 110 bitmap_blk = ext4_inode_bitmap(sb, desc);
112 bh = sb_getblk(sb, bitmap_blk); 111 bh = sb_getblk(sb, bitmap_blk);
113 if (unlikely(!bh)) { 112 if (unlikely(!bh)) {
114 ext4_error(sb, __func__, 113 ext4_error(sb, "Cannot read inode bitmap - "
115 "Cannot read inode bitmap - "
116 "block_group = %u, inode_bitmap = %llu", 114 "block_group = %u, inode_bitmap = %llu",
117 block_group, bitmap_blk); 115 block_group, bitmap_blk);
118 return NULL; 116 return NULL;
@@ -153,8 +151,7 @@ ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group)
153 set_bitmap_uptodate(bh); 151 set_bitmap_uptodate(bh);
154 if (bh_submit_read(bh) < 0) { 152 if (bh_submit_read(bh) < 0) {
155 put_bh(bh); 153 put_bh(bh);
156 ext4_error(sb, __func__, 154 ext4_error(sb, "Cannot read inode bitmap - "
157 "Cannot read inode bitmap - "
158 "block_group = %u, inode_bitmap = %llu", 155 "block_group = %u, inode_bitmap = %llu",
159 block_group, bitmap_blk); 156 block_group, bitmap_blk);
160 return NULL; 157 return NULL;
@@ -229,8 +226,7 @@ void ext4_free_inode(handle_t *handle, struct inode *inode)
229 226
230 es = EXT4_SB(sb)->s_es; 227 es = EXT4_SB(sb)->s_es;
231 if (ino < EXT4_FIRST_INO(sb) || ino > le32_to_cpu(es->s_inodes_count)) { 228 if (ino < EXT4_FIRST_INO(sb) || ino > le32_to_cpu(es->s_inodes_count)) {
232 ext4_error(sb, "ext4_free_inode", 229 ext4_error(sb, "reserved or nonexistent inode %lu", ino);
233 "reserved or nonexistent inode %lu", ino);
234 goto error_return; 230 goto error_return;
235 } 231 }
236 block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb); 232 block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb);
@@ -248,8 +244,7 @@ void ext4_free_inode(handle_t *handle, struct inode *inode)
248 cleared = ext4_clear_bit_atomic(ext4_group_lock_ptr(sb, block_group), 244 cleared = ext4_clear_bit_atomic(ext4_group_lock_ptr(sb, block_group),
249 bit, bitmap_bh->b_data); 245 bit, bitmap_bh->b_data);
250 if (!cleared) 246 if (!cleared)
251 ext4_error(sb, "ext4_free_inode", 247 ext4_error(sb, "bit already cleared for inode %lu", ino);
252 "bit already cleared for inode %lu", ino);
253 else { 248 else {
254 gdp = ext4_get_group_desc(sb, block_group, &bh2); 249 gdp = ext4_get_group_desc(sb, block_group, &bh2);
255 250
@@ -736,8 +731,7 @@ static int ext4_claim_inode(struct super_block *sb,
736 if ((group == 0 && ino < EXT4_FIRST_INO(sb)) || 731 if ((group == 0 && ino < EXT4_FIRST_INO(sb)) ||
737 ino > EXT4_INODES_PER_GROUP(sb)) { 732 ino > EXT4_INODES_PER_GROUP(sb)) {
738 ext4_unlock_group(sb, group); 733 ext4_unlock_group(sb, group);
739 ext4_error(sb, __func__, 734 ext4_error(sb, "reserved inode or inode > inodes count - "
740 "reserved inode or inode > inodes count - "
741 "block_group = %u, inode=%lu", group, 735 "block_group = %u, inode=%lu", group,
742 ino + group * EXT4_INODES_PER_GROUP(sb)); 736 ino + group * EXT4_INODES_PER_GROUP(sb));
743 return 1; 737 return 1;
@@ -904,7 +898,7 @@ repeat_in_this_group:
904 BUFFER_TRACE(inode_bitmap_bh, 898 BUFFER_TRACE(inode_bitmap_bh,
905 "call ext4_handle_dirty_metadata"); 899 "call ext4_handle_dirty_metadata");
906 err = ext4_handle_dirty_metadata(handle, 900 err = ext4_handle_dirty_metadata(handle,
907 inode, 901 NULL,
908 inode_bitmap_bh); 902 inode_bitmap_bh);
909 if (err) 903 if (err)
910 goto fail; 904 goto fail;
@@ -1029,7 +1023,8 @@ got:
1029 inode->i_generation = sbi->s_next_generation++; 1023 inode->i_generation = sbi->s_next_generation++;
1030 spin_unlock(&sbi->s_next_gen_lock); 1024 spin_unlock(&sbi->s_next_gen_lock);
1031 1025
1032 ei->i_state = EXT4_STATE_NEW; 1026 ei->i_state_flags = 0;
1027 ext4_set_inode_state(inode, EXT4_STATE_NEW);
1033 1028
1034 ei->i_extra_isize = EXT4_SB(sb)->s_want_extra_isize; 1029 ei->i_extra_isize = EXT4_SB(sb)->s_want_extra_isize;
1035 1030
@@ -1098,8 +1093,7 @@ struct inode *ext4_orphan_get(struct super_block *sb, unsigned long ino)
1098 1093
1099 /* Error cases - e2fsck has already cleaned up for us */ 1094 /* Error cases - e2fsck has already cleaned up for us */
1100 if (ino > max_ino) { 1095 if (ino > max_ino) {
1101 ext4_warning(sb, __func__, 1096 ext4_warning(sb, "bad orphan ino %lu! e2fsck was run?", ino);
1102 "bad orphan ino %lu! e2fsck was run?", ino);
1103 goto error; 1097 goto error;
1104 } 1098 }
1105 1099
@@ -1107,8 +1101,7 @@ struct inode *ext4_orphan_get(struct super_block *sb, unsigned long ino)
1107 bit = (ino - 1) % EXT4_INODES_PER_GROUP(sb); 1101 bit = (ino - 1) % EXT4_INODES_PER_GROUP(sb);
1108 bitmap_bh = ext4_read_inode_bitmap(sb, block_group); 1102 bitmap_bh = ext4_read_inode_bitmap(sb, block_group);
1109 if (!bitmap_bh) { 1103 if (!bitmap_bh) {
1110 ext4_warning(sb, __func__, 1104 ext4_warning(sb, "inode bitmap error for orphan %lu", ino);
1111 "inode bitmap error for orphan %lu", ino);
1112 goto error; 1105 goto error;
1113 } 1106 }
1114 1107
@@ -1140,8 +1133,7 @@ iget_failed:
1140 err = PTR_ERR(inode); 1133 err = PTR_ERR(inode);
1141 inode = NULL; 1134 inode = NULL;
1142bad_orphan: 1135bad_orphan:
1143 ext4_warning(sb, __func__, 1136 ext4_warning(sb, "bad orphan inode %lu! e2fsck was run?", ino);
1144 "bad orphan inode %lu! e2fsck was run?", ino);
1145 printk(KERN_NOTICE "ext4_test_bit(bit=%d, block=%llu) = %d\n", 1137 printk(KERN_NOTICE "ext4_test_bit(bit=%d, block=%llu) = %d\n",
1146 bit, (unsigned long long)bitmap_bh->b_blocknr, 1138 bit, (unsigned long long)bitmap_bh->b_blocknr,
1147 ext4_test_bit(bit, bitmap_bh->b_data)); 1139 ext4_test_bit(bit, bitmap_bh->b_data));
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index bec222ca9ba4..986120f30066 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -38,6 +38,7 @@
38#include <linux/uio.h> 38#include <linux/uio.h>
39#include <linux/bio.h> 39#include <linux/bio.h>
40#include <linux/workqueue.h> 40#include <linux/workqueue.h>
41#include <linux/kernel.h>
41 42
42#include "ext4_jbd2.h" 43#include "ext4_jbd2.h"
43#include "xattr.h" 44#include "xattr.h"
@@ -197,7 +198,7 @@ void ext4_delete_inode(struct inode *inode)
197 inode->i_size = 0; 198 inode->i_size = 0;
198 err = ext4_mark_inode_dirty(handle, inode); 199 err = ext4_mark_inode_dirty(handle, inode);
199 if (err) { 200 if (err) {
200 ext4_warning(inode->i_sb, __func__, 201 ext4_warning(inode->i_sb,
201 "couldn't mark inode dirty (err %d)", err); 202 "couldn't mark inode dirty (err %d)", err);
202 goto stop_handle; 203 goto stop_handle;
203 } 204 }
@@ -215,7 +216,7 @@ void ext4_delete_inode(struct inode *inode)
215 if (err > 0) 216 if (err > 0)
216 err = ext4_journal_restart(handle, 3); 217 err = ext4_journal_restart(handle, 3);
217 if (err != 0) { 218 if (err != 0) {
218 ext4_warning(inode->i_sb, __func__, 219 ext4_warning(inode->i_sb,
219 "couldn't extend journal (err %d)", err); 220 "couldn't extend journal (err %d)", err);
220 stop_handle: 221 stop_handle:
221 ext4_journal_stop(handle); 222 ext4_journal_stop(handle);
@@ -326,8 +327,7 @@ static int ext4_block_to_path(struct inode *inode,
326 offsets[n++] = i_block & (ptrs - 1); 327 offsets[n++] = i_block & (ptrs - 1);
327 final = ptrs; 328 final = ptrs;
328 } else { 329 } else {
329 ext4_warning(inode->i_sb, "ext4_block_to_path", 330 ext4_warning(inode->i_sb, "block %lu > max in inode %lu",
330 "block %lu > max in inode %lu",
331 i_block + direct_blocks + 331 i_block + direct_blocks +
332 indirect_blocks + double_blocks, inode->i_ino); 332 indirect_blocks + double_blocks, inode->i_ino);
333 } 333 }
@@ -347,7 +347,7 @@ static int __ext4_check_blockref(const char *function, struct inode *inode,
347 if (blk && 347 if (blk &&
348 unlikely(!ext4_data_block_valid(EXT4_SB(inode->i_sb), 348 unlikely(!ext4_data_block_valid(EXT4_SB(inode->i_sb),
349 blk, 1))) { 349 blk, 1))) {
350 ext4_error(inode->i_sb, function, 350 __ext4_error(inode->i_sb, function,
351 "invalid block reference %u " 351 "invalid block reference %u "
352 "in inode #%lu", blk, inode->i_ino); 352 "in inode #%lu", blk, inode->i_ino);
353 return -EIO; 353 return -EIO;
@@ -610,7 +610,14 @@ static int ext4_alloc_blocks(handle_t *handle, struct inode *inode,
610 if (*err) 610 if (*err)
611 goto failed_out; 611 goto failed_out;
612 612
613 BUG_ON(current_block + count > EXT4_MAX_BLOCK_FILE_PHYS); 613 if (unlikely(current_block + count > EXT4_MAX_BLOCK_FILE_PHYS)) {
614 EXT4_ERROR_INODE(inode,
615 "current_block %llu + count %lu > %d!",
616 current_block, count,
617 EXT4_MAX_BLOCK_FILE_PHYS);
618 *err = -EIO;
619 goto failed_out;
620 }
614 621
615 target -= count; 622 target -= count;
616 /* allocate blocks for indirect blocks */ 623 /* allocate blocks for indirect blocks */
@@ -646,7 +653,14 @@ static int ext4_alloc_blocks(handle_t *handle, struct inode *inode,
646 ar.flags = EXT4_MB_HINT_DATA; 653 ar.flags = EXT4_MB_HINT_DATA;
647 654
648 current_block = ext4_mb_new_blocks(handle, &ar, err); 655 current_block = ext4_mb_new_blocks(handle, &ar, err);
649 BUG_ON(current_block + ar.len > EXT4_MAX_BLOCK_FILE_PHYS); 656 if (unlikely(current_block + ar.len > EXT4_MAX_BLOCK_FILE_PHYS)) {
657 EXT4_ERROR_INODE(inode,
658 "current_block %llu + ar.len %d > %d!",
659 current_block, ar.len,
660 EXT4_MAX_BLOCK_FILE_PHYS);
661 *err = -EIO;
662 goto failed_out;
663 }
650 664
651 if (*err && (target == blks)) { 665 if (*err && (target == blks)) {
652 /* 666 /*
@@ -1064,6 +1078,7 @@ void ext4_da_update_reserve_space(struct inode *inode,
1064 int mdb_free = 0, allocated_meta_blocks = 0; 1078 int mdb_free = 0, allocated_meta_blocks = 0;
1065 1079
1066 spin_lock(&ei->i_block_reservation_lock); 1080 spin_lock(&ei->i_block_reservation_lock);
1081 trace_ext4_da_update_reserve_space(inode, used);
1067 if (unlikely(used > ei->i_reserved_data_blocks)) { 1082 if (unlikely(used > ei->i_reserved_data_blocks)) {
1068 ext4_msg(inode->i_sb, KERN_NOTICE, "%s: ino %lu, used %d " 1083 ext4_msg(inode->i_sb, KERN_NOTICE, "%s: ino %lu, used %d "
1069 "with only %d reserved data blocks\n", 1084 "with only %d reserved data blocks\n",
@@ -1127,7 +1142,7 @@ static int check_block_validity(struct inode *inode, const char *msg,
1127 sector_t logical, sector_t phys, int len) 1142 sector_t logical, sector_t phys, int len)
1128{ 1143{
1129 if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), phys, len)) { 1144 if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), phys, len)) {
1130 ext4_error(inode->i_sb, msg, 1145 __ext4_error(inode->i_sb, msg,
1131 "inode #%lu logical block %llu mapped to %llu " 1146 "inode #%lu logical block %llu mapped to %llu "
1132 "(size %d)", inode->i_ino, 1147 "(size %d)", inode->i_ino,
1133 (unsigned long long) logical, 1148 (unsigned long long) logical,
@@ -1309,7 +1324,7 @@ int ext4_get_blocks(handle_t *handle, struct inode *inode, sector_t block,
1309 * i_data's format changing. Force the migrate 1324 * i_data's format changing. Force the migrate
1310 * to fail by clearing migrate flags 1325 * to fail by clearing migrate flags
1311 */ 1326 */
1312 EXT4_I(inode)->i_state &= ~EXT4_STATE_EXT_MIGRATE; 1327 ext4_clear_inode_state(inode, EXT4_STATE_EXT_MIGRATE);
1313 } 1328 }
1314 1329
1315 /* 1330 /*
@@ -1537,6 +1552,8 @@ static void ext4_truncate_failed_write(struct inode *inode)
1537 ext4_truncate(inode); 1552 ext4_truncate(inode);
1538} 1553}
1539 1554
1555static int ext4_get_block_write(struct inode *inode, sector_t iblock,
1556 struct buffer_head *bh_result, int create);
1540static int ext4_write_begin(struct file *file, struct address_space *mapping, 1557static int ext4_write_begin(struct file *file, struct address_space *mapping,
1541 loff_t pos, unsigned len, unsigned flags, 1558 loff_t pos, unsigned len, unsigned flags,
1542 struct page **pagep, void **fsdata) 1559 struct page **pagep, void **fsdata)
@@ -1578,8 +1595,12 @@ retry:
1578 } 1595 }
1579 *pagep = page; 1596 *pagep = page;
1580 1597
1581 ret = block_write_begin(file, mapping, pos, len, flags, pagep, fsdata, 1598 if (ext4_should_dioread_nolock(inode))
1582 ext4_get_block); 1599 ret = block_write_begin(file, mapping, pos, len, flags, pagep,
1600 fsdata, ext4_get_block_write);
1601 else
1602 ret = block_write_begin(file, mapping, pos, len, flags, pagep,
1603 fsdata, ext4_get_block);
1583 1604
1584 if (!ret && ext4_should_journal_data(inode)) { 1605 if (!ret && ext4_should_journal_data(inode)) {
1585 ret = walk_page_buffers(handle, page_buffers(page), 1606 ret = walk_page_buffers(handle, page_buffers(page),
@@ -1796,7 +1817,7 @@ static int ext4_journalled_write_end(struct file *file,
1796 new_i_size = pos + copied; 1817 new_i_size = pos + copied;
1797 if (new_i_size > inode->i_size) 1818 if (new_i_size > inode->i_size)
1798 i_size_write(inode, pos+copied); 1819 i_size_write(inode, pos+copied);
1799 EXT4_I(inode)->i_state |= EXT4_STATE_JDATA; 1820 ext4_set_inode_state(inode, EXT4_STATE_JDATA);
1800 if (new_i_size > EXT4_I(inode)->i_disksize) { 1821 if (new_i_size > EXT4_I(inode)->i_disksize) {
1801 ext4_update_i_disksize(inode, new_i_size); 1822 ext4_update_i_disksize(inode, new_i_size);
1802 ret2 = ext4_mark_inode_dirty(handle, inode); 1823 ret2 = ext4_mark_inode_dirty(handle, inode);
@@ -1850,6 +1871,7 @@ repeat:
1850 spin_lock(&ei->i_block_reservation_lock); 1871 spin_lock(&ei->i_block_reservation_lock);
1851 md_reserved = ei->i_reserved_meta_blocks; 1872 md_reserved = ei->i_reserved_meta_blocks;
1852 md_needed = ext4_calc_metadata_amount(inode, lblock); 1873 md_needed = ext4_calc_metadata_amount(inode, lblock);
1874 trace_ext4_da_reserve_space(inode, md_needed);
1853 spin_unlock(&ei->i_block_reservation_lock); 1875 spin_unlock(&ei->i_block_reservation_lock);
1854 1876
1855 /* 1877 /*
@@ -2096,6 +2118,8 @@ static void mpage_put_bnr_to_bhs(struct mpage_da_data *mpd, sector_t logical,
2096 } else if (buffer_mapped(bh)) 2118 } else if (buffer_mapped(bh))
2097 BUG_ON(bh->b_blocknr != pblock); 2119 BUG_ON(bh->b_blocknr != pblock);
2098 2120
2121 if (buffer_uninit(exbh))
2122 set_buffer_uninit(bh);
2099 cur_logical++; 2123 cur_logical++;
2100 pblock++; 2124 pblock++;
2101 } while ((bh = bh->b_this_page) != head); 2125 } while ((bh = bh->b_this_page) != head);
@@ -2138,17 +2162,16 @@ static void ext4_da_block_invalidatepages(struct mpage_da_data *mpd,
2138 break; 2162 break;
2139 for (i = 0; i < nr_pages; i++) { 2163 for (i = 0; i < nr_pages; i++) {
2140 struct page *page = pvec.pages[i]; 2164 struct page *page = pvec.pages[i];
2141 index = page->index; 2165 if (page->index > end)
2142 if (index > end)
2143 break; 2166 break;
2144 index++;
2145
2146 BUG_ON(!PageLocked(page)); 2167 BUG_ON(!PageLocked(page));
2147 BUG_ON(PageWriteback(page)); 2168 BUG_ON(PageWriteback(page));
2148 block_invalidatepage(page, 0); 2169 block_invalidatepage(page, 0);
2149 ClearPageUptodate(page); 2170 ClearPageUptodate(page);
2150 unlock_page(page); 2171 unlock_page(page);
2151 } 2172 }
2173 index = pvec.pages[nr_pages - 1]->index + 1;
2174 pagevec_release(&pvec);
2152 } 2175 }
2153 return; 2176 return;
2154} 2177}
@@ -2225,6 +2248,8 @@ static int mpage_da_map_blocks(struct mpage_da_data *mpd)
2225 */ 2248 */
2226 new.b_state = 0; 2249 new.b_state = 0;
2227 get_blocks_flags = EXT4_GET_BLOCKS_CREATE; 2250 get_blocks_flags = EXT4_GET_BLOCKS_CREATE;
2251 if (ext4_should_dioread_nolock(mpd->inode))
2252 get_blocks_flags |= EXT4_GET_BLOCKS_IO_CREATE_EXT;
2228 if (mpd->b_state & (1 << BH_Delay)) 2253 if (mpd->b_state & (1 << BH_Delay))
2229 get_blocks_flags |= EXT4_GET_BLOCKS_DELALLOC_RESERVE; 2254 get_blocks_flags |= EXT4_GET_BLOCKS_DELALLOC_RESERVE;
2230 2255
@@ -2635,11 +2660,14 @@ static int __ext4_journalled_writepage(struct page *page,
2635 ret = err; 2660 ret = err;
2636 2661
2637 walk_page_buffers(handle, page_bufs, 0, len, NULL, bput_one); 2662 walk_page_buffers(handle, page_bufs, 0, len, NULL, bput_one);
2638 EXT4_I(inode)->i_state |= EXT4_STATE_JDATA; 2663 ext4_set_inode_state(inode, EXT4_STATE_JDATA);
2639out: 2664out:
2640 return ret; 2665 return ret;
2641} 2666}
2642 2667
2668static int ext4_set_bh_endio(struct buffer_head *bh, struct inode *inode);
2669static void ext4_end_io_buffer_write(struct buffer_head *bh, int uptodate);
2670
2643/* 2671/*
2644 * Note that we don't need to start a transaction unless we're journaling data 2672 * Note that we don't need to start a transaction unless we're journaling data
2645 * because we should have holes filled from ext4_page_mkwrite(). We even don't 2673 * because we should have holes filled from ext4_page_mkwrite(). We even don't
@@ -2687,7 +2715,7 @@ static int ext4_writepage(struct page *page,
2687 int ret = 0; 2715 int ret = 0;
2688 loff_t size; 2716 loff_t size;
2689 unsigned int len; 2717 unsigned int len;
2690 struct buffer_head *page_bufs; 2718 struct buffer_head *page_bufs = NULL;
2691 struct inode *inode = page->mapping->host; 2719 struct inode *inode = page->mapping->host;
2692 2720
2693 trace_ext4_writepage(inode, page); 2721 trace_ext4_writepage(inode, page);
@@ -2763,7 +2791,11 @@ static int ext4_writepage(struct page *page,
2763 2791
2764 if (test_opt(inode->i_sb, NOBH) && ext4_should_writeback_data(inode)) 2792 if (test_opt(inode->i_sb, NOBH) && ext4_should_writeback_data(inode))
2765 ret = nobh_writepage(page, noalloc_get_block_write, wbc); 2793 ret = nobh_writepage(page, noalloc_get_block_write, wbc);
2766 else 2794 else if (page_bufs && buffer_uninit(page_bufs)) {
2795 ext4_set_bh_endio(page_bufs, inode);
2796 ret = block_write_full_page_endio(page, noalloc_get_block_write,
2797 wbc, ext4_end_io_buffer_write);
2798 } else
2767 ret = block_write_full_page(page, noalloc_get_block_write, 2799 ret = block_write_full_page(page, noalloc_get_block_write,
2768 wbc); 2800 wbc);
2769 2801
@@ -3306,7 +3338,8 @@ static sector_t ext4_bmap(struct address_space *mapping, sector_t block)
3306 filemap_write_and_wait(mapping); 3338 filemap_write_and_wait(mapping);
3307 } 3339 }
3308 3340
3309 if (EXT4_JOURNAL(inode) && EXT4_I(inode)->i_state & EXT4_STATE_JDATA) { 3341 if (EXT4_JOURNAL(inode) &&
3342 ext4_test_inode_state(inode, EXT4_STATE_JDATA)) {
3310 /* 3343 /*
3311 * This is a REALLY heavyweight approach, but the use of 3344 * This is a REALLY heavyweight approach, but the use of
3312 * bmap on dirty files is expected to be extremely rare: 3345 * bmap on dirty files is expected to be extremely rare:
@@ -3325,7 +3358,7 @@ static sector_t ext4_bmap(struct address_space *mapping, sector_t block)
3325 * everything they get. 3358 * everything they get.
3326 */ 3359 */
3327 3360
3328 EXT4_I(inode)->i_state &= ~EXT4_STATE_JDATA; 3361 ext4_clear_inode_state(inode, EXT4_STATE_JDATA);
3329 journal = EXT4_JOURNAL(inode); 3362 journal = EXT4_JOURNAL(inode);
3330 jbd2_journal_lock_updates(journal); 3363 jbd2_journal_lock_updates(journal);
3331 err = jbd2_journal_flush(journal); 3364 err = jbd2_journal_flush(journal);
@@ -3350,11 +3383,45 @@ ext4_readpages(struct file *file, struct address_space *mapping,
3350 return mpage_readpages(mapping, pages, nr_pages, ext4_get_block); 3383 return mpage_readpages(mapping, pages, nr_pages, ext4_get_block);
3351} 3384}
3352 3385
3386static void ext4_free_io_end(ext4_io_end_t *io)
3387{
3388 BUG_ON(!io);
3389 if (io->page)
3390 put_page(io->page);
3391 iput(io->inode);
3392 kfree(io);
3393}
3394
3395static void ext4_invalidatepage_free_endio(struct page *page, unsigned long offset)
3396{
3397 struct buffer_head *head, *bh;
3398 unsigned int curr_off = 0;
3399
3400 if (!page_has_buffers(page))
3401 return;
3402 head = bh = page_buffers(page);
3403 do {
3404 if (offset <= curr_off && test_clear_buffer_uninit(bh)
3405 && bh->b_private) {
3406 ext4_free_io_end(bh->b_private);
3407 bh->b_private = NULL;
3408 bh->b_end_io = NULL;
3409 }
3410 curr_off = curr_off + bh->b_size;
3411 bh = bh->b_this_page;
3412 } while (bh != head);
3413}
3414
3353static void ext4_invalidatepage(struct page *page, unsigned long offset) 3415static void ext4_invalidatepage(struct page *page, unsigned long offset)
3354{ 3416{
3355 journal_t *journal = EXT4_JOURNAL(page->mapping->host); 3417 journal_t *journal = EXT4_JOURNAL(page->mapping->host);
3356 3418
3357 /* 3419 /*
3420 * free any io_end structure allocated for buffers to be discarded
3421 */
3422 if (ext4_should_dioread_nolock(page->mapping->host))
3423 ext4_invalidatepage_free_endio(page, offset);
3424 /*
3358 * If it's a full truncate we just forget about the pending dirtying 3425 * If it's a full truncate we just forget about the pending dirtying
3359 */ 3426 */
3360 if (offset == 0) 3427 if (offset == 0)
@@ -3425,7 +3492,14 @@ static ssize_t ext4_ind_direct_IO(int rw, struct kiocb *iocb,
3425 } 3492 }
3426 3493
3427retry: 3494retry:
3428 ret = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov, 3495 if (rw == READ && ext4_should_dioread_nolock(inode))
3496 ret = blockdev_direct_IO_no_locking(rw, iocb, inode,
3497 inode->i_sb->s_bdev, iov,
3498 offset, nr_segs,
3499 ext4_get_block, NULL);
3500 else
3501 ret = blockdev_direct_IO(rw, iocb, inode,
3502 inode->i_sb->s_bdev, iov,
3429 offset, nr_segs, 3503 offset, nr_segs,
3430 ext4_get_block, NULL); 3504 ext4_get_block, NULL);
3431 if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries)) 3505 if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
@@ -3441,6 +3515,9 @@ retry:
 		 * but cannot extend i_size. Bail out and pretend
 		 * the write failed... */
 		ret = PTR_ERR(handle);
+		if (inode->i_nlink)
+			ext4_orphan_del(NULL, inode);
+
 		goto out;
 	}
 	if (inode->i_nlink)
@@ -3468,75 +3545,63 @@ out:
 	return ret;
 }
 
-static int ext4_get_block_dio_write(struct inode *inode, sector_t iblock,
+static int ext4_get_block_write(struct inode *inode, sector_t iblock,
 		   struct buffer_head *bh_result, int create)
 {
-	handle_t *handle = NULL;
+	handle_t *handle = ext4_journal_current_handle();
 	int ret = 0;
 	unsigned max_blocks = bh_result->b_size >> inode->i_blkbits;
 	int dio_credits;
+	int started = 0;
 
-	ext4_debug("ext4_get_block_dio_write: inode %lu, create flag %d\n",
+	ext4_debug("ext4_get_block_write: inode %lu, create flag %d\n",
 		   inode->i_ino, create);
 	/*
-	 * DIO VFS code passes create = 0 flag for write to
-	 * the middle of file. It does this to avoid block
-	 * allocation for holes, to prevent expose stale data
-	 * out when there is parallel buffered read (which does
-	 * not hold the i_mutex lock) while direct IO write has
-	 * not completed. DIO request on holes finally falls back
-	 * to buffered IO for this reason.
-	 *
-	 * For ext4 extent based file, since we support fallocate,
-	 * new allocated extent as uninitialized, for holes, we
-	 * could fallocate blocks for holes, thus parallel
-	 * buffered IO read will zero out the page when read on
-	 * a hole while parallel DIO write to the hole has not completed.
-	 *
-	 * when we come here, we know it's a direct IO write to
-	 * to the middle of file (<i_size)
-	 * so it's safe to override the create flag from VFS.
+	 * ext4_get_block in preparation for a DIO write or buffer write.
+	 * We allocate an uninitialized extent if blocks haven't been allocated.
+	 * The extent will be converted to initialized after IO completes.
 	 */
-	create = EXT4_GET_BLOCKS_DIO_CREATE_EXT;
+	create = EXT4_GET_BLOCKS_IO_CREATE_EXT;
 
-	if (max_blocks > DIO_MAX_BLOCKS)
-		max_blocks = DIO_MAX_BLOCKS;
-	dio_credits = ext4_chunk_trans_blocks(inode, max_blocks);
-	handle = ext4_journal_start(inode, dio_credits);
-	if (IS_ERR(handle)) {
-		ret = PTR_ERR(handle);
-		goto out;
+	if (!handle) {
+		if (max_blocks > DIO_MAX_BLOCKS)
+			max_blocks = DIO_MAX_BLOCKS;
+		dio_credits = ext4_chunk_trans_blocks(inode, max_blocks);
+		handle = ext4_journal_start(inode, dio_credits);
+		if (IS_ERR(handle)) {
+			ret = PTR_ERR(handle);
+			goto out;
+		}
+		started = 1;
 	}
+
 	ret = ext4_get_blocks(handle, inode, iblock, max_blocks, bh_result,
 			      create);
 	if (ret > 0) {
 		bh_result->b_size = (ret << inode->i_blkbits);
 		ret = 0;
 	}
-	ext4_journal_stop(handle);
+	if (started)
+		ext4_journal_stop(handle);
out:
 	return ret;
 }
 
-static void ext4_free_io_end(ext4_io_end_t *io)
-{
-	BUG_ON(!io);
-	iput(io->inode);
-	kfree(io);
-}
-static void dump_aio_dio_list(struct inode * inode)
+static void dump_completed_IO(struct inode * inode)
 {
 #ifdef EXT4_DEBUG
 	struct list_head *cur, *before, *after;
 	ext4_io_end_t *io, *io0, *io1;
+	unsigned long flags;
 
-	if (list_empty(&EXT4_I(inode)->i_aio_dio_complete_list)){
-		ext4_debug("inode %lu aio dio list is empty\n", inode->i_ino);
+	if (list_empty(&EXT4_I(inode)->i_completed_io_list)){
+		ext4_debug("inode %lu completed_io list is empty\n", inode->i_ino);
 		return;
 	}
 
-	ext4_debug("Dump inode %lu aio_dio_completed_IO list \n", inode->i_ino);
-	list_for_each_entry(io, &EXT4_I(inode)->i_aio_dio_complete_list, list){
+	ext4_debug("Dump inode %lu completed_io list \n", inode->i_ino);
+	spin_lock_irqsave(&EXT4_I(inode)->i_completed_io_lock, flags);
+	list_for_each_entry(io, &EXT4_I(inode)->i_completed_io_list, list){
 		cur = &io->list;
 		before = cur->prev;
 		io0 = container_of(before, ext4_io_end_t, list);
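ext4_get_block_write() above only starts, and therefore only stops, a journal transaction when no handle is already running, so the same helper serves both the DIO path (no running handle) and callers already inside a transaction. A userspace sketch of that reuse-or-start-and-remember pattern, with a thread-local variable standing in for the current journal handle (all names illustrative):

#include <stdio.h>

static _Thread_local int *current_handle;	/* stand-in for the journal handle */

static void op(void)
{
	int local_handle;
	int started = 0;
	int *handle = current_handle;	/* reuse a running transaction... */

	if (!handle) {			/* ...or start our own */
		handle = &local_handle;
		current_handle = handle;
		started = 1;
	}
	printf("working under %s handle\n", started ? "our own" : "the caller's");
	if (started)			/* only stop what we started */
		current_handle = NULL;
}

int main(void)
{
	int outer;

	op();				/* nothing running: op starts one */
	current_handle = &outer;
	op();				/* nested call: reuses the outer handle */
	current_handle = NULL;
	return 0;
}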
@@ -3546,32 +3611,31 @@ static void dump_aio_dio_list(struct inode * inode)
 		ext4_debug("io 0x%p from inode %lu,prev 0x%p,next 0x%p\n",
 			    io, inode->i_ino, io0, io1);
 	}
+	spin_unlock_irqrestore(&EXT4_I(inode)->i_completed_io_lock, flags);
 #endif
 }
 
 /*
  * check a range of space and convert unwritten extents to written.
  */
-static int ext4_end_aio_dio_nolock(ext4_io_end_t *io)
+static int ext4_end_io_nolock(ext4_io_end_t *io)
 {
 	struct inode *inode = io->inode;
 	loff_t offset = io->offset;
-	size_t size = io->size;
+	ssize_t size = io->size;
 	int ret = 0;
 
-	ext4_debug("end_aio_dio_onlock: io 0x%p from inode %lu,list->next 0x%p,"
+	ext4_debug("ext4_end_io_nolock: io 0x%p from inode %lu,list->next 0x%p,"
 		   "list->prev 0x%p\n",
 	           io, inode->i_ino, io->list.next, io->list.prev);
 
 	if (list_empty(&io->list))
 		return ret;
 
-	if (io->flag != DIO_AIO_UNWRITTEN)
+	if (io->flag != EXT4_IO_UNWRITTEN)
 		return ret;
 
-	if (offset + size <= i_size_read(inode))
-		ret = ext4_convert_unwritten_extents(inode, offset, size);
-
+	ret = ext4_convert_unwritten_extents(inode, offset, size);
 	if (ret < 0) {
 		printk(KERN_EMERG "%s: failed to convert unwritten"
 			"extents to written extents, error is %d"
@@ -3584,50 +3648,64 @@ static int ext4_end_aio_dio_nolock(ext4_io_end_t *io)
 	io->flag = 0;
 	return ret;
 }
+
 /*
  * work on completed aio dio IO, to convert unwritten extents to extents
  */
-static void ext4_end_aio_dio_work(struct work_struct *work)
+static void ext4_end_io_work(struct work_struct *work)
 {
 	ext4_io_end_t *io = container_of(work, ext4_io_end_t, work);
 	struct inode *inode = io->inode;
-	int ret = 0;
+	struct ext4_inode_info *ei = EXT4_I(inode);
+	unsigned long flags;
+	int ret;
 
 	mutex_lock(&inode->i_mutex);
-	ret = ext4_end_aio_dio_nolock(io);
-	if (ret >= 0) {
-		if (!list_empty(&io->list))
-			list_del_init(&io->list);
-		ext4_free_io_end(io);
+	ret = ext4_end_io_nolock(io);
+	if (ret < 0) {
+		mutex_unlock(&inode->i_mutex);
+		return;
 	}
+
+	spin_lock_irqsave(&ei->i_completed_io_lock, flags);
+	if (!list_empty(&io->list))
+		list_del_init(&io->list);
+	spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
 	mutex_unlock(&inode->i_mutex);
+	ext4_free_io_end(io);
 }
+
 /*
  * This function is called from ext4_sync_file().
  *
- * When AIO DIO IO is completed, the work to convert unwritten
- * extents to written is queued on workqueue but may not get immediately
+ * When IO is completed, the work to convert unwritten extents to
+ * written is queued on workqueue but may not get immediately
  * scheduled. When fsync is called, we need to ensure the
  * conversion is complete before fsync returns.
- * The inode keeps track of a list of completed AIO from DIO path
- * that might needs to do the conversion. This function walks through
- * the list and convert the related unwritten extents to written.
+ * The inode keeps track of a list of pending/completed IO that
+ * might need to do the conversion. This function walks through
+ * the list and converts the related unwritten extents for completed IO
+ * to written.
+ * The function returns the number of pending IOs on success.
 */
-int flush_aio_dio_completed_IO(struct inode *inode)
+int flush_completed_IO(struct inode *inode)
 {
 	ext4_io_end_t *io;
+	struct ext4_inode_info *ei = EXT4_I(inode);
+	unsigned long flags;
 	int ret = 0;
 	int ret2 = 0;
 
-	if (list_empty(&EXT4_I(inode)->i_aio_dio_complete_list))
+	if (list_empty(&ei->i_completed_io_list))
 		return ret;
 
-	dump_aio_dio_list(inode);
-	while (!list_empty(&EXT4_I(inode)->i_aio_dio_complete_list)){
-		io = list_entry(EXT4_I(inode)->i_aio_dio_complete_list.next,
+	dump_completed_IO(inode);
+	spin_lock_irqsave(&ei->i_completed_io_lock, flags);
+	while (!list_empty(&ei->i_completed_io_list)){
+		io = list_entry(ei->i_completed_io_list.next,
 				ext4_io_end_t, list);
 		/*
-		 * Calling ext4_end_aio_dio_nolock() to convert completed
+		 * Calling ext4_end_io_nolock() to convert completed
 		 * IO to written.
 		 *
 		 * When ext4_sync_file() is called, run_queue() may already
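The conversion work is deferred to a workqueue, and fsync must drain whatever the worker has not reached yet. A single-threaded userspace model of the two consumers of the completed-IO list (array, names and ordering are illustrative; the real list is spinlock-protected):

#include <stdio.h>

#define MAX_IO 8
static int completed[MAX_IO];	/* stand-in for i_completed_io_list */
static int nr_completed;

static void end_io_nolock(int io)	/* stand-in for the extent conversion */
{
	printf("converted unwritten extents for io %d\n", io);
}

/* background worker: handles one entry, like ext4_end_io_work() */
static void end_io_work(void)
{
	if (nr_completed)
		end_io_nolock(completed[--nr_completed]);
}

/* fsync path: must drain the whole list before returning */
static void flush_completed_io(void)
{
	while (nr_completed)
		end_io_nolock(completed[--nr_completed]);
}

int main(void)
{
	completed[nr_completed++] = 1;
	completed[nr_completed++] = 2;
	end_io_work();		/* the worker got to one entry... */
	flush_completed_io();	/* ...and fsync finishes the rest */
	return 0;
}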
@@ -3640,20 +3718,23 @@ int flush_aio_dio_completed_IO(struct inode *inode)
 		 * avoid double converting from both fsync and background work
 		 * queue work.
 		 */
-		ret = ext4_end_aio_dio_nolock(io);
+		spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
+		ret = ext4_end_io_nolock(io);
+		spin_lock_irqsave(&ei->i_completed_io_lock, flags);
 		if (ret < 0)
 			ret2 = ret;
 		else
 			list_del_init(&io->list);
 	}
+	spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
 	return (ret2 < 0) ? ret2 : 0;
 }
 
-static ext4_io_end_t *ext4_init_io_end (struct inode *inode)
+static ext4_io_end_t *ext4_init_io_end (struct inode *inode, gfp_t flags)
 {
 	ext4_io_end_t *io = NULL;
 
-	io = kmalloc(sizeof(*io), GFP_NOFS);
+	io = kmalloc(sizeof(*io), flags);
 
 	if (io) {
 		igrab(inode);
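flush_completed_IO() above drops i_completed_io_lock around ext4_end_io_nolock(), which can sleep, and retakes the lock before touching the list again. A small pthread sketch of that drop-relock pattern (simplified: a real implementation must re-validate the entry after relocking, exactly as the hunk's comment warns):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static int pending[4] = { 1, 2, 3, 4 };
static int nr_pending = 4;

static void slow_conversion(int io)	/* must not run under the lock */
{
	printf("converting io %d\n", io);
}

static void flush_all(void)
{
	pthread_mutex_lock(&list_lock);
	while (nr_pending) {
		int io = pending[nr_pending - 1];

		/* drop the lock around the slow, sleeping operation */
		pthread_mutex_unlock(&list_lock);
		slow_conversion(io);
		pthread_mutex_lock(&list_lock);
		nr_pending--;	/* re-check list state after relocking */
	}
	pthread_mutex_unlock(&list_lock);
}

int main(void)
{
	flush_all();
	return 0;
}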
@@ -3661,8 +3742,8 @@ static ext4_io_end_t *ext4_init_io_end (struct inode *inode)
 		io->flag = 0;
 		io->offset = 0;
 		io->size = 0;
-		io->error = 0;
-		INIT_WORK(&io->work, ext4_end_aio_dio_work);
+		io->page = NULL;
+		INIT_WORK(&io->work, ext4_end_io_work);
 		INIT_LIST_HEAD(&io->list);
 	}
 
@@ -3674,6 +3755,8 @@ static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset,
 {
 	ext4_io_end_t *io_end = iocb->private;
 	struct workqueue_struct *wq;
+	unsigned long flags;
+	struct ext4_inode_info *ei;
 
 	/* if not async direct IO or dio with 0 bytes write, just return */
 	if (!io_end || !size)
@@ -3685,7 +3768,7 @@ static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset,
 		  size);
 
 	/* if not aio dio with unwritten extents, just free io and return */
-	if (io_end->flag != DIO_AIO_UNWRITTEN){
+	if (io_end->flag != EXT4_IO_UNWRITTEN){
 		ext4_free_io_end(io_end);
 		iocb->private = NULL;
 		return;
@@ -3693,16 +3776,85 @@ static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset,
 
 	io_end->offset = offset;
 	io_end->size = size;
+	io_end->flag = EXT4_IO_UNWRITTEN;
 	wq = EXT4_SB(io_end->inode->i_sb)->dio_unwritten_wq;
 
 	/* queue the work to convert unwritten extents to written */
 	queue_work(wq, &io_end->work);
 
 	/* Add the io_end to per-inode completed aio dio list*/
-	list_add_tail(&io_end->list,
-		 &EXT4_I(io_end->inode)->i_aio_dio_complete_list);
+	ei = EXT4_I(io_end->inode);
+	spin_lock_irqsave(&ei->i_completed_io_lock, flags);
+	list_add_tail(&io_end->list, &ei->i_completed_io_list);
+	spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
 	iocb->private = NULL;
 }
+
+static void ext4_end_io_buffer_write(struct buffer_head *bh, int uptodate)
+{
+	ext4_io_end_t *io_end = bh->b_private;
+	struct workqueue_struct *wq;
+	struct inode *inode;
+	unsigned long flags;
+
+	if (!test_clear_buffer_uninit(bh) || !io_end)
+		goto out;
+
+	if (!(io_end->inode->i_sb->s_flags & MS_ACTIVE)) {
+		printk("sb umounted, discard end_io request for inode %lu\n",
+			io_end->inode->i_ino);
+		ext4_free_io_end(io_end);
+		goto out;
+	}
+
+	io_end->flag = EXT4_IO_UNWRITTEN;
+	inode = io_end->inode;
+
+	/* Add the io_end to per-inode completed io list*/
+	spin_lock_irqsave(&EXT4_I(inode)->i_completed_io_lock, flags);
+	list_add_tail(&io_end->list, &EXT4_I(inode)->i_completed_io_list);
+	spin_unlock_irqrestore(&EXT4_I(inode)->i_completed_io_lock, flags);
+
+	wq = EXT4_SB(inode->i_sb)->dio_unwritten_wq;
+	/* queue the work to convert unwritten extents to written */
+	queue_work(wq, &io_end->work);
+out:
+	bh->b_private = NULL;
+	bh->b_end_io = NULL;
+	clear_buffer_uninit(bh);
+	end_buffer_async_write(bh, uptodate);
+}
+
+static int ext4_set_bh_endio(struct buffer_head *bh, struct inode *inode)
+{
+	ext4_io_end_t *io_end;
+	struct page *page = bh->b_page;
+	loff_t offset = (sector_t)page->index << PAGE_CACHE_SHIFT;
+	size_t size = bh->b_size;
+
+retry:
+	io_end = ext4_init_io_end(inode, GFP_ATOMIC);
+	if (!io_end) {
+		if (printk_ratelimit())
+			printk(KERN_WARNING "%s: allocation fail\n", __func__);
+		schedule();
+		goto retry;
+	}
+	io_end->offset = offset;
+	io_end->size = size;
+	/*
+	 * We need to hold a reference to the page to make sure it
+	 * doesn't get evicted before ext4_end_io_work() has a chance
+	 * to convert the extent from unwritten to written.
+	 */
+	io_end->page = page;
+	get_page(io_end->page);
+
+	bh->b_private = io_end;
+	bh->b_end_io = ext4_end_io_buffer_write;
+	return 0;
+}
+
 /*
  * For ext4 extent files, ext4 will do direct-io write to holes,
  * preallocated extents, and those write extend the file, no need to
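ext4_set_bh_endio() above has no way to report failure to the writeback path, so it spins on GFP_ATOMIC allocation failure, yielding between attempts. A userspace sketch of the loop with a fake allocator that fails twice (all names illustrative):

#include <sched.h>
#include <stdio.h>
#include <stdlib.h>

static int failures = 2;

static void *try_alloc(size_t n)	/* fake: fails a few times, then works */
{
	return failures-- > 0 ? NULL : malloc(n);
}

int main(void)
{
	void *io_end;

retry:
	io_end = try_alloc(64);
	if (!io_end) {
		fprintf(stderr, "allocation fail, retrying\n");
		sched_yield();		/* analogous to schedule() */
		goto retry;
	}
	puts("allocated io_end");
	free(io_end);
	return 0;
}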
@@ -3756,7 +3908,7 @@ static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb,
 		iocb->private = NULL;
 		EXT4_I(inode)->cur_aio_dio = NULL;
 		if (!is_sync_kiocb(iocb)) {
-			iocb->private = ext4_init_io_end(inode);
+			iocb->private = ext4_init_io_end(inode, GFP_NOFS);
 			if (!iocb->private)
 				return -ENOMEM;
 			/*
@@ -3772,7 +3924,7 @@ static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb,
 		ret = blockdev_direct_IO(rw, iocb, inode,
 					 inode->i_sb->s_bdev, iov,
 					 offset, nr_segs,
-					 ext4_get_block_dio_write,
+					 ext4_get_block_write,
 					 ext4_end_io_dio);
 		if (iocb->private)
 			EXT4_I(inode)->cur_aio_dio = NULL;
@@ -3793,8 +3945,8 @@ static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb,
 		if (ret != -EIOCBQUEUED && ret <= 0 && iocb->private) {
 			ext4_free_io_end(iocb->private);
 			iocb->private = NULL;
-		} else if (ret > 0 && (EXT4_I(inode)->i_state &
-				       EXT4_STATE_DIO_UNWRITTEN)) {
+		} else if (ret > 0 && ext4_test_inode_state(inode,
+						EXT4_STATE_DIO_UNWRITTEN)) {
 			int err;
 			/*
 			 * for non AIO case, since the IO is already
@@ -3804,7 +3956,7 @@ static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb,
 							offset, ret);
 			if (err < 0)
 				ret = err;
-			EXT4_I(inode)->i_state &= ~EXT4_STATE_DIO_UNWRITTEN;
+			ext4_clear_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN);
 		}
 		return ret;
 	}
@@ -4135,18 +4287,27 @@ no_top:
 * We release `count' blocks on disk, but (last - first) may be greater
 * than `count' because there can be holes in there.
 */
-static void ext4_clear_blocks(handle_t *handle, struct inode *inode,
+static int ext4_clear_blocks(handle_t *handle, struct inode *inode,
 			      struct buffer_head *bh,
 			      ext4_fsblk_t block_to_free,
 			      unsigned long count, __le32 *first,
 			      __le32 *last)
 {
 	__le32 *p;
-	int flags = EXT4_FREE_BLOCKS_FORGET;
+	int flags = EXT4_FREE_BLOCKS_FORGET | EXT4_FREE_BLOCKS_VALIDATED;
 
 	if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
 		flags |= EXT4_FREE_BLOCKS_METADATA;
 
+	if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), block_to_free,
+				   count)) {
+		ext4_error(inode->i_sb, "inode #%lu: "
+			   "attempt to clear blocks %llu len %lu, invalid",
+			   inode->i_ino, (unsigned long long) block_to_free,
+			   count);
+		return 1;
+	}
+
 	if (try_to_extend_transaction(handle, inode)) {
 		if (bh) {
 			BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
@@ -4165,6 +4326,7 @@ static void ext4_clear_blocks(handle_t *handle, struct inode *inode,
 		*p = 0;
 
 	ext4_free_blocks(handle, inode, 0, block_to_free, count, flags);
+	return 0;
 }
 
 /**
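ext4_clear_blocks() now validates the range against the filesystem's system zones before freeing anything, and ext4_free_branches() below does the same for each indirect block. A sketch of the interval-overlap check such validation relies on (zone numbers are made up):

#include <stdio.h>

struct zone { unsigned long start, len; };

/* blocks that must never be freed as data (illustrative values) */
static const struct zone system_zones[] = { { 0, 64 }, { 1024, 32 } };

static int data_block_valid(unsigned long block, unsigned long count)
{
	unsigned int i;

	for (i = 0; i < sizeof(system_zones) / sizeof(*system_zones); i++) {
		const struct zone *z = &system_zones[i];

		/* standard half-open interval overlap test */
		if (block < z->start + z->len && z->start < block + count)
			return 0;
	}
	return 1;
}

int main(void)
{
	printf("%d\n", data_block_valid(100, 10));	/* 1: safe to free */
	printf("%d\n", data_block_valid(1040, 10));	/* 0: hits metadata */
	return 0;
}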
@@ -4220,9 +4382,10 @@ static void ext4_free_data(handle_t *handle, struct inode *inode,
 			} else if (nr == block_to_free + count) {
 				count++;
 			} else {
-				ext4_clear_blocks(handle, inode, this_bh,
-						  block_to_free,
-						  count, block_to_free_p, p);
+				if (ext4_clear_blocks(handle, inode, this_bh,
+						      block_to_free, count,
+						      block_to_free_p, p))
+					break;
 				block_to_free = nr;
 				block_to_free_p = p;
 				count = 1;
@@ -4246,7 +4409,7 @@ static void ext4_free_data(handle_t *handle, struct inode *inode,
 		if ((EXT4_JOURNAL(inode) == NULL) || bh2jh(this_bh))
 			ext4_handle_dirty_metadata(handle, inode, this_bh);
 		else
-			ext4_error(inode->i_sb, __func__,
+			ext4_error(inode->i_sb,
 				   "circular indirect block detected, "
 				   "inode=%lu, block=%llu",
 				   inode->i_ino,
@@ -4286,6 +4449,16 @@ static void ext4_free_branches(handle_t *handle, struct inode *inode,
 			if (!nr)
 				continue;		/* A hole */
 
+			if (!ext4_data_block_valid(EXT4_SB(inode->i_sb),
+						   nr, 1)) {
+				ext4_error(inode->i_sb,
+					   "indirect mapped block in inode "
+					   "#%lu invalid (level %d, blk #%lu)",
+					   inode->i_ino, depth,
+					   (unsigned long) nr);
+				break;
+			}
+
 			/* Go read the buffer for the next level down */
 			bh = sb_bread(inode->i_sb, nr);
 
@@ -4294,7 +4467,7 @@ static void ext4_free_branches(handle_t *handle, struct inode *inode,
 			 * (should be rare).
 			 */
 			if (!bh) {
-				ext4_error(inode->i_sb, "ext4_free_branches",
+				ext4_error(inode->i_sb,
 					   "Read failure, inode=%lu, block=%llu",
 					   inode->i_ino, nr);
 				continue;
@@ -4438,8 +4611,10 @@ void ext4_truncate(struct inode *inode)
 	if (!ext4_can_truncate(inode))
 		return;
 
+	EXT4_I(inode)->i_flags &= ~EXT4_EOFBLOCKS_FL;
+
 	if (inode->i_size == 0 && !test_opt(inode->i_sb, NO_AUTO_DA_ALLOC))
-		ei->i_state |= EXT4_STATE_DA_ALLOC_CLOSE;
+		ext4_set_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE);
 
 	if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) {
 		ext4_ext_truncate(inode);
@@ -4609,9 +4784,8 @@ static int __ext4_get_inode_loc(struct inode *inode,
 
 	bh = sb_getblk(sb, block);
 	if (!bh) {
-		ext4_error(sb, "ext4_get_inode_loc", "unable to read "
-			   "inode block - inode=%lu, block=%llu",
-			   inode->i_ino, block);
+		ext4_error(sb, "unable to read inode block - "
+			   "inode=%lu, block=%llu", inode->i_ino, block);
 		return -EIO;
 	}
 	if (!buffer_uptodate(bh)) {
@@ -4709,9 +4883,8 @@ make_io:
 		submit_bh(READ_META, bh);
 		wait_on_buffer(bh);
 		if (!buffer_uptodate(bh)) {
-			ext4_error(sb, __func__,
-				   "unable to read inode block - inode=%lu, "
-				   "block=%llu", inode->i_ino, block);
+			ext4_error(sb, "unable to read inode block - inode=%lu,"
+				   " block=%llu", inode->i_ino, block);
 			brelse(bh);
 			return -EIO;
 		}
@@ -4725,7 +4898,7 @@ int ext4_get_inode_loc(struct inode *inode, struct ext4_iloc *iloc)
 {
 	/* We have all inode data except xattrs in memory here. */
 	return __ext4_get_inode_loc(inode, iloc,
-		!(EXT4_I(inode)->i_state & EXT4_STATE_XATTR));
+		!ext4_test_inode_state(inode, EXT4_STATE_XATTR));
 }
 
 void ext4_set_inode_flags(struct inode *inode)
@@ -4819,7 +4992,7 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
 	}
 	inode->i_nlink = le16_to_cpu(raw_inode->i_links_count);
 
-	ei->i_state = 0;
+	ei->i_state_flags = 0;
 	ei->i_dir_start_lookup = 0;
 	ei->i_dtime = le32_to_cpu(raw_inode->i_dtime);
 	/* We now have enough fields to check if the inode was active or not.
@@ -4902,7 +5075,7 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
 			EXT4_GOOD_OLD_INODE_SIZE +
 			ei->i_extra_isize;
 		if (*magic == cpu_to_le32(EXT4_XATTR_MAGIC))
-			ei->i_state |= EXT4_STATE_XATTR;
+			ext4_set_inode_state(inode, EXT4_STATE_XATTR);
 		}
 	} else
 		ei->i_extra_isize = 0;
@@ -4922,8 +5095,7 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
 	ret = 0;
 	if (ei->i_file_acl &&
 	    !ext4_data_block_valid(EXT4_SB(sb), ei->i_file_acl, 1)) {
-		ext4_error(sb, __func__,
-			   "bad extended attribute block %llu in inode #%lu",
+		ext4_error(sb, "bad extended attribute block %llu inode #%lu",
 			   ei->i_file_acl, inode->i_ino);
 		ret = -EIO;
 		goto bad_inode;
@@ -4969,8 +5141,7 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
 		   new_decode_dev(le32_to_cpu(raw_inode->i_block[1])));
 	} else {
 		ret = -EIO;
-		ext4_error(inode->i_sb, __func__,
-			   "bogus i_mode (%o) for inode=%lu",
+		ext4_error(inode->i_sb, "bogus i_mode (%o) for inode=%lu",
 			   inode->i_mode, inode->i_ino);
 		goto bad_inode;
 	}
@@ -5042,7 +5213,7 @@ static int ext4_do_update_inode(handle_t *handle,
 
 	/* For fields not tracked in the in-memory inode,
 	 * initialise them to zero for new inodes. */
-	if (ei->i_state & EXT4_STATE_NEW)
+	if (ext4_test_inode_state(inode, EXT4_STATE_NEW))
 		memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size);
 
 	ext4_get_inode_flags(ei);
@@ -5106,7 +5277,7 @@ static int ext4_do_update_inode(handle_t *handle,
 				EXT4_FEATURE_RO_COMPAT_LARGE_FILE);
 			sb->s_dirt = 1;
 			ext4_handle_sync(handle);
-			err = ext4_handle_dirty_metadata(handle, inode,
+			err = ext4_handle_dirty_metadata(handle, NULL,
 					EXT4_SB(sb)->s_sbh);
 		}
 	}
@@ -5135,10 +5306,10 @@ static int ext4_do_update_inode(handle_t *handle,
 	}
 
 	BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
-	rc = ext4_handle_dirty_metadata(handle, inode, bh);
+	rc = ext4_handle_dirty_metadata(handle, NULL, bh);
 	if (!err)
 		err = rc;
-	ei->i_state &= ~EXT4_STATE_NEW;
+	ext4_clear_inode_state(inode, EXT4_STATE_NEW);
 
 	ext4_update_inode_fsync_trans(handle, inode, 0);
out_brelse:
@@ -5182,7 +5353,7 @@ out_brelse:
 * `stuff()' is running, and the new i_size will be lost. Plus the inode
 * will no longer be on the superblock's dirty inode list.
 */
-int ext4_write_inode(struct inode *inode, int wait)
+int ext4_write_inode(struct inode *inode, struct writeback_control *wbc)
{
 	int err;
 
@@ -5196,7 +5367,7 @@ int ext4_write_inode(struct inode *inode, int wait)
 		return -EIO;
 	}
 
-	if (!wait)
+	if (wbc->sync_mode != WB_SYNC_ALL)
 		return 0;
 
 	err = ext4_force_commit(inode->i_sb);
@@ -5206,13 +5377,11 @@ int ext4_write_inode(struct inode *inode, int wait)
 		err = ext4_get_inode_loc(inode, &iloc);
 		if (err)
 			return err;
-		if (wait)
+		if (wbc->sync_mode == WB_SYNC_ALL)
 			sync_dirty_buffer(iloc.bh);
 		if (buffer_req(iloc.bh) && !buffer_uptodate(iloc.bh)) {
-			ext4_error(inode->i_sb, __func__,
-				   "IO error syncing inode, "
-				   "inode=%lu, block=%llu",
-				   inode->i_ino,
+			ext4_error(inode->i_sb, "IO error syncing inode, "
+				   "inode=%lu, block=%llu", inode->i_ino,
 				   (unsigned long long)iloc.bh->b_blocknr);
 			err = -EIO;
 		}
@@ -5295,7 +5464,9 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr)
 	}
 
 	if (S_ISREG(inode->i_mode) &&
-	    attr->ia_valid & ATTR_SIZE && attr->ia_size < inode->i_size) {
+	    attr->ia_valid & ATTR_SIZE &&
+	    (attr->ia_size < inode->i_size ||
+	     (EXT4_I(inode)->i_flags & EXT4_EOFBLOCKS_FL))) {
 		handle_t *handle;
 
 		handle = ext4_journal_start(inode, 3);
@@ -5326,6 +5497,9 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr)
 				goto err_out;
 			}
 		}
+		/* ext4_truncate will clear the flag */
+		if ((EXT4_I(inode)->i_flags & EXT4_EOFBLOCKS_FL))
+			ext4_truncate(inode);
 	}
 
 	rc = inode_setattr(inode, attr);
@@ -5564,8 +5738,8 @@ static int ext4_expand_extra_isize(struct inode *inode,
 	entry = IFIRST(header);
 
 	/* No extended attributes present */
-	if (!(EXT4_I(inode)->i_state & EXT4_STATE_XATTR) ||
+	if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR) ||
 	    header->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC)) {
 		memset((void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE, 0,
 			new_extra_isize);
 		EXT4_I(inode)->i_extra_isize = new_extra_isize;
@@ -5609,7 +5783,7 @@ int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode)
 	err = ext4_reserve_inode_write(handle, inode, &iloc);
 	if (ext4_handle_valid(handle) &&
 	    EXT4_I(inode)->i_extra_isize < sbi->s_want_extra_isize &&
-	    !(EXT4_I(inode)->i_state & EXT4_STATE_NO_EXPAND)) {
+	    !ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND)) {
 		/*
 		 * We need extra buffer credits since we may write into EA block
 		 * with this same handle. If journal_extend fails, then it will
@@ -5623,10 +5797,11 @@ int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode)
 					sbi->s_want_extra_isize,
 					iloc, handle);
 			if (ret) {
-				EXT4_I(inode)->i_state |= EXT4_STATE_NO_EXPAND;
+				ext4_set_inode_state(inode,
+						     EXT4_STATE_NO_EXPAND);
 				if (mnt_count !=
 					le16_to_cpu(sbi->s_es->s_mnt_count)) {
-					ext4_warning(inode->i_sb, __func__,
+					ext4_warning(inode->i_sb,
 					"Unable to expand inode %lu. Delete"
 					" some EAs or run e2fsck.",
 					inode->i_ino);
@@ -5690,7 +5865,7 @@ static int ext4_pin_inode(handle_t *handle, struct inode *inode)
 		err = jbd2_journal_get_write_access(handle, iloc.bh);
 		if (!err)
 			err = ext4_handle_dirty_metadata(handle,
-							 inode,
+							 NULL,
 							 iloc.bh);
 		brelse(iloc.bh);
 	}
diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
index b63d193126db..016d0249294f 100644
--- a/fs/ext4/ioctl.c
+++ b/fs/ext4/ioctl.c
@@ -92,6 +92,15 @@ long ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 			flags &= ~EXT4_EXTENTS_FL;
 		}
 
+		if (flags & EXT4_EOFBLOCKS_FL) {
+			/* we don't support adding EOFBLOCKS flag */
+			if (!(oldflags & EXT4_EOFBLOCKS_FL)) {
+				err = -EOPNOTSUPP;
+				goto flags_out;
+			}
+		} else if (oldflags & EXT4_EOFBLOCKS_FL)
+			ext4_truncate(inode);
+
 		handle = ext4_journal_start(inode, 1);
 		if (IS_ERR(handle)) {
 			err = PTR_ERR(handle);
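The EOFBLOCKS_FL hunk above enforces a one-way transition: userspace may clear the flag through the flags ioctl (which truncates blocks preallocated past EOF), but may never set it. A compact sketch of the transition check (flag value and return code are illustrative):

#include <stdio.h>

#define EOFBLOCKS_FL 0x1

static int check_flag_transition(int oldflags, int newflags)
{
	/* setting EOFBLOCKS from userspace is not supported */
	if ((newflags & EOFBLOCKS_FL) && !(oldflags & EOFBLOCKS_FL))
		return -1;	/* stands in for -EOPNOTSUPP */
	if (!(newflags & EOFBLOCKS_FL) && (oldflags & EOFBLOCKS_FL))
		printf("clearing EOFBLOCKS: truncate blocks past EOF\n");
	return 0;
}

int main(void)
{
	printf("set:   %d\n", check_flag_transition(0, EOFBLOCKS_FL));
	printf("clear: %d\n", check_flag_transition(EOFBLOCKS_FL, 0));
	return 0;
}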
@@ -249,7 +258,8 @@ setversion_out:
 		if (me.moved_len > 0)
 			file_remove_suid(donor_filp);
 
-		if (copy_to_user((struct move_extent *)arg, &me, sizeof(me)))
+		if (copy_to_user((struct move_extent __user *)arg,
+				 &me, sizeof(me)))
 			err = -EFAULT;
mext_out:
 		fput(donor_filp);
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index 0b905781e8e6..506713a2ebd8 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -441,10 +441,9 @@ static void mb_free_blocks_double(struct inode *inode, struct ext4_buddy *e4b,
 	for (i = 0; i < count; i++) {
 		if (!mb_test_bit(first + i, e4b->bd_info->bb_bitmap)) {
 			ext4_fsblk_t blocknr;
-			blocknr = e4b->bd_group * EXT4_BLOCKS_PER_GROUP(sb);
+
+			blocknr = ext4_group_first_block_no(sb, e4b->bd_group);
 			blocknr += first + i;
-			blocknr +=
-			    le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block);
 			ext4_grp_locked_error(sb, e4b->bd_group,
 				   __func__, "double-free of inode"
 				   " %lu's block %llu(bit %u in group %u)",
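This and several later hunks replace the repeated three-term computation with ext4_group_first_block_no(): a group's first block is simply group * blocks-per-group plus the superblock's first data block. A tiny sketch of the arithmetic with made-up geometry (real values come from the superblock):

#include <stdio.h>

#define BLOCKS_PER_GROUP 32768UL
#define FIRST_DATA_BLOCK 1UL	/* 1 on 1K-block filesystems, else 0 */

/* what ext4_group_first_block_no() computes in one place */
static unsigned long group_first_block_no(unsigned long group)
{
	return group * BLOCKS_PER_GROUP + FIRST_DATA_BLOCK;
}

int main(void)
{
	/* absolute block number of bit 100 in group 3's bitmap */
	printf("%lu\n", group_first_block_no(3) + 100);	/* 98405 */
	return 0;
}

Centralizing the formula removes the easy-to-drop s_first_data_block term, which is exactly what the open-coded copies kept repeating.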
@@ -1255,10 +1254,9 @@ static void mb_free_blocks(struct inode *inode, struct ext4_buddy *e4b,
 
 		if (!mb_test_bit(block, EXT4_MB_BITMAP(e4b))) {
 			ext4_fsblk_t blocknr;
-			blocknr = e4b->bd_group * EXT4_BLOCKS_PER_GROUP(sb);
+
+			blocknr = ext4_group_first_block_no(sb, e4b->bd_group);
 			blocknr += block;
-			blocknr +=
-			    le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block);
 			ext4_grp_locked_error(sb, e4b->bd_group,
 				   __func__, "double-free of inode"
 				   " %lu's block %llu(bit %u in group %u)",
@@ -1631,7 +1629,6 @@ int ext4_mb_find_by_goal(struct ext4_allocation_context *ac,
 	int max;
 	int err;
 	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
-	struct ext4_super_block *es = sbi->s_es;
 	struct ext4_free_extent ex;
 
 	if (!(ac->ac_flags & EXT4_MB_HINT_TRY_GOAL))
@@ -1648,8 +1645,8 @@ int ext4_mb_find_by_goal(struct ext4_allocation_context *ac,
 	if (max >= ac->ac_g_ex.fe_len && ac->ac_g_ex.fe_len == sbi->s_stripe) {
 		ext4_fsblk_t start;
 
-		start = (e4b->bd_group * EXT4_BLOCKS_PER_GROUP(ac->ac_sb)) +
-			ex.fe_start + le32_to_cpu(es->s_first_data_block);
+		start = ext4_group_first_block_no(ac->ac_sb, e4b->bd_group) +
+			ex.fe_start;
 		/* use do_div to get remainder (would be 64-bit modulo) */
 		if (do_div(start, sbi->s_stripe) == 0) {
 			ac->ac_found++;
@@ -1803,8 +1800,8 @@ void ext4_mb_scan_aligned(struct ext4_allocation_context *ac,
 	BUG_ON(sbi->s_stripe == 0);
 
 	/* find first stripe-aligned block in group */
-	first_group_block = e4b->bd_group * EXT4_BLOCKS_PER_GROUP(sb)
-		+ le32_to_cpu(sbi->s_es->s_first_data_block);
+	first_group_block = ext4_group_first_block_no(sb, e4b->bd_group);
+
 	a = first_group_block + sbi->s_stripe - 1;
 	do_div(a, sbi->s_stripe);
 	i = (a * sbi->s_stripe) - first_group_block;
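ext4_mb_scan_aligned() above rounds the group's first block up to the next RAID-stripe boundary using integer arithmetic (do_div in the kernel, since the dividend is 64-bit). A userspace sketch of the same round-up with plain division (values are illustrative):

#include <stdio.h>

/* offset, inside the group, of the first stripe-aligned block */
static unsigned long first_aligned_offset(unsigned long first_group_block,
					  unsigned long stripe)
{
	unsigned long a = (first_group_block + stripe - 1) / stripe; /* ceil */

	return a * stripe - first_group_block;
}

int main(void)
{
	/* group starts at block 98305, 16-block stripes: next boundary 98320 */
	printf("%lu\n", first_aligned_offset(98305, 16));	/* 15 */
	return 0;
}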
@@ -2256,7 +2253,7 @@ int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group,
 
 	INIT_LIST_HEAD(&meta_group_info[i]->bb_prealloc_list);
 	init_rwsem(&meta_group_info[i]->alloc_sem);
-	meta_group_info[i]->bb_free_root.rb_node = NULL;
+	meta_group_info[i]->bb_free_root = RB_ROOT;
 
#ifdef DOUBLE_CHECK
 	{
@@ -2560,12 +2557,9 @@ static void release_blocks_on_commit(journal_t *journal, transaction_t *txn)
 		ext4_unlock_group(sb, entry->group);
 		if (test_opt(sb, DISCARD)) {
 			ext4_fsblk_t discard_block;
-			struct ext4_super_block *es = EXT4_SB(sb)->s_es;
 
-			discard_block = (ext4_fsblk_t)entry->group *
-						EXT4_BLOCKS_PER_GROUP(sb)
-					+ entry->start_blk
-					+ le32_to_cpu(es->s_first_data_block);
+			discard_block = entry->start_blk +
+				ext4_group_first_block_no(sb, entry->group);
 			trace_ext4_discard_blocks(sb,
 					(unsigned long long)discard_block,
 					entry->count);
@@ -2703,14 +2697,11 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
 	if (err)
 		goto out_err;
 
-	block = ac->ac_b_ex.fe_group * EXT4_BLOCKS_PER_GROUP(sb)
-		+ ac->ac_b_ex.fe_start
-		+ le32_to_cpu(es->s_first_data_block);
+	block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
 
 	len = ac->ac_b_ex.fe_len;
 	if (!ext4_data_block_valid(sbi, block, len)) {
-		ext4_error(sb, __func__,
-			   "Allocating blocks %llu-%llu which overlap "
+		ext4_error(sb, "Allocating blocks %llu-%llu which overlap "
			   "fs metadata\n", block, block+len);
 		/* File system mounted not to panic on error
 		 * Fix the bitmap and repeat the block allocation
@@ -3161,9 +3152,7 @@ ext4_mb_use_preallocated(struct ext4_allocation_context *ac)
 	/* The max size of hash table is PREALLOC_TB_SIZE */
 	order = PREALLOC_TB_SIZE - 1;
 
-	goal_block = ac->ac_g_ex.fe_group * EXT4_BLOCKS_PER_GROUP(ac->ac_sb) +
-		     ac->ac_g_ex.fe_start +
-		     le32_to_cpu(EXT4_SB(ac->ac_sb)->s_es->s_first_data_block);
+	goal_block = ext4_grp_offs_to_block(ac->ac_sb, &ac->ac_g_ex);
 	/*
 	 * search for the prealloc space that is having
 	 * minimal distance from the goal block.
@@ -3526,8 +3515,7 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
 		if (bit >= end)
 			break;
 		next = mb_find_next_bit(bitmap_bh->b_data, end, bit);
-		start = group * EXT4_BLOCKS_PER_GROUP(sb) + bit +
-				le32_to_cpu(sbi->s_es->s_first_data_block);
+		start = ext4_group_first_block_no(sb, group) + bit;
 		mb_debug(1, "    free preallocated %u/%u in group %u\n",
 			 (unsigned) start, (unsigned) next - bit,
 			 (unsigned) group);
@@ -3623,15 +3611,13 @@ ext4_mb_discard_group_preallocations(struct super_block *sb,
 
 	bitmap_bh = ext4_read_block_bitmap(sb, group);
 	if (bitmap_bh == NULL) {
-		ext4_error(sb, __func__, "Error in reading block "
-				"bitmap for %u", group);
+		ext4_error(sb, "Error reading block bitmap for %u", group);
 		return 0;
 	}
 
 	err = ext4_mb_load_buddy(sb, group, &e4b);
 	if (err) {
-		ext4_error(sb, __func__, "Error in loading buddy "
-				"information for %u", group);
+		ext4_error(sb, "Error loading buddy information for %u", group);
 		put_bh(bitmap_bh);
 		return 0;
 	}
@@ -3804,15 +3790,15 @@ repeat:
 
 		err = ext4_mb_load_buddy(sb, group, &e4b);
 		if (err) {
-			ext4_error(sb, __func__, "Error in loading buddy "
-					"information for %u", group);
+			ext4_error(sb, "Error loading buddy information for %u",
+					group);
 			continue;
 		}
 
 		bitmap_bh = ext4_read_block_bitmap(sb, group);
 		if (bitmap_bh == NULL) {
-			ext4_error(sb, __func__, "Error in reading block "
-					"bitmap for %u", group);
+			ext4_error(sb, "Error reading block bitmap for %u",
+					group);
 			ext4_mb_release_desc(&e4b);
 			continue;
 		}
@@ -3938,7 +3924,7 @@ static void ext4_mb_group_or_file(struct ext4_allocation_context *ac)
 
 	/* don't use group allocation for large files */
 	size = max(size, isize);
-	if (size >= sbi->s_mb_stream_request) {
+	if (size > sbi->s_mb_stream_request) {
 		ac->ac_flags |= EXT4_MB_STREAM_ALLOC;
 		return;
 	}
@@ -4077,8 +4063,8 @@ ext4_mb_discard_lg_preallocations(struct super_block *sb,
 
 		ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, NULL);
 		if (ext4_mb_load_buddy(sb, group, &e4b)) {
-			ext4_error(sb, __func__, "Error in loading buddy "
-					"information for %u", group);
+			ext4_error(sb, "Error loading buddy information for %u",
+					group);
 			continue;
 		}
 		ext4_lock_group(sb, group);
@@ -4476,10 +4462,10 @@ void ext4_free_blocks(handle_t *handle, struct inode *inode,
 
 	sbi = EXT4_SB(sb);
 	es = EXT4_SB(sb)->s_es;
-	if (!ext4_data_block_valid(sbi, block, count)) {
-		ext4_error(sb, __func__,
-			    "Freeing blocks not in datazone - "
-			    "block = %llu, count = %lu", block, count);
+	if (!(flags & EXT4_FREE_BLOCKS_VALIDATED) &&
+	    !ext4_data_block_valid(sbi, block, count)) {
+		ext4_error(sb, "Freeing blocks not in datazone - "
+			   "block = %llu, count = %lu", block, count);
 		goto error_return;
 	}
 
@@ -4547,8 +4533,7 @@ do_more:
 	    in_range(block + count - 1, ext4_inode_table(sb, gdp),
 		     EXT4_SB(sb)->s_itb_per_group)) {
 
-		ext4_error(sb, __func__,
-			   "Freeing blocks in system zone - "
+		ext4_error(sb, "Freeing blocks in system zone - "
 			   "Block = %llu, count = %lu", block, count);
 		/* err = 0. ext4_std_error should be a no op */
 		goto error_return;
diff --git a/fs/ext4/mballoc.h b/fs/ext4/mballoc.h
index 436521cae456..b619322c76f0 100644
--- a/fs/ext4/mballoc.h
+++ b/fs/ext4/mballoc.h
@@ -220,16 +220,9 @@ struct ext4_buddy {
#define EXT4_MB_BITMAP(e4b)	((e4b)->bd_bitmap)
#define EXT4_MB_BUDDY(e4b)	((e4b)->bd_buddy)
 
-#define in_range(b, first, len)	((b) >= (first) && (b) <= (first) + (len) - 1)
-
 static inline ext4_fsblk_t ext4_grp_offs_to_block(struct super_block *sb,
 					struct ext4_free_extent *fex)
 {
-	ext4_fsblk_t block;
-
-	block = (ext4_fsblk_t) fex->fe_group * EXT4_BLOCKS_PER_GROUP(sb)
-			+ fex->fe_start
-			+ le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block);
-	return block;
+	return ext4_group_first_block_no(sb, fex->fe_group) + fex->fe_start;
}
#endif
diff --git a/fs/ext4/migrate.c b/fs/ext4/migrate.c
index 81415814b00b..8b87bd0eac95 100644
--- a/fs/ext4/migrate.c
+++ b/fs/ext4/migrate.c
@@ -365,12 +365,12 @@ static int ext4_ext_swap_inode_data(handle_t *handle, struct inode *inode,
 	 * happened after we started the migrate. We need to
 	 * fail the migrate
 	 */
-	if (!(EXT4_I(inode)->i_state & EXT4_STATE_EXT_MIGRATE)) {
+	if (!ext4_test_inode_state(inode, EXT4_STATE_EXT_MIGRATE)) {
 		retval = -EAGAIN;
 		up_write(&EXT4_I(inode)->i_data_sem);
 		goto err_out;
 	} else
-		EXT4_I(inode)->i_state &= ~EXT4_STATE_EXT_MIGRATE;
+		ext4_clear_inode_state(inode, EXT4_STATE_EXT_MIGRATE);
 	/*
 	 * We have the extent map built with the tmp inode.
 	 * Now copy the i_data across
@@ -503,14 +503,10 @@ int ext4_ext_migrate(struct inode *inode)
 	}
 	i_size_write(tmp_inode, i_size_read(inode));
 	/*
-	 * We don't want the inode to be reclaimed
-	 * if we got interrupted in between. We have
-	 * this tmp inode carrying reference to the
-	 * data blocks of the original file. We set
-	 * the i_nlink to zero at the last stage after
-	 * switching the original file to extent format
+	 * Set the i_nlink to zero so it will be deleted later
+	 * when we drop inode reference.
 	 */
-	tmp_inode->i_nlink = 1;
+	tmp_inode->i_nlink = 0;
 
 	ext4_ext_tree_init(handle, tmp_inode);
 	ext4_orphan_add(handle, tmp_inode);
@@ -533,10 +529,20 @@ int ext4_ext_migrate(struct inode *inode)
 	 * allocation.
 	 */
 	down_read((&EXT4_I(inode)->i_data_sem));
-	EXT4_I(inode)->i_state |= EXT4_STATE_EXT_MIGRATE;
+	ext4_set_inode_state(inode, EXT4_STATE_EXT_MIGRATE);
 	up_read((&EXT4_I(inode)->i_data_sem));
 
 	handle = ext4_journal_start(inode, 1);
+	if (IS_ERR(handle)) {
+		/*
+		 * It is impossible to update on-disk structures without
+		 * a handle, so just roll back the in-core changes and leave
+		 * the rest of the work to orphan_list_cleanup()
+		 */
+		ext4_orphan_del(NULL, tmp_inode);
+		retval = PTR_ERR(handle);
+		goto out;
+	}
 
 	ei = EXT4_I(inode);
 	i_data = ei->i_data;
@@ -618,15 +624,8 @@ err_out:
 
 	/* Reset the extent details */
 	ext4_ext_tree_init(handle, tmp_inode);
-
-	/*
-	 * Set the i_nlink to zero so that
-	 * generic_drop_inode really deletes the
-	 * inode
-	 */
-	tmp_inode->i_nlink = 0;
-
 	ext4_journal_stop(handle);
+out:
 	unlock_new_inode(tmp_inode);
 	iput(tmp_inode);
 
diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c
index 82c415be87a4..aa5fe28d180f 100644
--- a/fs/ext4/move_extent.c
+++ b/fs/ext4/move_extent.c
@@ -152,12 +152,12 @@ mext_check_null_inode(struct inode *inode1, struct inode *inode2,
 	int ret = 0;
 
 	if (inode1 == NULL) {
-		ext4_error(inode2->i_sb, function,
+		__ext4_error(inode2->i_sb, function,
 			"Both inodes should not be NULL: "
 			"inode1 NULL inode2 %lu", inode2->i_ino);
 		ret = -EIO;
 	} else if (inode2 == NULL) {
-		ext4_error(inode1->i_sb, function,
+		__ext4_error(inode1->i_sb, function,
 			"Both inodes should not be NULL: "
 			"inode1 %lu inode2 NULL", inode1->i_ino);
 		ret = -EIO;
@@ -252,6 +252,7 @@ mext_insert_across_blocks(handle_t *handle, struct inode *orig_inode,
 		}
 
 		o_start->ee_len = start_ext->ee_len;
+		eblock = le32_to_cpu(start_ext->ee_block);
 		new_flag = 1;
 
 	} else if (start_ext->ee_len && new_ext->ee_len &&
@@ -262,6 +263,7 @@ mext_insert_across_blocks(handle_t *handle, struct inode *orig_inode,
 		 * orig  |------------------------------|
 		 */
 		o_start->ee_len = start_ext->ee_len;
+		eblock = le32_to_cpu(start_ext->ee_block);
 		new_flag = 1;
 
 	} else if (!start_ext->ee_len && new_ext->ee_len &&
@@ -475,7 +477,6 @@ mext_leaf_block(handle_t *handle, struct inode *orig_inode,
 	struct ext4_extent *oext, *o_start, *o_end, *prev_ext;
 	struct ext4_extent new_ext, start_ext, end_ext;
 	ext4_lblk_t new_ext_end;
-	ext4_fsblk_t new_phys_end;
 	int oext_alen, new_ext_alen, end_ext_alen;
 	int depth = ext_depth(orig_inode);
 	int ret;
@@ -489,7 +490,6 @@ mext_leaf_block(handle_t *handle, struct inode *orig_inode,
 	new_ext.ee_len = dext->ee_len;
 	new_ext_alen = ext4_ext_get_actual_len(&new_ext);
 	new_ext_end = le32_to_cpu(new_ext.ee_block) + new_ext_alen - 1;
-	new_phys_end = ext_pblock(&new_ext) + new_ext_alen - 1;
 
 	/*
 	 * Case: original extent is first
@@ -502,6 +502,7 @@ mext_leaf_block(handle_t *handle, struct inode *orig_inode,
 			le32_to_cpu(oext->ee_block) + oext_alen) {
 		start_ext.ee_len = cpu_to_le16(le32_to_cpu(new_ext.ee_block) -
 					       le32_to_cpu(oext->ee_block));
+		start_ext.ee_block = oext->ee_block;
 		copy_extent_status(oext, &start_ext);
 	} else if (oext > EXT_FIRST_EXTENT(orig_path[depth].p_hdr)) {
 		prev_ext = oext - 1;
@@ -515,6 +516,7 @@ mext_leaf_block(handle_t *handle, struct inode *orig_inode,
 			start_ext.ee_len = cpu_to_le16(
 				ext4_ext_get_actual_len(prev_ext) +
 				new_ext_alen);
+			start_ext.ee_block = oext->ee_block;
 			copy_extent_status(prev_ext, &start_ext);
 			new_ext.ee_len = 0;
 		}
@@ -526,7 +528,7 @@ mext_leaf_block(handle_t *handle, struct inode *orig_inode,
 	 * new_ext       |-------|
 	 */
 	if (le32_to_cpu(oext->ee_block) + oext_alen - 1 < new_ext_end) {
-		ext4_error(orig_inode->i_sb, __func__,
+		ext4_error(orig_inode->i_sb,
 			"new_ext_end(%u) should be less than or equal to "
 			"oext->ee_block(%u) + oext_alen(%d) - 1",
 			new_ext_end, le32_to_cpu(oext->ee_block),
@@ -689,12 +691,12 @@ mext_replace_branches(handle_t *handle, struct inode *orig_inode,
 	while (1) {
 		/* The extent for donor must be found. */
 		if (!dext) {
-			ext4_error(donor_inode->i_sb, __func__,
+			ext4_error(donor_inode->i_sb,
 				   "The extent for donor must be found");
 			*err = -EIO;
 			goto out;
 		} else if (donor_off != le32_to_cpu(tmp_dext.ee_block)) {
-			ext4_error(donor_inode->i_sb, __func__,
+			ext4_error(donor_inode->i_sb,
 				"Donor offset(%u) and the first block of donor "
 				"extent(%u) should be equal",
 				donor_off,
@@ -928,7 +930,7 @@ out2:
 }
 
/**
- * mext_check_argumants - Check whether move extent can be done
+ * mext_check_arguments - Check whether move extent can be done
 *
 * @orig_inode:		original inode
 * @donor_inode:	donor inode
@@ -949,14 +951,6 @@ mext_check_arguments(struct inode *orig_inode,
949 unsigned int blkbits = orig_inode->i_blkbits; 951 unsigned int blkbits = orig_inode->i_blkbits;
950 unsigned int blocksize = 1 << blkbits; 952 unsigned int blocksize = 1 << blkbits;
951 953
952 /* Regular file check */
953 if (!S_ISREG(orig_inode->i_mode) || !S_ISREG(donor_inode->i_mode)) {
954 ext4_debug("ext4 move extent: The argument files should be "
955 "regular file [ino:orig %lu, donor %lu]\n",
956 orig_inode->i_ino, donor_inode->i_ino);
957 return -EINVAL;
958 }
959
960 if (donor_inode->i_mode & (S_ISUID|S_ISGID)) { 954 if (donor_inode->i_mode & (S_ISUID|S_ISGID)) {
961 ext4_debug("ext4 move extent: suid or sgid is set" 955 ext4_debug("ext4 move extent: suid or sgid is set"
962 " to donor file [ino:orig %lu, donor %lu]\n", 956 " to donor file [ino:orig %lu, donor %lu]\n",
@@ -1204,6 +1198,14 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp,
1204 return -EINVAL; 1198 return -EINVAL;
1205 } 1199 }
1206 1200
1201 /* Regular file check */
1202 if (!S_ISREG(orig_inode->i_mode) || !S_ISREG(donor_inode->i_mode)) {
1203 ext4_debug("ext4 move extent: The argument files should be "
1204 "regular file [ino:orig %lu, donor %lu]\n",
1205 orig_inode->i_ino, donor_inode->i_ino);
1206 return -EINVAL;
1207 }
1208
1207 /* Protect orig and donor inodes against a truncate */ 1209 /* Protect orig and donor inodes against a truncate */
1208 ret1 = mext_inode_double_lock(orig_inode, donor_inode); 1210 ret1 = mext_inode_double_lock(orig_inode, donor_inode);
1209 if (ret1 < 0) 1211 if (ret1 < 0)
@@ -1351,7 +1353,7 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp,
1351 if (ret1 < 0) 1353 if (ret1 < 0)
1352 break; 1354 break;
1353 if (*moved_len > len) { 1355 if (*moved_len > len) {
1354 ext4_error(orig_inode->i_sb, __func__, 1356 ext4_error(orig_inode->i_sb,
1355 "We replaced blocks too much! " 1357 "We replaced blocks too much! "
1356 "sum of replaced: %llu requested: %llu", 1358 "sum of replaced: %llu requested: %llu",
1357 *moved_len, len); 1359 *moved_len, len);
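Two themes in the move_extent.c hunks above recur through the rest of this patch: ext4_error() and ext4_warning() callers stop passing __func__ explicitly, and the regular-file check moves out of mext_check_arguments() into ext4_move_extents(), so it now runs before the two inodes are locked rather than after. The super.c hunks below rename the underlying functions to __ext4_error()/__ext4_warning(), which implies the public names become macros that splice in __func__ at each call site. The ext4.h side of that change is not part of this section; a plausible sketch of it, in C:

/*
 * Sketch only -- the real fs/ext4/ext4.h hunk is not shown here. The
 * renamed __ext4_error()/__ext4_warning() in super.c suggest wrappers
 * of roughly this shape, capturing __func__ automatically:
 */
#define ext4_error(sb, fmt, ...)					\
	__ext4_error((sb), __func__, (fmt), ##__VA_ARGS__)
#define ext4_warning(sb, fmt, ...)					\
	__ext4_warning((sb), __func__, (fmt), ##__VA_ARGS__)

With wrappers like these, every call simplified in this patch still logs the enclosing function name; only the call sites get shorter.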
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index 7f3d2d75a0dc..0c070fabd108 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -383,8 +383,7 @@ dx_probe(const struct qstr *d_name, struct inode *dir,
383 if (root->info.hash_version != DX_HASH_TEA && 383 if (root->info.hash_version != DX_HASH_TEA &&
384 root->info.hash_version != DX_HASH_HALF_MD4 && 384 root->info.hash_version != DX_HASH_HALF_MD4 &&
385 root->info.hash_version != DX_HASH_LEGACY) { 385 root->info.hash_version != DX_HASH_LEGACY) {
386 ext4_warning(dir->i_sb, __func__, 386 ext4_warning(dir->i_sb, "Unrecognised inode hash code %d",
387 "Unrecognised inode hash code %d",
388 root->info.hash_version); 387 root->info.hash_version);
389 brelse(bh); 388 brelse(bh);
390 *err = ERR_BAD_DX_DIR; 389 *err = ERR_BAD_DX_DIR;
@@ -399,8 +398,7 @@ dx_probe(const struct qstr *d_name, struct inode *dir,
399 hash = hinfo->hash; 398 hash = hinfo->hash;
400 399
401 if (root->info.unused_flags & 1) { 400 if (root->info.unused_flags & 1) {
402 ext4_warning(dir->i_sb, __func__, 401 ext4_warning(dir->i_sb, "Unimplemented inode hash flags: %#06x",
403 "Unimplemented inode hash flags: %#06x",
404 root->info.unused_flags); 402 root->info.unused_flags);
405 brelse(bh); 403 brelse(bh);
406 *err = ERR_BAD_DX_DIR; 404 *err = ERR_BAD_DX_DIR;
@@ -408,8 +406,7 @@ dx_probe(const struct qstr *d_name, struct inode *dir,
408 } 406 }
409 407
410 if ((indirect = root->info.indirect_levels) > 1) { 408 if ((indirect = root->info.indirect_levels) > 1) {
411 ext4_warning(dir->i_sb, __func__, 409 ext4_warning(dir->i_sb, "Unimplemented inode hash depth: %#06x",
412 "Unimplemented inode hash depth: %#06x",
413 root->info.indirect_levels); 410 root->info.indirect_levels);
414 brelse(bh); 411 brelse(bh);
415 *err = ERR_BAD_DX_DIR; 412 *err = ERR_BAD_DX_DIR;
@@ -421,8 +418,7 @@ dx_probe(const struct qstr *d_name, struct inode *dir,
421 418
422 if (dx_get_limit(entries) != dx_root_limit(dir, 419 if (dx_get_limit(entries) != dx_root_limit(dir,
423 root->info.info_length)) { 420 root->info.info_length)) {
424 ext4_warning(dir->i_sb, __func__, 421 ext4_warning(dir->i_sb, "dx entry: limit != root limit");
425 "dx entry: limit != root limit");
426 brelse(bh); 422 brelse(bh);
427 *err = ERR_BAD_DX_DIR; 423 *err = ERR_BAD_DX_DIR;
428 goto fail; 424 goto fail;
@@ -433,7 +429,7 @@ dx_probe(const struct qstr *d_name, struct inode *dir,
433 { 429 {
434 count = dx_get_count(entries); 430 count = dx_get_count(entries);
435 if (!count || count > dx_get_limit(entries)) { 431 if (!count || count > dx_get_limit(entries)) {
436 ext4_warning(dir->i_sb, __func__, 432 ext4_warning(dir->i_sb,
437 "dx entry: no count or count > limit"); 433 "dx entry: no count or count > limit");
438 brelse(bh); 434 brelse(bh);
439 *err = ERR_BAD_DX_DIR; 435 *err = ERR_BAD_DX_DIR;
@@ -478,7 +474,7 @@ dx_probe(const struct qstr *d_name, struct inode *dir,
478 goto fail2; 474 goto fail2;
479 at = entries = ((struct dx_node *) bh->b_data)->entries; 475 at = entries = ((struct dx_node *) bh->b_data)->entries;
480 if (dx_get_limit(entries) != dx_node_limit (dir)) { 476 if (dx_get_limit(entries) != dx_node_limit (dir)) {
481 ext4_warning(dir->i_sb, __func__, 477 ext4_warning(dir->i_sb,
482 "dx entry: limit != node limit"); 478 "dx entry: limit != node limit");
483 brelse(bh); 479 brelse(bh);
484 *err = ERR_BAD_DX_DIR; 480 *err = ERR_BAD_DX_DIR;
@@ -494,7 +490,7 @@ fail2:
494 } 490 }
495fail: 491fail:
496 if (*err == ERR_BAD_DX_DIR) 492 if (*err == ERR_BAD_DX_DIR)
497 ext4_warning(dir->i_sb, __func__, 493 ext4_warning(dir->i_sb,
498 "Corrupt dir inode %ld, running e2fsck is " 494 "Corrupt dir inode %ld, running e2fsck is "
499 "recommended.", dir->i_ino); 495 "recommended.", dir->i_ino);
500 return NULL; 496 return NULL;
@@ -947,9 +943,8 @@ restart:
947 wait_on_buffer(bh); 943 wait_on_buffer(bh);
948 if (!buffer_uptodate(bh)) { 944 if (!buffer_uptodate(bh)) {
949 /* read error, skip block & hope for the best */ 945 /* read error, skip block & hope for the best */
950 ext4_error(sb, __func__, "reading directory #%lu " 946 ext4_error(sb, "reading directory #%lu offset %lu",
951 "offset %lu", dir->i_ino, 947 dir->i_ino, (unsigned long)block);
952 (unsigned long)block);
953 brelse(bh); 948 brelse(bh);
954 goto next; 949 goto next;
955 } 950 }
@@ -1041,7 +1036,7 @@ static struct buffer_head * ext4_dx_find_entry(struct inode *dir, const struct q
1041 retval = ext4_htree_next_block(dir, hash, frame, 1036 retval = ext4_htree_next_block(dir, hash, frame,
1042 frames, NULL); 1037 frames, NULL);
1043 if (retval < 0) { 1038 if (retval < 0) {
1044 ext4_warning(sb, __func__, 1039 ext4_warning(sb,
1045 "error reading index page in directory #%lu", 1040 "error reading index page in directory #%lu",
1046 dir->i_ino); 1041 dir->i_ino);
1047 *err = retval; 1042 *err = retval;
@@ -1071,14 +1066,13 @@ static struct dentry *ext4_lookup(struct inode *dir, struct dentry *dentry, stru
1071 __u32 ino = le32_to_cpu(de->inode); 1066 __u32 ino = le32_to_cpu(de->inode);
1072 brelse(bh); 1067 brelse(bh);
1073 if (!ext4_valid_inum(dir->i_sb, ino)) { 1068 if (!ext4_valid_inum(dir->i_sb, ino)) {
1074 ext4_error(dir->i_sb, "ext4_lookup", 1069 ext4_error(dir->i_sb, "bad inode number: %u", ino);
1075 "bad inode number: %u", ino);
1076 return ERR_PTR(-EIO); 1070 return ERR_PTR(-EIO);
1077 } 1071 }
1078 inode = ext4_iget(dir->i_sb, ino); 1072 inode = ext4_iget(dir->i_sb, ino);
1079 if (unlikely(IS_ERR(inode))) { 1073 if (unlikely(IS_ERR(inode))) {
1080 if (PTR_ERR(inode) == -ESTALE) { 1074 if (PTR_ERR(inode) == -ESTALE) {
1081 ext4_error(dir->i_sb, __func__, 1075 ext4_error(dir->i_sb,
1082 "deleted inode referenced: %u", 1076 "deleted inode referenced: %u",
1083 ino); 1077 ino);
1084 return ERR_PTR(-EIO); 1078 return ERR_PTR(-EIO);
@@ -1110,7 +1104,7 @@ struct dentry *ext4_get_parent(struct dentry *child)
1110 brelse(bh); 1104 brelse(bh);
1111 1105
1112 if (!ext4_valid_inum(child->d_inode->i_sb, ino)) { 1106 if (!ext4_valid_inum(child->d_inode->i_sb, ino)) {
1113 ext4_error(child->d_inode->i_sb, "ext4_get_parent", 1107 ext4_error(child->d_inode->i_sb,
1114 "bad inode number: %u", ino); 1108 "bad inode number: %u", ino);
1115 return ERR_PTR(-EIO); 1109 return ERR_PTR(-EIO);
1116 } 1110 }
@@ -1410,7 +1404,7 @@ static int make_indexed_dir(handle_t *handle, struct dentry *dentry,
1410 de = (struct ext4_dir_entry_2 *)((char *)fde + 1404 de = (struct ext4_dir_entry_2 *)((char *)fde +
1411 ext4_rec_len_from_disk(fde->rec_len, blocksize)); 1405 ext4_rec_len_from_disk(fde->rec_len, blocksize));
1412 if ((char *) de >= (((char *) root) + blocksize)) { 1406 if ((char *) de >= (((char *) root) + blocksize)) {
1413 ext4_error(dir->i_sb, __func__, 1407 ext4_error(dir->i_sb,
1414 "invalid rec_len for '..' in inode %lu", 1408 "invalid rec_len for '..' in inode %lu",
1415 dir->i_ino); 1409 dir->i_ino);
1416 brelse(bh); 1410 brelse(bh);
@@ -1575,8 +1569,7 @@ static int ext4_dx_add_entry(handle_t *handle, struct dentry *dentry,
1575 1569
1576 if (levels && (dx_get_count(frames->entries) == 1570 if (levels && (dx_get_count(frames->entries) ==
1577 dx_get_limit(frames->entries))) { 1571 dx_get_limit(frames->entries))) {
1578 ext4_warning(sb, __func__, 1572 ext4_warning(sb, "Directory index full!");
1579 "Directory index full!");
1580 err = -ENOSPC; 1573 err = -ENOSPC;
1581 goto cleanup; 1574 goto cleanup;
1582 } 1575 }
@@ -1922,11 +1915,11 @@ static int empty_dir(struct inode *inode)
1922 if (inode->i_size < EXT4_DIR_REC_LEN(1) + EXT4_DIR_REC_LEN(2) || 1915 if (inode->i_size < EXT4_DIR_REC_LEN(1) + EXT4_DIR_REC_LEN(2) ||
1923 !(bh = ext4_bread(NULL, inode, 0, 0, &err))) { 1916 !(bh = ext4_bread(NULL, inode, 0, 0, &err))) {
1924 if (err) 1917 if (err)
1925 ext4_error(inode->i_sb, __func__, 1918 ext4_error(inode->i_sb,
1926 "error %d reading directory #%lu offset 0", 1919 "error %d reading directory #%lu offset 0",
1927 err, inode->i_ino); 1920 err, inode->i_ino);
1928 else 1921 else
1929 ext4_warning(inode->i_sb, __func__, 1922 ext4_warning(inode->i_sb,
1930 "bad directory (dir #%lu) - no data block", 1923 "bad directory (dir #%lu) - no data block",
1931 inode->i_ino); 1924 inode->i_ino);
1932 return 1; 1925 return 1;
@@ -1937,7 +1930,7 @@ static int empty_dir(struct inode *inode)
1937 !le32_to_cpu(de1->inode) || 1930 !le32_to_cpu(de1->inode) ||
1938 strcmp(".", de->name) || 1931 strcmp(".", de->name) ||
1939 strcmp("..", de1->name)) { 1932 strcmp("..", de1->name)) {
1940 ext4_warning(inode->i_sb, "empty_dir", 1933 ext4_warning(inode->i_sb,
1941 "bad directory (dir #%lu) - no `.' or `..'", 1934 "bad directory (dir #%lu) - no `.' or `..'",
1942 inode->i_ino); 1935 inode->i_ino);
1943 brelse(bh); 1936 brelse(bh);
@@ -1955,7 +1948,7 @@ static int empty_dir(struct inode *inode)
1955 offset >> EXT4_BLOCK_SIZE_BITS(sb), 0, &err); 1948 offset >> EXT4_BLOCK_SIZE_BITS(sb), 0, &err);
1956 if (!bh) { 1949 if (!bh) {
1957 if (err) 1950 if (err)
1958 ext4_error(sb, __func__, 1951 ext4_error(sb,
1959 "error %d reading directory" 1952 "error %d reading directory"
1960 " #%lu offset %u", 1953 " #%lu offset %u",
1961 err, inode->i_ino, offset); 1954 err, inode->i_ino, offset);
@@ -2026,11 +2019,18 @@ int ext4_orphan_add(handle_t *handle, struct inode *inode)
2026 err = ext4_reserve_inode_write(handle, inode, &iloc); 2019 err = ext4_reserve_inode_write(handle, inode, &iloc);
2027 if (err) 2020 if (err)
2028 goto out_unlock; 2021 goto out_unlock;
2022 /*
2023 * Due to previous errors inode may be already a part of on-disk
2024 * orphan list. If so skip on-disk list modification.
2025 */
2026 if (NEXT_ORPHAN(inode) && NEXT_ORPHAN(inode) <=
2027 (le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count)))
2028 goto mem_insert;
2029 2029
2030 /* Insert this inode at the head of the on-disk orphan list... */ 2030 /* Insert this inode at the head of the on-disk orphan list... */
2031 NEXT_ORPHAN(inode) = le32_to_cpu(EXT4_SB(sb)->s_es->s_last_orphan); 2031 NEXT_ORPHAN(inode) = le32_to_cpu(EXT4_SB(sb)->s_es->s_last_orphan);
2032 EXT4_SB(sb)->s_es->s_last_orphan = cpu_to_le32(inode->i_ino); 2032 EXT4_SB(sb)->s_es->s_last_orphan = cpu_to_le32(inode->i_ino);
2033 err = ext4_handle_dirty_metadata(handle, inode, EXT4_SB(sb)->s_sbh); 2033 err = ext4_handle_dirty_metadata(handle, NULL, EXT4_SB(sb)->s_sbh);
2034 rc = ext4_mark_iloc_dirty(handle, inode, &iloc); 2034 rc = ext4_mark_iloc_dirty(handle, inode, &iloc);
2035 if (!err) 2035 if (!err)
2036 err = rc; 2036 err = rc;
@@ -2043,6 +2043,7 @@ int ext4_orphan_add(handle_t *handle, struct inode *inode)
2043 * 2043 *
2044 * This is safe: on error we're going to ignore the orphan list 2044 * This is safe: on error we're going to ignore the orphan list
2045 * anyway on the next recovery. */ 2045 * anyway on the next recovery. */
2046mem_insert:
2046 if (!err) 2047 if (!err)
2047 list_add(&EXT4_I(inode)->i_orphan, &EXT4_SB(sb)->s_orphan); 2048 list_add(&EXT4_I(inode)->i_orphan, &EXT4_SB(sb)->s_orphan);
2048 2049
@@ -2102,7 +2103,7 @@ int ext4_orphan_del(handle_t *handle, struct inode *inode)
2102 if (err) 2103 if (err)
2103 goto out_brelse; 2104 goto out_brelse;
2104 sbi->s_es->s_last_orphan = cpu_to_le32(ino_next); 2105 sbi->s_es->s_last_orphan = cpu_to_le32(ino_next);
2105 err = ext4_handle_dirty_metadata(handle, inode, sbi->s_sbh); 2106 err = ext4_handle_dirty_metadata(handle, NULL, sbi->s_sbh);
2106 } else { 2107 } else {
2107 struct ext4_iloc iloc2; 2108 struct ext4_iloc iloc2;
2108 struct inode *i_prev = 2109 struct inode *i_prev =
@@ -2171,7 +2172,7 @@ static int ext4_rmdir(struct inode *dir, struct dentry *dentry)
2171 if (retval) 2172 if (retval)
2172 goto end_rmdir; 2173 goto end_rmdir;
2173 if (!EXT4_DIR_LINK_EMPTY(inode)) 2174 if (!EXT4_DIR_LINK_EMPTY(inode))
2174 ext4_warning(inode->i_sb, "ext4_rmdir", 2175 ext4_warning(inode->i_sb,
2175 "empty directory has too many links (%d)", 2176 "empty directory has too many links (%d)",
2176 inode->i_nlink); 2177 inode->i_nlink);
2177 inode->i_version++; 2178 inode->i_version++;
@@ -2225,7 +2226,7 @@ static int ext4_unlink(struct inode *dir, struct dentry *dentry)
2225 goto end_unlink; 2226 goto end_unlink;
2226 2227
2227 if (!inode->i_nlink) { 2228 if (!inode->i_nlink) {
2228 ext4_warning(inode->i_sb, "ext4_unlink", 2229 ext4_warning(inode->i_sb,
2229 "Deleting nonexistent file (%lu), %d", 2230 "Deleting nonexistent file (%lu), %d",
2230 inode->i_ino, inode->i_nlink); 2231 inode->i_ino, inode->i_nlink);
2231 inode->i_nlink = 1; 2232 inode->i_nlink = 1;
@@ -2479,7 +2480,7 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
2479 } 2480 }
2480 } 2481 }
2481 if (retval) { 2482 if (retval) {
2482 ext4_warning(old_dir->i_sb, "ext4_rename", 2483 ext4_warning(old_dir->i_sb,
2483 "Deleting old file (%lu), %d, error=%d", 2484 "Deleting old file (%lu), %d, error=%d",
2484 old_dir->i_ino, old_dir->i_nlink, retval); 2485 old_dir->i_ino, old_dir->i_nlink, retval);
2485 } 2486 }
diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
index 3b2c5541d8a6..5692c48754a0 100644
--- a/fs/ext4/resize.c
+++ b/fs/ext4/resize.c
@@ -48,65 +48,54 @@ static int verify_group_input(struct super_block *sb,
48 48
49 ext4_get_group_no_and_offset(sb, start, NULL, &offset); 49 ext4_get_group_no_and_offset(sb, start, NULL, &offset);
50 if (group != sbi->s_groups_count) 50 if (group != sbi->s_groups_count)
51 ext4_warning(sb, __func__, 51 ext4_warning(sb, "Cannot add at group %u (only %u groups)",
52 "Cannot add at group %u (only %u groups)",
53 input->group, sbi->s_groups_count); 52 input->group, sbi->s_groups_count);
54 else if (offset != 0) 53 else if (offset != 0)
55 ext4_warning(sb, __func__, "Last group not full"); 54 ext4_warning(sb, "Last group not full");
56 else if (input->reserved_blocks > input->blocks_count / 5) 55 else if (input->reserved_blocks > input->blocks_count / 5)
57 ext4_warning(sb, __func__, "Reserved blocks too high (%u)", 56 ext4_warning(sb, "Reserved blocks too high (%u)",
58 input->reserved_blocks); 57 input->reserved_blocks);
59 else if (free_blocks_count < 0) 58 else if (free_blocks_count < 0)
60 ext4_warning(sb, __func__, "Bad blocks count %u", 59 ext4_warning(sb, "Bad blocks count %u",
61 input->blocks_count); 60 input->blocks_count);
62 else if (!(bh = sb_bread(sb, end - 1))) 61 else if (!(bh = sb_bread(sb, end - 1)))
63 ext4_warning(sb, __func__, 62 ext4_warning(sb, "Cannot read last block (%llu)",
64 "Cannot read last block (%llu)",
65 end - 1); 63 end - 1);
66 else if (outside(input->block_bitmap, start, end)) 64 else if (outside(input->block_bitmap, start, end))
67 ext4_warning(sb, __func__, 65 ext4_warning(sb, "Block bitmap not in group (block %llu)",
68 "Block bitmap not in group (block %llu)",
69 (unsigned long long)input->block_bitmap); 66 (unsigned long long)input->block_bitmap);
70 else if (outside(input->inode_bitmap, start, end)) 67 else if (outside(input->inode_bitmap, start, end))
71 ext4_warning(sb, __func__, 68 ext4_warning(sb, "Inode bitmap not in group (block %llu)",
72 "Inode bitmap not in group (block %llu)",
73 (unsigned long long)input->inode_bitmap); 69 (unsigned long long)input->inode_bitmap);
74 else if (outside(input->inode_table, start, end) || 70 else if (outside(input->inode_table, start, end) ||
75 outside(itend - 1, start, end)) 71 outside(itend - 1, start, end))
76 ext4_warning(sb, __func__, 72 ext4_warning(sb, "Inode table not in group (blocks %llu-%llu)",
77 "Inode table not in group (blocks %llu-%llu)",
78 (unsigned long long)input->inode_table, itend - 1); 73 (unsigned long long)input->inode_table, itend - 1);
79 else if (input->inode_bitmap == input->block_bitmap) 74 else if (input->inode_bitmap == input->block_bitmap)
80 ext4_warning(sb, __func__, 75 ext4_warning(sb, "Block bitmap same as inode bitmap (%llu)",
81 "Block bitmap same as inode bitmap (%llu)",
82 (unsigned long long)input->block_bitmap); 76 (unsigned long long)input->block_bitmap);
83 else if (inside(input->block_bitmap, input->inode_table, itend)) 77 else if (inside(input->block_bitmap, input->inode_table, itend))
84 ext4_warning(sb, __func__, 78 ext4_warning(sb, "Block bitmap (%llu) in inode table "
85 "Block bitmap (%llu) in inode table (%llu-%llu)", 79 "(%llu-%llu)",
86 (unsigned long long)input->block_bitmap, 80 (unsigned long long)input->block_bitmap,
87 (unsigned long long)input->inode_table, itend - 1); 81 (unsigned long long)input->inode_table, itend - 1);
88 else if (inside(input->inode_bitmap, input->inode_table, itend)) 82 else if (inside(input->inode_bitmap, input->inode_table, itend))
89 ext4_warning(sb, __func__, 83 ext4_warning(sb, "Inode bitmap (%llu) in inode table "
90 "Inode bitmap (%llu) in inode table (%llu-%llu)", 84 "(%llu-%llu)",
91 (unsigned long long)input->inode_bitmap, 85 (unsigned long long)input->inode_bitmap,
92 (unsigned long long)input->inode_table, itend - 1); 86 (unsigned long long)input->inode_table, itend - 1);
93 else if (inside(input->block_bitmap, start, metaend)) 87 else if (inside(input->block_bitmap, start, metaend))
94 ext4_warning(sb, __func__, 88 ext4_warning(sb, "Block bitmap (%llu) in GDT table (%llu-%llu)",
95 "Block bitmap (%llu) in GDT table"
96 " (%llu-%llu)",
97 (unsigned long long)input->block_bitmap, 89 (unsigned long long)input->block_bitmap,
98 start, metaend - 1); 90 start, metaend - 1);
99 else if (inside(input->inode_bitmap, start, metaend)) 91 else if (inside(input->inode_bitmap, start, metaend))
100 ext4_warning(sb, __func__, 92 ext4_warning(sb, "Inode bitmap (%llu) in GDT table (%llu-%llu)",
101 "Inode bitmap (%llu) in GDT table"
102 " (%llu-%llu)",
103 (unsigned long long)input->inode_bitmap, 93 (unsigned long long)input->inode_bitmap,
104 start, metaend - 1); 94 start, metaend - 1);
105 else if (inside(input->inode_table, start, metaend) || 95 else if (inside(input->inode_table, start, metaend) ||
106 inside(itend - 1, start, metaend)) 96 inside(itend - 1, start, metaend))
107 ext4_warning(sb, __func__, 97 ext4_warning(sb, "Inode table (%llu-%llu) overlaps GDT table "
108 "Inode table (%llu-%llu) overlaps" 98 "(%llu-%llu)",
109 "GDT table (%llu-%llu)",
110 (unsigned long long)input->inode_table, 99 (unsigned long long)input->inode_table,
111 itend - 1, start, metaend - 1); 100 itend - 1, start, metaend - 1);
112 else 101 else
@@ -364,8 +353,7 @@ static int verify_reserved_gdb(struct super_block *sb,
364 while ((grp = ext4_list_backups(sb, &three, &five, &seven)) < end) { 353 while ((grp = ext4_list_backups(sb, &three, &five, &seven)) < end) {
365 if (le32_to_cpu(*p++) != 354 if (le32_to_cpu(*p++) !=
366 grp * EXT4_BLOCKS_PER_GROUP(sb) + blk){ 355 grp * EXT4_BLOCKS_PER_GROUP(sb) + blk){
367 ext4_warning(sb, __func__, 356 ext4_warning(sb, "reserved GDT %llu"
368 "reserved GDT %llu"
369 " missing grp %d (%llu)", 357 " missing grp %d (%llu)",
370 blk, grp, 358 blk, grp,
371 grp * 359 grp *
@@ -420,8 +408,7 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
420 */ 408 */
421 if (EXT4_SB(sb)->s_sbh->b_blocknr != 409 if (EXT4_SB(sb)->s_sbh->b_blocknr !=
422 le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block)) { 410 le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block)) {
423 ext4_warning(sb, __func__, 411 ext4_warning(sb, "won't resize using backup superblock at %llu",
424 "won't resize using backup superblock at %llu",
425 (unsigned long long)EXT4_SB(sb)->s_sbh->b_blocknr); 412 (unsigned long long)EXT4_SB(sb)->s_sbh->b_blocknr);
426 return -EPERM; 413 return -EPERM;
427 } 414 }
@@ -444,8 +431,7 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
444 431
445 data = (__le32 *)dind->b_data; 432 data = (__le32 *)dind->b_data;
446 if (le32_to_cpu(data[gdb_num % EXT4_ADDR_PER_BLOCK(sb)]) != gdblock) { 433 if (le32_to_cpu(data[gdb_num % EXT4_ADDR_PER_BLOCK(sb)]) != gdblock) {
447 ext4_warning(sb, __func__, 434 ext4_warning(sb, "new group %u GDT block %llu not reserved",
448 "new group %u GDT block %llu not reserved",
449 input->group, gdblock); 435 input->group, gdblock);
450 err = -EINVAL; 436 err = -EINVAL;
451 goto exit_dind; 437 goto exit_dind;
@@ -468,7 +454,7 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
468 GFP_NOFS); 454 GFP_NOFS);
469 if (!n_group_desc) { 455 if (!n_group_desc) {
470 err = -ENOMEM; 456 err = -ENOMEM;
471 ext4_warning(sb, __func__, 457 ext4_warning(sb,
472 "not enough memory for %lu groups", gdb_num + 1); 458 "not enough memory for %lu groups", gdb_num + 1);
473 goto exit_inode; 459 goto exit_inode;
474 } 460 }
@@ -567,8 +553,7 @@ static int reserve_backup_gdb(handle_t *handle, struct inode *inode,
567 /* Get each reserved primary GDT block and verify it holds backups */ 553 /* Get each reserved primary GDT block and verify it holds backups */
568 for (res = 0; res < reserved_gdb; res++, blk++) { 554 for (res = 0; res < reserved_gdb; res++, blk++) {
569 if (le32_to_cpu(*data) != blk) { 555 if (le32_to_cpu(*data) != blk) {
570 ext4_warning(sb, __func__, 556 ext4_warning(sb, "reserved block %llu"
571 "reserved block %llu"
572 " not at offset %ld", 557 " not at offset %ld",
573 blk, 558 blk,
574 (long)(data - (__le32 *)dind->b_data)); 559 (long)(data - (__le32 *)dind->b_data));
@@ -713,8 +698,7 @@ static void update_backups(struct super_block *sb,
713 */ 698 */
714exit_err: 699exit_err:
715 if (err) { 700 if (err) {
716 ext4_warning(sb, __func__, 701 ext4_warning(sb, "can't update backup for group %u (err %d), "
717 "can't update backup for group %u (err %d), "
718 "forcing fsck on next reboot", group, err); 702 "forcing fsck on next reboot", group, err);
719 sbi->s_mount_state &= ~EXT4_VALID_FS; 703 sbi->s_mount_state &= ~EXT4_VALID_FS;
720 sbi->s_es->s_state &= cpu_to_le16(~EXT4_VALID_FS); 704 sbi->s_es->s_state &= cpu_to_le16(~EXT4_VALID_FS);
@@ -753,20 +737,19 @@ int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input)
753 737
754 if (gdb_off == 0 && !EXT4_HAS_RO_COMPAT_FEATURE(sb, 738 if (gdb_off == 0 && !EXT4_HAS_RO_COMPAT_FEATURE(sb,
755 EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER)) { 739 EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER)) {
756 ext4_warning(sb, __func__, 740 ext4_warning(sb, "Can't resize non-sparse filesystem further");
757 "Can't resize non-sparse filesystem further");
758 return -EPERM; 741 return -EPERM;
759 } 742 }
760 743
761 if (ext4_blocks_count(es) + input->blocks_count < 744 if (ext4_blocks_count(es) + input->blocks_count <
762 ext4_blocks_count(es)) { 745 ext4_blocks_count(es)) {
763 ext4_warning(sb, __func__, "blocks_count overflow"); 746 ext4_warning(sb, "blocks_count overflow");
764 return -EINVAL; 747 return -EINVAL;
765 } 748 }
766 749
767 if (le32_to_cpu(es->s_inodes_count) + EXT4_INODES_PER_GROUP(sb) < 750 if (le32_to_cpu(es->s_inodes_count) + EXT4_INODES_PER_GROUP(sb) <
768 le32_to_cpu(es->s_inodes_count)) { 751 le32_to_cpu(es->s_inodes_count)) {
769 ext4_warning(sb, __func__, "inodes_count overflow"); 752 ext4_warning(sb, "inodes_count overflow");
770 return -EINVAL; 753 return -EINVAL;
771 } 754 }
772 755
@@ -774,14 +757,13 @@ int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input)
774 if (!EXT4_HAS_COMPAT_FEATURE(sb, 757 if (!EXT4_HAS_COMPAT_FEATURE(sb,
775 EXT4_FEATURE_COMPAT_RESIZE_INODE) 758 EXT4_FEATURE_COMPAT_RESIZE_INODE)
776 || !le16_to_cpu(es->s_reserved_gdt_blocks)) { 759 || !le16_to_cpu(es->s_reserved_gdt_blocks)) {
777 ext4_warning(sb, __func__, 760 ext4_warning(sb,
778 "No reserved GDT blocks, can't resize"); 761 "No reserved GDT blocks, can't resize");
779 return -EPERM; 762 return -EPERM;
780 } 763 }
781 inode = ext4_iget(sb, EXT4_RESIZE_INO); 764 inode = ext4_iget(sb, EXT4_RESIZE_INO);
782 if (IS_ERR(inode)) { 765 if (IS_ERR(inode)) {
783 ext4_warning(sb, __func__, 766 ext4_warning(sb, "Error opening resize inode");
784 "Error opening resize inode");
785 return PTR_ERR(inode); 767 return PTR_ERR(inode);
786 } 768 }
787 } 769 }
@@ -810,8 +792,7 @@ int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input)
810 792
811 mutex_lock(&sbi->s_resize_lock); 793 mutex_lock(&sbi->s_resize_lock);
812 if (input->group != sbi->s_groups_count) { 794 if (input->group != sbi->s_groups_count) {
813 ext4_warning(sb, __func__, 795 ext4_warning(sb, "multiple resizers run on filesystem!");
814 "multiple resizers run on filesystem!");
815 err = -EBUSY; 796 err = -EBUSY;
816 goto exit_journal; 797 goto exit_journal;
817 } 798 }
@@ -997,13 +978,12 @@ int ext4_group_extend(struct super_block *sb, struct ext4_super_block *es,
997 " too large to resize to %llu blocks safely\n", 978 " too large to resize to %llu blocks safely\n",
998 sb->s_id, n_blocks_count); 979 sb->s_id, n_blocks_count);
999 if (sizeof(sector_t) < 8) 980 if (sizeof(sector_t) < 8)
1000 ext4_warning(sb, __func__, "CONFIG_LBDAF not enabled"); 981 ext4_warning(sb, "CONFIG_LBDAF not enabled");
1001 return -EINVAL; 982 return -EINVAL;
1002 } 983 }
1003 984
1004 if (n_blocks_count < o_blocks_count) { 985 if (n_blocks_count < o_blocks_count) {
1005 ext4_warning(sb, __func__, 986 ext4_warning(sb, "can't shrink FS - resize aborted");
1006 "can't shrink FS - resize aborted");
1007 return -EBUSY; 987 return -EBUSY;
1008 } 988 }
1009 989
@@ -1011,15 +991,14 @@ int ext4_group_extend(struct super_block *sb, struct ext4_super_block *es,
1011 ext4_get_group_no_and_offset(sb, o_blocks_count, &group, &last); 991 ext4_get_group_no_and_offset(sb, o_blocks_count, &group, &last);
1012 992
1013 if (last == 0) { 993 if (last == 0) {
1014 ext4_warning(sb, __func__, 994 ext4_warning(sb, "need to use ext2online to resize further");
1015 "need to use ext2online to resize further");
1016 return -EPERM; 995 return -EPERM;
1017 } 996 }
1018 997
1019 add = EXT4_BLOCKS_PER_GROUP(sb) - last; 998 add = EXT4_BLOCKS_PER_GROUP(sb) - last;
1020 999
1021 if (o_blocks_count + add < o_blocks_count) { 1000 if (o_blocks_count + add < o_blocks_count) {
1022 ext4_warning(sb, __func__, "blocks_count overflow"); 1001 ext4_warning(sb, "blocks_count overflow");
1023 return -EINVAL; 1002 return -EINVAL;
1024 } 1003 }
1025 1004
@@ -1027,16 +1006,13 @@ int ext4_group_extend(struct super_block *sb, struct ext4_super_block *es,
1027 add = n_blocks_count - o_blocks_count; 1006 add = n_blocks_count - o_blocks_count;
1028 1007
1029 if (o_blocks_count + add < n_blocks_count) 1008 if (o_blocks_count + add < n_blocks_count)
1030 ext4_warning(sb, __func__, 1009 ext4_warning(sb, "will only finish group (%llu blocks, %u new)",
1031 "will only finish group (%llu"
1032 " blocks, %u new)",
1033 o_blocks_count + add, add); 1010 o_blocks_count + add, add);
1034 1011
1035 /* See if the device is actually as big as what was requested */ 1012 /* See if the device is actually as big as what was requested */
1036 bh = sb_bread(sb, o_blocks_count + add - 1); 1013 bh = sb_bread(sb, o_blocks_count + add - 1);
1037 if (!bh) { 1014 if (!bh) {
1038 ext4_warning(sb, __func__, 1015 ext4_warning(sb, "can't read last block, resize aborted");
1039 "can't read last block, resize aborted");
1040 return -ENOSPC; 1016 return -ENOSPC;
1041 } 1017 }
1042 brelse(bh); 1018 brelse(bh);
@@ -1047,14 +1023,13 @@ int ext4_group_extend(struct super_block *sb, struct ext4_super_block *es,
1047 handle = ext4_journal_start_sb(sb, 3); 1023 handle = ext4_journal_start_sb(sb, 3);
1048 if (IS_ERR(handle)) { 1024 if (IS_ERR(handle)) {
1049 err = PTR_ERR(handle); 1025 err = PTR_ERR(handle);
1050 ext4_warning(sb, __func__, "error %d on journal start", err); 1026 ext4_warning(sb, "error %d on journal start", err);
1051 goto exit_put; 1027 goto exit_put;
1052 } 1028 }
1053 1029
1054 mutex_lock(&EXT4_SB(sb)->s_resize_lock); 1030 mutex_lock(&EXT4_SB(sb)->s_resize_lock);
1055 if (o_blocks_count != ext4_blocks_count(es)) { 1031 if (o_blocks_count != ext4_blocks_count(es)) {
1056 ext4_warning(sb, __func__, 1032 ext4_warning(sb, "multiple resizers run on filesystem!");
1057 "multiple resizers run on filesystem!");
1058 mutex_unlock(&EXT4_SB(sb)->s_resize_lock); 1033 mutex_unlock(&EXT4_SB(sb)->s_resize_lock);
1059 ext4_journal_stop(handle); 1034 ext4_journal_stop(handle);
1060 err = -EBUSY; 1035 err = -EBUSY;
@@ -1063,8 +1038,7 @@ int ext4_group_extend(struct super_block *sb, struct ext4_super_block *es,
1063 1038
1064 if ((err = ext4_journal_get_write_access(handle, 1039 if ((err = ext4_journal_get_write_access(handle,
1065 EXT4_SB(sb)->s_sbh))) { 1040 EXT4_SB(sb)->s_sbh))) {
1066 ext4_warning(sb, __func__, 1041 ext4_warning(sb, "error %d on journal write access", err);
1067 "error %d on journal write access", err);
1068 mutex_unlock(&EXT4_SB(sb)->s_resize_lock); 1042 mutex_unlock(&EXT4_SB(sb)->s_resize_lock);
1069 ext4_journal_stop(handle); 1043 ext4_journal_stop(handle);
1070 goto exit_put; 1044 goto exit_put;
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index edcf3b0239d1..2b83b96cb2eb 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -333,7 +333,7 @@ static void ext4_handle_error(struct super_block *sb)
333 sb->s_id); 333 sb->s_id);
334} 334}
335 335
336void ext4_error(struct super_block *sb, const char *function, 336void __ext4_error(struct super_block *sb, const char *function,
337 const char *fmt, ...) 337 const char *fmt, ...)
338{ 338{
339 va_list args; 339 va_list args;
@@ -347,6 +347,42 @@ void ext4_error(struct super_block *sb, const char *function,
347 ext4_handle_error(sb); 347 ext4_handle_error(sb);
348} 348}
349 349
350void ext4_error_inode(const char *function, struct inode *inode,
351 const char *fmt, ...)
352{
353 va_list args;
354
355 va_start(args, fmt);
356 printk(KERN_CRIT "EXT4-fs error (device %s): %s: inode #%lu: (comm %s) ",
357 inode->i_sb->s_id, function, inode->i_ino, current->comm);
358 vprintk(fmt, args);
359 printk("\n");
360 va_end(args);
361
362 ext4_handle_error(inode->i_sb);
363}
364
365void ext4_error_file(const char *function, struct file *file,
366 const char *fmt, ...)
367{
368 va_list args;
369 struct inode *inode = file->f_dentry->d_inode;
370 char pathname[80], *path;
371
372 va_start(args, fmt);
373 path = d_path(&(file->f_path), pathname, sizeof(pathname));
374 if (!path)
375 path = "(unknown)";
376 printk(KERN_CRIT
377 "EXT4-fs error (device %s): %s: inode #%lu (comm %s path %s): ",
378 inode->i_sb->s_id, function, inode->i_ino, current->comm, path);
379 vprintk(fmt, args);
380 printk("\n");
381 va_end(args);
382
383 ext4_handle_error(inode->i_sb);
384}
385
350static const char *ext4_decode_error(struct super_block *sb, int errno, 386static const char *ext4_decode_error(struct super_block *sb, int errno,
351 char nbuf[16]) 387 char nbuf[16])
352{ 388{
@@ -450,7 +486,7 @@ void ext4_msg (struct super_block * sb, const char *prefix,
450 va_end(args); 486 va_end(args);
451} 487}
452 488
453void ext4_warning(struct super_block *sb, const char *function, 489void __ext4_warning(struct super_block *sb, const char *function,
454 const char *fmt, ...) 490 const char *fmt, ...)
455{ 491{
456 va_list args; 492 va_list args;
@@ -507,7 +543,7 @@ void ext4_update_dynamic_rev(struct super_block *sb)
507 if (le32_to_cpu(es->s_rev_level) > EXT4_GOOD_OLD_REV) 543 if (le32_to_cpu(es->s_rev_level) > EXT4_GOOD_OLD_REV)
508 return; 544 return;
509 545
510 ext4_warning(sb, __func__, 546 ext4_warning(sb,
511 "updating to rev %d because of new feature flag, " 547 "updating to rev %d because of new feature flag, "
512 "running e2fsck is recommended", 548 "running e2fsck is recommended",
513 EXT4_DYNAMIC_REV); 549 EXT4_DYNAMIC_REV);
@@ -708,7 +744,8 @@ static struct inode *ext4_alloc_inode(struct super_block *sb)
708#ifdef CONFIG_QUOTA 744#ifdef CONFIG_QUOTA
709 ei->i_reserved_quota = 0; 745 ei->i_reserved_quota = 0;
710#endif 746#endif
711 INIT_LIST_HEAD(&ei->i_aio_dio_complete_list); 747 INIT_LIST_HEAD(&ei->i_completed_io_list);
748 spin_lock_init(&ei->i_completed_io_lock);
712 ei->cur_aio_dio = NULL; 749 ei->cur_aio_dio = NULL;
713 ei->i_sync_tid = 0; 750 ei->i_sync_tid = 0;
714 ei->i_datasync_tid = 0; 751 ei->i_datasync_tid = 0;
@@ -797,10 +834,10 @@ static inline void ext4_show_quota_options(struct seq_file *seq,
797 if (sbi->s_qf_names[GRPQUOTA]) 834 if (sbi->s_qf_names[GRPQUOTA])
798 seq_printf(seq, ",grpjquota=%s", sbi->s_qf_names[GRPQUOTA]); 835 seq_printf(seq, ",grpjquota=%s", sbi->s_qf_names[GRPQUOTA]);
799 836
800 if (sbi->s_mount_opt & EXT4_MOUNT_USRQUOTA) 837 if (test_opt(sb, USRQUOTA))
801 seq_puts(seq, ",usrquota"); 838 seq_puts(seq, ",usrquota");
802 839
803 if (sbi->s_mount_opt & EXT4_MOUNT_GRPQUOTA) 840 if (test_opt(sb, GRPQUOTA))
804 seq_puts(seq, ",grpquota"); 841 seq_puts(seq, ",grpquota");
805#endif 842#endif
806} 843}
@@ -927,6 +964,9 @@ static int ext4_show_options(struct seq_file *seq, struct vfsmount *vfs)
927 if (test_opt(sb, NOLOAD)) 964 if (test_opt(sb, NOLOAD))
928 seq_puts(seq, ",norecovery"); 965 seq_puts(seq, ",norecovery");
929 966
967 if (test_opt(sb, DIOREAD_NOLOCK))
968 seq_puts(seq, ",dioread_nolock");
969
930 ext4_show_quota_options(seq, sb); 970 ext4_show_quota_options(seq, sb);
931 971
932 return 0; 972 return 0;
@@ -1100,6 +1140,7 @@ enum {
1100 Opt_stripe, Opt_delalloc, Opt_nodelalloc, 1140 Opt_stripe, Opt_delalloc, Opt_nodelalloc,
1101 Opt_block_validity, Opt_noblock_validity, 1141 Opt_block_validity, Opt_noblock_validity,
1102 Opt_inode_readahead_blks, Opt_journal_ioprio, 1142 Opt_inode_readahead_blks, Opt_journal_ioprio,
1143 Opt_dioread_nolock, Opt_dioread_lock,
1103 Opt_discard, Opt_nodiscard, 1144 Opt_discard, Opt_nodiscard,
1104}; 1145};
1105 1146
@@ -1167,6 +1208,8 @@ static const match_table_t tokens = {
1167 {Opt_auto_da_alloc, "auto_da_alloc=%u"}, 1208 {Opt_auto_da_alloc, "auto_da_alloc=%u"},
1168 {Opt_auto_da_alloc, "auto_da_alloc"}, 1209 {Opt_auto_da_alloc, "auto_da_alloc"},
1169 {Opt_noauto_da_alloc, "noauto_da_alloc"}, 1210 {Opt_noauto_da_alloc, "noauto_da_alloc"},
1211 {Opt_dioread_nolock, "dioread_nolock"},
1212 {Opt_dioread_lock, "dioread_lock"},
1170 {Opt_discard, "discard"}, 1213 {Opt_discard, "discard"},
1171 {Opt_nodiscard, "nodiscard"}, 1214 {Opt_nodiscard, "nodiscard"},
1172 {Opt_err, NULL}, 1215 {Opt_err, NULL},
@@ -1196,6 +1239,66 @@ static ext4_fsblk_t get_sb_block(void **data)
1196} 1239}
1197 1240
1198#define DEFAULT_JOURNAL_IOPRIO (IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 3)) 1241#define DEFAULT_JOURNAL_IOPRIO (IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 3))
1242static char deprecated_msg[] = "Mount option \"%s\" will be removed by %s\n"
1243 "Contact linux-ext4@vger.kernel.org if you think we should keep it.\n";
1244
1245#ifdef CONFIG_QUOTA
1246static int set_qf_name(struct super_block *sb, int qtype, substring_t *args)
1247{
1248 struct ext4_sb_info *sbi = EXT4_SB(sb);
1249 char *qname;
1250
1251 if (sb_any_quota_loaded(sb) &&
1252 !sbi->s_qf_names[qtype]) {
1253 ext4_msg(sb, KERN_ERR,
1254 "Cannot change journaled "
1255 "quota options when quota turned on");
1256 return 0;
1257 }
1258 qname = match_strdup(args);
1259 if (!qname) {
1260 ext4_msg(sb, KERN_ERR,
1261 "Not enough memory for storing quotafile name");
1262 return 0;
1263 }
1264 if (sbi->s_qf_names[qtype] &&
1265 strcmp(sbi->s_qf_names[qtype], qname)) {
1266 ext4_msg(sb, KERN_ERR,
1267 "%s quota file already specified", QTYPE2NAME(qtype));
1268 kfree(qname);
1269 return 0;
1270 }
1271 sbi->s_qf_names[qtype] = qname;
1272 if (strchr(sbi->s_qf_names[qtype], '/')) {
1273 ext4_msg(sb, KERN_ERR,
1274 "quotafile must be on filesystem root");
1275 kfree(sbi->s_qf_names[qtype]);
1276 sbi->s_qf_names[qtype] = NULL;
1277 return 0;
1278 }
1279 set_opt(sbi->s_mount_opt, QUOTA);
1280 return 1;
1281}
1282
1283static int clear_qf_name(struct super_block *sb, int qtype)
1284{
1285
1286 struct ext4_sb_info *sbi = EXT4_SB(sb);
1287
1288 if (sb_any_quota_loaded(sb) &&
1289 sbi->s_qf_names[qtype]) {
1290 ext4_msg(sb, KERN_ERR, "Cannot change journaled quota options"
1291 " when quota turned on");
1292 return 0;
1293 }
1294 /*
1295 * The space will be released later when all options are confirmed
1296 * to be correct
1297 */
1298 sbi->s_qf_names[qtype] = NULL;
1299 return 1;
1300}
1301#endif
1199 1302
1200static int parse_options(char *options, struct super_block *sb, 1303static int parse_options(char *options, struct super_block *sb,
1201 unsigned long *journal_devnum, 1304 unsigned long *journal_devnum,
@@ -1208,8 +1311,7 @@ static int parse_options(char *options, struct super_block *sb,
1208 int data_opt = 0; 1311 int data_opt = 0;
1209 int option; 1312 int option;
1210#ifdef CONFIG_QUOTA 1313#ifdef CONFIG_QUOTA
1211 int qtype, qfmt; 1314 int qfmt;
1212 char *qname;
1213#endif 1315#endif
1214 1316
1215 if (!options) 1317 if (!options)
@@ -1220,19 +1322,31 @@ static int parse_options(char *options, struct super_block *sb,
1220 if (!*p) 1322 if (!*p)
1221 continue; 1323 continue;
1222 1324
1325 /*
1326 * Initialize args struct so we know whether arg was
1327 * found; some options take optional arguments.
1328 */
1329 args[0].to = args[0].from = 0;
1223 token = match_token(p, tokens, args); 1330 token = match_token(p, tokens, args);
1224 switch (token) { 1331 switch (token) {
1225 case Opt_bsd_df: 1332 case Opt_bsd_df:
1333 ext4_msg(sb, KERN_WARNING, deprecated_msg, p, "2.6.38");
1226 clear_opt(sbi->s_mount_opt, MINIX_DF); 1334 clear_opt(sbi->s_mount_opt, MINIX_DF);
1227 break; 1335 break;
1228 case Opt_minix_df: 1336 case Opt_minix_df:
1337 ext4_msg(sb, KERN_WARNING, deprecated_msg, p, "2.6.38");
1229 set_opt(sbi->s_mount_opt, MINIX_DF); 1338 set_opt(sbi->s_mount_opt, MINIX_DF);
1339
1230 break; 1340 break;
1231 case Opt_grpid: 1341 case Opt_grpid:
1342 ext4_msg(sb, KERN_WARNING, deprecated_msg, p, "2.6.38");
1232 set_opt(sbi->s_mount_opt, GRPID); 1343 set_opt(sbi->s_mount_opt, GRPID);
1344
1233 break; 1345 break;
1234 case Opt_nogrpid: 1346 case Opt_nogrpid:
1347 ext4_msg(sb, KERN_WARNING, deprecated_msg, p, "2.6.38");
1235 clear_opt(sbi->s_mount_opt, GRPID); 1348 clear_opt(sbi->s_mount_opt, GRPID);
1349
1236 break; 1350 break;
1237 case Opt_resuid: 1351 case Opt_resuid:
1238 if (match_int(&args[0], &option)) 1352 if (match_int(&args[0], &option))
@@ -1369,14 +1483,13 @@ static int parse_options(char *options, struct super_block *sb,
1369 data_opt = EXT4_MOUNT_WRITEBACK_DATA; 1483 data_opt = EXT4_MOUNT_WRITEBACK_DATA;
1370 datacheck: 1484 datacheck:
1371 if (is_remount) { 1485 if (is_remount) {
1372 if ((sbi->s_mount_opt & EXT4_MOUNT_DATA_FLAGS) 1486 if (test_opt(sb, DATA_FLAGS) != data_opt) {
1373 != data_opt) {
1374 ext4_msg(sb, KERN_ERR, 1487 ext4_msg(sb, KERN_ERR,
1375 "Cannot change data mode on remount"); 1488 "Cannot change data mode on remount");
1376 return 0; 1489 return 0;
1377 } 1490 }
1378 } else { 1491 } else {
1379 sbi->s_mount_opt &= ~EXT4_MOUNT_DATA_FLAGS; 1492 clear_opt(sbi->s_mount_opt, DATA_FLAGS);
1380 sbi->s_mount_opt |= data_opt; 1493 sbi->s_mount_opt |= data_opt;
1381 } 1494 }
1382 break; 1495 break;
@@ -1388,63 +1501,22 @@ static int parse_options(char *options, struct super_block *sb,
1388 break; 1501 break;
1389#ifdef CONFIG_QUOTA 1502#ifdef CONFIG_QUOTA
1390 case Opt_usrjquota: 1503 case Opt_usrjquota:
1391 qtype = USRQUOTA; 1504 if (!set_qf_name(sb, USRQUOTA, &args[0]))
1392 goto set_qf_name;
1393 case Opt_grpjquota:
1394 qtype = GRPQUOTA;
1395set_qf_name:
1396 if (sb_any_quota_loaded(sb) &&
1397 !sbi->s_qf_names[qtype]) {
1398 ext4_msg(sb, KERN_ERR,
1399 "Cannot change journaled "
1400 "quota options when quota turned on");
1401 return 0; 1505 return 0;
1402 } 1506 break;
1403 qname = match_strdup(&args[0]); 1507 case Opt_grpjquota:
1404 if (!qname) { 1508 if (!set_qf_name(sb, GRPQUOTA, &args[0]))
1405 ext4_msg(sb, KERN_ERR,
1406 "Not enough memory for "
1407 "storing quotafile name");
1408 return 0;
1409 }
1410 if (sbi->s_qf_names[qtype] &&
1411 strcmp(sbi->s_qf_names[qtype], qname)) {
1412 ext4_msg(sb, KERN_ERR,
1413 "%s quota file already "
1414 "specified", QTYPE2NAME(qtype));
1415 kfree(qname);
1416 return 0;
1417 }
1418 sbi->s_qf_names[qtype] = qname;
1419 if (strchr(sbi->s_qf_names[qtype], '/')) {
1420 ext4_msg(sb, KERN_ERR,
1421 "quotafile must be on "
1422 "filesystem root");
1423 kfree(sbi->s_qf_names[qtype]);
1424 sbi->s_qf_names[qtype] = NULL;
1425 return 0; 1509 return 0;
1426 }
1427 set_opt(sbi->s_mount_opt, QUOTA);
1428 break; 1510 break;
1429 case Opt_offusrjquota: 1511 case Opt_offusrjquota:
1430 qtype = USRQUOTA; 1512 if (!clear_qf_name(sb, USRQUOTA))
1431 goto clear_qf_name; 1513 return 0;
1514 break;
1432 case Opt_offgrpjquota: 1515 case Opt_offgrpjquota:
1433 qtype = GRPQUOTA; 1516 if (!clear_qf_name(sb, GRPQUOTA))
1434clear_qf_name:
1435 if (sb_any_quota_loaded(sb) &&
1436 sbi->s_qf_names[qtype]) {
1437 ext4_msg(sb, KERN_ERR, "Cannot change "
1438 "journaled quota options when "
1439 "quota turned on");
1440 return 0; 1517 return 0;
1441 }
1442 /*
1443 * The space will be released later when all options
1444 * are confirmed to be correct
1445 */
1446 sbi->s_qf_names[qtype] = NULL;
1447 break; 1518 break;
1519
1448 case Opt_jqfmt_vfsold: 1520 case Opt_jqfmt_vfsold:
1449 qfmt = QFMT_VFS_OLD; 1521 qfmt = QFMT_VFS_OLD;
1450 goto set_qf_format; 1522 goto set_qf_format;
@@ -1509,10 +1581,11 @@ set_qf_format:
1509 clear_opt(sbi->s_mount_opt, BARRIER); 1581 clear_opt(sbi->s_mount_opt, BARRIER);
1510 break; 1582 break;
1511 case Opt_barrier: 1583 case Opt_barrier:
1512 if (match_int(&args[0], &option)) { 1584 if (args[0].from) {
1513 set_opt(sbi->s_mount_opt, BARRIER); 1585 if (match_int(&args[0], &option))
1514 break; 1586 return 0;
1515 } 1587 } else
1588 option = 1; /* No argument, default to 1 */
1516 if (option) 1589 if (option)
1517 set_opt(sbi->s_mount_opt, BARRIER); 1590 set_opt(sbi->s_mount_opt, BARRIER);
1518 else 1591 else
@@ -1585,10 +1658,11 @@ set_qf_format:
1585 set_opt(sbi->s_mount_opt,NO_AUTO_DA_ALLOC); 1658 set_opt(sbi->s_mount_opt,NO_AUTO_DA_ALLOC);
1586 break; 1659 break;
1587 case Opt_auto_da_alloc: 1660 case Opt_auto_da_alloc:
1588 if (match_int(&args[0], &option)) { 1661 if (args[0].from) {
1589 clear_opt(sbi->s_mount_opt, NO_AUTO_DA_ALLOC); 1662 if (match_int(&args[0], &option))
1590 break; 1663 return 0;
1591 } 1664 } else
1665 option = 1; /* No argument, default to 1 */
1592 if (option) 1666 if (option)
1593 clear_opt(sbi->s_mount_opt, NO_AUTO_DA_ALLOC); 1667 clear_opt(sbi->s_mount_opt, NO_AUTO_DA_ALLOC);
1594 else 1668 else
@@ -1600,6 +1674,12 @@ set_qf_format:
1600 case Opt_nodiscard: 1674 case Opt_nodiscard:
1601 clear_opt(sbi->s_mount_opt, DISCARD); 1675 clear_opt(sbi->s_mount_opt, DISCARD);
1602 break; 1676 break;
1677 case Opt_dioread_nolock:
1678 set_opt(sbi->s_mount_opt, DIOREAD_NOLOCK);
1679 break;
1680 case Opt_dioread_lock:
1681 clear_opt(sbi->s_mount_opt, DIOREAD_NOLOCK);
1682 break;
1603 default: 1683 default:
1604 ext4_msg(sb, KERN_ERR, 1684 ext4_msg(sb, KERN_ERR,
1605 "Unrecognized mount option \"%s\" " 1685 "Unrecognized mount option \"%s\" "
@@ -1609,18 +1689,13 @@ set_qf_format:
1609 } 1689 }
1610#ifdef CONFIG_QUOTA 1690#ifdef CONFIG_QUOTA
1611 if (sbi->s_qf_names[USRQUOTA] || sbi->s_qf_names[GRPQUOTA]) { 1691 if (sbi->s_qf_names[USRQUOTA] || sbi->s_qf_names[GRPQUOTA]) {
1612 if ((sbi->s_mount_opt & EXT4_MOUNT_USRQUOTA) && 1692 if (test_opt(sb, USRQUOTA) && sbi->s_qf_names[USRQUOTA])
1613 sbi->s_qf_names[USRQUOTA])
1614 clear_opt(sbi->s_mount_opt, USRQUOTA); 1693 clear_opt(sbi->s_mount_opt, USRQUOTA);
1615 1694
1616 if ((sbi->s_mount_opt & EXT4_MOUNT_GRPQUOTA) && 1695 if (test_opt(sb, GRPQUOTA) && sbi->s_qf_names[GRPQUOTA])
1617 sbi->s_qf_names[GRPQUOTA])
1618 clear_opt(sbi->s_mount_opt, GRPQUOTA); 1696 clear_opt(sbi->s_mount_opt, GRPQUOTA);
1619 1697
1620 if ((sbi->s_qf_names[USRQUOTA] && 1698 if (test_opt(sb, GRPQUOTA) || test_opt(sb, USRQUOTA)) {
1621 (sbi->s_mount_opt & EXT4_MOUNT_GRPQUOTA)) ||
1622 (sbi->s_qf_names[GRPQUOTA] &&
1623 (sbi->s_mount_opt & EXT4_MOUNT_USRQUOTA))) {
1624 ext4_msg(sb, KERN_ERR, "old and new quota " 1699 ext4_msg(sb, KERN_ERR, "old and new quota "
1625 "format mixing"); 1700 "format mixing");
1626 return 0; 1701 return 0;
@@ -2423,8 +2498,11 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
2423 def_mount_opts = le32_to_cpu(es->s_default_mount_opts); 2498 def_mount_opts = le32_to_cpu(es->s_default_mount_opts);
2424 if (def_mount_opts & EXT4_DEFM_DEBUG) 2499 if (def_mount_opts & EXT4_DEFM_DEBUG)
2425 set_opt(sbi->s_mount_opt, DEBUG); 2500 set_opt(sbi->s_mount_opt, DEBUG);
2426 if (def_mount_opts & EXT4_DEFM_BSDGROUPS) 2501 if (def_mount_opts & EXT4_DEFM_BSDGROUPS) {
2502 ext4_msg(sb, KERN_WARNING, deprecated_msg, "bsdgroups",
2503 "2.6.38");
2427 set_opt(sbi->s_mount_opt, GRPID); 2504 set_opt(sbi->s_mount_opt, GRPID);
2505 }
2428 if (def_mount_opts & EXT4_DEFM_UID16) 2506 if (def_mount_opts & EXT4_DEFM_UID16)
2429 set_opt(sbi->s_mount_opt, NO_UID32); 2507 set_opt(sbi->s_mount_opt, NO_UID32);
2430#ifdef CONFIG_EXT4_FS_XATTR 2508#ifdef CONFIG_EXT4_FS_XATTR
@@ -2436,11 +2514,11 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
2436 set_opt(sbi->s_mount_opt, POSIX_ACL); 2514 set_opt(sbi->s_mount_opt, POSIX_ACL);
2437#endif 2515#endif
2438 if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_DATA) 2516 if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_DATA)
2439 sbi->s_mount_opt |= EXT4_MOUNT_JOURNAL_DATA; 2517 set_opt(sbi->s_mount_opt, JOURNAL_DATA);
2440 else if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_ORDERED) 2518 else if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_ORDERED)
2441 sbi->s_mount_opt |= EXT4_MOUNT_ORDERED_DATA; 2519 set_opt(sbi->s_mount_opt, ORDERED_DATA);
2442 else if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_WBACK) 2520 else if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_WBACK)
2443 sbi->s_mount_opt |= EXT4_MOUNT_WRITEBACK_DATA; 2521 set_opt(sbi->s_mount_opt, WRITEBACK_DATA);
2444 2522
2445 if (le16_to_cpu(sbi->s_es->s_errors) == EXT4_ERRORS_PANIC) 2523 if (le16_to_cpu(sbi->s_es->s_errors) == EXT4_ERRORS_PANIC)
2446 set_opt(sbi->s_mount_opt, ERRORS_PANIC); 2524 set_opt(sbi->s_mount_opt, ERRORS_PANIC);
@@ -2468,7 +2546,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
2468 goto failed_mount; 2546 goto failed_mount;
2469 2547
2470 sb->s_flags = (sb->s_flags & ~MS_POSIXACL) | 2548 sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
2471 ((sbi->s_mount_opt & EXT4_MOUNT_POSIX_ACL) ? MS_POSIXACL : 0); 2549 (test_opt(sb, POSIX_ACL) ? MS_POSIXACL : 0);
2472 2550
2473 if (le32_to_cpu(es->s_rev_level) == EXT4_GOOD_OLD_REV && 2551 if (le32_to_cpu(es->s_rev_level) == EXT4_GOOD_OLD_REV &&
2474 (EXT4_HAS_COMPAT_FEATURE(sb, ~0U) || 2552 (EXT4_HAS_COMPAT_FEATURE(sb, ~0U) ||
@@ -2757,7 +2835,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
2757 EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER)) { 2835 EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER)) {
2758 ext4_msg(sb, KERN_ERR, "required journal recovery " 2836 ext4_msg(sb, KERN_ERR, "required journal recovery "
2759 "suppressed and not mounted read-only"); 2837 "suppressed and not mounted read-only");
2760 goto failed_mount4; 2838 goto failed_mount_wq;
2761 } else { 2839 } else {
2762 clear_opt(sbi->s_mount_opt, DATA_FLAGS); 2840 clear_opt(sbi->s_mount_opt, DATA_FLAGS);
2763 set_opt(sbi->s_mount_opt, WRITEBACK_DATA); 2841 set_opt(sbi->s_mount_opt, WRITEBACK_DATA);
@@ -2770,7 +2848,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
2770 !jbd2_journal_set_features(EXT4_SB(sb)->s_journal, 0, 0, 2848 !jbd2_journal_set_features(EXT4_SB(sb)->s_journal, 0, 0,
2771 JBD2_FEATURE_INCOMPAT_64BIT)) { 2849 JBD2_FEATURE_INCOMPAT_64BIT)) {
2772 ext4_msg(sb, KERN_ERR, "Failed to set 64-bit journal feature"); 2850 ext4_msg(sb, KERN_ERR, "Failed to set 64-bit journal feature");
2773 goto failed_mount4; 2851 goto failed_mount_wq;
2774 } 2852 }
2775 2853
2776 if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) { 2854 if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) {
@@ -2809,7 +2887,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
2809 (sbi->s_journal, 0, 0, JBD2_FEATURE_INCOMPAT_REVOKE)) { 2887 (sbi->s_journal, 0, 0, JBD2_FEATURE_INCOMPAT_REVOKE)) {
2810 ext4_msg(sb, KERN_ERR, "Journal does not support " 2888 ext4_msg(sb, KERN_ERR, "Journal does not support "
2811 "requested data journaling mode"); 2889 "requested data journaling mode");
2812 goto failed_mount4; 2890 goto failed_mount_wq;
2813 } 2891 }
2814 default: 2892 default:
2815 break; 2893 break;
@@ -2817,13 +2895,17 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
2817 set_task_ioprio(sbi->s_journal->j_task, journal_ioprio); 2895 set_task_ioprio(sbi->s_journal->j_task, journal_ioprio);
2818 2896
2819no_journal: 2897no_journal:
2820
2821 if (test_opt(sb, NOBH)) { 2898 if (test_opt(sb, NOBH)) {
2822 if (!(test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_WRITEBACK_DATA)) { 2899 if (!(test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_WRITEBACK_DATA)) {
2823 ext4_msg(sb, KERN_WARNING, "Ignoring nobh option - " 2900 ext4_msg(sb, KERN_WARNING, "Ignoring nobh option - "
2824 "its supported only with writeback mode"); 2901 "its supported only with writeback mode");
2825 clear_opt(sbi->s_mount_opt, NOBH); 2902 clear_opt(sbi->s_mount_opt, NOBH);
2826 } 2903 }
2904 if (test_opt(sb, DIOREAD_NOLOCK)) {
2905 ext4_msg(sb, KERN_WARNING, "dioread_nolock option is "
2906 "not supported with nobh mode");
2907 goto failed_mount_wq;
2908 }
2827 } 2909 }
2828 EXT4_SB(sb)->dio_unwritten_wq = create_workqueue("ext4-dio-unwritten"); 2910 EXT4_SB(sb)->dio_unwritten_wq = create_workqueue("ext4-dio-unwritten");
2829 if (!EXT4_SB(sb)->dio_unwritten_wq) { 2911 if (!EXT4_SB(sb)->dio_unwritten_wq) {
@@ -2888,6 +2970,18 @@ no_journal:
2888 "requested data journaling mode"); 2970 "requested data journaling mode");
2889 clear_opt(sbi->s_mount_opt, DELALLOC); 2971 clear_opt(sbi->s_mount_opt, DELALLOC);
2890 } 2972 }
2973 if (test_opt(sb, DIOREAD_NOLOCK)) {
2974 if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) {
2975 ext4_msg(sb, KERN_WARNING, "Ignoring dioread_nolock "
2976 "option - requested data journaling mode");
2977 clear_opt(sbi->s_mount_opt, DIOREAD_NOLOCK);
2978 }
2979 if (sb->s_blocksize < PAGE_SIZE) {
2980 ext4_msg(sb, KERN_WARNING, "Ignoring dioread_nolock "
2981 "option - block size is too small");
2982 clear_opt(sbi->s_mount_opt, DIOREAD_NOLOCK);
2983 }
2984 }
2891 2985
2892 err = ext4_setup_system_zone(sb); 2986 err = ext4_setup_system_zone(sb);
2893 if (err) { 2987 if (err) {
@@ -3351,10 +3445,9 @@ static void ext4_clear_journal_err(struct super_block *sb,
3351 char nbuf[16]; 3445 char nbuf[16];
3352 3446
3353 errstr = ext4_decode_error(sb, j_errno, nbuf); 3447 errstr = ext4_decode_error(sb, j_errno, nbuf);
3354 ext4_warning(sb, __func__, "Filesystem error recorded " 3448 ext4_warning(sb, "Filesystem error recorded "
3355 "from previous mount: %s", errstr); 3449 "from previous mount: %s", errstr);
3356 ext4_warning(sb, __func__, "Marking fs in need of " 3450 ext4_warning(sb, "Marking fs in need of filesystem check.");
3357 "filesystem check.");
3358 3451
3359 EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS; 3452 EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS;
3360 es->s_state |= cpu_to_le16(EXT4_ERROR_FS); 3453 es->s_state |= cpu_to_le16(EXT4_ERROR_FS);
@@ -3505,7 +3598,7 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
3505 ext4_abort(sb, __func__, "Abort forced by user"); 3598 ext4_abort(sb, __func__, "Abort forced by user");
3506 3599
3507 sb->s_flags = (sb->s_flags & ~MS_POSIXACL) | 3600 sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
3508 ((sbi->s_mount_opt & EXT4_MOUNT_POSIX_ACL) ? MS_POSIXACL : 0); 3601 (test_opt(sb, POSIX_ACL) ? MS_POSIXACL : 0);
3509 3602
3510 es = sbi->s_es; 3603 es = sbi->s_es;
3511 3604
@@ -3908,9 +4001,7 @@ static ssize_t ext4_quota_write(struct super_block *sb, int type,
3908 ext4_lblk_t blk = off >> EXT4_BLOCK_SIZE_BITS(sb); 4001 ext4_lblk_t blk = off >> EXT4_BLOCK_SIZE_BITS(sb);
3909 int err = 0; 4002 int err = 0;
3910 int offset = off & (sb->s_blocksize - 1); 4003 int offset = off & (sb->s_blocksize - 1);
3911 int tocopy;
3912 int journal_quota = EXT4_SB(sb)->s_qf_names[type] != NULL; 4004 int journal_quota = EXT4_SB(sb)->s_qf_names[type] != NULL;
3913 size_t towrite = len;
3914 struct buffer_head *bh; 4005 struct buffer_head *bh;
3915 handle_t *handle = journal_current_handle(); 4006 handle_t *handle = journal_current_handle();
3916 4007
@@ -3920,52 +4011,53 @@ static ssize_t ext4_quota_write(struct super_block *sb, int type,
3920 (unsigned long long)off, (unsigned long long)len); 4011 (unsigned long long)off, (unsigned long long)len);
3921 return -EIO; 4012 return -EIO;
3922 } 4013 }
4014 /*
4015 * Since we account only one data block in transaction credits,
4016 * then it is impossible to cross a block boundary.
4017 */
4018 if (sb->s_blocksize - offset < len) {
4019 ext4_msg(sb, KERN_WARNING, "Quota write (off=%llu, len=%llu)"
4020 " cancelled because not block aligned",
4021 (unsigned long long)off, (unsigned long long)len);
4022 return -EIO;
4023 }
4024
3923 mutex_lock_nested(&inode->i_mutex, I_MUTEX_QUOTA); 4025 mutex_lock_nested(&inode->i_mutex, I_MUTEX_QUOTA);
3924 while (towrite > 0) { 4026 bh = ext4_bread(handle, inode, blk, 1, &err);
3925 tocopy = sb->s_blocksize - offset < towrite ? 4027 if (!bh)
3926 sb->s_blocksize - offset : towrite; 4028 goto out;
3927 bh = ext4_bread(handle, inode, blk, 1, &err); 4029 if (journal_quota) {
3928 if (!bh) 4030 err = ext4_journal_get_write_access(handle, bh);
4031 if (err) {
4032 brelse(bh);
3929 goto out; 4033 goto out;
3930 if (journal_quota) {
3931 err = ext4_journal_get_write_access(handle, bh);
3932 if (err) {
3933 brelse(bh);
3934 goto out;
3935 }
3936 } 4034 }
3937 lock_buffer(bh);
3938 memcpy(bh->b_data+offset, data, tocopy);
3939 flush_dcache_page(bh->b_page);
3940 unlock_buffer(bh);
3941 if (journal_quota)
3942 err = ext4_handle_dirty_metadata(handle, NULL, bh);
3943 else {
3944 /* Always do at least ordered writes for quotas */
3945 err = ext4_jbd2_file_inode(handle, inode);
3946 mark_buffer_dirty(bh);
3947 }
3948 brelse(bh);
3949 if (err)
3950 goto out;
3951 offset = 0;
3952 towrite -= tocopy;
3953 data += tocopy;
3954 blk++;
3955 } 4035 }
4036 lock_buffer(bh);
4037 memcpy(bh->b_data+offset, data, len);
4038 flush_dcache_page(bh->b_page);
4039 unlock_buffer(bh);
4040 if (journal_quota)
4041 err = ext4_handle_dirty_metadata(handle, NULL, bh);
4042 else {
4043 /* Always do at least ordered writes for quotas */
4044 err = ext4_jbd2_file_inode(handle, inode);
4045 mark_buffer_dirty(bh);
4046 }
4047 brelse(bh);
3956out: 4048out:
3957 if (len == towrite) { 4049 if (err) {
3958 mutex_unlock(&inode->i_mutex); 4050 mutex_unlock(&inode->i_mutex);
3959 return err; 4051 return err;
3960 } 4052 }
3961 if (inode->i_size < off+len-towrite) { 4053 if (inode->i_size < off + len) {
3962 i_size_write(inode, off+len-towrite); 4054 i_size_write(inode, off + len);
3963 EXT4_I(inode)->i_disksize = inode->i_size; 4055 EXT4_I(inode)->i_disksize = inode->i_size;
3964 } 4056 }
3965 inode->i_mtime = inode->i_ctime = CURRENT_TIME; 4057 inode->i_mtime = inode->i_ctime = CURRENT_TIME;
3966 ext4_mark_inode_dirty(handle, inode); 4058 ext4_mark_inode_dirty(handle, inode);
3967 mutex_unlock(&inode->i_mutex); 4059 mutex_unlock(&inode->i_mutex);
3968 return len - towrite; 4060 return len;
3969} 4061}
3970 4062
3971#endif 4063#endif
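The rewritten ext4_quota_write() above drops the per-block copy loop: because the transaction only reserves credits for a single data block, any write that would cross a block boundary is now rejected up front with -EIO, and the remainder of the function copies all `len` bytes into one buffer head. On success it returns `len` rather than `len - towrite`. A minimal userspace sketch of the same bounds check, with an assumed block size and stand-in names (none of this is the kernel API):

#include <stdio.h>
#include <string.h>

#define BLOCKSIZE 4096 /* assumed block size for the sketch */

/* Single-block bounded write modelled on the new ext4_quota_write() flow.
 * Returns the byte count on success, -1 if the span crosses a block. */
static long quota_write_sketch(char *block, long off, const char *data,
                               long len)
{
        long offset = off & (BLOCKSIZE - 1);

        /* Credits cover one block only, so refuse boundary-crossing writes. */
        if (BLOCKSIZE - offset < len) {
                fprintf(stderr, "write (off=%ld, len=%ld) not block aligned\n",
                        off, len);
                return -1;
        }
        memcpy(block + offset, data, len);
        return len;
}

int main(void)
{
        static char block[BLOCKSIZE];

        printf("%ld\n", quota_write_sketch(block, 10, "hello", 5));   /* 5 */
        printf("%ld\n", quota_write_sketch(block, 4094, "hello", 5)); /* -1 */
        return 0;
}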
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index ab3a95ee5e7e..b4c5aa8489d8 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -227,7 +227,8 @@ ext4_xattr_block_get(struct inode *inode, int name_index, const char *name,
227 ea_bdebug(bh, "b_count=%d, refcount=%d", 227 ea_bdebug(bh, "b_count=%d, refcount=%d",
228 atomic_read(&(bh->b_count)), le32_to_cpu(BHDR(bh)->h_refcount)); 228 atomic_read(&(bh->b_count)), le32_to_cpu(BHDR(bh)->h_refcount));
229 if (ext4_xattr_check_block(bh)) { 229 if (ext4_xattr_check_block(bh)) {
230bad_block: ext4_error(inode->i_sb, __func__, 230bad_block:
231 ext4_error(inode->i_sb,
231 "inode %lu: bad block %llu", inode->i_ino, 232 "inode %lu: bad block %llu", inode->i_ino,
232 EXT4_I(inode)->i_file_acl); 233 EXT4_I(inode)->i_file_acl);
233 error = -EIO; 234 error = -EIO;
@@ -267,7 +268,7 @@ ext4_xattr_ibody_get(struct inode *inode, int name_index, const char *name,
267 void *end; 268 void *end;
268 int error; 269 int error;
269 270
270 if (!(EXT4_I(inode)->i_state & EXT4_STATE_XATTR)) 271 if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR))
271 return -ENODATA; 272 return -ENODATA;
272 error = ext4_get_inode_loc(inode, &iloc); 273 error = ext4_get_inode_loc(inode, &iloc);
273 if (error) 274 if (error)
@@ -371,7 +372,7 @@ ext4_xattr_block_list(struct dentry *dentry, char *buffer, size_t buffer_size)
371 ea_bdebug(bh, "b_count=%d, refcount=%d", 372 ea_bdebug(bh, "b_count=%d, refcount=%d",
372 atomic_read(&(bh->b_count)), le32_to_cpu(BHDR(bh)->h_refcount)); 373 atomic_read(&(bh->b_count)), le32_to_cpu(BHDR(bh)->h_refcount));
373 if (ext4_xattr_check_block(bh)) { 374 if (ext4_xattr_check_block(bh)) {
374 ext4_error(inode->i_sb, __func__, 375 ext4_error(inode->i_sb,
375 "inode %lu: bad block %llu", inode->i_ino, 376 "inode %lu: bad block %llu", inode->i_ino,
376 EXT4_I(inode)->i_file_acl); 377 EXT4_I(inode)->i_file_acl);
377 error = -EIO; 378 error = -EIO;
@@ -396,7 +397,7 @@ ext4_xattr_ibody_list(struct dentry *dentry, char *buffer, size_t buffer_size)
396 void *end; 397 void *end;
397 int error; 398 int error;
398 399
399 if (!(EXT4_I(inode)->i_state & EXT4_STATE_XATTR)) 400 if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR))
400 return 0; 401 return 0;
401 error = ext4_get_inode_loc(inode, &iloc); 402 error = ext4_get_inode_loc(inode, &iloc);
402 if (error) 403 if (error)
@@ -665,9 +666,8 @@ ext4_xattr_block_find(struct inode *inode, struct ext4_xattr_info *i,
665 atomic_read(&(bs->bh->b_count)), 666 atomic_read(&(bs->bh->b_count)),
666 le32_to_cpu(BHDR(bs->bh)->h_refcount)); 667 le32_to_cpu(BHDR(bs->bh)->h_refcount));
667 if (ext4_xattr_check_block(bs->bh)) { 668 if (ext4_xattr_check_block(bs->bh)) {
668 ext4_error(sb, __func__, 669 ext4_error(sb, "inode %lu: bad block %llu",
669 "inode %lu: bad block %llu", inode->i_ino, 670 inode->i_ino, EXT4_I(inode)->i_file_acl);
670 EXT4_I(inode)->i_file_acl);
671 error = -EIO; 671 error = -EIO;
672 goto cleanup; 672 goto cleanup;
673 } 673 }
@@ -880,9 +880,8 @@ cleanup_dquot:
880 goto cleanup; 880 goto cleanup;
881 881
882bad_block: 882bad_block:
883 ext4_error(inode->i_sb, __func__, 883 ext4_error(inode->i_sb, "inode %lu: bad block %llu",
884 "inode %lu: bad block %llu", inode->i_ino, 884 inode->i_ino, EXT4_I(inode)->i_file_acl);
885 EXT4_I(inode)->i_file_acl);
886 goto cleanup; 885 goto cleanup;
887 886
888#undef header 887#undef header
@@ -908,7 +907,7 @@ ext4_xattr_ibody_find(struct inode *inode, struct ext4_xattr_info *i,
908 is->s.base = is->s.first = IFIRST(header); 907 is->s.base = is->s.first = IFIRST(header);
909 is->s.here = is->s.first; 908 is->s.here = is->s.first;
910 is->s.end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size; 909 is->s.end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
911 if (EXT4_I(inode)->i_state & EXT4_STATE_XATTR) { 910 if (ext4_test_inode_state(inode, EXT4_STATE_XATTR)) {
912 error = ext4_xattr_check_names(IFIRST(header), is->s.end); 911 error = ext4_xattr_check_names(IFIRST(header), is->s.end);
913 if (error) 912 if (error)
914 return error; 913 return error;
@@ -940,10 +939,10 @@ ext4_xattr_ibody_set(handle_t *handle, struct inode *inode,
940 header = IHDR(inode, ext4_raw_inode(&is->iloc)); 939 header = IHDR(inode, ext4_raw_inode(&is->iloc));
941 if (!IS_LAST_ENTRY(s->first)) { 940 if (!IS_LAST_ENTRY(s->first)) {
942 header->h_magic = cpu_to_le32(EXT4_XATTR_MAGIC); 941 header->h_magic = cpu_to_le32(EXT4_XATTR_MAGIC);
943 EXT4_I(inode)->i_state |= EXT4_STATE_XATTR; 942 ext4_set_inode_state(inode, EXT4_STATE_XATTR);
944 } else { 943 } else {
945 header->h_magic = cpu_to_le32(0); 944 header->h_magic = cpu_to_le32(0);
946 EXT4_I(inode)->i_state &= ~EXT4_STATE_XATTR; 945 ext4_clear_inode_state(inode, EXT4_STATE_XATTR);
947 } 946 }
948 return 0; 947 return 0;
949} 948}
@@ -986,8 +985,8 @@ ext4_xattr_set_handle(handle_t *handle, struct inode *inode, int name_index,
986 if (strlen(name) > 255) 985 if (strlen(name) > 255)
987 return -ERANGE; 986 return -ERANGE;
988 down_write(&EXT4_I(inode)->xattr_sem); 987 down_write(&EXT4_I(inode)->xattr_sem);
989 no_expand = EXT4_I(inode)->i_state & EXT4_STATE_NO_EXPAND; 988 no_expand = ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND);
990 EXT4_I(inode)->i_state |= EXT4_STATE_NO_EXPAND; 989 ext4_set_inode_state(inode, EXT4_STATE_NO_EXPAND);
991 990
992 error = ext4_get_inode_loc(inode, &is.iloc); 991 error = ext4_get_inode_loc(inode, &is.iloc);
993 if (error) 992 if (error)
@@ -997,10 +996,10 @@ ext4_xattr_set_handle(handle_t *handle, struct inode *inode, int name_index,
997 if (error) 996 if (error)
998 goto cleanup; 997 goto cleanup;
999 998
1000 if (EXT4_I(inode)->i_state & EXT4_STATE_NEW) { 999 if (ext4_test_inode_state(inode, EXT4_STATE_NEW)) {
1001 struct ext4_inode *raw_inode = ext4_raw_inode(&is.iloc); 1000 struct ext4_inode *raw_inode = ext4_raw_inode(&is.iloc);
1002 memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size); 1001 memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size);
1003 EXT4_I(inode)->i_state &= ~EXT4_STATE_NEW; 1002 ext4_clear_inode_state(inode, EXT4_STATE_NEW);
1004 } 1003 }
1005 1004
1006 error = ext4_xattr_ibody_find(inode, &i, &is); 1005 error = ext4_xattr_ibody_find(inode, &i, &is);
@@ -1052,7 +1051,7 @@ ext4_xattr_set_handle(handle_t *handle, struct inode *inode, int name_index,
1052 ext4_xattr_update_super_block(handle, inode->i_sb); 1051 ext4_xattr_update_super_block(handle, inode->i_sb);
1053 inode->i_ctime = ext4_current_time(inode); 1052 inode->i_ctime = ext4_current_time(inode);
1054 if (!value) 1053 if (!value)
1055 EXT4_I(inode)->i_state &= ~EXT4_STATE_NO_EXPAND; 1054 ext4_clear_inode_state(inode, EXT4_STATE_NO_EXPAND);
1056 error = ext4_mark_iloc_dirty(handle, inode, &is.iloc); 1055 error = ext4_mark_iloc_dirty(handle, inode, &is.iloc);
1057 /* 1056 /*
1058 * The bh is consumed by ext4_mark_iloc_dirty, even with 1057 * The bh is consumed by ext4_mark_iloc_dirty, even with
@@ -1067,7 +1066,7 @@ cleanup:
1067 brelse(is.iloc.bh); 1066 brelse(is.iloc.bh);
1068 brelse(bs.bh); 1067 brelse(bs.bh);
1069 if (no_expand == 0) 1068 if (no_expand == 0)
1070 EXT4_I(inode)->i_state &= ~EXT4_STATE_NO_EXPAND; 1069 ext4_clear_inode_state(inode, EXT4_STATE_NO_EXPAND);
1071 up_write(&EXT4_I(inode)->xattr_sem); 1070 up_write(&EXT4_I(inode)->xattr_sem);
1072 return error; 1071 return error;
1073} 1072}
@@ -1195,9 +1194,8 @@ retry:
1195 if (!bh) 1194 if (!bh)
1196 goto cleanup; 1195 goto cleanup;
1197 if (ext4_xattr_check_block(bh)) { 1196 if (ext4_xattr_check_block(bh)) {
1198 ext4_error(inode->i_sb, __func__, 1197 ext4_error(inode->i_sb, "inode %lu: bad block %llu",
1199 "inode %lu: bad block %llu", inode->i_ino, 1198 inode->i_ino, EXT4_I(inode)->i_file_acl);
1200 EXT4_I(inode)->i_file_acl);
1201 error = -EIO; 1199 error = -EIO;
1202 goto cleanup; 1200 goto cleanup;
1203 } 1201 }
@@ -1302,6 +1300,8 @@ retry:
1302 1300
1303 /* Remove the chosen entry from the inode */ 1301 /* Remove the chosen entry from the inode */
1304 error = ext4_xattr_ibody_set(handle, inode, &i, is); 1302 error = ext4_xattr_ibody_set(handle, inode, &i, is);
1303 if (error)
1304 goto cleanup;
1305 1305
1306 entry = IFIRST(header); 1306 entry = IFIRST(header);
1307 if (entry_size + EXT4_XATTR_SIZE(size) >= new_extra_isize) 1307 if (entry_size + EXT4_XATTR_SIZE(size) >= new_extra_isize)
@@ -1372,16 +1372,14 @@ ext4_xattr_delete_inode(handle_t *handle, struct inode *inode)
1372 goto cleanup; 1372 goto cleanup;
1373 bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl); 1373 bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl);
1374 if (!bh) { 1374 if (!bh) {
1375 ext4_error(inode->i_sb, __func__, 1375 ext4_error(inode->i_sb, "inode %lu: block %llu read error",
1376 "inode %lu: block %llu read error", inode->i_ino, 1376 inode->i_ino, EXT4_I(inode)->i_file_acl);
1377 EXT4_I(inode)->i_file_acl);
1378 goto cleanup; 1377 goto cleanup;
1379 } 1378 }
1380 if (BHDR(bh)->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC) || 1379 if (BHDR(bh)->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC) ||
1381 BHDR(bh)->h_blocks != cpu_to_le32(1)) { 1380 BHDR(bh)->h_blocks != cpu_to_le32(1)) {
1382 ext4_error(inode->i_sb, __func__, 1381 ext4_error(inode->i_sb, "inode %lu: bad block %llu",
1383 "inode %lu: bad block %llu", inode->i_ino, 1382 inode->i_ino, EXT4_I(inode)->i_file_acl);
1384 EXT4_I(inode)->i_file_acl);
1385 goto cleanup; 1383 goto cleanup;
1386 } 1384 }
1387 ext4_xattr_release_block(handle, inode, bh); 1385 ext4_xattr_release_block(handle, inode, bh);
@@ -1506,7 +1504,7 @@ again:
1506 } 1504 }
1507 bh = sb_bread(inode->i_sb, ce->e_block); 1505 bh = sb_bread(inode->i_sb, ce->e_block);
1508 if (!bh) { 1506 if (!bh) {
1509 ext4_error(inode->i_sb, __func__, 1507 ext4_error(inode->i_sb,
1510 "inode %lu: block %lu read error", 1508 "inode %lu: block %lu read error",
1511 inode->i_ino, (unsigned long) ce->e_block); 1509 inode->i_ino, (unsigned long) ce->e_block);
1512 } else if (le32_to_cpu(BHDR(bh)->h_refcount) >= 1510 } else if (le32_to_cpu(BHDR(bh)->h_refcount) >=
diff --git a/fs/fat/inode.c b/fs/fat/inode.c
index 14da530b05ca..fbeecdc194dc 100644
--- a/fs/fat/inode.c
+++ b/fs/fat/inode.c
@@ -577,7 +577,7 @@ static inline loff_t fat_i_pos_read(struct msdos_sb_info *sbi,
577 return i_pos; 577 return i_pos;
578} 578}
579 579
580static int fat_write_inode(struct inode *inode, int wait) 580static int __fat_write_inode(struct inode *inode, int wait)
581{ 581{
582 struct super_block *sb = inode->i_sb; 582 struct super_block *sb = inode->i_sb;
583 struct msdos_sb_info *sbi = MSDOS_SB(sb); 583 struct msdos_sb_info *sbi = MSDOS_SB(sb);
@@ -634,9 +634,14 @@ retry:
634 return err; 634 return err;
635} 635}
636 636
637static int fat_write_inode(struct inode *inode, struct writeback_control *wbc)
638{
639 return __fat_write_inode(inode, wbc->sync_mode == WB_SYNC_ALL);
640}
641
637int fat_sync_inode(struct inode *inode) 642int fat_sync_inode(struct inode *inode)
638{ 643{
639 return fat_write_inode(inode, 1); 644 return __fat_write_inode(inode, 1);
640} 645}
641 646
642EXPORT_SYMBOL_GPL(fat_sync_inode); 647EXPORT_SYMBOL_GPL(fat_sync_inode);
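The fat conversion above shows the pattern used when ->write_inode switched from an `int wait` flag to a `struct writeback_control *`: the old body survives as __fat_write_inode(), a thin callback derives the flag from `wbc->sync_mode == WB_SYNC_ALL`, and internal callers such as fat_sync_inode() invoke the double-underscore variant with an explicit `wait = 1`. A compact sketch of the wrapper idiom, with stand-in names:

#include <stdio.h>

enum sync_mode { SYNC_NONE, SYNC_ALL };

struct writeback_ctl {
        enum sync_mode sync_mode;
};

/* Old body, unchanged: still takes the plain flag. */
static int __write_inode_sketch(int ino, int wait)
{
        printf("inode %d, wait=%d\n", ino, wait);
        return 0;
}

/* New callback signature: derive the flag from the control struct. */
static int write_inode_sketch(int ino, struct writeback_ctl *wbc)
{
        return __write_inode_sketch(ino, wbc->sync_mode == SYNC_ALL);
}

/* Internal sync path bypasses the wrapper and forces a wait. */
static int sync_inode_sketch(int ino)
{
        return __write_inode_sketch(ino, 1);
}

int main(void)
{
        struct writeback_ctl wbc = { SYNC_ALL };

        write_inode_sketch(1, &wbc);
        return sync_inode_sketch(2);
}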
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 1a7c42c64ff4..76fc4d594acb 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -381,10 +381,10 @@ static void queue_io(struct bdi_writeback *wb, unsigned long *older_than_this)
381 move_expired_inodes(&wb->b_dirty, &wb->b_io, older_than_this); 381 move_expired_inodes(&wb->b_dirty, &wb->b_io, older_than_this);
382} 382}
383 383
384static int write_inode(struct inode *inode, int sync) 384static int write_inode(struct inode *inode, struct writeback_control *wbc)
385{ 385{
386 if (inode->i_sb->s_op->write_inode && !is_bad_inode(inode)) 386 if (inode->i_sb->s_op->write_inode && !is_bad_inode(inode))
387 return inode->i_sb->s_op->write_inode(inode, sync); 387 return inode->i_sb->s_op->write_inode(inode, wbc);
388 return 0; 388 return 0;
389} 389}
390 390
@@ -421,7 +421,6 @@ static int
421writeback_single_inode(struct inode *inode, struct writeback_control *wbc) 421writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
422{ 422{
423 struct address_space *mapping = inode->i_mapping; 423 struct address_space *mapping = inode->i_mapping;
424 int wait = wbc->sync_mode == WB_SYNC_ALL;
425 unsigned dirty; 424 unsigned dirty;
426 int ret; 425 int ret;
427 426
@@ -439,7 +438,7 @@ writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
439 * We'll have another go at writing back this inode when we 438 * We'll have another go at writing back this inode when we
440 * completed a full scan of b_io. 439 * completed a full scan of b_io.
441 */ 440 */
442 if (!wait) { 441 if (wbc->sync_mode != WB_SYNC_ALL) {
443 requeue_io(inode); 442 requeue_io(inode);
444 return 0; 443 return 0;
445 } 444 }
@@ -461,15 +460,20 @@ writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
461 460
462 ret = do_writepages(mapping, wbc); 461 ret = do_writepages(mapping, wbc);
463 462
464 /* Don't write the inode if only I_DIRTY_PAGES was set */ 463 /*
465 if (dirty & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) { 464 * Make sure to wait on the data before writing out the metadata.
466 int err = write_inode(inode, wait); 465 * This is important for filesystems that modify metadata on data
466 * I/O completion.
467 */
468 if (wbc->sync_mode == WB_SYNC_ALL) {
469 int err = filemap_fdatawait(mapping);
467 if (ret == 0) 470 if (ret == 0)
468 ret = err; 471 ret = err;
469 } 472 }
470 473
471 if (wait) { 474 /* Don't write the inode if only I_DIRTY_PAGES was set */
472 int err = filemap_fdatawait(mapping); 475 if (dirty & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
476 int err = write_inode(inode, wbc);
473 if (ret == 0) 477 if (ret == 0)
474 ret = err; 478 ret = err;
475 } 479 }
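The fs-writeback hunk reorders the WB_SYNC_ALL path: writeback_single_inode() now waits for the data pages (filemap_fdatawait) before calling write_inode(), since, as the new comment notes, some filesystems update metadata from data I/O completion; writing the inode first could persist metadata that in-flight data I/O was about to change. A stub-function sketch of the resulting sequence (nothing here is the kernel API):

#include <stdio.h>

static int do_writepages_stub(void) { puts("write data pages");     return 0; }
static int fdatawait_stub(void)     { puts("wait for data I/O");    return 0; }
static int write_inode_stub(void)   { puts("write inode metadata"); return 0; }

int main(void)
{
        int ret = do_writepages_stub();
        int err;

        /* Wait on the data first: completion handlers may still touch
         * the metadata we are about to write. */
        err = fdatawait_stub();
        if (ret == 0)
                ret = err;

        err = write_inode_stub();
        if (ret == 0)
                ret = err;
        return ret;
}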
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index 51d9e33d634f..eb7e9423691f 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -865,13 +865,10 @@ static int fuse_notify_inval_inode(struct fuse_conn *fc, unsigned int size,
865 865
866 down_read(&fc->killsb); 866 down_read(&fc->killsb);
867 err = -ENOENT; 867 err = -ENOENT;
868 if (!fc->sb) 868 if (fc->sb) {
869 goto err_unlock; 869 err = fuse_reverse_inval_inode(fc->sb, outarg.ino,
870 870 outarg.off, outarg.len);
871 err = fuse_reverse_inval_inode(fc->sb, outarg.ino, 871 }
872 outarg.off, outarg.len);
873
874err_unlock:
875 up_read(&fc->killsb); 872 up_read(&fc->killsb);
876 return err; 873 return err;
877 874
@@ -884,10 +881,15 @@ static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
884 struct fuse_copy_state *cs) 881 struct fuse_copy_state *cs)
885{ 882{
886 struct fuse_notify_inval_entry_out outarg; 883 struct fuse_notify_inval_entry_out outarg;
887 int err = -EINVAL; 884 int err = -ENOMEM;
888 char buf[FUSE_NAME_MAX+1]; 885 char *buf;
889 struct qstr name; 886 struct qstr name;
890 887
888 buf = kzalloc(FUSE_NAME_MAX + 1, GFP_KERNEL);
889 if (!buf)
890 goto err;
891
892 err = -EINVAL;
891 if (size < sizeof(outarg)) 893 if (size < sizeof(outarg))
892 goto err; 894 goto err;
893 895
@@ -910,16 +912,14 @@ static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
910 912
911 down_read(&fc->killsb); 913 down_read(&fc->killsb);
912 err = -ENOENT; 914 err = -ENOENT;
913 if (!fc->sb) 915 if (fc->sb)
914 goto err_unlock; 916 err = fuse_reverse_inval_entry(fc->sb, outarg.parent, &name);
915
916 err = fuse_reverse_inval_entry(fc->sb, outarg.parent, &name);
917
918err_unlock:
919 up_read(&fc->killsb); 917 up_read(&fc->killsb);
918 kfree(buf);
920 return err; 919 return err;
921 920
922err: 921err:
922 kfree(buf);
923 fuse_copy_finish(cs); 923 fuse_copy_finish(cs);
924 return err; 924 return err;
925} 925}
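fuse_notify_inval_entry() previously kept a FUSE_NAME_MAX+1 byte name buffer on the stack, a sizeable kernel stack frame; it now kzalloc()s the buffer and frees it on both the success and error exits, and the old `err_unlock` label collapses into a plain `if (fc->sb)` test. A userspace sketch of the allocate-early, free-on-every-path shape, with stand-in names and errno values:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define NAME_MAX_SKETCH 1024 /* stands in for FUSE_NAME_MAX */

static int notify_inval_entry_sketch(const char *src, size_t size)
{
        int err = -12; /* ENOMEM */
        char *buf = calloc(1, NAME_MAX_SKETCH + 1);

        if (!buf)
                goto out;

        err = -22; /* EINVAL */
        if (size > NAME_MAX_SKETCH)
                goto out;

        memcpy(buf, src, size); /* stands in for the fuse copy step */
        printf("invalidate entry \"%s\"\n", buf);
        err = 0;
out:
        free(buf); /* freed on every path; free(NULL) is a no-op */
        return err;
}

int main(void)
{
        return notify_inval_entry_sketch("foo", 3);
}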
diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c
index 7b8da9415267..0c1d0b82dcf1 100644
--- a/fs/gfs2/aops.c
+++ b/fs/gfs2/aops.c
@@ -1061,8 +1061,8 @@ out:
1061 1061
1062int gfs2_releasepage(struct page *page, gfp_t gfp_mask) 1062int gfs2_releasepage(struct page *page, gfp_t gfp_mask)
1063{ 1063{
1064 struct inode *aspace = page->mapping->host; 1064 struct address_space *mapping = page->mapping;
1065 struct gfs2_sbd *sdp = aspace->i_sb->s_fs_info; 1065 struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
1066 struct buffer_head *bh, *head; 1066 struct buffer_head *bh, *head;
1067 struct gfs2_bufdata *bd; 1067 struct gfs2_bufdata *bd;
1068 1068
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index f42663325931..454d4b4eb36b 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -19,7 +19,6 @@
19#include <linux/list.h> 19#include <linux/list.h>
20#include <linux/wait.h> 20#include <linux/wait.h>
21#include <linux/module.h> 21#include <linux/module.h>
22#include <linux/rwsem.h>
23#include <asm/uaccess.h> 22#include <asm/uaccess.h>
24#include <linux/seq_file.h> 23#include <linux/seq_file.h>
25#include <linux/debugfs.h> 24#include <linux/debugfs.h>
@@ -60,7 +59,6 @@ static int __dump_glock(struct seq_file *seq, const struct gfs2_glock *gl);
60#define GLOCK_BUG_ON(gl,x) do { if (unlikely(x)) { __dump_glock(NULL, gl); BUG(); } } while(0) 59#define GLOCK_BUG_ON(gl,x) do { if (unlikely(x)) { __dump_glock(NULL, gl); BUG(); } } while(0)
61static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target); 60static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target);
62 61
63static DECLARE_RWSEM(gfs2_umount_flush_sem);
64static struct dentry *gfs2_root; 62static struct dentry *gfs2_root;
65static struct workqueue_struct *glock_workqueue; 63static struct workqueue_struct *glock_workqueue;
66struct workqueue_struct *gfs2_delete_workqueue; 64struct workqueue_struct *gfs2_delete_workqueue;
@@ -154,12 +152,14 @@ static unsigned int gl_hash(const struct gfs2_sbd *sdp,
154static void glock_free(struct gfs2_glock *gl) 152static void glock_free(struct gfs2_glock *gl)
155{ 153{
156 struct gfs2_sbd *sdp = gl->gl_sbd; 154 struct gfs2_sbd *sdp = gl->gl_sbd;
157 struct inode *aspace = gl->gl_aspace; 155 struct address_space *mapping = gfs2_glock2aspace(gl);
156 struct kmem_cache *cachep = gfs2_glock_cachep;
158 157
159 if (aspace) 158 GLOCK_BUG_ON(gl, mapping && mapping->nrpages);
160 gfs2_aspace_put(aspace);
161 trace_gfs2_glock_put(gl); 159 trace_gfs2_glock_put(gl);
162 sdp->sd_lockstruct.ls_ops->lm_put_lock(gfs2_glock_cachep, gl); 160 if (mapping)
161 cachep = gfs2_glock_aspace_cachep;
162 sdp->sd_lockstruct.ls_ops->lm_put_lock(cachep, gl);
163} 163}
164 164
165/** 165/**
@@ -712,7 +712,6 @@ static void glock_work_func(struct work_struct *work)
712 finish_xmote(gl, gl->gl_reply); 712 finish_xmote(gl, gl->gl_reply);
713 drop_ref = 1; 713 drop_ref = 1;
714 } 714 }
715 down_read(&gfs2_umount_flush_sem);
716 spin_lock(&gl->gl_spin); 715 spin_lock(&gl->gl_spin);
717 if (test_and_clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) && 716 if (test_and_clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
718 gl->gl_state != LM_ST_UNLOCKED && 717 gl->gl_state != LM_ST_UNLOCKED &&
@@ -725,7 +724,6 @@ static void glock_work_func(struct work_struct *work)
725 } 724 }
726 run_queue(gl, 0); 725 run_queue(gl, 0);
727 spin_unlock(&gl->gl_spin); 726 spin_unlock(&gl->gl_spin);
728 up_read(&gfs2_umount_flush_sem);
729 if (!delay || 727 if (!delay ||
730 queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0) 728 queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
731 gfs2_glock_put(gl); 729 gfs2_glock_put(gl);
@@ -750,10 +748,11 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
750 const struct gfs2_glock_operations *glops, int create, 748 const struct gfs2_glock_operations *glops, int create,
751 struct gfs2_glock **glp) 749 struct gfs2_glock **glp)
752{ 750{
751 struct super_block *s = sdp->sd_vfs;
753 struct lm_lockname name = { .ln_number = number, .ln_type = glops->go_type }; 752 struct lm_lockname name = { .ln_number = number, .ln_type = glops->go_type };
754 struct gfs2_glock *gl, *tmp; 753 struct gfs2_glock *gl, *tmp;
755 unsigned int hash = gl_hash(sdp, &name); 754 unsigned int hash = gl_hash(sdp, &name);
756 int error; 755 struct address_space *mapping;
757 756
758 read_lock(gl_lock_addr(hash)); 757 read_lock(gl_lock_addr(hash));
759 gl = search_bucket(hash, sdp, &name); 758 gl = search_bucket(hash, sdp, &name);
@@ -765,7 +764,10 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
765 if (!create) 764 if (!create)
766 return -ENOENT; 765 return -ENOENT;
767 766
768 gl = kmem_cache_alloc(gfs2_glock_cachep, GFP_KERNEL); 767 if (glops->go_flags & GLOF_ASPACE)
768 gl = kmem_cache_alloc(gfs2_glock_aspace_cachep, GFP_KERNEL);
769 else
770 gl = kmem_cache_alloc(gfs2_glock_cachep, GFP_KERNEL);
769 if (!gl) 771 if (!gl)
770 return -ENOMEM; 772 return -ENOMEM;
771 773
@@ -784,18 +786,18 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
784 gl->gl_tchange = jiffies; 786 gl->gl_tchange = jiffies;
785 gl->gl_object = NULL; 787 gl->gl_object = NULL;
786 gl->gl_sbd = sdp; 788 gl->gl_sbd = sdp;
787 gl->gl_aspace = NULL;
788 INIT_DELAYED_WORK(&gl->gl_work, glock_work_func); 789 INIT_DELAYED_WORK(&gl->gl_work, glock_work_func);
789 INIT_WORK(&gl->gl_delete, delete_work_func); 790 INIT_WORK(&gl->gl_delete, delete_work_func);
790 791
791 /* If this glock protects actual on-disk data or metadata blocks, 792 mapping = gfs2_glock2aspace(gl);
792 create a VFS inode to manage the pages/buffers holding them. */ 793 if (mapping) {
793 if (glops == &gfs2_inode_glops || glops == &gfs2_rgrp_glops) { 794 mapping->a_ops = &gfs2_meta_aops;
794 gl->gl_aspace = gfs2_aspace_get(sdp); 795 mapping->host = s->s_bdev->bd_inode;
795 if (!gl->gl_aspace) { 796 mapping->flags = 0;
796 error = -ENOMEM; 797 mapping_set_gfp_mask(mapping, GFP_NOFS);
797 goto fail; 798 mapping->assoc_mapping = NULL;
798 } 799 mapping->backing_dev_info = s->s_bdi;
800 mapping->writeback_index = 0;
799 } 801 }
800 802
801 write_lock(gl_lock_addr(hash)); 803 write_lock(gl_lock_addr(hash));
@@ -812,10 +814,6 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
812 *glp = gl; 814 *glp = gl;
813 815
814 return 0; 816 return 0;
815
816fail:
817 kmem_cache_free(gfs2_glock_cachep, gl);
818 return error;
819} 817}
820 818
821/** 819/**
@@ -1510,35 +1508,10 @@ void gfs2_glock_thaw(struct gfs2_sbd *sdp)
1510 1508
1511void gfs2_gl_hash_clear(struct gfs2_sbd *sdp) 1509void gfs2_gl_hash_clear(struct gfs2_sbd *sdp)
1512{ 1510{
1513 unsigned long t;
1514 unsigned int x; 1511 unsigned int x;
1515 int cont;
1516 1512
1517 t = jiffies; 1513 for (x = 0; x < GFS2_GL_HASH_SIZE; x++)
1518 1514 examine_bucket(clear_glock, sdp, x);
1519 for (;;) {
1520 cont = 0;
1521 for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {
1522 if (examine_bucket(clear_glock, sdp, x))
1523 cont = 1;
1524 }
1525
1526 if (!cont)
1527 break;
1528
1529 if (time_after_eq(jiffies,
1530 t + gfs2_tune_get(sdp, gt_stall_secs) * HZ)) {
1531 fs_warn(sdp, "Unmount seems to be stalled. "
1532 "Dumping lock state...\n");
1533 gfs2_dump_lockstate(sdp);
1534 t = jiffies;
1535 }
1536
1537 down_write(&gfs2_umount_flush_sem);
1538 invalidate_inodes(sdp->sd_vfs);
1539 up_write(&gfs2_umount_flush_sem);
1540 msleep(10);
1541 }
1542 flush_workqueue(glock_workqueue); 1515 flush_workqueue(glock_workqueue);
1543 wait_event(sdp->sd_glock_wait, atomic_read(&sdp->sd_glock_disposal) == 0); 1516 wait_event(sdp->sd_glock_wait, atomic_read(&sdp->sd_glock_disposal) == 0);
1544 gfs2_dump_lockstate(sdp); 1517 gfs2_dump_lockstate(sdp);
@@ -1685,7 +1658,7 @@ static int __dump_glock(struct seq_file *seq, const struct gfs2_glock *gl)
1685 dtime *= 1000000/HZ; /* demote time in uSec */ 1658 dtime *= 1000000/HZ; /* demote time in uSec */
1686 if (!test_bit(GLF_DEMOTE, &gl->gl_flags)) 1659 if (!test_bit(GLF_DEMOTE, &gl->gl_flags))
1687 dtime = 0; 1660 dtime = 0;
1688 gfs2_print_dbg(seq, "G: s:%s n:%u/%llu f:%s t:%s d:%s/%llu a:%d r:%d\n", 1661 gfs2_print_dbg(seq, "G: s:%s n:%u/%llx f:%s t:%s d:%s/%llu a:%d r:%d\n",
1689 state2str(gl->gl_state), 1662 state2str(gl->gl_state),
1690 gl->gl_name.ln_type, 1663 gl->gl_name.ln_type,
1691 (unsigned long long)gl->gl_name.ln_number, 1664 (unsigned long long)gl->gl_name.ln_number,
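Taken together, the glock.c hunks retire the separate aspace inode: gfs2_glock_get() picks between two slab caches according to GLOF_ASPACE and initializes the trailing mapping inline (a_ops, host, a GFP_NOFS allocation mask, backing_dev_info), which also removes the old -ENOMEM fail path; gfs2_gl_hash_clear() loses its stall-warning retry loop and the gfs2_umount_flush_sem, relying on a single examine_bucket() pass followed by flush_workqueue() and a wait for sd_glock_disposal to drain. The __dump_glock hunk additionally fixes the lock-number format from %llu to %llx. The glock.h change that follows adds the accessor for the embedded mapping; a sketch of that layout appears after it.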
diff --git a/fs/gfs2/glock.h b/fs/gfs2/glock.h
index c0262faf4725..2bda1911b156 100644
--- a/fs/gfs2/glock.h
+++ b/fs/gfs2/glock.h
@@ -180,6 +180,13 @@ static inline int gfs2_glock_is_held_shrd(struct gfs2_glock *gl)
180 return gl->gl_state == LM_ST_SHARED; 180 return gl->gl_state == LM_ST_SHARED;
181} 181}
182 182
183static inline struct address_space *gfs2_glock2aspace(struct gfs2_glock *gl)
184{
185 if (gl->gl_ops->go_flags & GLOF_ASPACE)
186 return (struct address_space *)(gl + 1);
187 return NULL;
188}
189
183int gfs2_glock_get(struct gfs2_sbd *sdp, 190int gfs2_glock_get(struct gfs2_sbd *sdp,
184 u64 number, const struct gfs2_glock_operations *glops, 191 u64 number, const struct gfs2_glock_operations *glops,
185 int create, struct gfs2_glock **glp); 192 int create, struct gfs2_glock **glp);
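gfs2_glock2aspace() relies on the address_space being allocated immediately after the glock in the same slab object, so `(struct address_space *)(gl + 1)` is simply "one glock past gl". A standalone sketch of the trailing-object idiom, with stand-in types in place of the gfs2 structures:

#include <stdio.h>
#include <stdlib.h>

struct lock_sketch {
        int has_mapping;
        long state;
};

struct space_sketch {
        long nrpages;
};

/* Mirror of the accessor: the mapping, if any, sits right after the lock. */
static struct space_sketch *lock2space(struct lock_sketch *gl)
{
        if (gl->has_mapping)
                return (struct space_sketch *)(gl + 1);
        return NULL;
}

int main(void)
{
        /* One allocation holds the lock and, optionally, its mapping. */
        struct lock_sketch *gl = malloc(sizeof(struct lock_sketch) +
                                        sizeof(struct space_sketch));
        if (!gl)
                return 1;
        gl->has_mapping = 1;
        lock2space(gl)->nrpages = 0;
        printf("%ld\n", lock2space(gl)->nrpages);
        free(gl);
        return 0;
}

A single allocation instead of a glock plus a full VFS inode is the point of the gfs2 series in this patch: the old code paid for an inode and a hash insertion just to obtain an address_space.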
diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
index 78554acc0605..38e3749d476c 100644
--- a/fs/gfs2/glops.c
+++ b/fs/gfs2/glops.c
@@ -87,7 +87,7 @@ static void gfs2_ail_empty_gl(struct gfs2_glock *gl)
87 87
88static void rgrp_go_sync(struct gfs2_glock *gl) 88static void rgrp_go_sync(struct gfs2_glock *gl)
89{ 89{
90 struct address_space *metamapping = gl->gl_aspace->i_mapping; 90 struct address_space *metamapping = gfs2_glock2aspace(gl);
91 int error; 91 int error;
92 92
93 if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags)) 93 if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
@@ -113,7 +113,7 @@ static void rgrp_go_sync(struct gfs2_glock *gl)
113 113
114static void rgrp_go_inval(struct gfs2_glock *gl, int flags) 114static void rgrp_go_inval(struct gfs2_glock *gl, int flags)
115{ 115{
116 struct address_space *mapping = gl->gl_aspace->i_mapping; 116 struct address_space *mapping = gfs2_glock2aspace(gl);
117 117
118 BUG_ON(!(flags & DIO_METADATA)); 118 BUG_ON(!(flags & DIO_METADATA));
119 gfs2_assert_withdraw(gl->gl_sbd, !atomic_read(&gl->gl_ail_count)); 119 gfs2_assert_withdraw(gl->gl_sbd, !atomic_read(&gl->gl_ail_count));
@@ -134,7 +134,7 @@ static void rgrp_go_inval(struct gfs2_glock *gl, int flags)
134static void inode_go_sync(struct gfs2_glock *gl) 134static void inode_go_sync(struct gfs2_glock *gl)
135{ 135{
136 struct gfs2_inode *ip = gl->gl_object; 136 struct gfs2_inode *ip = gl->gl_object;
137 struct address_space *metamapping = gl->gl_aspace->i_mapping; 137 struct address_space *metamapping = gfs2_glock2aspace(gl);
138 int error; 138 int error;
139 139
140 if (ip && !S_ISREG(ip->i_inode.i_mode)) 140 if (ip && !S_ISREG(ip->i_inode.i_mode))
@@ -183,7 +183,7 @@ static void inode_go_inval(struct gfs2_glock *gl, int flags)
183 gfs2_assert_withdraw(gl->gl_sbd, !atomic_read(&gl->gl_ail_count)); 183 gfs2_assert_withdraw(gl->gl_sbd, !atomic_read(&gl->gl_ail_count));
184 184
185 if (flags & DIO_METADATA) { 185 if (flags & DIO_METADATA) {
186 struct address_space *mapping = gl->gl_aspace->i_mapping; 186 struct address_space *mapping = gfs2_glock2aspace(gl);
187 truncate_inode_pages(mapping, 0); 187 truncate_inode_pages(mapping, 0);
188 if (ip) { 188 if (ip) {
189 set_bit(GIF_INVALID, &ip->i_flags); 189 set_bit(GIF_INVALID, &ip->i_flags);
@@ -282,7 +282,8 @@ static int inode_go_dump(struct seq_file *seq, const struct gfs2_glock *gl)
282 282
283static int rgrp_go_demote_ok(const struct gfs2_glock *gl) 283static int rgrp_go_demote_ok(const struct gfs2_glock *gl)
284{ 284{
285 return !gl->gl_aspace->i_mapping->nrpages; 285 const struct address_space *mapping = (const struct address_space *)(gl + 1);
286 return !mapping->nrpages;
286} 287}
287 288
288/** 289/**
@@ -387,8 +388,7 @@ static void iopen_go_callback(struct gfs2_glock *gl)
387 struct gfs2_inode *ip = (struct gfs2_inode *)gl->gl_object; 388 struct gfs2_inode *ip = (struct gfs2_inode *)gl->gl_object;
388 389
389 if (gl->gl_demote_state == LM_ST_UNLOCKED && 390 if (gl->gl_demote_state == LM_ST_UNLOCKED &&
390 gl->gl_state == LM_ST_SHARED && 391 gl->gl_state == LM_ST_SHARED && ip) {
391 ip && test_bit(GIF_USER, &ip->i_flags)) {
392 gfs2_glock_hold(gl); 392 gfs2_glock_hold(gl);
393 if (queue_work(gfs2_delete_workqueue, &gl->gl_delete) == 0) 393 if (queue_work(gfs2_delete_workqueue, &gl->gl_delete) == 0)
394 gfs2_glock_put_nolock(gl); 394 gfs2_glock_put_nolock(gl);
@@ -407,6 +407,7 @@ const struct gfs2_glock_operations gfs2_inode_glops = {
407 .go_dump = inode_go_dump, 407 .go_dump = inode_go_dump,
408 .go_type = LM_TYPE_INODE, 408 .go_type = LM_TYPE_INODE,
409 .go_min_hold_time = HZ / 5, 409 .go_min_hold_time = HZ / 5,
410 .go_flags = GLOF_ASPACE,
410}; 411};
411 412
412const struct gfs2_glock_operations gfs2_rgrp_glops = { 413const struct gfs2_glock_operations gfs2_rgrp_glops = {
@@ -418,6 +419,7 @@ const struct gfs2_glock_operations gfs2_rgrp_glops = {
418 .go_dump = gfs2_rgrp_dump, 419 .go_dump = gfs2_rgrp_dump,
419 .go_type = LM_TYPE_RGRP, 420 .go_type = LM_TYPE_RGRP,
420 .go_min_hold_time = HZ / 5, 421 .go_min_hold_time = HZ / 5,
422 .go_flags = GLOF_ASPACE,
421}; 423};
422 424
423const struct gfs2_glock_operations gfs2_trans_glops = { 425const struct gfs2_glock_operations gfs2_trans_glops = {
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index bc0ad158e6b4..b8025e51cabf 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -162,6 +162,8 @@ struct gfs2_glock_operations {
162 void (*go_callback) (struct gfs2_glock *gl); 162 void (*go_callback) (struct gfs2_glock *gl);
163 const int go_type; 163 const int go_type;
164 const unsigned long go_min_hold_time; 164 const unsigned long go_min_hold_time;
165 const unsigned long go_flags;
166#define GLOF_ASPACE 1
165}; 167};
166 168
167enum { 169enum {
@@ -225,7 +227,6 @@ struct gfs2_glock {
225 227
226 struct gfs2_sbd *gl_sbd; 228 struct gfs2_sbd *gl_sbd;
227 229
228 struct inode *gl_aspace;
229 struct list_head gl_ail_list; 230 struct list_head gl_ail_list;
230 atomic_t gl_ail_count; 231 atomic_t gl_ail_count;
231 struct delayed_work gl_work; 232 struct delayed_work gl_work;
@@ -258,7 +259,6 @@ enum {
258 GIF_INVALID = 0, 259 GIF_INVALID = 0,
259 GIF_QD_LOCKED = 1, 260 GIF_QD_LOCKED = 1,
260 GIF_SW_PAGED = 3, 261 GIF_SW_PAGED = 3,
261 GIF_USER = 4, /* user inode, not metadata addr space */
262}; 262};
263 263
264 264
@@ -451,7 +451,6 @@ struct gfs2_tune {
451 unsigned int gt_quota_quantum; /* Secs between syncs to quota file */ 451 unsigned int gt_quota_quantum; /* Secs between syncs to quota file */
452 unsigned int gt_new_files_jdata; 452 unsigned int gt_new_files_jdata;
453 unsigned int gt_max_readahead; /* Max bytes to read-ahead from disk */ 453 unsigned int gt_max_readahead; /* Max bytes to read-ahead from disk */
454 unsigned int gt_stall_secs; /* Detects trouble! */
455 unsigned int gt_complain_secs; 454 unsigned int gt_complain_secs;
456 unsigned int gt_statfs_quantum; 455 unsigned int gt_statfs_quantum;
457 unsigned int gt_statfs_slow; 456 unsigned int gt_statfs_slow;
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
index 6e220f4eee7d..b1bf2694fb2b 100644
--- a/fs/gfs2/inode.c
+++ b/fs/gfs2/inode.c
@@ -45,7 +45,7 @@ static int iget_test(struct inode *inode, void *opaque)
45 struct gfs2_inode *ip = GFS2_I(inode); 45 struct gfs2_inode *ip = GFS2_I(inode);
46 u64 *no_addr = opaque; 46 u64 *no_addr = opaque;
47 47
48 if (ip->i_no_addr == *no_addr && test_bit(GIF_USER, &ip->i_flags)) 48 if (ip->i_no_addr == *no_addr)
49 return 1; 49 return 1;
50 50
51 return 0; 51 return 0;
@@ -58,7 +58,6 @@ static int iget_set(struct inode *inode, void *opaque)
58 58
59 inode->i_ino = (unsigned long)*no_addr; 59 inode->i_ino = (unsigned long)*no_addr;
60 ip->i_no_addr = *no_addr; 60 ip->i_no_addr = *no_addr;
61 set_bit(GIF_USER, &ip->i_flags);
62 return 0; 61 return 0;
63} 62}
64 63
@@ -84,7 +83,7 @@ static int iget_skip_test(struct inode *inode, void *opaque)
84 struct gfs2_inode *ip = GFS2_I(inode); 83 struct gfs2_inode *ip = GFS2_I(inode);
85 struct gfs2_skip_data *data = opaque; 84 struct gfs2_skip_data *data = opaque;
86 85
87 if (ip->i_no_addr == data->no_addr && test_bit(GIF_USER, &ip->i_flags)){ 86 if (ip->i_no_addr == data->no_addr) {
88 if (inode->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE)){ 87 if (inode->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE)){
89 data->skipped = 1; 88 data->skipped = 1;
90 return 0; 89 return 0;
@@ -103,7 +102,6 @@ static int iget_skip_set(struct inode *inode, void *opaque)
103 return 1; 102 return 1;
104 inode->i_ino = (unsigned long)(data->no_addr); 103 inode->i_ino = (unsigned long)(data->no_addr);
105 ip->i_no_addr = data->no_addr; 104 ip->i_no_addr = data->no_addr;
106 set_bit(GIF_USER, &ip->i_flags);
107 return 0; 105 return 0;
108} 106}
109 107
diff --git a/fs/gfs2/lock_dlm.c b/fs/gfs2/lock_dlm.c
index 0e5e0e7022e5..569b46240f61 100644
--- a/fs/gfs2/lock_dlm.c
+++ b/fs/gfs2/lock_dlm.c
@@ -30,7 +30,10 @@ static void gdlm_ast(void *arg)
30 30
31 switch (gl->gl_lksb.sb_status) { 31 switch (gl->gl_lksb.sb_status) {
32 case -DLM_EUNLOCK: /* Unlocked, so glock can be freed */ 32 case -DLM_EUNLOCK: /* Unlocked, so glock can be freed */
33 kmem_cache_free(gfs2_glock_cachep, gl); 33 if (gl->gl_ops->go_flags & GLOF_ASPACE)
34 kmem_cache_free(gfs2_glock_aspace_cachep, gl);
35 else
36 kmem_cache_free(gfs2_glock_cachep, gl);
34 if (atomic_dec_and_test(&sdp->sd_glock_disposal)) 37 if (atomic_dec_and_test(&sdp->sd_glock_disposal))
35 wake_up(&sdp->sd_glock_wait); 38 wake_up(&sdp->sd_glock_wait);
36 return; 39 return;
diff --git a/fs/gfs2/lops.c b/fs/gfs2/lops.c
index de97632ba32f..adc260fbea90 100644
--- a/fs/gfs2/lops.c
+++ b/fs/gfs2/lops.c
@@ -528,9 +528,9 @@ static void databuf_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
528 gfs2_pin(sdp, bd->bd_bh); 528 gfs2_pin(sdp, bd->bd_bh);
529 tr->tr_num_databuf_new++; 529 tr->tr_num_databuf_new++;
530 sdp->sd_log_num_databuf++; 530 sdp->sd_log_num_databuf++;
531 list_add(&le->le_list, &sdp->sd_log_le_databuf); 531 list_add_tail(&le->le_list, &sdp->sd_log_le_databuf);
532 } else { 532 } else {
533 list_add(&le->le_list, &sdp->sd_log_le_ordered); 533 list_add_tail(&le->le_list, &sdp->sd_log_le_ordered);
534 } 534 }
535out: 535out:
536 gfs2_log_unlock(sdp); 536 gfs2_log_unlock(sdp);
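databuf_lo_add() switches from list_add() to list_add_tail() for both the databuf and ordered lists, so log elements are now kept in submission order rather than most-recent-first. The head-insert versus tail-insert difference, in a self-contained C sketch with a toy singly linked list:

#include <stdio.h>

struct node {
        int id;
        struct node *next;
};

static void add_head(struct node **list, struct node *n)
{
        n->next = *list; /* newest element ends up first */
        *list = n;
}

static void add_tail(struct node **list, struct node *n)
{
        struct node **p = list;

        while (*p)
                p = &(*p)->next;
        n->next = NULL; /* newest element ends up last */
        *p = n;
}

int main(void)
{
        struct node a = { 1 }, b = { 2 }, c = { 3 };
        struct node *list = NULL;

        add_tail(&list, &a);
        add_tail(&list, &b);
        add_tail(&list, &c);
        for (struct node *n = list; n; n = n->next)
                printf("%d ", n->id); /* 1 2 3: submission order kept */
        putchar('\n');
        return 0;
}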
diff --git a/fs/gfs2/main.c b/fs/gfs2/main.c
index 5b31f7741a8f..a88fadc704bb 100644
--- a/fs/gfs2/main.c
+++ b/fs/gfs2/main.c
@@ -52,6 +52,22 @@ static void gfs2_init_glock_once(void *foo)
52 atomic_set(&gl->gl_ail_count, 0); 52 atomic_set(&gl->gl_ail_count, 0);
53} 53}
54 54
55static void gfs2_init_gl_aspace_once(void *foo)
56{
57 struct gfs2_glock *gl = foo;
58 struct address_space *mapping = (struct address_space *)(gl + 1);
59
60 gfs2_init_glock_once(gl);
61 memset(mapping, 0, sizeof(*mapping));
62 INIT_RADIX_TREE(&mapping->page_tree, GFP_ATOMIC);
63 spin_lock_init(&mapping->tree_lock);
64 spin_lock_init(&mapping->i_mmap_lock);
65 INIT_LIST_HEAD(&mapping->private_list);
66 spin_lock_init(&mapping->private_lock);
67 INIT_RAW_PRIO_TREE_ROOT(&mapping->i_mmap);
68 INIT_LIST_HEAD(&mapping->i_mmap_nonlinear);
69}
70
55/** 71/**
56 * init_gfs2_fs - Register GFS2 as a filesystem 72 * init_gfs2_fs - Register GFS2 as a filesystem
57 * 73 *
@@ -78,6 +94,14 @@ static int __init init_gfs2_fs(void)
78 if (!gfs2_glock_cachep) 94 if (!gfs2_glock_cachep)
79 goto fail; 95 goto fail;
80 96
97 gfs2_glock_aspace_cachep = kmem_cache_create("gfs2_glock (aspace)",
98 sizeof(struct gfs2_glock) +
99 sizeof(struct address_space),
100 0, 0, gfs2_init_gl_aspace_once);
101
102 if (!gfs2_glock_aspace_cachep)
103 goto fail;
104
81 gfs2_inode_cachep = kmem_cache_create("gfs2_inode", 105 gfs2_inode_cachep = kmem_cache_create("gfs2_inode",
82 sizeof(struct gfs2_inode), 106 sizeof(struct gfs2_inode),
83 0, SLAB_RECLAIM_ACCOUNT| 107 0, SLAB_RECLAIM_ACCOUNT|
@@ -144,6 +168,9 @@ fail:
144 if (gfs2_inode_cachep) 168 if (gfs2_inode_cachep)
145 kmem_cache_destroy(gfs2_inode_cachep); 169 kmem_cache_destroy(gfs2_inode_cachep);
146 170
171 if (gfs2_glock_aspace_cachep)
172 kmem_cache_destroy(gfs2_glock_aspace_cachep);
173
147 if (gfs2_glock_cachep) 174 if (gfs2_glock_cachep)
148 kmem_cache_destroy(gfs2_glock_cachep); 175 kmem_cache_destroy(gfs2_glock_cachep);
149 176
@@ -169,6 +196,7 @@ static void __exit exit_gfs2_fs(void)
169 kmem_cache_destroy(gfs2_rgrpd_cachep); 196 kmem_cache_destroy(gfs2_rgrpd_cachep);
170 kmem_cache_destroy(gfs2_bufdata_cachep); 197 kmem_cache_destroy(gfs2_bufdata_cachep);
171 kmem_cache_destroy(gfs2_inode_cachep); 198 kmem_cache_destroy(gfs2_inode_cachep);
199 kmem_cache_destroy(gfs2_glock_aspace_cachep);
172 kmem_cache_destroy(gfs2_glock_cachep); 200 kmem_cache_destroy(gfs2_glock_cachep);
173 201
174 gfs2_sys_uninit(); 202 gfs2_sys_uninit();
diff --git a/fs/gfs2/meta_io.c b/fs/gfs2/meta_io.c
index 6f68a5f18eb8..0bb12c80937a 100644
--- a/fs/gfs2/meta_io.c
+++ b/fs/gfs2/meta_io.c
@@ -93,49 +93,13 @@ static int gfs2_aspace_writepage(struct page *page, struct writeback_control *wb
93 return err; 93 return err;
94} 94}
95 95
96static const struct address_space_operations aspace_aops = { 96const struct address_space_operations gfs2_meta_aops = {
97 .writepage = gfs2_aspace_writepage, 97 .writepage = gfs2_aspace_writepage,
98 .releasepage = gfs2_releasepage, 98 .releasepage = gfs2_releasepage,
99 .sync_page = block_sync_page, 99 .sync_page = block_sync_page,
100}; 100};
101 101
102/** 102/**
103 * gfs2_aspace_get - Create and initialize a struct inode structure
104 * @sdp: the filesystem the aspace is in
105 *
106 * Right now a struct inode is just a struct inode. Maybe Linux
107 * will supply a more lightweight address space construct (that works)
108 * in the future.
109 *
110 * Make sure pages/buffers in this aspace aren't in high memory.
111 *
112 * Returns: the aspace
113 */
114
115struct inode *gfs2_aspace_get(struct gfs2_sbd *sdp)
116{
117 struct inode *aspace;
118 struct gfs2_inode *ip;
119
120 aspace = new_inode(sdp->sd_vfs);
121 if (aspace) {
122 mapping_set_gfp_mask(aspace->i_mapping, GFP_NOFS);
123 aspace->i_mapping->a_ops = &aspace_aops;
124 aspace->i_size = MAX_LFS_FILESIZE;
125 ip = GFS2_I(aspace);
126 clear_bit(GIF_USER, &ip->i_flags);
127 insert_inode_hash(aspace);
128 }
129 return aspace;
130}
131
132void gfs2_aspace_put(struct inode *aspace)
133{
134 remove_inode_hash(aspace);
135 iput(aspace);
136}
137
138/**
139 * gfs2_meta_sync - Sync all buffers associated with a glock 103 * gfs2_meta_sync - Sync all buffers associated with a glock
140 * @gl: The glock 104 * @gl: The glock
141 * 105 *
@@ -143,7 +107,7 @@ void gfs2_aspace_put(struct inode *aspace)
143 107
144void gfs2_meta_sync(struct gfs2_glock *gl) 108void gfs2_meta_sync(struct gfs2_glock *gl)
145{ 109{
146 struct address_space *mapping = gl->gl_aspace->i_mapping; 110 struct address_space *mapping = gfs2_glock2aspace(gl);
147 int error; 111 int error;
148 112
149 filemap_fdatawrite(mapping); 113 filemap_fdatawrite(mapping);
@@ -164,7 +128,7 @@ void gfs2_meta_sync(struct gfs2_glock *gl)
164 128
165struct buffer_head *gfs2_getbuf(struct gfs2_glock *gl, u64 blkno, int create) 129struct buffer_head *gfs2_getbuf(struct gfs2_glock *gl, u64 blkno, int create)
166{ 130{
167 struct address_space *mapping = gl->gl_aspace->i_mapping; 131 struct address_space *mapping = gfs2_glock2aspace(gl);
168 struct gfs2_sbd *sdp = gl->gl_sbd; 132 struct gfs2_sbd *sdp = gl->gl_sbd;
169 struct page *page; 133 struct page *page;
170 struct buffer_head *bh; 134 struct buffer_head *bh;
@@ -344,8 +308,10 @@ void gfs2_attach_bufdata(struct gfs2_glock *gl, struct buffer_head *bh,
344 308
345void gfs2_remove_from_journal(struct buffer_head *bh, struct gfs2_trans *tr, int meta) 309void gfs2_remove_from_journal(struct buffer_head *bh, struct gfs2_trans *tr, int meta)
346{ 310{
347 struct gfs2_sbd *sdp = GFS2_SB(bh->b_page->mapping->host); 311 struct address_space *mapping = bh->b_page->mapping;
312 struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
348 struct gfs2_bufdata *bd = bh->b_private; 313 struct gfs2_bufdata *bd = bh->b_private;
314
349 if (test_clear_buffer_pinned(bh)) { 315 if (test_clear_buffer_pinned(bh)) {
350 list_del_init(&bd->bd_le.le_list); 316 list_del_init(&bd->bd_le.le_list);
351 if (meta) { 317 if (meta) {
diff --git a/fs/gfs2/meta_io.h b/fs/gfs2/meta_io.h
index de270c2f9b63..6a1d9ba16411 100644
--- a/fs/gfs2/meta_io.h
+++ b/fs/gfs2/meta_io.h
@@ -37,8 +37,16 @@ static inline void gfs2_buffer_copy_tail(struct buffer_head *to_bh,
37 0, from_head - to_head); 37 0, from_head - to_head);
38} 38}
39 39
40struct inode *gfs2_aspace_get(struct gfs2_sbd *sdp); 40extern const struct address_space_operations gfs2_meta_aops;
41void gfs2_aspace_put(struct inode *aspace); 41
42static inline struct gfs2_sbd *gfs2_mapping2sbd(struct address_space *mapping)
43{
44 struct inode *inode = mapping->host;
45 if (mapping->a_ops == &gfs2_meta_aops)
46 return (((struct gfs2_glock *)mapping) - 1)->gl_sbd;
47 else
48 return inode->i_sb->s_fs_info;
49}
42 50
43void gfs2_meta_sync(struct gfs2_glock *gl); 51void gfs2_meta_sync(struct gfs2_glock *gl);
44 52
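gfs2_mapping2sbd() is the inverse of gfs2_glock2aspace(): for metadata mappings (recognized by `a_ops == &gfs2_meta_aops`) the address_space is the trailing member of a glock allocation, so stepping back one glock recovers the owner and its superblock; every other mapping goes through `mapping->host->i_sb->s_fs_info` as before. A sketch of the back-pointer arithmetic, with the a_ops test modelled as a flag (stand-in types throughout):

#include <stdio.h>
#include <stdlib.h>

struct lock_sketch {
        const char *sb_name;
};

struct space_sketch {
        int is_meta; /* models mapping->a_ops == &gfs2_meta_aops */
};

static const char *mapping2sb(struct space_sketch *mapping,
                              const char *host_sb_name)
{
        if (mapping->is_meta)
                return (((struct lock_sketch *)mapping) - 1)->sb_name;
        return host_sb_name; /* models mapping->host->i_sb->s_fs_info */
}

int main(void)
{
        struct lock_sketch *gl = malloc(sizeof(struct lock_sketch) +
                                        sizeof(struct space_sketch));
        struct space_sketch *mapping;

        if (!gl)
                return 1;
        gl->sb_name = "sdp0";
        mapping = (struct space_sketch *)(gl + 1);
        mapping->is_meta = 1;
        printf("%s\n", mapping2sb(mapping, "regular")); /* sdp0 */
        free(gl);
        return 0;
}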
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index a86ed6381566..a054b526dc08 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -65,7 +65,6 @@ static void gfs2_tune_init(struct gfs2_tune *gt)
65 gt->gt_quota_scale_den = 1; 65 gt->gt_quota_scale_den = 1;
66 gt->gt_new_files_jdata = 0; 66 gt->gt_new_files_jdata = 0;
67 gt->gt_max_readahead = 1 << 18; 67 gt->gt_max_readahead = 1 << 18;
68 gt->gt_stall_secs = 600;
69 gt->gt_complain_secs = 10; 68 gt->gt_complain_secs = 10;
70} 69}
71 70
@@ -1241,10 +1240,9 @@ fail_sb:
1241fail_locking: 1240fail_locking:
1242 init_locking(sdp, &mount_gh, UNDO); 1241 init_locking(sdp, &mount_gh, UNDO);
1243fail_lm: 1242fail_lm:
1243 invalidate_inodes(sb);
1244 gfs2_gl_hash_clear(sdp); 1244 gfs2_gl_hash_clear(sdp);
1245 gfs2_lm_unmount(sdp); 1245 gfs2_lm_unmount(sdp);
1246 while (invalidate_inodes(sb))
1247 yield();
1248fail_sys: 1246fail_sys:
1249 gfs2_sys_fs_del(sdp); 1247 gfs2_sys_fs_del(sdp);
1250fail: 1248fail:
diff --git a/fs/gfs2/ops_inode.c b/fs/gfs2/ops_inode.c
index 84350e1be66d..4e64352d49de 100644
--- a/fs/gfs2/ops_inode.c
+++ b/fs/gfs2/ops_inode.c
@@ -976,122 +976,62 @@ out:
976} 976}
977 977
978/** 978/**
979 * gfs2_readlinki - return the contents of a symlink 979 * gfs2_follow_link - Follow a symbolic link
980 * @ip: the symlink's inode 980 * @dentry: The dentry of the link
981 * @buf: a pointer to the buffer to be filled 981 * @nd: Data that we pass to vfs_follow_link()
982 * @len: a pointer to the length of @buf
983 * 982 *
984 * If @buf is too small, a piece of memory is kmalloc()ed and needs 983 * This can handle symlinks of any size.
985 * to be freed by the caller.
986 * 984 *
987 * Returns: errno 985 * Returns: 0 on success or error code
988 */ 986 */
989 987
990static int gfs2_readlinki(struct gfs2_inode *ip, char **buf, unsigned int *len) 988static void *gfs2_follow_link(struct dentry *dentry, struct nameidata *nd)
991{ 989{
990 struct gfs2_inode *ip = GFS2_I(dentry->d_inode);
992 struct gfs2_holder i_gh; 991 struct gfs2_holder i_gh;
993 struct buffer_head *dibh; 992 struct buffer_head *dibh;
994 unsigned int x; 993 unsigned int x;
994 char *buf;
995 int error; 995 int error;
996 996
997 gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &i_gh); 997 gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &i_gh);
998 error = gfs2_glock_nq(&i_gh); 998 error = gfs2_glock_nq(&i_gh);
999 if (error) { 999 if (error) {
1000 gfs2_holder_uninit(&i_gh); 1000 gfs2_holder_uninit(&i_gh);
1001 return error; 1001 nd_set_link(nd, ERR_PTR(error));
1002 return NULL;
1002 } 1003 }
1003 1004
1004 if (!ip->i_disksize) { 1005 if (!ip->i_disksize) {
1005 gfs2_consist_inode(ip); 1006 gfs2_consist_inode(ip);
1006 error = -EIO; 1007 buf = ERR_PTR(-EIO);
1007 goto out; 1008 goto out;
1008 } 1009 }
1009 1010
1010 error = gfs2_meta_inode_buffer(ip, &dibh); 1011 error = gfs2_meta_inode_buffer(ip, &dibh);
1011 if (error) 1012 if (error) {
1013 buf = ERR_PTR(error);
1012 goto out; 1014 goto out;
1013
1014 x = ip->i_disksize + 1;
1015 if (x > *len) {
1016 *buf = kmalloc(x, GFP_NOFS);
1017 if (!*buf) {
1018 error = -ENOMEM;
1019 goto out_brelse;
1020 }
1021 } 1015 }
1022 1016
1023 memcpy(*buf, dibh->b_data + sizeof(struct gfs2_dinode), x); 1017 x = ip->i_disksize + 1;
1024 *len = x; 1018 buf = kmalloc(x, GFP_NOFS);
1025 1019 if (!buf)
1026out_brelse: 1020 buf = ERR_PTR(-ENOMEM);
1021 else
1022 memcpy(buf, dibh->b_data + sizeof(struct gfs2_dinode), x);
1027 brelse(dibh); 1023 brelse(dibh);
1028out: 1024out:
1029 gfs2_glock_dq_uninit(&i_gh); 1025 gfs2_glock_dq_uninit(&i_gh);
1030 return error; 1026 nd_set_link(nd, buf);
1031} 1027 return NULL;
1032
1033/**
1034 * gfs2_readlink - Read the value of a symlink
1035 * @dentry: the symlink
1036 * @buf: the buffer to read the symlink data into
1037 * @size: the size of the buffer
1038 *
1039 * Returns: errno
1040 */
1041
1042static int gfs2_readlink(struct dentry *dentry, char __user *user_buf,
1043 int user_size)
1044{
1045 struct gfs2_inode *ip = GFS2_I(dentry->d_inode);
1046 char array[GFS2_FAST_NAME_SIZE], *buf = array;
1047 unsigned int len = GFS2_FAST_NAME_SIZE;
1048 int error;
1049
1050 error = gfs2_readlinki(ip, &buf, &len);
1051 if (error)
1052 return error;
1053
1054 if (user_size > len - 1)
1055 user_size = len - 1;
1056
1057 if (copy_to_user(user_buf, buf, user_size))
1058 error = -EFAULT;
1059 else
1060 error = user_size;
1061
1062 if (buf != array)
1063 kfree(buf);
1064
1065 return error;
1066} 1028}
1067 1029
1068/** 1030static void gfs2_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
1069 * gfs2_follow_link - Follow a symbolic link
1070 * @dentry: The dentry of the link
1071 * @nd: Data that we pass to vfs_follow_link()
1072 *
1073 * This can handle symlinks of any size. It is optimised for symlinks
1074 * under GFS2_FAST_NAME_SIZE.
1075 *
1076 * Returns: 0 on success or error code
1077 */
1078
1079static void *gfs2_follow_link(struct dentry *dentry, struct nameidata *nd)
1080{ 1031{
1081 struct gfs2_inode *ip = GFS2_I(dentry->d_inode); 1032 char *s = nd_get_link(nd);
1082 char array[GFS2_FAST_NAME_SIZE], *buf = array; 1033 if (!IS_ERR(s))
1083 unsigned int len = GFS2_FAST_NAME_SIZE; 1034 kfree(s);
1084 int error;
1085
1086 error = gfs2_readlinki(ip, &buf, &len);
1087 if (!error) {
1088 error = vfs_follow_link(nd, buf);
1089 if (buf != array)
1090 kfree(buf);
1091 } else
1092 path_put(&nd->path);
1093
1094 return ERR_PTR(error);
1095} 1035}
1096 1036
1097/** 1037/**
@@ -1426,8 +1366,9 @@ const struct inode_operations gfs2_dir_iops = {
1426}; 1366};
1427 1367
1428const struct inode_operations gfs2_symlink_iops = { 1368const struct inode_operations gfs2_symlink_iops = {
1429 .readlink = gfs2_readlink, 1369 .readlink = generic_readlink,
1430 .follow_link = gfs2_follow_link, 1370 .follow_link = gfs2_follow_link,
1371 .put_link = gfs2_put_link,
1431 .permission = gfs2_permission, 1372 .permission = gfs2_permission,
1432 .setattr = gfs2_setattr, 1373 .setattr = gfs2_setattr,
1433 .getattr = gfs2_getattr, 1374 .getattr = gfs2_getattr,
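The symlink rework above deletes gfs2_readlinki() and its GFS2_FAST_NAME_SIZE stack-buffer optimisation: gfs2_follow_link() now always kmalloc()s `i_disksize + 1` bytes, hands the buffer (or an ERR_PTR) to nd_set_link(), and the new gfs2_put_link() frees it once the lookup is done, while .readlink becomes the stock generic_readlink. A userspace sketch of the allocate-in-follow, free-in-put contract (stand-in names; no VFS types):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* follow allocates and publishes the target... */
static char *follow_link_sketch(const char *stored, size_t disksize)
{
        char *buf = malloc(disksize + 1); /* one extra byte for the NUL */

        if (!buf)
                return NULL;
        memcpy(buf, stored, disksize);
        buf[disksize] = '\0';
        return buf; /* stands in for nd_set_link(nd, buf) */
}

/* ...and put frees it after the path walk has consumed it. */
static void put_link_sketch(char *s)
{
        free(s);
}

int main(void)
{
        char *s = follow_link_sketch("/some/target", 12);

        if (!s)
                return 1;
        printf("%s\n", s);
        put_link_sketch(s);
        return 0;
}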
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index a8c2bcd0fcc8..50aac606b990 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -22,6 +22,7 @@
22#include <linux/crc32.h> 22#include <linux/crc32.h>
23#include <linux/time.h> 23#include <linux/time.h>
24#include <linux/wait.h> 24#include <linux/wait.h>
25#include <linux/writeback.h>
25 26
26#include "gfs2.h" 27#include "gfs2.h"
27#include "incore.h" 28#include "incore.h"
@@ -711,7 +712,7 @@ void gfs2_unfreeze_fs(struct gfs2_sbd *sdp)
711 * Returns: errno 712 * Returns: errno
712 */ 713 */
713 714
714static int gfs2_write_inode(struct inode *inode, int sync) 715static int gfs2_write_inode(struct inode *inode, struct writeback_control *wbc)
715{ 716{
716 struct gfs2_inode *ip = GFS2_I(inode); 717 struct gfs2_inode *ip = GFS2_I(inode);
717 struct gfs2_sbd *sdp = GFS2_SB(inode); 718 struct gfs2_sbd *sdp = GFS2_SB(inode);
@@ -722,8 +723,7 @@ static int gfs2_write_inode(struct inode *inode, int sync)
722 int ret = 0; 723 int ret = 0;
723 724
724 /* Check this is a "normal" inode, etc */ 725 /* Check this is a "normal" inode, etc */
725 if (!test_bit(GIF_USER, &ip->i_flags) || 726 if (current->flags & PF_MEMALLOC)
726 (current->flags & PF_MEMALLOC))
727 return 0; 727 return 0;
728 ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh); 728 ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
729 if (ret) 729 if (ret)
@@ -746,7 +746,7 @@ static int gfs2_write_inode(struct inode *inode, int sync)
746do_unlock: 746do_unlock:
747 gfs2_glock_dq_uninit(&gh); 747 gfs2_glock_dq_uninit(&gh);
748do_flush: 748do_flush:
749 if (sync != 0) 749 if (wbc->sync_mode == WB_SYNC_ALL)
750 gfs2_log_flush(GFS2_SB(inode), ip->i_gl); 750 gfs2_log_flush(GFS2_SB(inode), ip->i_gl);
751 return ret; 751 return ret;
752} 752}
@@ -860,6 +860,7 @@ restart:
860 gfs2_clear_rgrpd(sdp); 860 gfs2_clear_rgrpd(sdp);
861 gfs2_jindex_free(sdp); 861 gfs2_jindex_free(sdp);
862 /* Take apart glock structures and buffer lists */ 862 /* Take apart glock structures and buffer lists */
863 invalidate_inodes(sdp->sd_vfs);
863 gfs2_gl_hash_clear(sdp); 864 gfs2_gl_hash_clear(sdp);
864 /* Unmount the locking protocol */ 865 /* Unmount the locking protocol */
865 gfs2_lm_unmount(sdp); 866 gfs2_lm_unmount(sdp);
@@ -1194,7 +1195,7 @@ static void gfs2_drop_inode(struct inode *inode)
1194{ 1195{
1195 struct gfs2_inode *ip = GFS2_I(inode); 1196 struct gfs2_inode *ip = GFS2_I(inode);
1196 1197
1197 if (test_bit(GIF_USER, &ip->i_flags) && inode->i_nlink) { 1198 if (inode->i_nlink) {
1198 struct gfs2_glock *gl = ip->i_iopen_gh.gh_gl; 1199 struct gfs2_glock *gl = ip->i_iopen_gh.gh_gl;
1199 if (gl && test_bit(GLF_DEMOTE, &gl->gl_flags)) 1200 if (gl && test_bit(GLF_DEMOTE, &gl->gl_flags))
1200 clear_nlink(inode); 1201 clear_nlink(inode);
@@ -1212,18 +1213,12 @@ static void gfs2_clear_inode(struct inode *inode)
1212{ 1213{
1213 struct gfs2_inode *ip = GFS2_I(inode); 1214 struct gfs2_inode *ip = GFS2_I(inode);
1214 1215
1215 /* This tells us its a "real" inode and not one which only 1216 ip->i_gl->gl_object = NULL;
1216 * serves to contain an address space (see rgrp.c, meta_io.c) 1217 gfs2_glock_put(ip->i_gl);
1217 * which therefore doesn't have its own glocks. 1218 ip->i_gl = NULL;
1218 */ 1219 if (ip->i_iopen_gh.gh_gl) {
1219 if (test_bit(GIF_USER, &ip->i_flags)) { 1220 ip->i_iopen_gh.gh_gl->gl_object = NULL;
1220 ip->i_gl->gl_object = NULL; 1221 gfs2_glock_dq_uninit(&ip->i_iopen_gh);
1221 gfs2_glock_put(ip->i_gl);
1222 ip->i_gl = NULL;
1223 if (ip->i_iopen_gh.gh_gl) {
1224 ip->i_iopen_gh.gh_gl->gl_object = NULL;
1225 gfs2_glock_dq_uninit(&ip->i_iopen_gh);
1226 }
1227 } 1222 }
1228} 1223}
1229 1224
@@ -1358,9 +1353,6 @@ static void gfs2_delete_inode(struct inode *inode)
1358 struct gfs2_holder gh; 1353 struct gfs2_holder gh;
1359 int error; 1354 int error;
1360 1355
1361 if (!test_bit(GIF_USER, &ip->i_flags))
1362 goto out;
1363
1364 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh); 1356 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
1365 if (unlikely(error)) { 1357 if (unlikely(error)) {
1366 gfs2_glock_dq_uninit(&ip->i_iopen_gh); 1358 gfs2_glock_dq_uninit(&ip->i_iopen_gh);
diff --git a/fs/gfs2/sys.c b/fs/gfs2/sys.c
index 4496cc37a0fa..b5f1a46133c8 100644
--- a/fs/gfs2/sys.c
+++ b/fs/gfs2/sys.c
@@ -478,7 +478,6 @@ TUNE_ATTR(complain_secs, 0);
478TUNE_ATTR(statfs_slow, 0); 478TUNE_ATTR(statfs_slow, 0);
479TUNE_ATTR(new_files_jdata, 0); 479TUNE_ATTR(new_files_jdata, 0);
480TUNE_ATTR(quota_simul_sync, 1); 480TUNE_ATTR(quota_simul_sync, 1);
481TUNE_ATTR(stall_secs, 1);
482TUNE_ATTR(statfs_quantum, 1); 481TUNE_ATTR(statfs_quantum, 1);
483TUNE_ATTR_3(quota_scale, quota_scale_show, quota_scale_store); 482TUNE_ATTR_3(quota_scale, quota_scale_show, quota_scale_store);
484 483
@@ -491,7 +490,6 @@ static struct attribute *tune_attrs[] = {
491 &tune_attr_complain_secs.attr, 490 &tune_attr_complain_secs.attr,
492 &tune_attr_statfs_slow.attr, 491 &tune_attr_statfs_slow.attr,
493 &tune_attr_quota_simul_sync.attr, 492 &tune_attr_quota_simul_sync.attr,
494 &tune_attr_stall_secs.attr,
495 &tune_attr_statfs_quantum.attr, 493 &tune_attr_statfs_quantum.attr,
496 &tune_attr_quota_scale.attr, 494 &tune_attr_quota_scale.attr,
497 &tune_attr_new_files_jdata.attr, 495 &tune_attr_new_files_jdata.attr,
diff --git a/fs/gfs2/util.c b/fs/gfs2/util.c
index f6a7efa34eb9..226f2bfbf16a 100644
--- a/fs/gfs2/util.c
+++ b/fs/gfs2/util.c
@@ -21,6 +21,7 @@
21#include "util.h" 21#include "util.h"
22 22
23struct kmem_cache *gfs2_glock_cachep __read_mostly; 23struct kmem_cache *gfs2_glock_cachep __read_mostly;
24struct kmem_cache *gfs2_glock_aspace_cachep __read_mostly;
24struct kmem_cache *gfs2_inode_cachep __read_mostly; 25struct kmem_cache *gfs2_inode_cachep __read_mostly;
25struct kmem_cache *gfs2_bufdata_cachep __read_mostly; 26struct kmem_cache *gfs2_bufdata_cachep __read_mostly;
26struct kmem_cache *gfs2_rgrpd_cachep __read_mostly; 27struct kmem_cache *gfs2_rgrpd_cachep __read_mostly;
diff --git a/fs/gfs2/util.h b/fs/gfs2/util.h
index 33e96b0ce9ab..b432e04600de 100644
--- a/fs/gfs2/util.h
+++ b/fs/gfs2/util.h
@@ -145,6 +145,7 @@ gfs2_io_error_bh_i((sdp), (bh), __func__, __FILE__, __LINE__);
145 145
146 146
147extern struct kmem_cache *gfs2_glock_cachep; 147extern struct kmem_cache *gfs2_glock_cachep;
148extern struct kmem_cache *gfs2_glock_aspace_cachep;
148extern struct kmem_cache *gfs2_inode_cachep; 149extern struct kmem_cache *gfs2_inode_cachep;
149extern struct kmem_cache *gfs2_bufdata_cachep; 150extern struct kmem_cache *gfs2_bufdata_cachep;
150extern struct kmem_cache *gfs2_rgrpd_cachep; 151extern struct kmem_cache *gfs2_rgrpd_cachep;
diff --git a/fs/hfs/hfs_fs.h b/fs/hfs/hfs_fs.h
index 052387e11671..fe35e3b626c4 100644
--- a/fs/hfs/hfs_fs.h
+++ b/fs/hfs/hfs_fs.h
@@ -188,7 +188,7 @@ extern const struct address_space_operations hfs_btree_aops;
188 188
189extern struct inode *hfs_new_inode(struct inode *, struct qstr *, int); 189extern struct inode *hfs_new_inode(struct inode *, struct qstr *, int);
190extern void hfs_inode_write_fork(struct inode *, struct hfs_extent *, __be32 *, __be32 *); 190extern void hfs_inode_write_fork(struct inode *, struct hfs_extent *, __be32 *, __be32 *);
191extern int hfs_write_inode(struct inode *, int); 191extern int hfs_write_inode(struct inode *, struct writeback_control *);
192extern int hfs_inode_setattr(struct dentry *, struct iattr *); 192extern int hfs_inode_setattr(struct dentry *, struct iattr *);
193extern void hfs_inode_read_fork(struct inode *inode, struct hfs_extent *ext, 193extern void hfs_inode_read_fork(struct inode *inode, struct hfs_extent *ext,
194 __be32 log_size, __be32 phys_size, u32 clump_size); 194 __be32 log_size, __be32 phys_size, u32 clump_size);
diff --git a/fs/hfs/inode.c b/fs/hfs/inode.c
index a1cbff2b4d99..14f5cb1b9fdc 100644
--- a/fs/hfs/inode.c
+++ b/fs/hfs/inode.c
@@ -381,7 +381,7 @@ void hfs_inode_write_fork(struct inode *inode, struct hfs_extent *ext,
381 HFS_SB(inode->i_sb)->alloc_blksz); 381 HFS_SB(inode->i_sb)->alloc_blksz);
382} 382}
383 383
384int hfs_write_inode(struct inode *inode, int unused) 384int hfs_write_inode(struct inode *inode, struct writeback_control *wbc)
385{ 385{
386 struct inode *main_inode = inode; 386 struct inode *main_inode = inode;
387 struct hfs_find_data fd; 387 struct hfs_find_data fd;
diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c
index 43022f3d5148..74b473a8ef92 100644
--- a/fs/hfsplus/super.c
+++ b/fs/hfsplus/super.c
@@ -87,7 +87,8 @@ bad_inode:
87 return ERR_PTR(err); 87 return ERR_PTR(err);
88} 88}
89 89
90static int hfsplus_write_inode(struct inode *inode, int unused) 90static int hfsplus_write_inode(struct inode *inode,
91 struct writeback_control *wbc)
91{ 92{
92 struct hfsplus_vh *vhdr; 93 struct hfsplus_vh *vhdr;
93 int ret = 0; 94 int ret = 0;
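hfs and hfsplus receive the same ->write_inode signature conversion as fat earlier in this patch: the bare `int` becomes a `struct writeback_control *`, and since both filesystems already ignored the old flag (`int unused`), the new wbc argument is simply unused too. The wrapper sketch shown after the fat hunk covers the pattern for callbacks that do consult the flag.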
diff --git a/fs/hpfs/anode.c b/fs/hpfs/anode.c
index 1aa88c4e0964..6a2f04bf3df0 100644
--- a/fs/hpfs/anode.c
+++ b/fs/hpfs/anode.c
@@ -353,7 +353,7 @@ int hpfs_ea_read(struct super_block *s, secno a, int ano, unsigned pos,
353} 353}
354 354
355int hpfs_ea_write(struct super_block *s, secno a, int ano, unsigned pos, 355int hpfs_ea_write(struct super_block *s, secno a, int ano, unsigned pos,
356 unsigned len, char *buf) 356 unsigned len, const char *buf)
357{ 357{
358 struct buffer_head *bh; 358 struct buffer_head *bh;
359 char *data; 359 char *data;
diff --git a/fs/hpfs/dentry.c b/fs/hpfs/dentry.c
index 940d6d150bee..67d9d36b3d5f 100644
--- a/fs/hpfs/dentry.c
+++ b/fs/hpfs/dentry.c
@@ -20,8 +20,8 @@ static int hpfs_hash_dentry(struct dentry *dentry, struct qstr *qstr)
20 20
21 if (l == 1) if (qstr->name[0]=='.') goto x; 21 if (l == 1) if (qstr->name[0]=='.') goto x;
22 if (l == 2) if (qstr->name[0]=='.' || qstr->name[1]=='.') goto x; 22 if (l == 2) if (qstr->name[0]=='.' || qstr->name[1]=='.') goto x;
23 hpfs_adjust_length((char *)qstr->name, &l); 23 hpfs_adjust_length(qstr->name, &l);
24 /*if (hpfs_chk_name((char *)qstr->name,&l))*/ 24 /*if (hpfs_chk_name(qstr->name,&l))*/
25 /*return -ENAMETOOLONG;*/ 25 /*return -ENAMETOOLONG;*/
26 /*return -ENOENT;*/ 26 /*return -ENOENT;*/
27 x: 27 x:
@@ -38,14 +38,16 @@ static int hpfs_compare_dentry(struct dentry *dentry, struct qstr *a, struct qst
38{ 38{
39 unsigned al=a->len; 39 unsigned al=a->len;
40 unsigned bl=b->len; 40 unsigned bl=b->len;
41 hpfs_adjust_length((char *)a->name, &al); 41 hpfs_adjust_length(a->name, &al);
42 /*hpfs_adjust_length((char *)b->name, &bl);*/ 42 /*hpfs_adjust_length(b->name, &bl);*/
43 /* 'a' is the qstr of an already existing dentry, so the name 43 /* 'a' is the qstr of an already existing dentry, so the name
44 * must be valid. 'b' must be validated first. 44 * must be valid. 'b' must be validated first.
45 */ 45 */
46 46
47 if (hpfs_chk_name((char *)b->name, &bl)) return 1; 47 if (hpfs_chk_name(b->name, &bl))
48 if (hpfs_compare_names(dentry->d_sb, (char *)a->name, al, (char *)b->name, bl, 0)) return 1; 48 return 1;
49 if (hpfs_compare_names(dentry->d_sb, a->name, al, b->name, bl, 0))
50 return 1;
49 return 0; 51 return 0;
50} 52}
51 53
diff --git a/fs/hpfs/dir.c b/fs/hpfs/dir.c
index 8865c94f55f6..26e3964a4b8c 100644
--- a/fs/hpfs/dir.c
+++ b/fs/hpfs/dir.c
@@ -59,7 +59,7 @@ static int hpfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
59 struct hpfs_dirent *de; 59 struct hpfs_dirent *de;
60 int lc; 60 int lc;
61 long old_pos; 61 long old_pos;
62 char *tempname; 62 unsigned char *tempname;
63 int c1, c2 = 0; 63 int c1, c2 = 0;
64 int ret = 0; 64 int ret = 0;
65 65
@@ -158,11 +158,11 @@ static int hpfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
158 tempname = hpfs_translate_name(inode->i_sb, de->name, de->namelen, lc, de->not_8x3); 158 tempname = hpfs_translate_name(inode->i_sb, de->name, de->namelen, lc, de->not_8x3);
159 if (filldir(dirent, tempname, de->namelen, old_pos, de->fnode, DT_UNKNOWN) < 0) { 159 if (filldir(dirent, tempname, de->namelen, old_pos, de->fnode, DT_UNKNOWN) < 0) {
160 filp->f_pos = old_pos; 160 filp->f_pos = old_pos;
161 if (tempname != (char *)de->name) kfree(tempname); 161 if (tempname != de->name) kfree(tempname);
162 hpfs_brelse4(&qbh); 162 hpfs_brelse4(&qbh);
163 goto out; 163 goto out;
164 } 164 }
165 if (tempname != (char *)de->name) kfree(tempname); 165 if (tempname != de->name) kfree(tempname);
166 hpfs_brelse4(&qbh); 166 hpfs_brelse4(&qbh);
167 } 167 }
168out: 168out:
@@ -187,7 +187,7 @@ out:
187 187
188struct dentry *hpfs_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd) 188struct dentry *hpfs_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
189{ 189{
190 const char *name = dentry->d_name.name; 190 const unsigned char *name = dentry->d_name.name;
191 unsigned len = dentry->d_name.len; 191 unsigned len = dentry->d_name.len;
192 struct quad_buffer_head qbh; 192 struct quad_buffer_head qbh;
193 struct hpfs_dirent *de; 193 struct hpfs_dirent *de;
@@ -197,7 +197,7 @@ struct dentry *hpfs_lookup(struct inode *dir, struct dentry *dentry, struct name
197 struct hpfs_inode_info *hpfs_result; 197 struct hpfs_inode_info *hpfs_result;
198 198
199 lock_kernel(); 199 lock_kernel();
200 if ((err = hpfs_chk_name((char *)name, &len))) { 200 if ((err = hpfs_chk_name(name, &len))) {
201 if (err == -ENAMETOOLONG) { 201 if (err == -ENAMETOOLONG) {
202 unlock_kernel(); 202 unlock_kernel();
203 return ERR_PTR(-ENAMETOOLONG); 203 return ERR_PTR(-ENAMETOOLONG);
@@ -209,7 +209,7 @@ struct dentry *hpfs_lookup(struct inode *dir, struct dentry *dentry, struct name
209 * '.' and '..' will never be passed here. 209 * '.' and '..' will never be passed here.
210 */ 210 */
211 211
212 de = map_dirent(dir, hpfs_i(dir)->i_dno, (char *) name, len, NULL, &qbh); 212 de = map_dirent(dir, hpfs_i(dir)->i_dno, name, len, NULL, &qbh);
213 213
214 /* 214 /*
215 * This is not really a bailout, just means file not found. 215 * This is not really a bailout, just means file not found.
@@ -250,7 +250,7 @@ struct dentry *hpfs_lookup(struct inode *dir, struct dentry *dentry, struct name
250 hpfs_result = hpfs_i(result); 250 hpfs_result = hpfs_i(result);
251 if (!de->directory) hpfs_result->i_parent_dir = dir->i_ino; 251 if (!de->directory) hpfs_result->i_parent_dir = dir->i_ino;
252 252
253 hpfs_decide_conv(result, (char *)name, len); 253 hpfs_decide_conv(result, name, len);
254 254
255 if (de->has_acl || de->has_xtd_perm) if (!(dir->i_sb->s_flags & MS_RDONLY)) { 255 if (de->has_acl || de->has_xtd_perm) if (!(dir->i_sb->s_flags & MS_RDONLY)) {
256 hpfs_error(result->i_sb, "ACLs or XPERM found. This is probably HPFS386. This driver doesn't support it now. Send me some info on these structures"); 256 hpfs_error(result->i_sb, "ACLs or XPERM found. This is probably HPFS386. This driver doesn't support it now. Send me some info on these structures");
diff --git a/fs/hpfs/dnode.c b/fs/hpfs/dnode.c
index fe83c2b7d2d8..9b2ffadfc8c4 100644
--- a/fs/hpfs/dnode.c
+++ b/fs/hpfs/dnode.c
@@ -158,7 +158,8 @@ static void set_last_pointer(struct super_block *s, struct dnode *d, dnode_secno
158 158
159/* Add an entry to dnode and don't care if it grows over 2048 bytes */ 159/* Add an entry to dnode and don't care if it grows over 2048 bytes */
160 160
161struct hpfs_dirent *hpfs_add_de(struct super_block *s, struct dnode *d, unsigned char *name, 161struct hpfs_dirent *hpfs_add_de(struct super_block *s, struct dnode *d,
162 const unsigned char *name,
162 unsigned namelen, secno down_ptr) 163 unsigned namelen, secno down_ptr)
163{ 164{
164 struct hpfs_dirent *de; 165 struct hpfs_dirent *de;
@@ -223,7 +224,7 @@ static void fix_up_ptrs(struct super_block *s, struct dnode *d)
223/* Add an entry to dnode and do dnode splitting if required */ 224/* Add an entry to dnode and do dnode splitting if required */
224 225
225static int hpfs_add_to_dnode(struct inode *i, dnode_secno dno, 226static int hpfs_add_to_dnode(struct inode *i, dnode_secno dno,
226 unsigned char *name, unsigned namelen, 227 const unsigned char *name, unsigned namelen,
227 struct hpfs_dirent *new_de, dnode_secno down_ptr) 228 struct hpfs_dirent *new_de, dnode_secno down_ptr)
228{ 229{
229 struct quad_buffer_head qbh, qbh1, qbh2; 230 struct quad_buffer_head qbh, qbh1, qbh2;
@@ -231,7 +232,7 @@ static int hpfs_add_to_dnode(struct inode *i, dnode_secno dno,
231 dnode_secno adno, rdno; 232 dnode_secno adno, rdno;
232 struct hpfs_dirent *de; 233 struct hpfs_dirent *de;
233 struct hpfs_dirent nde; 234 struct hpfs_dirent nde;
234 char *nname; 235 unsigned char *nname;
235 int h; 236 int h;
236 int pos; 237 int pos;
237 struct buffer_head *bh; 238 struct buffer_head *bh;
@@ -305,7 +306,9 @@ static int hpfs_add_to_dnode(struct inode *i, dnode_secno dno,
305 pos++; 306 pos++;
306 } 307 }
307 copy_de(new_de = &nde, de); 308 copy_de(new_de = &nde, de);
308 memcpy(name = nname, de->name, namelen = de->namelen); 309 memcpy(nname, de->name, de->namelen);
310 name = nname;
311 namelen = de->namelen;
309 for_all_poss(i, hpfs_pos_subst, ((loff_t)dno << 4) | pos, 4); 312 for_all_poss(i, hpfs_pos_subst, ((loff_t)dno << 4) | pos, 4);
310 down_ptr = adno; 313 down_ptr = adno;
311 set_last_pointer(i->i_sb, ad, de->down ? de_down_pointer(de) : 0); 314 set_last_pointer(i->i_sb, ad, de->down ? de_down_pointer(de) : 0);
@@ -368,7 +371,8 @@ static int hpfs_add_to_dnode(struct inode *i, dnode_secno dno,
368 * I hope, now it's finally bug-free. 371 * I hope, now it's finally bug-free.
369 */ 372 */
370 373
371int hpfs_add_dirent(struct inode *i, unsigned char *name, unsigned namelen, 374int hpfs_add_dirent(struct inode *i,
375 const unsigned char *name, unsigned namelen,
372 struct hpfs_dirent *new_de, int cdepth) 376 struct hpfs_dirent *new_de, int cdepth)
373{ 377{
374 struct hpfs_inode_info *hpfs_inode = hpfs_i(i); 378 struct hpfs_inode_info *hpfs_inode = hpfs_i(i);
@@ -897,7 +901,8 @@ struct hpfs_dirent *map_pos_dirent(struct inode *inode, loff_t *posp,
897 901
898/* Find a dirent in tree */ 902/* Find a dirent in tree */
899 903
900struct hpfs_dirent *map_dirent(struct inode *inode, dnode_secno dno, char *name, unsigned len, 904struct hpfs_dirent *map_dirent(struct inode *inode, dnode_secno dno,
905 const unsigned char *name, unsigned len,
901 dnode_secno *dd, struct quad_buffer_head *qbh) 906 dnode_secno *dd, struct quad_buffer_head *qbh)
902{ 907{
903 struct dnode *dnode; 908 struct dnode *dnode;
@@ -988,8 +993,8 @@ void hpfs_remove_dtree(struct super_block *s, dnode_secno dno)
988struct hpfs_dirent *map_fnode_dirent(struct super_block *s, fnode_secno fno, 993struct hpfs_dirent *map_fnode_dirent(struct super_block *s, fnode_secno fno,
989 struct fnode *f, struct quad_buffer_head *qbh) 994 struct fnode *f, struct quad_buffer_head *qbh)
990{ 995{
991 char *name1; 996 unsigned char *name1;
992 char *name2; 997 unsigned char *name2;
993 int name1len, name2len; 998 int name1len, name2len;
994 struct dnode *d; 999 struct dnode *d;
995 dnode_secno dno, downd; 1000 dnode_secno dno, downd;
diff --git a/fs/hpfs/ea.c b/fs/hpfs/ea.c
index 547a8384571f..45e53d972b42 100644
--- a/fs/hpfs/ea.c
+++ b/fs/hpfs/ea.c
@@ -62,8 +62,8 @@ static char *get_indirect_ea(struct super_block *s, int ano, secno a, int size)
62 return ret; 62 return ret;
63} 63}
64 64
65static void set_indirect_ea(struct super_block *s, int ano, secno a, char *data, 65static void set_indirect_ea(struct super_block *s, int ano, secno a,
66 int size) 66 const char *data, int size)
67{ 67{
68 hpfs_ea_write(s, a, ano, 0, size, data); 68 hpfs_ea_write(s, a, ano, 0, size, data);
69} 69}
@@ -186,7 +186,8 @@ char *hpfs_get_ea(struct super_block *s, struct fnode *fnode, char *key, int *si
186 * This driver can't change sizes of eas ('cause I just don't need it). 186 * This driver can't change sizes of eas ('cause I just don't need it).
187 */ 187 */
188 188
189void hpfs_set_ea(struct inode *inode, struct fnode *fnode, char *key, char *data, int size) 189void hpfs_set_ea(struct inode *inode, struct fnode *fnode, const char *key,
190 const char *data, int size)
190{ 191{
191 fnode_secno fno = inode->i_ino; 192 fnode_secno fno = inode->i_ino;
192 struct super_block *s = inode->i_sb; 193 struct super_block *s = inode->i_sb;
diff --git a/fs/hpfs/hpfs_fn.h b/fs/hpfs/hpfs_fn.h
index 701ca54c0867..97bf738cd5d6 100644
--- a/fs/hpfs/hpfs_fn.h
+++ b/fs/hpfs/hpfs_fn.h
@@ -215,7 +215,7 @@ secno hpfs_bplus_lookup(struct super_block *, struct inode *, struct bplus_heade
215secno hpfs_add_sector_to_btree(struct super_block *, secno, int, unsigned); 215secno hpfs_add_sector_to_btree(struct super_block *, secno, int, unsigned);
216void hpfs_remove_btree(struct super_block *, struct bplus_header *); 216void hpfs_remove_btree(struct super_block *, struct bplus_header *);
217int hpfs_ea_read(struct super_block *, secno, int, unsigned, unsigned, char *); 217int hpfs_ea_read(struct super_block *, secno, int, unsigned, unsigned, char *);
218int hpfs_ea_write(struct super_block *, secno, int, unsigned, unsigned, char *); 218int hpfs_ea_write(struct super_block *, secno, int, unsigned, unsigned, const char *);
219void hpfs_ea_remove(struct super_block *, secno, int, unsigned); 219void hpfs_ea_remove(struct super_block *, secno, int, unsigned);
220void hpfs_truncate_btree(struct super_block *, secno, int, unsigned); 220void hpfs_truncate_btree(struct super_block *, secno, int, unsigned);
221void hpfs_remove_fnode(struct super_block *, fnode_secno fno); 221void hpfs_remove_fnode(struct super_block *, fnode_secno fno);
@@ -244,13 +244,17 @@ extern const struct file_operations hpfs_dir_ops;
244 244
245void hpfs_add_pos(struct inode *, loff_t *); 245void hpfs_add_pos(struct inode *, loff_t *);
246void hpfs_del_pos(struct inode *, loff_t *); 246void hpfs_del_pos(struct inode *, loff_t *);
247struct hpfs_dirent *hpfs_add_de(struct super_block *, struct dnode *, unsigned char *, unsigned, secno); 247struct hpfs_dirent *hpfs_add_de(struct super_block *, struct dnode *,
248int hpfs_add_dirent(struct inode *, unsigned char *, unsigned, struct hpfs_dirent *, int); 248 const unsigned char *, unsigned, secno);
249int hpfs_add_dirent(struct inode *, const unsigned char *, unsigned,
250 struct hpfs_dirent *, int);
249int hpfs_remove_dirent(struct inode *, dnode_secno, struct hpfs_dirent *, struct quad_buffer_head *, int); 251int hpfs_remove_dirent(struct inode *, dnode_secno, struct hpfs_dirent *, struct quad_buffer_head *, int);
250void hpfs_count_dnodes(struct super_block *, dnode_secno, int *, int *, int *); 252void hpfs_count_dnodes(struct super_block *, dnode_secno, int *, int *, int *);
251dnode_secno hpfs_de_as_down_as_possible(struct super_block *, dnode_secno dno); 253dnode_secno hpfs_de_as_down_as_possible(struct super_block *, dnode_secno dno);
252struct hpfs_dirent *map_pos_dirent(struct inode *, loff_t *, struct quad_buffer_head *); 254struct hpfs_dirent *map_pos_dirent(struct inode *, loff_t *, struct quad_buffer_head *);
253struct hpfs_dirent *map_dirent(struct inode *, dnode_secno, char *, unsigned, dnode_secno *, struct quad_buffer_head *); 255struct hpfs_dirent *map_dirent(struct inode *, dnode_secno,
256 const unsigned char *, unsigned, dnode_secno *,
257 struct quad_buffer_head *);
254void hpfs_remove_dtree(struct super_block *, dnode_secno); 258void hpfs_remove_dtree(struct super_block *, dnode_secno);
255struct hpfs_dirent *map_fnode_dirent(struct super_block *, fnode_secno, struct fnode *, struct quad_buffer_head *); 259struct hpfs_dirent *map_fnode_dirent(struct super_block *, fnode_secno, struct fnode *, struct quad_buffer_head *);
256 260
@@ -259,7 +263,8 @@ struct hpfs_dirent *map_fnode_dirent(struct super_block *, fnode_secno, struct f
259void hpfs_ea_ext_remove(struct super_block *, secno, int, unsigned); 263void hpfs_ea_ext_remove(struct super_block *, secno, int, unsigned);
260int hpfs_read_ea(struct super_block *, struct fnode *, char *, char *, int); 264int hpfs_read_ea(struct super_block *, struct fnode *, char *, char *, int);
261char *hpfs_get_ea(struct super_block *, struct fnode *, char *, int *); 265char *hpfs_get_ea(struct super_block *, struct fnode *, char *, int *);
262void hpfs_set_ea(struct inode *, struct fnode *, char *, char *, int); 266void hpfs_set_ea(struct inode *, struct fnode *, const char *,
267 const char *, int);
263 268
264/* file.c */ 269/* file.c */
265 270
@@ -282,7 +287,7 @@ void hpfs_delete_inode(struct inode *);
282 287
283unsigned *hpfs_map_dnode_bitmap(struct super_block *, struct quad_buffer_head *); 288unsigned *hpfs_map_dnode_bitmap(struct super_block *, struct quad_buffer_head *);
284unsigned *hpfs_map_bitmap(struct super_block *, unsigned, struct quad_buffer_head *, char *); 289unsigned *hpfs_map_bitmap(struct super_block *, unsigned, struct quad_buffer_head *, char *);
285char *hpfs_load_code_page(struct super_block *, secno); 290unsigned char *hpfs_load_code_page(struct super_block *, secno);
286secno *hpfs_load_bitmap_directory(struct super_block *, secno bmp); 291secno *hpfs_load_bitmap_directory(struct super_block *, secno bmp);
287struct fnode *hpfs_map_fnode(struct super_block *s, ino_t, struct buffer_head **); 292struct fnode *hpfs_map_fnode(struct super_block *s, ino_t, struct buffer_head **);
288struct anode *hpfs_map_anode(struct super_block *s, anode_secno, struct buffer_head **); 293struct anode *hpfs_map_anode(struct super_block *s, anode_secno, struct buffer_head **);
@@ -292,12 +297,13 @@ dnode_secno hpfs_fnode_dno(struct super_block *s, ino_t ino);
292/* name.c */ 297/* name.c */
293 298
294unsigned char hpfs_upcase(unsigned char *, unsigned char); 299unsigned char hpfs_upcase(unsigned char *, unsigned char);
295int hpfs_chk_name(unsigned char *, unsigned *); 300int hpfs_chk_name(const unsigned char *, unsigned *);
296char *hpfs_translate_name(struct super_block *, unsigned char *, unsigned, int, int); 301unsigned char *hpfs_translate_name(struct super_block *, unsigned char *, unsigned, int, int);
297int hpfs_compare_names(struct super_block *, unsigned char *, unsigned, unsigned char *, unsigned, int); 302int hpfs_compare_names(struct super_block *, const unsigned char *, unsigned,
298int hpfs_is_name_long(unsigned char *, unsigned); 303 const unsigned char *, unsigned, int);
299void hpfs_adjust_length(unsigned char *, unsigned *); 304int hpfs_is_name_long(const unsigned char *, unsigned);
300void hpfs_decide_conv(struct inode *, unsigned char *, unsigned); 305void hpfs_adjust_length(const unsigned char *, unsigned *);
306void hpfs_decide_conv(struct inode *, const unsigned char *, unsigned);
301 307
302/* namei.c */ 308/* namei.c */
303 309
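The hpfs prototype changes above are a const-correctness sweep: name parameters become const unsigned char * so that dentry->d_name.name (itself const unsigned char * in the VFS) can be passed straight through, letting the (char *) casts at the call sites in dir.c and namei.c disappear. A runnable userspace sketch of the idea, using simplified stand-in types rather than the kernel definitions:

#include <stdio.h>

struct qstr {
	const unsigned char *name;	/* same qualifiers as the VFS field */
	unsigned int len;
};

/* new-style prototype: the name is only read, never modified */
static int chk_name(const unsigned char *name, unsigned int *len)
{
	for (unsigned int i = 0; i < *len; i++)
		if (name[i] == '/' || name[i] == '\\')
			return -1;
	return 0;
}

int main(void)
{
	struct qstr q = { (const unsigned char *)"README", 6 };
	unsigned int len = q.len;

	/* with the old "unsigned char *" prototype this call would have
	 * needed a const-dropping cast, as hpfs_lookup() used to do
	 */
	printf("valid: %d\n", chk_name(q.name, &len) == 0);
	return 0;
}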
diff --git a/fs/hpfs/inode.c b/fs/hpfs/inode.c
index fe703ae46bc7..ff90affb94e1 100644
--- a/fs/hpfs/inode.c
+++ b/fs/hpfs/inode.c
@@ -46,7 +46,7 @@ void hpfs_read_inode(struct inode *i)
46 struct fnode *fnode; 46 struct fnode *fnode;
47 struct super_block *sb = i->i_sb; 47 struct super_block *sb = i->i_sb;
48 struct hpfs_inode_info *hpfs_inode = hpfs_i(i); 48 struct hpfs_inode_info *hpfs_inode = hpfs_i(i);
49 unsigned char *ea; 49 void *ea;
50 int ea_size; 50 int ea_size;
51 51
52 if (!(fnode = hpfs_map_fnode(sb, i->i_ino, &bh))) { 52 if (!(fnode = hpfs_map_fnode(sb, i->i_ino, &bh))) {
@@ -112,7 +112,7 @@ void hpfs_read_inode(struct inode *i)
112 } 112 }
113 } 113 }
114 if (fnode->dirflag) { 114 if (fnode->dirflag) {
115 unsigned n_dnodes, n_subdirs; 115 int n_dnodes, n_subdirs;
116 i->i_mode |= S_IFDIR; 116 i->i_mode |= S_IFDIR;
117 i->i_op = &hpfs_dir_iops; 117 i->i_op = &hpfs_dir_iops;
118 i->i_fop = &hpfs_dir_ops; 118 i->i_fop = &hpfs_dir_ops;
diff --git a/fs/hpfs/map.c b/fs/hpfs/map.c
index c4724589b2eb..840d033ecee8 100644
--- a/fs/hpfs/map.c
+++ b/fs/hpfs/map.c
@@ -35,7 +35,7 @@ unsigned int *hpfs_map_bitmap(struct super_block *s, unsigned bmp_block,
35 * lowercasing table 35 * lowercasing table
36 */ 36 */
37 37
38char *hpfs_load_code_page(struct super_block *s, secno cps) 38unsigned char *hpfs_load_code_page(struct super_block *s, secno cps)
39{ 39{
40 struct buffer_head *bh; 40 struct buffer_head *bh;
41 secno cpds; 41 secno cpds;
@@ -71,7 +71,7 @@ char *hpfs_load_code_page(struct super_block *s, secno cps)
71 brelse(bh); 71 brelse(bh);
72 return NULL; 72 return NULL;
73 } 73 }
74 ptr = (char *)cpd + cpd->offs[cpi] + 6; 74 ptr = (unsigned char *)cpd + cpd->offs[cpi] + 6;
75 if (!(cp_table = kmalloc(256, GFP_KERNEL))) { 75 if (!(cp_table = kmalloc(256, GFP_KERNEL))) {
76 printk("HPFS: out of memory for code page table\n"); 76 printk("HPFS: out of memory for code page table\n");
77 brelse(bh); 77 brelse(bh);
@@ -217,7 +217,7 @@ struct dnode *hpfs_map_dnode(struct super_block *s, unsigned secno,
217 if ((dnode = hpfs_map_4sectors(s, secno, qbh, DNODE_RD_AHEAD))) 217 if ((dnode = hpfs_map_4sectors(s, secno, qbh, DNODE_RD_AHEAD)))
218 if (hpfs_sb(s)->sb_chk) { 218 if (hpfs_sb(s)->sb_chk) {
219 unsigned p, pp = 0; 219 unsigned p, pp = 0;
220 unsigned char *d = (char *)dnode; 220 unsigned char *d = (unsigned char *)dnode;
221 int b = 0; 221 int b = 0;
222 if (dnode->magic != DNODE_MAGIC) { 222 if (dnode->magic != DNODE_MAGIC) {
223 hpfs_error(s, "bad magic on dnode %08x", secno); 223 hpfs_error(s, "bad magic on dnode %08x", secno);
diff --git a/fs/hpfs/name.c b/fs/hpfs/name.c
index 1f4a964384eb..f24736d7a439 100644
--- a/fs/hpfs/name.c
+++ b/fs/hpfs/name.c
@@ -8,16 +8,16 @@
8 8
9#include "hpfs_fn.h" 9#include "hpfs_fn.h"
10 10
11static char *text_postfix[]={ 11static const char *text_postfix[]={
12".ASM", ".BAS", ".BAT", ".C", ".CC", ".CFG", ".CMD", ".CON", ".CPP", ".DEF", 12".ASM", ".BAS", ".BAT", ".C", ".CC", ".CFG", ".CMD", ".CON", ".CPP", ".DEF",
13".DOC", ".DPR", ".ERX", ".H", ".HPP", ".HTM", ".HTML", ".JAVA", ".LOG", ".PAS", 13".DOC", ".DPR", ".ERX", ".H", ".HPP", ".HTM", ".HTML", ".JAVA", ".LOG", ".PAS",
14".RC", ".TEX", ".TXT", ".Y", ""}; 14".RC", ".TEX", ".TXT", ".Y", ""};
15 15
16static char *text_prefix[]={ 16static const char *text_prefix[]={
17"AUTOEXEC.", "CHANGES", "COPYING", "CONFIG.", "CREDITS", "FAQ", "FILE_ID.DIZ", 17"AUTOEXEC.", "CHANGES", "COPYING", "CONFIG.", "CREDITS", "FAQ", "FILE_ID.DIZ",
18"MAKEFILE", "READ.ME", "README", "TERMCAP", ""}; 18"MAKEFILE", "READ.ME", "README", "TERMCAP", ""};
19 19
20void hpfs_decide_conv(struct inode *inode, unsigned char *name, unsigned len) 20void hpfs_decide_conv(struct inode *inode, const unsigned char *name, unsigned len)
21{ 21{
22 struct hpfs_inode_info *hpfs_inode = hpfs_i(inode); 22 struct hpfs_inode_info *hpfs_inode = hpfs_i(inode);
23 int i; 23 int i;
@@ -71,7 +71,7 @@ static inline unsigned char locase(unsigned char *dir, unsigned char a)
71 return dir[a]; 71 return dir[a];
72} 72}
73 73
74int hpfs_chk_name(unsigned char *name, unsigned *len) 74int hpfs_chk_name(const unsigned char *name, unsigned *len)
75{ 75{
76 int i; 76 int i;
77 if (*len > 254) return -ENAMETOOLONG; 77 if (*len > 254) return -ENAMETOOLONG;
@@ -83,10 +83,10 @@ int hpfs_chk_name(unsigned char *name, unsigned *len)
83 return 0; 83 return 0;
84} 84}
85 85
86char *hpfs_translate_name(struct super_block *s, unsigned char *from, 86unsigned char *hpfs_translate_name(struct super_block *s, unsigned char *from,
87 unsigned len, int lc, int lng) 87 unsigned len, int lc, int lng)
88{ 88{
89 char *to; 89 unsigned char *to;
90 int i; 90 int i;
91 if (hpfs_sb(s)->sb_chk >= 2) if (hpfs_is_name_long(from, len) != lng) { 91 if (hpfs_sb(s)->sb_chk >= 2) if (hpfs_is_name_long(from, len) != lng) {
92 printk("HPFS: Long name flag mismatch - name "); 92 printk("HPFS: Long name flag mismatch - name ");
@@ -103,8 +103,9 @@ char *hpfs_translate_name(struct super_block *s, unsigned char *from,
103 return to; 103 return to;
104} 104}
105 105
106int hpfs_compare_names(struct super_block *s, unsigned char *n1, unsigned l1, 106int hpfs_compare_names(struct super_block *s,
107 unsigned char *n2, unsigned l2, int last) 107 const unsigned char *n1, unsigned l1,
108 const unsigned char *n2, unsigned l2, int last)
108{ 109{
109 unsigned l = l1 < l2 ? l1 : l2; 110 unsigned l = l1 < l2 ? l1 : l2;
110 unsigned i; 111 unsigned i;
@@ -120,7 +121,7 @@ int hpfs_compare_names(struct super_block *s, unsigned char *n1, unsigned l1,
120 return 0; 121 return 0;
121} 122}
122 123
123int hpfs_is_name_long(unsigned char *name, unsigned len) 124int hpfs_is_name_long(const unsigned char *name, unsigned len)
124{ 125{
125 int i,j; 126 int i,j;
126 for (i = 0; i < len && name[i] != '.'; i++) 127 for (i = 0; i < len && name[i] != '.'; i++)
@@ -134,7 +135,7 @@ int hpfs_is_name_long(unsigned char *name, unsigned len)
134 135
135/* OS/2 clears dots and spaces at the end of file name, so we have to as well */ 136/* OS/2 clears dots and spaces at the end of file name, so we have to as well */
136 137
137void hpfs_adjust_length(unsigned char *name, unsigned *len) 138void hpfs_adjust_length(const unsigned char *name, unsigned *len)
138{ 139{
139 if (!*len) return; 140 if (!*len) return;
140 if (*len == 1 && name[0] == '.') return; 141 if (*len == 1 && name[0] == '.') return;
diff --git a/fs/hpfs/namei.c b/fs/hpfs/namei.c
index 82b9c4ba9ed0..11c2b4080f65 100644
--- a/fs/hpfs/namei.c
+++ b/fs/hpfs/namei.c
@@ -11,7 +11,7 @@
11 11
12static int hpfs_mkdir(struct inode *dir, struct dentry *dentry, int mode) 12static int hpfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
13{ 13{
14 const char *name = dentry->d_name.name; 14 const unsigned char *name = dentry->d_name.name;
15 unsigned len = dentry->d_name.len; 15 unsigned len = dentry->d_name.len;
16 struct quad_buffer_head qbh0; 16 struct quad_buffer_head qbh0;
17 struct buffer_head *bh; 17 struct buffer_head *bh;
@@ -24,7 +24,7 @@ static int hpfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
24 int r; 24 int r;
25 struct hpfs_dirent dee; 25 struct hpfs_dirent dee;
26 int err; 26 int err;
27 if ((err = hpfs_chk_name((char *)name, &len))) return err==-ENOENT ? -EINVAL : err; 27 if ((err = hpfs_chk_name(name, &len))) return err==-ENOENT ? -EINVAL : err;
28 lock_kernel(); 28 lock_kernel();
29 err = -ENOSPC; 29 err = -ENOSPC;
30 fnode = hpfs_alloc_fnode(dir->i_sb, hpfs_i(dir)->i_dno, &fno, &bh); 30 fnode = hpfs_alloc_fnode(dir->i_sb, hpfs_i(dir)->i_dno, &fno, &bh);
@@ -62,7 +62,7 @@ static int hpfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
62 result->i_mode &= ~0222; 62 result->i_mode &= ~0222;
63 63
64 mutex_lock(&hpfs_i(dir)->i_mutex); 64 mutex_lock(&hpfs_i(dir)->i_mutex);
65 r = hpfs_add_dirent(dir, (char *)name, len, &dee, 0); 65 r = hpfs_add_dirent(dir, name, len, &dee, 0);
66 if (r == 1) 66 if (r == 1)
67 goto bail3; 67 goto bail3;
68 if (r == -1) { 68 if (r == -1) {
@@ -121,7 +121,7 @@ bail:
121 121
122static int hpfs_create(struct inode *dir, struct dentry *dentry, int mode, struct nameidata *nd) 122static int hpfs_create(struct inode *dir, struct dentry *dentry, int mode, struct nameidata *nd)
123{ 123{
124 const char *name = dentry->d_name.name; 124 const unsigned char *name = dentry->d_name.name;
125 unsigned len = dentry->d_name.len; 125 unsigned len = dentry->d_name.len;
126 struct inode *result = NULL; 126 struct inode *result = NULL;
127 struct buffer_head *bh; 127 struct buffer_head *bh;
@@ -130,7 +130,7 @@ static int hpfs_create(struct inode *dir, struct dentry *dentry, int mode, struc
130 int r; 130 int r;
131 struct hpfs_dirent dee; 131 struct hpfs_dirent dee;
132 int err; 132 int err;
133 if ((err = hpfs_chk_name((char *)name, &len))) 133 if ((err = hpfs_chk_name(name, &len)))
134 return err==-ENOENT ? -EINVAL : err; 134 return err==-ENOENT ? -EINVAL : err;
135 lock_kernel(); 135 lock_kernel();
136 err = -ENOSPC; 136 err = -ENOSPC;
@@ -155,7 +155,7 @@ static int hpfs_create(struct inode *dir, struct dentry *dentry, int mode, struc
155 result->i_op = &hpfs_file_iops; 155 result->i_op = &hpfs_file_iops;
156 result->i_fop = &hpfs_file_ops; 156 result->i_fop = &hpfs_file_ops;
157 result->i_nlink = 1; 157 result->i_nlink = 1;
158 hpfs_decide_conv(result, (char *)name, len); 158 hpfs_decide_conv(result, name, len);
159 hpfs_i(result)->i_parent_dir = dir->i_ino; 159 hpfs_i(result)->i_parent_dir = dir->i_ino;
160 result->i_ctime.tv_sec = result->i_mtime.tv_sec = result->i_atime.tv_sec = local_to_gmt(dir->i_sb, dee.creation_date); 160 result->i_ctime.tv_sec = result->i_mtime.tv_sec = result->i_atime.tv_sec = local_to_gmt(dir->i_sb, dee.creation_date);
161 result->i_ctime.tv_nsec = 0; 161 result->i_ctime.tv_nsec = 0;
@@ -170,7 +170,7 @@ static int hpfs_create(struct inode *dir, struct dentry *dentry, int mode, struc
170 hpfs_i(result)->mmu_private = 0; 170 hpfs_i(result)->mmu_private = 0;
171 171
172 mutex_lock(&hpfs_i(dir)->i_mutex); 172 mutex_lock(&hpfs_i(dir)->i_mutex);
173 r = hpfs_add_dirent(dir, (char *)name, len, &dee, 0); 173 r = hpfs_add_dirent(dir, name, len, &dee, 0);
174 if (r == 1) 174 if (r == 1)
175 goto bail2; 175 goto bail2;
176 if (r == -1) { 176 if (r == -1) {
@@ -211,7 +211,7 @@ bail:
211 211
212static int hpfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t rdev) 212static int hpfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t rdev)
213{ 213{
214 const char *name = dentry->d_name.name; 214 const unsigned char *name = dentry->d_name.name;
215 unsigned len = dentry->d_name.len; 215 unsigned len = dentry->d_name.len;
216 struct buffer_head *bh; 216 struct buffer_head *bh;
217 struct fnode *fnode; 217 struct fnode *fnode;
@@ -220,7 +220,7 @@ static int hpfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t
220 struct hpfs_dirent dee; 220 struct hpfs_dirent dee;
221 struct inode *result = NULL; 221 struct inode *result = NULL;
222 int err; 222 int err;
223 if ((err = hpfs_chk_name((char *)name, &len))) return err==-ENOENT ? -EINVAL : err; 223 if ((err = hpfs_chk_name(name, &len))) return err==-ENOENT ? -EINVAL : err;
224 if (hpfs_sb(dir->i_sb)->sb_eas < 2) return -EPERM; 224 if (hpfs_sb(dir->i_sb)->sb_eas < 2) return -EPERM;
225 if (!new_valid_dev(rdev)) 225 if (!new_valid_dev(rdev))
226 return -EINVAL; 226 return -EINVAL;
@@ -256,7 +256,7 @@ static int hpfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t
256 init_special_inode(result, mode, rdev); 256 init_special_inode(result, mode, rdev);
257 257
258 mutex_lock(&hpfs_i(dir)->i_mutex); 258 mutex_lock(&hpfs_i(dir)->i_mutex);
259 r = hpfs_add_dirent(dir, (char *)name, len, &dee, 0); 259 r = hpfs_add_dirent(dir, name, len, &dee, 0);
260 if (r == 1) 260 if (r == 1)
261 goto bail2; 261 goto bail2;
262 if (r == -1) { 262 if (r == -1) {
@@ -289,7 +289,7 @@ bail:
289 289
290static int hpfs_symlink(struct inode *dir, struct dentry *dentry, const char *symlink) 290static int hpfs_symlink(struct inode *dir, struct dentry *dentry, const char *symlink)
291{ 291{
292 const char *name = dentry->d_name.name; 292 const unsigned char *name = dentry->d_name.name;
293 unsigned len = dentry->d_name.len; 293 unsigned len = dentry->d_name.len;
294 struct buffer_head *bh; 294 struct buffer_head *bh;
295 struct fnode *fnode; 295 struct fnode *fnode;
@@ -298,7 +298,7 @@ static int hpfs_symlink(struct inode *dir, struct dentry *dentry, const char *sy
298 struct hpfs_dirent dee; 298 struct hpfs_dirent dee;
299 struct inode *result; 299 struct inode *result;
300 int err; 300 int err;
301 if ((err = hpfs_chk_name((char *)name, &len))) return err==-ENOENT ? -EINVAL : err; 301 if ((err = hpfs_chk_name(name, &len))) return err==-ENOENT ? -EINVAL : err;
302 lock_kernel(); 302 lock_kernel();
303 if (hpfs_sb(dir->i_sb)->sb_eas < 2) { 303 if (hpfs_sb(dir->i_sb)->sb_eas < 2) {
304 unlock_kernel(); 304 unlock_kernel();
@@ -335,7 +335,7 @@ static int hpfs_symlink(struct inode *dir, struct dentry *dentry, const char *sy
335 result->i_data.a_ops = &hpfs_symlink_aops; 335 result->i_data.a_ops = &hpfs_symlink_aops;
336 336
337 mutex_lock(&hpfs_i(dir)->i_mutex); 337 mutex_lock(&hpfs_i(dir)->i_mutex);
338 r = hpfs_add_dirent(dir, (char *)name, len, &dee, 0); 338 r = hpfs_add_dirent(dir, name, len, &dee, 0);
339 if (r == 1) 339 if (r == 1)
340 goto bail2; 340 goto bail2;
341 if (r == -1) { 341 if (r == -1) {
@@ -345,7 +345,7 @@ static int hpfs_symlink(struct inode *dir, struct dentry *dentry, const char *sy
345 fnode->len = len; 345 fnode->len = len;
346 memcpy(fnode->name, name, len > 15 ? 15 : len); 346 memcpy(fnode->name, name, len > 15 ? 15 : len);
347 fnode->up = dir->i_ino; 347 fnode->up = dir->i_ino;
348 hpfs_set_ea(result, fnode, "SYMLINK", (char *)symlink, strlen(symlink)); 348 hpfs_set_ea(result, fnode, "SYMLINK", symlink, strlen(symlink));
349 mark_buffer_dirty(bh); 349 mark_buffer_dirty(bh);
350 brelse(bh); 350 brelse(bh);
351 351
@@ -369,7 +369,7 @@ bail:
369 369
370static int hpfs_unlink(struct inode *dir, struct dentry *dentry) 370static int hpfs_unlink(struct inode *dir, struct dentry *dentry)
371{ 371{
372 const char *name = dentry->d_name.name; 372 const unsigned char *name = dentry->d_name.name;
373 unsigned len = dentry->d_name.len; 373 unsigned len = dentry->d_name.len;
374 struct quad_buffer_head qbh; 374 struct quad_buffer_head qbh;
375 struct hpfs_dirent *de; 375 struct hpfs_dirent *de;
@@ -381,12 +381,12 @@ static int hpfs_unlink(struct inode *dir, struct dentry *dentry)
381 int err; 381 int err;
382 382
383 lock_kernel(); 383 lock_kernel();
384 hpfs_adjust_length((char *)name, &len); 384 hpfs_adjust_length(name, &len);
385again: 385again:
386 mutex_lock(&hpfs_i(inode)->i_parent_mutex); 386 mutex_lock(&hpfs_i(inode)->i_parent_mutex);
387 mutex_lock(&hpfs_i(dir)->i_mutex); 387 mutex_lock(&hpfs_i(dir)->i_mutex);
388 err = -ENOENT; 388 err = -ENOENT;
389 de = map_dirent(dir, hpfs_i(dir)->i_dno, (char *)name, len, &dno, &qbh); 389 de = map_dirent(dir, hpfs_i(dir)->i_dno, name, len, &dno, &qbh);
390 if (!de) 390 if (!de)
391 goto out; 391 goto out;
392 392
@@ -413,22 +413,25 @@ again:
413 413
414 mutex_unlock(&hpfs_i(dir)->i_mutex); 414 mutex_unlock(&hpfs_i(dir)->i_mutex);
415 mutex_unlock(&hpfs_i(inode)->i_parent_mutex); 415 mutex_unlock(&hpfs_i(inode)->i_parent_mutex);
416 d_drop(dentry); 416 dentry_unhash(dentry);
417 spin_lock(&dentry->d_lock); 417 if (!d_unhashed(dentry)) {
418 if (atomic_read(&dentry->d_count) > 1 || 418 dput(dentry);
419 generic_permission(inode, MAY_WRITE, NULL) || 419 unlock_kernel();
420 return -ENOSPC;
421 }
422 if (generic_permission(inode, MAY_WRITE, NULL) ||
420 !S_ISREG(inode->i_mode) || 423 !S_ISREG(inode->i_mode) ||
421 get_write_access(inode)) { 424 get_write_access(inode)) {
422 spin_unlock(&dentry->d_lock);
423 d_rehash(dentry); 425 d_rehash(dentry);
426 dput(dentry);
424 } else { 427 } else {
425 struct iattr newattrs; 428 struct iattr newattrs;
426 spin_unlock(&dentry->d_lock);
427 /*printk("HPFS: truncating file before delete.\n");*/ 429 /*printk("HPFS: truncating file before delete.\n");*/
428 newattrs.ia_size = 0; 430 newattrs.ia_size = 0;
429 newattrs.ia_valid = ATTR_SIZE | ATTR_CTIME; 431 newattrs.ia_valid = ATTR_SIZE | ATTR_CTIME;
430 err = notify_change(dentry, &newattrs); 432 err = notify_change(dentry, &newattrs);
431 put_write_access(inode); 433 put_write_access(inode);
434 dput(dentry);
432 if (!err) 435 if (!err)
433 goto again; 436 goto again;
434 } 437 }
@@ -451,7 +454,7 @@ out:
451 454
452static int hpfs_rmdir(struct inode *dir, struct dentry *dentry) 455static int hpfs_rmdir(struct inode *dir, struct dentry *dentry)
453{ 456{
454 const char *name = dentry->d_name.name; 457 const unsigned char *name = dentry->d_name.name;
455 unsigned len = dentry->d_name.len; 458 unsigned len = dentry->d_name.len;
456 struct quad_buffer_head qbh; 459 struct quad_buffer_head qbh;
457 struct hpfs_dirent *de; 460 struct hpfs_dirent *de;
@@ -462,12 +465,12 @@ static int hpfs_rmdir(struct inode *dir, struct dentry *dentry)
462 int err; 465 int err;
463 int r; 466 int r;
464 467
465 hpfs_adjust_length((char *)name, &len); 468 hpfs_adjust_length(name, &len);
466 lock_kernel(); 469 lock_kernel();
467 mutex_lock(&hpfs_i(inode)->i_parent_mutex); 470 mutex_lock(&hpfs_i(inode)->i_parent_mutex);
468 mutex_lock(&hpfs_i(dir)->i_mutex); 471 mutex_lock(&hpfs_i(dir)->i_mutex);
469 err = -ENOENT; 472 err = -ENOENT;
470 de = map_dirent(dir, hpfs_i(dir)->i_dno, (char *)name, len, &dno, &qbh); 473 de = map_dirent(dir, hpfs_i(dir)->i_dno, name, len, &dno, &qbh);
471 if (!de) 474 if (!de)
472 goto out; 475 goto out;
473 476
@@ -546,10 +549,10 @@ const struct address_space_operations hpfs_symlink_aops = {
546static int hpfs_rename(struct inode *old_dir, struct dentry *old_dentry, 549static int hpfs_rename(struct inode *old_dir, struct dentry *old_dentry,
547 struct inode *new_dir, struct dentry *new_dentry) 550 struct inode *new_dir, struct dentry *new_dentry)
548{ 551{
549 char *old_name = (char *)old_dentry->d_name.name; 552 const unsigned char *old_name = old_dentry->d_name.name;
550 int old_len = old_dentry->d_name.len; 553 unsigned old_len = old_dentry->d_name.len;
551 char *new_name = (char *)new_dentry->d_name.name; 554 const unsigned char *new_name = new_dentry->d_name.name;
552 int new_len = new_dentry->d_name.len; 555 unsigned new_len = new_dentry->d_name.len;
553 struct inode *i = old_dentry->d_inode; 556 struct inode *i = old_dentry->d_inode;
554 struct inode *new_inode = new_dentry->d_inode; 557 struct inode *new_inode = new_dentry->d_inode;
555 struct quad_buffer_head qbh, qbh1; 558 struct quad_buffer_head qbh, qbh1;
@@ -560,9 +563,9 @@ static int hpfs_rename(struct inode *old_dir, struct dentry *old_dentry,
560 struct buffer_head *bh; 563 struct buffer_head *bh;
561 struct fnode *fnode; 564 struct fnode *fnode;
562 int err; 565 int err;
563 if ((err = hpfs_chk_name((char *)new_name, &new_len))) return err; 566 if ((err = hpfs_chk_name(new_name, &new_len))) return err;
564 err = 0; 567 err = 0;
565 hpfs_adjust_length((char *)old_name, &old_len); 568 hpfs_adjust_length(old_name, &old_len);
566 569
567 lock_kernel(); 570 lock_kernel();
568 /* order doesn't matter, due to VFS exclusion */ 571 /* order doesn't matter, due to VFS exclusion */
@@ -579,7 +582,7 @@ static int hpfs_rename(struct inode *old_dir, struct dentry *old_dentry,
579 goto end1; 582 goto end1;
580 } 583 }
581 584
582 if (!(dep = map_dirent(old_dir, hpfs_i(old_dir)->i_dno, (char *)old_name, old_len, &dno, &qbh))) { 585 if (!(dep = map_dirent(old_dir, hpfs_i(old_dir)->i_dno, old_name, old_len, &dno, &qbh))) {
583 hpfs_error(i->i_sb, "lookup succeeded but map dirent failed"); 586 hpfs_error(i->i_sb, "lookup succeeded but map dirent failed");
584 err = -ENOENT; 587 err = -ENOENT;
585 goto end1; 588 goto end1;
@@ -590,7 +593,7 @@ static int hpfs_rename(struct inode *old_dir, struct dentry *old_dentry,
590 if (new_inode) { 593 if (new_inode) {
591 int r; 594 int r;
592 if ((r = hpfs_remove_dirent(old_dir, dno, dep, &qbh, 1)) != 2) { 595 if ((r = hpfs_remove_dirent(old_dir, dno, dep, &qbh, 1)) != 2) {
593 if ((nde = map_dirent(new_dir, hpfs_i(new_dir)->i_dno, (char *)new_name, new_len, NULL, &qbh1))) { 596 if ((nde = map_dirent(new_dir, hpfs_i(new_dir)->i_dno, new_name, new_len, NULL, &qbh1))) {
594 clear_nlink(new_inode); 597 clear_nlink(new_inode);
595 copy_de(nde, &de); 598 copy_de(nde, &de);
596 memcpy(nde->name, new_name, new_len); 599 memcpy(nde->name, new_name, new_len);
@@ -618,7 +621,7 @@ static int hpfs_rename(struct inode *old_dir, struct dentry *old_dentry,
618 } 621 }
619 622
620 if (new_dir == old_dir) 623 if (new_dir == old_dir)
621 if (!(dep = map_dirent(old_dir, hpfs_i(old_dir)->i_dno, (char *)old_name, old_len, &dno, &qbh))) { 624 if (!(dep = map_dirent(old_dir, hpfs_i(old_dir)->i_dno, old_name, old_len, &dno, &qbh))) {
622 hpfs_unlock_creation(i->i_sb); 625 hpfs_unlock_creation(i->i_sb);
623 hpfs_error(i->i_sb, "lookup succeeded but map dirent failed at #2"); 626 hpfs_error(i->i_sb, "lookup succeeded but map dirent failed at #2");
624 err = -ENOENT; 627 err = -ENOENT;
@@ -648,7 +651,7 @@ static int hpfs_rename(struct inode *old_dir, struct dentry *old_dentry,
648 brelse(bh); 651 brelse(bh);
649 } 652 }
650 hpfs_i(i)->i_conv = hpfs_sb(i->i_sb)->sb_conv; 653 hpfs_i(i)->i_conv = hpfs_sb(i->i_sb)->sb_conv;
651 hpfs_decide_conv(i, (char *)new_name, new_len); 654 hpfs_decide_conv(i, new_name, new_len);
652end1: 655end1:
653 if (old_dir != new_dir) 656 if (old_dir != new_dir)
654 mutex_unlock(&hpfs_i(new_dir)->i_mutex); 657 mutex_unlock(&hpfs_i(new_dir)->i_mutex);
diff --git a/fs/hppfs/hppfs.c b/fs/hppfs/hppfs.c
index 7239efc690d8..2e4dfa8593da 100644
--- a/fs/hppfs/hppfs.c
+++ b/fs/hppfs/hppfs.c
@@ -718,7 +718,7 @@ static int hppfs_fill_super(struct super_block *sb, void *d, int silent)
718 struct vfsmount *proc_mnt; 718 struct vfsmount *proc_mnt;
719 int err = -ENOENT; 719 int err = -ENOENT;
720 720
721 proc_mnt = do_kern_mount("proc", 0, "proc", NULL); 721 proc_mnt = mntget(current->nsproxy->pid_ns->proc_mnt);
722 if (IS_ERR(proc_mnt)) 722 if (IS_ERR(proc_mnt))
723 goto out; 723 goto out;
724 724
diff --git a/fs/internal.h b/fs/internal.h
index e96a1667d749..8a03a5447bdf 100644
--- a/fs/internal.h
+++ b/fs/internal.h
@@ -70,6 +70,8 @@ extern struct vfsmount *copy_tree(struct vfsmount *, struct dentry *, int);
70 70
71extern void __init mnt_init(void); 71extern void __init mnt_init(void);
72 72
73extern spinlock_t vfsmount_lock;
74
73/* 75/*
74 * fs_struct.c 76 * fs_struct.c
75 */ 77 */
diff --git a/fs/jbd2/checkpoint.c b/fs/jbd2/checkpoint.c
index 886849370950..30beb11ef928 100644
--- a/fs/jbd2/checkpoint.c
+++ b/fs/jbd2/checkpoint.c
@@ -507,6 +507,7 @@ int jbd2_cleanup_journal_tail(journal_t *journal)
507 if (blocknr < journal->j_tail) 507 if (blocknr < journal->j_tail)
508 freed = freed + journal->j_last - journal->j_first; 508 freed = freed + journal->j_last - journal->j_first;
509 509
510 trace_jbd2_cleanup_journal_tail(journal, first_tid, blocknr, freed);
510 jbd_debug(1, 511 jbd_debug(1,
511 "Cleaning journal tail from %d to %d (offset %lu), " 512 "Cleaning journal tail from %d to %d (offset %lu), "
512 "freeing %lu\n", 513 "freeing %lu\n",
diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
index 1bc74b6f26d2..671da7fb7ffd 100644
--- a/fs/jbd2/commit.c
+++ b/fs/jbd2/commit.c
@@ -883,8 +883,7 @@ restart_loop:
883 spin_unlock(&journal->j_list_lock); 883 spin_unlock(&journal->j_list_lock);
884 bh = jh2bh(jh); 884 bh = jh2bh(jh);
885 jbd_lock_bh_state(bh); 885 jbd_lock_bh_state(bh);
886 J_ASSERT_JH(jh, jh->b_transaction == commit_transaction || 886 J_ASSERT_JH(jh, jh->b_transaction == commit_transaction);
887 jh->b_transaction == journal->j_running_transaction);
888 887
889 /* 888 /*
890 * If there is undo-protected committed data against 889 * If there is undo-protected committed data against
@@ -930,12 +929,12 @@ restart_loop:
930 /* A buffer which has been freed while still being 929 /* A buffer which has been freed while still being
931 * journaled by a previous transaction may end up still 930 * journaled by a previous transaction may end up still
932 * being dirty here, but we want to avoid writing back 931 * being dirty here, but we want to avoid writing back
933 * that buffer in the future now that the last use has 932 * that buffer in the future after the "add to orphan"
934 * been committed. That's not only a performance gain, 933 * operation has been committed. That's not only a
935 * it also stops aliasing problems if the buffer is left 934 * performance gain, it also stops aliasing problems if the
936 * behind for writeback and gets reallocated for another 935 * buffer is left behind for writeback and gets reallocated
937 * use in a different page. */ 936 * for another use in a different page. */
938 if (buffer_freed(bh)) { 937 if (buffer_freed(bh) && !jh->b_next_transaction) {
939 clear_buffer_freed(bh); 938 clear_buffer_freed(bh);
940 clear_buffer_jbddirty(bh); 939 clear_buffer_jbddirty(bh);
941 } 940 }
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index ac0d027595d0..c03d4dce4d76 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -39,6 +39,8 @@
39#include <linux/seq_file.h> 39#include <linux/seq_file.h>
40#include <linux/math64.h> 40#include <linux/math64.h>
41#include <linux/hash.h> 41#include <linux/hash.h>
42#include <linux/log2.h>
43#include <linux/vmalloc.h>
42 44
43#define CREATE_TRACE_POINTS 45#define CREATE_TRACE_POINTS
44#include <trace/events/jbd2.h> 46#include <trace/events/jbd2.h>
@@ -93,6 +95,7 @@ EXPORT_SYMBOL(jbd2_journal_begin_ordered_truncate);
93 95
94static int journal_convert_superblock_v1(journal_t *, journal_superblock_t *); 96static int journal_convert_superblock_v1(journal_t *, journal_superblock_t *);
95static void __journal_abort_soft (journal_t *journal, int errno); 97static void __journal_abort_soft (journal_t *journal, int errno);
98static int jbd2_journal_create_slab(size_t slab_size);
96 99
97/* 100/*
98 * Helper function used to manage commit timeouts 101 * Helper function used to manage commit timeouts
@@ -1248,6 +1251,13 @@ int jbd2_journal_load(journal_t *journal)
1248 } 1251 }
1249 } 1252 }
1250 1253
1254 /*
1255 * Create a slab for this blocksize
1256 */
1257 err = jbd2_journal_create_slab(be32_to_cpu(sb->s_blocksize));
1258 if (err)
1259 return err;
1260
1251 /* Let the recovery code check whether it needs to recover any 1261 /* Let the recovery code check whether it needs to recover any
1252 * data from the journal. */ 1262 * data from the journal. */
1253 if (jbd2_journal_recover(journal)) 1263 if (jbd2_journal_recover(journal))
@@ -1807,6 +1817,127 @@ size_t journal_tag_bytes(journal_t *journal)
1807} 1817}
1808 1818
1809/* 1819/*
1820 * JBD memory management
1821 *
1822 * These functions are used to allocate block-sized chunks of memory
1823 * used for making copies of buffer_head data. Very often it will be
1824 * page-sized chunks of data, but sometimes it will be in
1825 * sub-page-size chunks. (For example, 16k pages on Power systems
1826 * with a 4k block file system.) For blocks smaller than a page, we
1827 * use a SLAB allocator. There are slab caches for each block size,
1828 * which are allocated at mount time, if necessary, and we only free
1829 * (all of) the slab caches when/if the jbd2 module is unloaded. For
1830 * this reason we don't need a mutex to protect access to
1831 * jbd2_slab[] when allocating or releasing memory; only in
1832 * jbd2_journal_create_slab().
1833 */
1834#define JBD2_MAX_SLABS 8
1835static struct kmem_cache *jbd2_slab[JBD2_MAX_SLABS];
1836static DECLARE_MUTEX(jbd2_slab_create_sem);
1837
1838static const char *jbd2_slab_names[JBD2_MAX_SLABS] = {
1839 "jbd2_1k", "jbd2_2k", "jbd2_4k", "jbd2_8k",
1840 "jbd2_16k", "jbd2_32k", "jbd2_64k", "jbd2_128k"
1841};
1842
1843
1844static void jbd2_journal_destroy_slabs(void)
1845{
1846 int i;
1847
1848 for (i = 0; i < JBD2_MAX_SLABS; i++) {
1849 if (jbd2_slab[i])
1850 kmem_cache_destroy(jbd2_slab[i]);
1851 jbd2_slab[i] = NULL;
1852 }
1853}
1854
1855static int jbd2_journal_create_slab(size_t size)
1856{
1857 int i = order_base_2(size) - 10;
1858 size_t slab_size;
1859
1860 if (size == PAGE_SIZE)
1861 return 0;
1862
1863 if (i >= JBD2_MAX_SLABS)
1864 return -EINVAL;
1865
1866 if (unlikely(i < 0))
1867 i = 0;
1868 down(&jbd2_slab_create_sem);
1869 if (jbd2_slab[i]) {
1870 up(&jbd2_slab_create_sem);
1871 return 0; /* Already created */
1872 }
1873
1874 slab_size = 1 << (i+10);
1875 jbd2_slab[i] = kmem_cache_create(jbd2_slab_names[i], slab_size,
1876 slab_size, 0, NULL);
1877 up(&jbd2_slab_create_sem);
1878 if (!jbd2_slab[i]) {
1879 printk(KERN_EMERG "JBD2: no memory for jbd2_slab cache\n");
1880 return -ENOMEM;
1881 }
1882 return 0;
1883}
1884
1885static struct kmem_cache *get_slab(size_t size)
1886{
1887 int i = order_base_2(size) - 10;
1888
1889 BUG_ON(i >= JBD2_MAX_SLABS);
1890 if (unlikely(i < 0))
1891 i = 0;
1892 BUG_ON(jbd2_slab[i] == 0);
1893 return jbd2_slab[i];
1894}
1895
1896void *jbd2_alloc(size_t size, gfp_t flags)
1897{
1898 void *ptr;
1899
1900 BUG_ON(size & (size-1)); /* Must be a power of 2 */
1901
1902 flags |= __GFP_REPEAT;
1903 if (size == PAGE_SIZE)
1904 ptr = (void *)__get_free_pages(flags, 0);
1905 else if (size > PAGE_SIZE) {
1906 int order = get_order(size);
1907
1908 if (order < 3)
1909 ptr = (void *)__get_free_pages(flags, order);
1910 else
1911 ptr = vmalloc(size);
1912 } else
1913 ptr = kmem_cache_alloc(get_slab(size), flags);
1914
1915 /* Check alignment; SLUB has gotten this wrong in the past,
1916 * and this can lead to user data corruption! */
1917 BUG_ON(((unsigned long) ptr) & (size-1));
1918
1919 return ptr;
1920}
1921
1922void jbd2_free(void *ptr, size_t size)
1923{
1924 if (size == PAGE_SIZE) {
1925 free_pages((unsigned long)ptr, 0);
1926 return;
1927 }
1928 if (size > PAGE_SIZE) {
1929 int order = get_order(size);
1930
1931 if (order < 3)
1932 free_pages((unsigned long)ptr, order);
1933 else
1934 vfree(ptr);
1935 return;
1936 }
1937 kmem_cache_free(get_slab(size), ptr);
1938};
1939
1940/*
1810 * Journal_head storage management 1941 * Journal_head storage management
1811 */ 1942 */
1812static struct kmem_cache *jbd2_journal_head_cache; 1943static struct kmem_cache *jbd2_journal_head_cache;
@@ -2204,6 +2335,7 @@ static void jbd2_journal_destroy_caches(void)
2204 jbd2_journal_destroy_revoke_caches(); 2335 jbd2_journal_destroy_revoke_caches();
2205 jbd2_journal_destroy_jbd2_journal_head_cache(); 2336 jbd2_journal_destroy_jbd2_journal_head_cache();
2206 jbd2_journal_destroy_handle_cache(); 2337 jbd2_journal_destroy_handle_cache();
2338 jbd2_journal_destroy_slabs();
2207} 2339}
2208 2340
2209static int __init journal_init(void) 2341static int __init journal_init(void)
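The slab machinery added above maps a block size to one of eight caches with order_base_2(size) - 10, clamped at zero, while PAGE_SIZE allocations bypass the slabs and use whole pages (larger sizes fall back to __get_free_pages() or vmalloc()). A runnable userspace sketch of just the index arithmetic, the kmem_cache calls being kernel-only:

#include <stdio.h>

/* Illustration only: the index math from jbd2_journal_create_slab()
 * and get_slab(). Sub-page sizes matter e.g. on 16k-page Power with
 * a 4k-block filesystem, the case the comment above calls out.
 */
static int order_base_2(unsigned long n)
{
	int order = 0;
	unsigned long v = 1;

	while (v < n) {
		v <<= 1;
		order++;
	}
	return order;
}

static const char *jbd2_slab_names[] = {
	"jbd2_1k", "jbd2_2k", "jbd2_4k", "jbd2_8k",
	"jbd2_16k", "jbd2_32k", "jbd2_64k", "jbd2_128k"
};

int main(void)
{
	unsigned long sizes[] = { 1024, 2048, 4096, 8192 };

	for (int j = 0; j < 4; j++) {
		int i = order_base_2(sizes[j]) - 10;

		if (i < 0)
			i = 0;	/* same clamp as the patch */
		printf("%5lu-byte blocks -> jbd2_slab[%d] (%s)\n",
		       sizes[j], i, jbd2_slab_names[i]);
	}
	return 0;
}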
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index a0512700542f..bfc70f57900f 100644
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -1727,6 +1727,21 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh)
1727 if (!jh) 1727 if (!jh)
1728 goto zap_buffer_no_jh; 1728 goto zap_buffer_no_jh;
1729 1729
1730 /*
1731 * We cannot remove the buffer from checkpoint lists until the
1732 * transaction adding inode to orphan list (let's call it T)
1733 * is committed. Otherwise, if the transaction changing the
1734 * buffer were cleaned from the journal before T is
1735 * committed, a crash would cause the correct contents of
1736 * the buffer to be lost. On the other hand we have to
1737 * clear the buffer dirty bit at the latest when the
1738 * transaction marking the buffer as freed in the filesystem
1739 * structures is committed because from that moment on the
1740 * buffer can be reallocated and used by a different page.
1741 * Since the block hasn't been freed yet but the inode has
1742 * already been added to orphan list, it is safe for us to add
1743 * the buffer to BJ_Forget list of the newest transaction.
1744 */
1730 transaction = jh->b_transaction; 1745 transaction = jh->b_transaction;
1731 if (transaction == NULL) { 1746 if (transaction == NULL) {
1732 /* First case: not on any transaction. If it 1747 /* First case: not on any transaction. If it
@@ -1783,16 +1798,15 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh)
1783 } else if (transaction == journal->j_committing_transaction) { 1798 } else if (transaction == journal->j_committing_transaction) {
1784 JBUFFER_TRACE(jh, "on committing transaction"); 1799 JBUFFER_TRACE(jh, "on committing transaction");
1785 /* 1800 /*
1786 * If it is committing, we simply cannot touch it. We 1801 * The buffer is committing, we simply cannot touch
1787 * can remove its next_transaction pointer from the 1802 * it. So we just set b_next_transaction to the
1788 * running transaction if that is set, but nothing 1803 * running transaction (if there is one) and mark the
1789 * else. */ 1804 * buffer as freed so that commit code knows it should
1805 * clear dirty bits when it is done with the buffer.
1806 */
1790 set_buffer_freed(bh); 1807 set_buffer_freed(bh);
1791 if (jh->b_next_transaction) { 1808 if (journal->j_running_transaction && buffer_jbddirty(bh))
1792 J_ASSERT(jh->b_next_transaction == 1809 jh->b_next_transaction = journal->j_running_transaction;
1793 journal->j_running_transaction);
1794 jh->b_next_transaction = NULL;
1795 }
1796 jbd2_journal_put_journal_head(jh); 1810 jbd2_journal_put_journal_head(jh);
1797 spin_unlock(&journal->j_list_lock); 1811 spin_unlock(&journal->j_list_lock);
1798 jbd_unlock_bh_state(bh); 1812 jbd_unlock_bh_state(bh);
@@ -1969,7 +1983,7 @@ void jbd2_journal_file_buffer(struct journal_head *jh,
1969 */ 1983 */
1970void __jbd2_journal_refile_buffer(struct journal_head *jh) 1984void __jbd2_journal_refile_buffer(struct journal_head *jh)
1971{ 1985{
1972 int was_dirty; 1986 int was_dirty, jlist;
1973 struct buffer_head *bh = jh2bh(jh); 1987 struct buffer_head *bh = jh2bh(jh);
1974 1988
1975 J_ASSERT_JH(jh, jbd_is_locked_bh_state(bh)); 1989 J_ASSERT_JH(jh, jbd_is_locked_bh_state(bh));
@@ -1991,8 +2005,13 @@ void __jbd2_journal_refile_buffer(struct journal_head *jh)
1991 __jbd2_journal_temp_unlink_buffer(jh); 2005 __jbd2_journal_temp_unlink_buffer(jh);
1992 jh->b_transaction = jh->b_next_transaction; 2006 jh->b_transaction = jh->b_next_transaction;
1993 jh->b_next_transaction = NULL; 2007 jh->b_next_transaction = NULL;
1994 __jbd2_journal_file_buffer(jh, jh->b_transaction, 2008 if (buffer_freed(bh))
1995 jh->b_modified ? BJ_Metadata : BJ_Reserved); 2009 jlist = BJ_Forget;
2010 else if (jh->b_modified)
2011 jlist = BJ_Metadata;
2012 else
2013 jlist = BJ_Reserved;
2014 __jbd2_journal_file_buffer(jh, jh->b_transaction, jlist);
1996 J_ASSERT_JH(jh, jh->b_transaction->t_state == T_RUNNING); 2015 J_ASSERT_JH(jh, jh->b_transaction->t_state == T_RUNNING);
1997 2016
1998 if (was_dirty) 2017 if (was_dirty)
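The __jbd2_journal_refile_buffer() hunk above turns the old two-way b_modified choice into a three-way one: a buffer freed while still journaled is refiled onto BJ_Forget, so the next commit can drop it rather than write it back, matching the journal_unmap_buffer() comment earlier in this file. A compact restatement of that decision with stand-in values (the real BJ_* constants live in jbd2.h):

enum jbd2_list { BJ_Reserved, BJ_Metadata, BJ_Forget };	/* stand-ins */

/* mirrors the jlist selection introduced by the patch */
static enum jbd2_list pick_list(int buffer_freed, int b_modified)
{
	if (buffer_freed)
		return BJ_Forget;	/* deleted block: drop at commit */
	return b_modified ? BJ_Metadata : BJ_Reserved;
}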
diff --git a/fs/jfs/inode.c b/fs/jfs/inode.c
index c694a5f15380..9dd126276c9f 100644
--- a/fs/jfs/inode.c
+++ b/fs/jfs/inode.c
@@ -22,6 +22,7 @@
22#include <linux/buffer_head.h> 22#include <linux/buffer_head.h>
23#include <linux/pagemap.h> 23#include <linux/pagemap.h>
24#include <linux/quotaops.h> 24#include <linux/quotaops.h>
25#include <linux/writeback.h>
25#include "jfs_incore.h" 26#include "jfs_incore.h"
26#include "jfs_inode.h" 27#include "jfs_inode.h"
27#include "jfs_filsys.h" 28#include "jfs_filsys.h"
@@ -120,8 +121,10 @@ int jfs_commit_inode(struct inode *inode, int wait)
120 return rc; 121 return rc;
121} 122}
122 123
123int jfs_write_inode(struct inode *inode, int wait) 124int jfs_write_inode(struct inode *inode, struct writeback_control *wbc)
124{ 125{
126 int wait = wbc->sync_mode == WB_SYNC_ALL;
127
125 if (test_cflag(COMMIT_Nolink, inode)) 128 if (test_cflag(COMMIT_Nolink, inode))
126 return 0; 129 return 0;
127 /* 130 /*
diff --git a/fs/jfs/jfs_inode.h b/fs/jfs/jfs_inode.h
index 4b91b2787835..79e2c79661df 100644
--- a/fs/jfs/jfs_inode.h
+++ b/fs/jfs/jfs_inode.h
@@ -26,7 +26,7 @@ extern long jfs_ioctl(struct file *, unsigned int, unsigned long);
26extern long jfs_compat_ioctl(struct file *, unsigned int, unsigned long); 26extern long jfs_compat_ioctl(struct file *, unsigned int, unsigned long);
27extern struct inode *jfs_iget(struct super_block *, unsigned long); 27extern struct inode *jfs_iget(struct super_block *, unsigned long);
28extern int jfs_commit_inode(struct inode *, int); 28extern int jfs_commit_inode(struct inode *, int);
29extern int jfs_write_inode(struct inode*, int); 29extern int jfs_write_inode(struct inode *, struct writeback_control *);
30extern void jfs_delete_inode(struct inode *); 30extern void jfs_delete_inode(struct inode *);
31extern void jfs_dirty_inode(struct inode *); 31extern void jfs_dirty_inode(struct inode *);
32extern void jfs_truncate(struct inode *); 32extern void jfs_truncate(struct inode *);
diff --git a/fs/libfs.c b/fs/libfs.c
index 6e8d17e1dc4c..9e50bcf55857 100644
--- a/fs/libfs.c
+++ b/fs/libfs.c
@@ -338,28 +338,14 @@ int simple_readpage(struct file *file, struct page *page)
338 return 0; 338 return 0;
339} 339}
340 340
341int simple_prepare_write(struct file *file, struct page *page,
342 unsigned from, unsigned to)
343{
344 if (!PageUptodate(page)) {
345 if (to - from != PAGE_CACHE_SIZE)
346 zero_user_segments(page,
347 0, from,
348 to, PAGE_CACHE_SIZE);
349 }
350 return 0;
351}
352
353int simple_write_begin(struct file *file, struct address_space *mapping, 341int simple_write_begin(struct file *file, struct address_space *mapping,
354 loff_t pos, unsigned len, unsigned flags, 342 loff_t pos, unsigned len, unsigned flags,
355 struct page **pagep, void **fsdata) 343 struct page **pagep, void **fsdata)
356{ 344{
357 struct page *page; 345 struct page *page;
358 pgoff_t index; 346 pgoff_t index;
359 unsigned from;
360 347
361 index = pos >> PAGE_CACHE_SHIFT; 348 index = pos >> PAGE_CACHE_SHIFT;
362 from = pos & (PAGE_CACHE_SIZE - 1);
363 349
364 page = grab_cache_page_write_begin(mapping, index, flags); 350 page = grab_cache_page_write_begin(mapping, index, flags);
365 if (!page) 351 if (!page)
@@ -367,43 +353,59 @@ int simple_write_begin(struct file *file, struct address_space *mapping,
367 353
368 *pagep = page; 354 *pagep = page;
369 355
370 return simple_prepare_write(file, page, from, from+len); 356 if (!PageUptodate(page) && (len != PAGE_CACHE_SIZE)) {
371} 357 unsigned from = pos & (PAGE_CACHE_SIZE - 1);
372
373static int simple_commit_write(struct file *file, struct page *page,
374 unsigned from, unsigned to)
375{
376 struct inode *inode = page->mapping->host;
377 loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
378 358
379 if (!PageUptodate(page)) 359 zero_user_segments(page, 0, from, from + len, PAGE_CACHE_SIZE);
380 SetPageUptodate(page); 360 }
381 /*
382 * No need to use i_size_read() here, the i_size
383 * cannot change under us because we hold the i_mutex.
384 */
385 if (pos > inode->i_size)
386 i_size_write(inode, pos);
387 set_page_dirty(page);
388 return 0; 361 return 0;
389} 362}
390 363
364/**
365 * simple_write_end - .write_end helper for non-block-device FSes
366 * @file: See .write_end of address_space_operations
367 * @mapping: "
368 * @pos: "
369 * @len: "
370 * @copied: "
371 * @page: "
372 * @fsdata: "
373 *
374 *
375 * simple_write_end does the minimum needed for updating a page after writing is
376 * done. It has the same API signature as the .write_end of
377 * address_space_operations vector. So it can just be set onto .write_end for
378 * FSes that don't need any other processing. i_mutex is assumed to be held.
379 * Block based filesystems should use generic_write_end().
380 * NOTE: Even though i_size might get updated by this function, mark_inode_dirty
381 * is not called, so a filesystem that actually does store data in .write_inode
382 * should extend what's done here with a call to mark_inode_dirty() in the
383 * case that i_size has changed.
384 */
391int simple_write_end(struct file *file, struct address_space *mapping, 385int simple_write_end(struct file *file, struct address_space *mapping,
392 loff_t pos, unsigned len, unsigned copied, 386 loff_t pos, unsigned len, unsigned copied,
393 struct page *page, void *fsdata) 387 struct page *page, void *fsdata)
394{ 388{
395 unsigned from = pos & (PAGE_CACHE_SIZE - 1); 389 struct inode *inode = page->mapping->host;
390 loff_t last_pos = pos + copied;
396 391
397 /* zero the stale part of the page if we did a short copy */ 392 /* zero the stale part of the page if we did a short copy */
398 if (copied < len) { 393 if (copied < len) {
399 void *kaddr = kmap_atomic(page, KM_USER0); 394 unsigned from = pos & (PAGE_CACHE_SIZE - 1);
400 memset(kaddr + from + copied, 0, len - copied); 395
401 flush_dcache_page(page); 396 zero_user(page, from + copied, len - copied);
402 kunmap_atomic(kaddr, KM_USER0);
403 } 397 }
404 398
405 simple_commit_write(file, page, from, from+copied); 399 if (!PageUptodate(page))
400 SetPageUptodate(page);
401 /*
402 * No need to use i_size_read() here, the i_size
403 * cannot change under us because we hold the i_mutex.
404 */
405 if (last_pos > inode->i_size)
406 i_size_write(inode, last_pos);
406 407
408 set_page_dirty(page);
407 unlock_page(page); 409 unlock_page(page);
408 page_cache_release(page); 410 page_cache_release(page);
409 411
@@ -853,7 +855,6 @@ EXPORT_SYMBOL(simple_getattr);
853EXPORT_SYMBOL(simple_link); 855EXPORT_SYMBOL(simple_link);
854EXPORT_SYMBOL(simple_lookup); 856EXPORT_SYMBOL(simple_lookup);
855EXPORT_SYMBOL(simple_pin_fs); 857EXPORT_SYMBOL(simple_pin_fs);
856EXPORT_UNUSED_SYMBOL(simple_prepare_write);
857EXPORT_SYMBOL(simple_readpage); 858EXPORT_SYMBOL(simple_readpage);
858EXPORT_SYMBOL(simple_release_fs); 859EXPORT_SYMBOL(simple_release_fs);
859EXPORT_SYMBOL(simple_rename); 860EXPORT_SYMBOL(simple_rename);
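
The simple_write_begin()/simple_write_end() pair documented above is designed to be wired directly into an address_space_operations vector. A minimal sketch of such a consumer, assuming a ramfs-style filesystem that keeps all data in the page cache (the aops name is illustrative):

	static const struct address_space_operations example_aops = {
		.readpage	= simple_readpage,
		.write_begin	= simple_write_begin,
		.write_end	= simple_write_end,
		.set_page_dirty	= __set_page_dirty_no_writeback,
	};
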
diff --git a/fs/locks.c b/fs/locks.c
index a8794f233bc9..ae9ded026b7c 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -1182,8 +1182,9 @@ int __break_lease(struct inode *inode, unsigned int mode)
1182 struct file_lock *fl; 1182 struct file_lock *fl;
1183 unsigned long break_time; 1183 unsigned long break_time;
1184 int i_have_this_lease = 0; 1184 int i_have_this_lease = 0;
1185 int want_write = (mode & O_ACCMODE) != O_RDONLY;
1185 1186
1186 new_fl = lease_alloc(NULL, mode & FMODE_WRITE ? F_WRLCK : F_RDLCK); 1187 new_fl = lease_alloc(NULL, want_write ? F_WRLCK : F_RDLCK);
1187 1188
1188 lock_kernel(); 1189 lock_kernel();
1189 1190
@@ -1197,7 +1198,7 @@ int __break_lease(struct inode *inode, unsigned int mode)
1197 if (fl->fl_owner == current->files) 1198 if (fl->fl_owner == current->files)
1198 i_have_this_lease = 1; 1199 i_have_this_lease = 1;
1199 1200
1200 if (mode & FMODE_WRITE) { 1201 if (want_write) {
1201 /* If we want write access, we have to revoke any lease. */ 1202 /* If we want write access, we have to revoke any lease. */
1202 future = F_UNLCK | F_INPROGRESS; 1203 future = F_UNLCK | F_INPROGRESS;
1203 } else if (flock->fl_type & F_INPROGRESS) { 1204 } else if (flock->fl_type & F_INPROGRESS) {
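
The new want_write test sidesteps a flag-namespace trap: __break_lease() is now given O_* flags, whose access mode is a two-bit field, not independent bits. A sketch of why the old FMODE_WRITE test misbehaves on O_* values (wants_write() is a made-up helper mirroring the want_write local; the numeric values are the usual asm-generic ones, cited for illustration only):

	/*
	 * O_RDONLY = 0, O_WRONLY = 1, O_RDWR = 2, while FMODE_WRITE = 2.
	 * "mode & FMODE_WRITE" treats O_WRONLY (1) as read-only, so a
	 * write open could slip past an existing read lease.
	 */
	static int wants_write(unsigned int mode)
	{
		return (mode & O_ACCMODE) != O_RDONLY;
	}
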
diff --git a/fs/minix/inode.c b/fs/minix/inode.c
index 74ea82d72164..756f8c93780c 100644
--- a/fs/minix/inode.c
+++ b/fs/minix/inode.c
@@ -17,8 +17,10 @@
17#include <linux/init.h> 17#include <linux/init.h>
18#include <linux/highuid.h> 18#include <linux/highuid.h>
19#include <linux/vfs.h> 19#include <linux/vfs.h>
20#include <linux/writeback.h>
20 21
21static int minix_write_inode(struct inode * inode, int wait); 22static int minix_write_inode(struct inode *inode,
23 struct writeback_control *wbc);
22static int minix_statfs(struct dentry *dentry, struct kstatfs *buf); 24static int minix_statfs(struct dentry *dentry, struct kstatfs *buf);
23static int minix_remount (struct super_block * sb, int * flags, char * data); 25static int minix_remount (struct super_block * sb, int * flags, char * data);
24 26
@@ -552,7 +554,7 @@ static struct buffer_head * V2_minix_update_inode(struct inode * inode)
552 return bh; 554 return bh;
553} 555}
554 556
555static int minix_write_inode(struct inode *inode, int wait) 557static int minix_write_inode(struct inode *inode, struct writeback_control *wbc)
556{ 558{
557 int err = 0; 559 int err = 0;
558 struct buffer_head *bh; 560 struct buffer_head *bh;
@@ -563,7 +565,7 @@ static int minix_write_inode(struct inode *inode, int wait)
563 bh = V2_minix_update_inode(inode); 565 bh = V2_minix_update_inode(inode);
564 if (!bh) 566 if (!bh)
565 return -EIO; 567 return -EIO;
566 if (wait && buffer_dirty(bh)) { 568 if (wbc->sync_mode == WB_SYNC_ALL && buffer_dirty(bh)) {
567 sync_dirty_buffer(bh); 569 sync_dirty_buffer(bh);
568 if (buffer_req(bh) && !buffer_uptodate(bh)) { 570 if (buffer_req(bh) && !buffer_uptodate(bh)) {
569 printk("IO error syncing minix inode [%s:%08lx]\n", 571 printk("IO error syncing minix inode [%s:%08lx]\n",
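
With ->write_inode() taking a writeback_control, the old boolean "wait" argument travels in wbc->sync_mode. A hedged sketch of what a synchronous caller now supplies (sync_inode() is the usual entry point; the snippet itself is illustrative):

	struct writeback_control wbc = {
		.sync_mode   = WB_SYNC_ALL,	/* was: wait = 1 */
		.nr_to_write = 0,		/* inode metadata only */
	};
	sync_inode(inode, &wbc);
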
diff --git a/fs/namei.c b/fs/namei.c
index 06abd2bf473c..3d9d2f965f84 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -497,8 +497,6 @@ static int link_path_walk(const char *, struct nameidata *);
497 497
498static __always_inline int __vfs_follow_link(struct nameidata *nd, const char *link) 498static __always_inline int __vfs_follow_link(struct nameidata *nd, const char *link)
499{ 499{
500 int res = 0;
501 char *name;
502 if (IS_ERR(link)) 500 if (IS_ERR(link))
503 goto fail; 501 goto fail;
504 502
@@ -509,22 +507,7 @@ static __always_inline int __vfs_follow_link(struct nameidata *nd, const char *l
509 path_get(&nd->root); 507 path_get(&nd->root);
510 } 508 }
511 509
512 res = link_path_walk(link, nd); 510 return link_path_walk(link, nd);
513 if (nd->depth || res || nd->last_type!=LAST_NORM)
514 return res;
515 /*
516 * If it is an iterative symlinks resolution in open_namei() we
517 * have to copy the last component. And all that crap because of
518 * bloody create() on broken symlinks. Furrfu...
519 */
520 name = __getname();
521 if (unlikely(!name)) {
522 path_put(&nd->path);
523 return -ENOMEM;
524 }
525 strcpy(name, nd->last.name);
526 nd->last.name = name;
527 return 0;
528fail: 511fail:
529 path_put(&nd->path); 512 path_put(&nd->path);
530 return PTR_ERR(link); 513 return PTR_ERR(link);
@@ -546,10 +529,10 @@ static inline void path_to_nameidata(struct path *path, struct nameidata *nd)
546 nd->path.dentry = path->dentry; 529 nd->path.dentry = path->dentry;
547} 530}
548 531
549static __always_inline int __do_follow_link(struct path *path, struct nameidata *nd) 532static __always_inline int
533__do_follow_link(struct path *path, struct nameidata *nd, void **p)
550{ 534{
551 int error; 535 int error;
552 void *cookie;
553 struct dentry *dentry = path->dentry; 536 struct dentry *dentry = path->dentry;
554 537
555 touch_atime(path->mnt, dentry); 538 touch_atime(path->mnt, dentry);
@@ -561,9 +544,9 @@ static __always_inline int __do_follow_link(struct path *path, struct nameidata
561 } 544 }
562 mntget(path->mnt); 545 mntget(path->mnt);
563 nd->last_type = LAST_BIND; 546 nd->last_type = LAST_BIND;
564 cookie = dentry->d_inode->i_op->follow_link(dentry, nd); 547 *p = dentry->d_inode->i_op->follow_link(dentry, nd);
565 error = PTR_ERR(cookie); 548 error = PTR_ERR(*p);
566 if (!IS_ERR(cookie)) { 549 if (!IS_ERR(*p)) {
567 char *s = nd_get_link(nd); 550 char *s = nd_get_link(nd);
568 error = 0; 551 error = 0;
569 if (s) 552 if (s)
@@ -573,8 +556,6 @@ static __always_inline int __do_follow_link(struct path *path, struct nameidata
573 if (error) 556 if (error)
574 path_put(&nd->path); 557 path_put(&nd->path);
575 } 558 }
576 if (dentry->d_inode->i_op->put_link)
577 dentry->d_inode->i_op->put_link(dentry, nd, cookie);
578 } 559 }
579 return error; 560 return error;
580} 561}
@@ -588,6 +569,7 @@ static __always_inline int __do_follow_link(struct path *path, struct nameidata
588 */ 569 */
589static inline int do_follow_link(struct path *path, struct nameidata *nd) 570static inline int do_follow_link(struct path *path, struct nameidata *nd)
590{ 571{
572 void *cookie;
591 int err = -ELOOP; 573 int err = -ELOOP;
592 if (current->link_count >= MAX_NESTED_LINKS) 574 if (current->link_count >= MAX_NESTED_LINKS)
593 goto loop; 575 goto loop;
@@ -601,7 +583,9 @@ static inline int do_follow_link(struct path *path, struct nameidata *nd)
601 current->link_count++; 583 current->link_count++;
602 current->total_link_count++; 584 current->total_link_count++;
603 nd->depth++; 585 nd->depth++;
604 err = __do_follow_link(path, nd); 586 err = __do_follow_link(path, nd, &cookie);
587 if (!IS_ERR(cookie) && path->dentry->d_inode->i_op->put_link)
588 path->dentry->d_inode->i_op->put_link(path->dentry, nd, cookie);
605 path_put(path); 589 path_put(path);
606 current->link_count--; 590 current->link_count--;
607 nd->depth--; 591 nd->depth--;
@@ -688,33 +672,20 @@ static __always_inline void follow_dotdot(struct nameidata *nd)
688 set_root(nd); 672 set_root(nd);
689 673
690 while(1) { 674 while(1) {
691 struct vfsmount *parent;
692 struct dentry *old = nd->path.dentry; 675 struct dentry *old = nd->path.dentry;
693 676
694 if (nd->path.dentry == nd->root.dentry && 677 if (nd->path.dentry == nd->root.dentry &&
695 nd->path.mnt == nd->root.mnt) { 678 nd->path.mnt == nd->root.mnt) {
696 break; 679 break;
697 } 680 }
698 spin_lock(&dcache_lock);
699 if (nd->path.dentry != nd->path.mnt->mnt_root) { 681 if (nd->path.dentry != nd->path.mnt->mnt_root) {
700 nd->path.dentry = dget(nd->path.dentry->d_parent); 682 /* rare case of legitimate dget_parent()... */
701 spin_unlock(&dcache_lock); 683 nd->path.dentry = dget_parent(nd->path.dentry);
702 dput(old); 684 dput(old);
703 break; 685 break;
704 } 686 }
705 spin_unlock(&dcache_lock); 687 if (!follow_up(&nd->path))
706 spin_lock(&vfsmount_lock);
707 parent = nd->path.mnt->mnt_parent;
708 if (parent == nd->path.mnt) {
709 spin_unlock(&vfsmount_lock);
710 break; 688 break;
711 }
712 mntget(parent);
713 nd->path.dentry = dget(nd->path.mnt->mnt_mountpoint);
714 spin_unlock(&vfsmount_lock);
715 dput(old);
716 mntput(nd->path.mnt);
717 nd->path.mnt = parent;
718 } 689 }
719 follow_mount(&nd->path); 690 follow_mount(&nd->path);
720} 691}
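
For reference, the two helpers the simplified loop leans on have these semantics (an editorial summary, not text from the patch):

	/*
	 * dget_parent(dentry) - returns a pinned reference to ->d_parent,
	 *                       taking the necessary locks internally, so
	 *                       the open-coded dcache_lock dance goes away.
	 * follow_up(&path)    - moves path from the root of a mount to the
	 *                       mountpoint in the parent mount, swapping the
	 *                       mnt/dentry references; returns 0 once the
	 *                       root of the mount tree is reached.
	 */
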
@@ -1346,7 +1317,7 @@ static int may_delete(struct inode *dir,struct dentry *victim,int isdir)
1346 return -ENOENT; 1317 return -ENOENT;
1347 1318
1348 BUG_ON(victim->d_parent->d_inode != dir); 1319 BUG_ON(victim->d_parent->d_inode != dir);
1349 audit_inode_child(victim->d_name.name, victim, dir); 1320 audit_inode_child(victim, dir);
1350 1321
1351 error = inode_permission(dir, MAY_WRITE | MAY_EXEC); 1322 error = inode_permission(dir, MAY_WRITE | MAY_EXEC);
1352 if (error) 1323 if (error)
@@ -1387,22 +1358,6 @@ static inline int may_create(struct inode *dir, struct dentry *child)
1387 return inode_permission(dir, MAY_WRITE | MAY_EXEC); 1358 return inode_permission(dir, MAY_WRITE | MAY_EXEC);
1388} 1359}
1389 1360
1390/*
1391 * O_DIRECTORY translates into forcing a directory lookup.
1392 */
1393static inline int lookup_flags(unsigned int f)
1394{
1395 unsigned long retval = LOOKUP_FOLLOW;
1396
1397 if (f & O_NOFOLLOW)
1398 retval &= ~LOOKUP_FOLLOW;
1399
1400 if (f & O_DIRECTORY)
1401 retval |= LOOKUP_DIRECTORY;
1402
1403 return retval;
1404}
1405
1406/* 1361/*
1407 * p1 and p2 should be directories on the same fs. 1362 * p1 and p2 should be directories on the same fs.
1408 */ 1363 */
@@ -1501,7 +1456,7 @@ int may_open(struct path *path, int acc_mode, int flag)
1501 * An append-only file must be opened in append mode for writing. 1456 * An append-only file must be opened in append mode for writing.
1502 */ 1457 */
1503 if (IS_APPEND(inode)) { 1458 if (IS_APPEND(inode)) {
1504 if ((flag & FMODE_WRITE) && !(flag & O_APPEND)) 1459 if ((flag & O_ACCMODE) != O_RDONLY && !(flag & O_APPEND))
1505 return -EPERM; 1460 return -EPERM;
1506 if (flag & O_TRUNC) 1461 if (flag & O_TRUNC)
1507 return -EPERM; 1462 return -EPERM;
@@ -1545,7 +1500,7 @@ static int handle_truncate(struct path *path)
1545 * what gets passed to sys_open(). 1500 * what gets passed to sys_open().
1546 */ 1501 */
1547static int __open_namei_create(struct nameidata *nd, struct path *path, 1502static int __open_namei_create(struct nameidata *nd, struct path *path,
1548 int flag, int mode) 1503 int open_flag, int mode)
1549{ 1504{
1550 int error; 1505 int error;
1551 struct dentry *dir = nd->path.dentry; 1506 struct dentry *dir = nd->path.dentry;
@@ -1563,7 +1518,7 @@ out_unlock:
1563 if (error) 1518 if (error)
1564 return error; 1519 return error;
1565 /* Don't check for write permission, don't truncate */ 1520 /* Don't check for write permission, don't truncate */
1566 return may_open(&nd->path, 0, flag & ~O_TRUNC); 1521 return may_open(&nd->path, 0, open_flag & ~O_TRUNC);
1567} 1522}
1568 1523
1569/* 1524/*
@@ -1601,129 +1556,132 @@ static int open_will_truncate(int flag, struct inode *inode)
1601 return (flag & O_TRUNC); 1556 return (flag & O_TRUNC);
1602} 1557}
1603 1558
1604/* 1559static struct file *finish_open(struct nameidata *nd,
1605 * Note that the low bits of the passed in "open_flag" 1560 int open_flag, int acc_mode)
1606 * are not the same as in the local variable "flag". See
1607 * open_to_namei_flags() for more details.
1608 */
1609struct file *do_filp_open(int dfd, const char *pathname,
1610 int open_flag, int mode, int acc_mode)
1611{ 1561{
1612 struct file *filp; 1562 struct file *filp;
1613 struct nameidata nd;
1614 int error;
1615 struct path path;
1616 struct dentry *dir;
1617 int count = 0;
1618 int will_truncate; 1563 int will_truncate;
1619 int flag = open_to_namei_flags(open_flag); 1564 int error;
1620 int force_reval = 0;
1621 1565
1566 will_truncate = open_will_truncate(open_flag, nd->path.dentry->d_inode);
1567 if (will_truncate) {
1568 error = mnt_want_write(nd->path.mnt);
1569 if (error)
1570 goto exit;
1571 }
1572 error = may_open(&nd->path, acc_mode, open_flag);
1573 if (error) {
1574 if (will_truncate)
1575 mnt_drop_write(nd->path.mnt);
1576 goto exit;
1577 }
1578 filp = nameidata_to_filp(nd);
1579 if (!IS_ERR(filp)) {
1580 error = ima_file_check(filp, acc_mode);
1581 if (error) {
1582 fput(filp);
1583 filp = ERR_PTR(error);
1584 }
1585 }
1586 if (!IS_ERR(filp)) {
1587 if (will_truncate) {
1588 error = handle_truncate(&nd->path);
1589 if (error) {
1590 fput(filp);
1591 filp = ERR_PTR(error);
1592 }
1593 }
1594 }
1622 /* 1595 /*
1623 * O_SYNC is implemented as __O_SYNC|O_DSYNC. As many places only 1596 * It is now safe to drop the mnt write
1624 * check for O_DSYNC if they need any syncing at all we enforce it's 1597 * because the filp has had a write taken
1625 * always set instead of having to deal with possibly weird behaviour 1598 * on its behalf.
1626 * for malicious applications setting only __O_SYNC.
1627 */ 1599 */
1628 if (open_flag & __O_SYNC) 1600 if (will_truncate)
1629 open_flag |= O_DSYNC; 1601 mnt_drop_write(nd->path.mnt);
1630 1602 return filp;
1631 if (!acc_mode)
1632 acc_mode = MAY_OPEN | ACC_MODE(open_flag);
1633 1603
1634 /* O_TRUNC implies we need access checks for write permissions */ 1604exit:
1635 if (flag & O_TRUNC) 1605 if (!IS_ERR(nd->intent.open.file))
1636 acc_mode |= MAY_WRITE; 1606 release_open_intent(nd);
1607 path_put(&nd->path);
1608 return ERR_PTR(error);
1609}
1637 1610
1638 /* Allow the LSM permission hook to distinguish append 1611static struct file *do_last(struct nameidata *nd, struct path *path,
1639 access from general write access. */ 1612 int open_flag, int acc_mode,
1640 if (flag & O_APPEND) 1613 int mode, const char *pathname,
1641 acc_mode |= MAY_APPEND; 1614 int *want_dir)
1615{
1616 struct dentry *dir = nd->path.dentry;
1617 struct file *filp;
1618 int error = -EISDIR;
1642 1619
1643 /* 1620 switch (nd->last_type) {
1644 * The simplest case - just a plain lookup. 1621 case LAST_DOTDOT:
1645 */ 1622 follow_dotdot(nd);
1646 if (!(flag & O_CREAT)) { 1623 dir = nd->path.dentry;
1647 filp = get_empty_filp(); 1624 if (nd->path.mnt->mnt_sb->s_type->fs_flags & FS_REVAL_DOT) {
1648 1625 if (!dir->d_op->d_revalidate(dir, nd)) {
1649 if (filp == NULL) 1626 error = -ESTALE;
1650 return ERR_PTR(-ENFILE); 1627 goto exit;
1651 nd.intent.open.file = filp;
1652 filp->f_flags = open_flag;
1653 nd.intent.open.flags = flag;
1654 nd.intent.open.create_mode = 0;
1655 error = do_path_lookup(dfd, pathname,
1656 lookup_flags(flag)|LOOKUP_OPEN, &nd);
1657 if (IS_ERR(nd.intent.open.file)) {
1658 if (error == 0) {
1659 error = PTR_ERR(nd.intent.open.file);
1660 path_put(&nd.path);
1661 } 1628 }
1662 } else if (error) 1629 }
1663 release_open_intent(&nd); 1630 /* fallthrough */
1664 if (error) 1631 case LAST_DOT:
1665 return ERR_PTR(error); 1632 case LAST_ROOT:
1633 if (open_flag & O_CREAT)
1634 goto exit;
1635 /* fallthrough */
1636 case LAST_BIND:
1637 audit_inode(pathname, dir);
1666 goto ok; 1638 goto ok;
1667 } 1639 }
1668 1640
1669 /* 1641 /* trailing slashes? */
1670 * Create - we need to know the parent. 1642 if (nd->last.name[nd->last.len]) {
1671 */ 1643 if (open_flag & O_CREAT)
1672reval: 1644 goto exit;
1673 error = path_init(dfd, pathname, LOOKUP_PARENT, &nd); 1645 *want_dir = 1;
1674 if (error)
1675 return ERR_PTR(error);
1676 if (force_reval)
1677 nd.flags |= LOOKUP_REVAL;
1678 error = path_walk(pathname, &nd);
1679 if (error) {
1680 if (nd.root.mnt)
1681 path_put(&nd.root);
1682 return ERR_PTR(error);
1683 } 1646 }
1684 if (unlikely(!audit_dummy_context()))
1685 audit_inode(pathname, nd.path.dentry);
1686 1647
1687 /* 1648 /* just plain open? */
1688 * We have the parent and last component. First of all, check 1649 if (!(open_flag & O_CREAT)) {
1689 * that we are not asked to creat(2) an obvious directory - that 1650 error = do_lookup(nd, &nd->last, path);
1690 * will not do. 1651 if (error)
1691 */ 1652 goto exit;
1692 error = -EISDIR; 1653 error = -ENOENT;
1693 if (nd.last_type != LAST_NORM || nd.last.name[nd.last.len]) 1654 if (!path->dentry->d_inode)
1694 goto exit_parent; 1655 goto exit_dput;
1656 if (path->dentry->d_inode->i_op->follow_link)
1657 return NULL;
1658 error = -ENOTDIR;
1659 if (*want_dir && !path->dentry->d_inode->i_op->lookup)
1660 goto exit_dput;
1661 path_to_nameidata(path, nd);
1662 audit_inode(pathname, nd->path.dentry);
1663 goto ok;
1664 }
1695 1665
1696 error = -ENFILE; 1666 /* OK, it's O_CREAT */
1697 filp = get_empty_filp();
1698 if (filp == NULL)
1699 goto exit_parent;
1700 nd.intent.open.file = filp;
1701 filp->f_flags = open_flag;
1702 nd.intent.open.flags = flag;
1703 nd.intent.open.create_mode = mode;
1704 dir = nd.path.dentry;
1705 nd.flags &= ~LOOKUP_PARENT;
1706 nd.flags |= LOOKUP_CREATE | LOOKUP_OPEN;
1707 if (flag & O_EXCL)
1708 nd.flags |= LOOKUP_EXCL;
1709 mutex_lock(&dir->d_inode->i_mutex); 1667 mutex_lock(&dir->d_inode->i_mutex);
1710 path.dentry = lookup_hash(&nd);
1711 path.mnt = nd.path.mnt;
1712 1668
1713do_last: 1669 path->dentry = lookup_hash(nd);
1714 error = PTR_ERR(path.dentry); 1670 path->mnt = nd->path.mnt;
1715 if (IS_ERR(path.dentry)) { 1671
1672 error = PTR_ERR(path->dentry);
1673 if (IS_ERR(path->dentry)) {
1716 mutex_unlock(&dir->d_inode->i_mutex); 1674 mutex_unlock(&dir->d_inode->i_mutex);
1717 goto exit; 1675 goto exit;
1718 } 1676 }
1719 1677
1720 if (IS_ERR(nd.intent.open.file)) { 1678 if (IS_ERR(nd->intent.open.file)) {
1721 error = PTR_ERR(nd.intent.open.file); 1679 error = PTR_ERR(nd->intent.open.file);
1722 goto exit_mutex_unlock; 1680 goto exit_mutex_unlock;
1723 } 1681 }
1724 1682
1725 /* Negative dentry, just create the file */ 1683 /* Negative dentry, just create the file */
1726 if (!path.dentry->d_inode) { 1684 if (!path->dentry->d_inode) {
1727 /* 1685 /*
1728 * This write is needed to ensure that a 1686 * This write is needed to ensure that a
1729 * ro->rw transition does not occur between 1687 * ro->rw transition does not occur between
@@ -1731,18 +1689,16 @@ do_last:
1731 * a permanent write count is taken through 1689 * a permanent write count is taken through
1732 * the 'struct file' in nameidata_to_filp(). 1690 * the 'struct file' in nameidata_to_filp().
1733 */ 1691 */
1734 error = mnt_want_write(nd.path.mnt); 1692 error = mnt_want_write(nd->path.mnt);
1735 if (error) 1693 if (error)
1736 goto exit_mutex_unlock; 1694 goto exit_mutex_unlock;
1737 error = __open_namei_create(&nd, &path, flag, mode); 1695 error = __open_namei_create(nd, path, open_flag, mode);
1738 if (error) { 1696 if (error) {
1739 mnt_drop_write(nd.path.mnt); 1697 mnt_drop_write(nd->path.mnt);
1740 goto exit; 1698 goto exit;
1741 } 1699 }
1742 filp = nameidata_to_filp(&nd); 1700 filp = nameidata_to_filp(nd);
1743 mnt_drop_write(nd.path.mnt); 1701 mnt_drop_write(nd->path.mnt);
1744 if (nd.root.mnt)
1745 path_put(&nd.root);
1746 if (!IS_ERR(filp)) { 1702 if (!IS_ERR(filp)) {
1747 error = ima_file_check(filp, acc_mode); 1703 error = ima_file_check(filp, acc_mode);
1748 if (error) { 1704 if (error) {
@@ -1757,147 +1713,181 @@ do_last:
1757 * It already exists. 1713 * It already exists.
1758 */ 1714 */
1759 mutex_unlock(&dir->d_inode->i_mutex); 1715 mutex_unlock(&dir->d_inode->i_mutex);
1760 audit_inode(pathname, path.dentry); 1716 audit_inode(pathname, path->dentry);
1761 1717
1762 error = -EEXIST; 1718 error = -EEXIST;
1763 if (flag & O_EXCL) 1719 if (open_flag & O_EXCL)
1764 goto exit_dput; 1720 goto exit_dput;
1765 1721
1766 if (__follow_mount(&path)) { 1722 if (__follow_mount(path)) {
1767 error = -ELOOP; 1723 error = -ELOOP;
1768 if (flag & O_NOFOLLOW) 1724 if (open_flag & O_NOFOLLOW)
1769 goto exit_dput; 1725 goto exit_dput;
1770 } 1726 }
1771 1727
1772 error = -ENOENT; 1728 error = -ENOENT;
1773 if (!path.dentry->d_inode) 1729 if (!path->dentry->d_inode)
1774 goto exit_dput; 1730 goto exit_dput;
1775 if (path.dentry->d_inode->i_op->follow_link)
1776 goto do_link;
1777 1731
1778 path_to_nameidata(&path, &nd); 1732 if (path->dentry->d_inode->i_op->follow_link)
1733 return NULL;
1734
1735 path_to_nameidata(path, nd);
1779 error = -EISDIR; 1736 error = -EISDIR;
1780 if (S_ISDIR(path.dentry->d_inode->i_mode)) 1737 if (S_ISDIR(path->dentry->d_inode->i_mode))
1781 goto exit; 1738 goto exit;
1782ok: 1739ok:
1740 filp = finish_open(nd, open_flag, acc_mode);
1741 return filp;
1742
1743exit_mutex_unlock:
1744 mutex_unlock(&dir->d_inode->i_mutex);
1745exit_dput:
1746 path_put_conditional(path, nd);
1747exit:
1748 if (!IS_ERR(nd->intent.open.file))
1749 release_open_intent(nd);
1750 path_put(&nd->path);
1751 return ERR_PTR(error);
1752}
1753
1754/*
1755 * Note that the low bits of the passed in "open_flag"
1756 * are not the same as in the local variable "flag". See
1757 * open_to_namei_flags() for more details.
1758 */
1759struct file *do_filp_open(int dfd, const char *pathname,
1760 int open_flag, int mode, int acc_mode)
1761{
1762 struct file *filp;
1763 struct nameidata nd;
1764 int error;
1765 struct path path;
1766 int count = 0;
1767 int flag = open_to_namei_flags(open_flag);
1768 int force_reval = 0;
1769 int want_dir = open_flag & O_DIRECTORY;
1770
1771 if (!(open_flag & O_CREAT))
1772 mode = 0;
1773
1783 /* 1774 /*
1784 * Consider: 1775 * O_SYNC is implemented as __O_SYNC|O_DSYNC. As many places only
1785 * 1. may_open() truncates a file 1776 * check for O_DSYNC if they need any syncing at all we enforce it's
1786 * 2. a rw->ro mount transition occurs 1777 * always set instead of having to deal with possibly weird behaviour
1787 * 3. nameidata_to_filp() fails due to 1778 * for malicious applications setting only __O_SYNC.
1788 * the ro mount.
1789 * That would be inconsistent, and should
1790 * be avoided. Taking this mnt write here
1791 * ensures that (2) can not occur.
1792 */ 1779 */
1793 will_truncate = open_will_truncate(flag, nd.path.dentry->d_inode); 1780 if (open_flag & __O_SYNC)
1794 if (will_truncate) { 1781 open_flag |= O_DSYNC;
1795 error = mnt_want_write(nd.path.mnt); 1782
1796 if (error) 1783 if (!acc_mode)
1797 goto exit; 1784 acc_mode = MAY_OPEN | ACC_MODE(open_flag);
1798 } 1785
1799 error = may_open(&nd.path, acc_mode, flag); 1786 /* O_TRUNC implies we need access checks for write permissions */
1787 if (open_flag & O_TRUNC)
1788 acc_mode |= MAY_WRITE;
1789
1790 /* Allow the LSM permission hook to distinguish append
1791 access from general write access. */
1792 if (open_flag & O_APPEND)
1793 acc_mode |= MAY_APPEND;
1794
1795 /* find the parent */
1796reval:
1797 error = path_init(dfd, pathname, LOOKUP_PARENT, &nd);
1798 if (error)
1799 return ERR_PTR(error);
1800 if (force_reval)
1801 nd.flags |= LOOKUP_REVAL;
1802
1803 current->total_link_count = 0;
1804 error = link_path_walk(pathname, &nd);
1800 if (error) { 1805 if (error) {
1801 if (will_truncate) 1806 filp = ERR_PTR(error);
1802 mnt_drop_write(nd.path.mnt); 1807 goto out;
1803 goto exit;
1804 } 1808 }
1805 filp = nameidata_to_filp(&nd); 1809 if (unlikely(!audit_dummy_context()) && (open_flag & O_CREAT))
1806 if (!IS_ERR(filp)) { 1810 audit_inode(pathname, nd.path.dentry);
1807 error = ima_file_check(filp, acc_mode); 1811
1808 if (error) { 1812 /*
1809 fput(filp); 1813 * We have the parent and last component.
1814 */
1815
1816 error = -ENFILE;
1817 filp = get_empty_filp();
1818 if (filp == NULL)
1819 goto exit_parent;
1820 nd.intent.open.file = filp;
1821 filp->f_flags = open_flag;
1822 nd.intent.open.flags = flag;
1823 nd.intent.open.create_mode = mode;
1824 nd.flags &= ~LOOKUP_PARENT;
1825 nd.flags |= LOOKUP_OPEN;
1826 if (open_flag & O_CREAT) {
1827 nd.flags |= LOOKUP_CREATE;
1828 if (open_flag & O_EXCL)
1829 nd.flags |= LOOKUP_EXCL;
1830 }
1831 filp = do_last(&nd, &path, open_flag, acc_mode, mode, pathname, &want_dir);
1832 while (unlikely(!filp)) { /* trailing symlink */
1833 struct path holder;
1834 struct inode *inode = path.dentry->d_inode;
1835 void *cookie;
1836 error = -ELOOP;
1837 /* S_ISDIR part is a temporary automount kludge */
1838 if ((open_flag & O_NOFOLLOW) && !S_ISDIR(inode->i_mode))
1839 goto exit_dput;
1840 if (count++ == 32)
1841 goto exit_dput;
1842 /*
1843 * This is subtle. Instead of calling do_follow_link() we do
1844 * the thing by hands. The reason is that this way we have zero
1845 * link_count and path_walk() (called from ->follow_link)
1846 * honoring LOOKUP_PARENT. After that we have the parent and
1847 * last component, i.e. we are in the same situation as after
1848 * the first path_walk(). Well, almost - if the last component
1849 * is normal we get its copy stored in nd->last.name and we will
1850 * have to putname() it when we are done. Procfs-like symlinks
1851 * just set LAST_BIND.
1852 */
1853 nd.flags |= LOOKUP_PARENT;
1854 error = security_inode_follow_link(path.dentry, &nd);
1855 if (error)
1856 goto exit_dput;
1857 error = __do_follow_link(&path, &nd, &cookie);
1858 if (unlikely(error)) {
1859 /* nd.path had been dropped */
1860 if (!IS_ERR(cookie) && inode->i_op->put_link)
1861 inode->i_op->put_link(path.dentry, &nd, cookie);
1862 path_put(&path);
1863 release_open_intent(&nd);
1810 filp = ERR_PTR(error); 1864 filp = ERR_PTR(error);
1865 goto out;
1811 } 1866 }
1867 holder = path;
1868 nd.flags &= ~LOOKUP_PARENT;
1869 filp = do_last(&nd, &path, open_flag, acc_mode, mode, pathname, &want_dir);
1870 if (inode->i_op->put_link)
1871 inode->i_op->put_link(holder.dentry, &nd, cookie);
1872 path_put(&holder);
1812 } 1873 }
1813 if (!IS_ERR(filp)) { 1874out:
1814 if (will_truncate) {
1815 error = handle_truncate(&nd.path);
1816 if (error) {
1817 fput(filp);
1818 filp = ERR_PTR(error);
1819 }
1820 }
1821 }
1822 /*
1823 * It is now safe to drop the mnt write
1824 * because the filp has had a write taken
1825 * on its behalf.
1826 */
1827 if (will_truncate)
1828 mnt_drop_write(nd.path.mnt);
1829 if (nd.root.mnt) 1875 if (nd.root.mnt)
1830 path_put(&nd.root); 1876 path_put(&nd.root);
1877 if (filp == ERR_PTR(-ESTALE) && !force_reval) {
1878 force_reval = 1;
1879 goto reval;
1880 }
1831 return filp; 1881 return filp;
1832 1882
1833exit_mutex_unlock:
1834 mutex_unlock(&dir->d_inode->i_mutex);
1835exit_dput: 1883exit_dput:
1836 path_put_conditional(&path, &nd); 1884 path_put_conditional(&path, &nd);
1837exit:
1838 if (!IS_ERR(nd.intent.open.file)) 1885 if (!IS_ERR(nd.intent.open.file))
1839 release_open_intent(&nd); 1886 release_open_intent(&nd);
1840exit_parent: 1887exit_parent:
1841 if (nd.root.mnt)
1842 path_put(&nd.root);
1843 path_put(&nd.path); 1888 path_put(&nd.path);
1844 return ERR_PTR(error); 1889 filp = ERR_PTR(error);
1845 1890 goto out;
1846do_link:
1847 error = -ELOOP;
1848 if (flag & O_NOFOLLOW)
1849 goto exit_dput;
1850 /*
1851 * This is subtle. Instead of calling do_follow_link() we do the
1852 * thing by hands. The reason is that this way we have zero link_count
1853 * and path_walk() (called from ->follow_link) honoring LOOKUP_PARENT.
1854 * After that we have the parent and last component, i.e.
1855 * we are in the same situation as after the first path_walk().
1856 * Well, almost - if the last component is normal we get its copy
1857 * stored in nd->last.name and we will have to putname() it when we
1858 * are done. Procfs-like symlinks just set LAST_BIND.
1859 */
1860 nd.flags |= LOOKUP_PARENT;
1861 error = security_inode_follow_link(path.dentry, &nd);
1862 if (error)
1863 goto exit_dput;
1864 error = __do_follow_link(&path, &nd);
1865 path_put(&path);
1866 if (error) {
1867 /* Does someone understand code flow here? Or it is only
1868 * me so stupid? Anathema to whoever designed this non-sense
1869 * with "intent.open".
1870 */
1871 release_open_intent(&nd);
1872 if (nd.root.mnt)
1873 path_put(&nd.root);
1874 if (error == -ESTALE && !force_reval) {
1875 force_reval = 1;
1876 goto reval;
1877 }
1878 return ERR_PTR(error);
1879 }
1880 nd.flags &= ~LOOKUP_PARENT;
1881 if (nd.last_type == LAST_BIND)
1882 goto ok;
1883 error = -EISDIR;
1884 if (nd.last_type != LAST_NORM)
1885 goto exit;
1886 if (nd.last.name[nd.last.len]) {
1887 __putname(nd.last.name);
1888 goto exit;
1889 }
1890 error = -ELOOP;
1891 if (count++==32) {
1892 __putname(nd.last.name);
1893 goto exit;
1894 }
1895 dir = nd.path.dentry;
1896 mutex_lock(&dir->d_inode->i_mutex);
1897 path.dentry = lookup_hash(&nd);
1898 path.mnt = nd.path.mnt;
1899 __putname(nd.last.name);
1900 goto do_last;
1901} 1891}
1902 1892
1903/** 1893/**
@@ -2264,8 +2254,11 @@ int vfs_unlink(struct inode *dir, struct dentry *dentry)
2264 error = -EBUSY; 2254 error = -EBUSY;
2265 else { 2255 else {
2266 error = security_inode_unlink(dir, dentry); 2256 error = security_inode_unlink(dir, dentry);
2267 if (!error) 2257 if (!error) {
2268 error = dir->i_op->unlink(dir, dentry); 2258 error = dir->i_op->unlink(dir, dentry);
2259 if (!error)
2260 dentry->d_inode->i_flags |= S_DEAD;
2261 }
2269 } 2262 }
2270 mutex_unlock(&dentry->d_inode->i_mutex); 2263 mutex_unlock(&dentry->d_inode->i_mutex);
2271 2264
@@ -2616,6 +2609,8 @@ static int vfs_rename_other(struct inode *old_dir, struct dentry *old_dentry,
2616 else 2609 else
2617 error = old_dir->i_op->rename(old_dir, old_dentry, new_dir, new_dentry); 2610 error = old_dir->i_op->rename(old_dir, old_dentry, new_dir, new_dentry);
2618 if (!error) { 2611 if (!error) {
2612 if (target)
2613 target->i_flags |= S_DEAD;
2619 if (!(old_dir->i_sb->s_type->fs_flags & FS_RENAME_DOES_D_MOVE)) 2614 if (!(old_dir->i_sb->s_type->fs_flags & FS_RENAME_DOES_D_MOVE))
2620 d_move(old_dentry, new_dentry); 2615 d_move(old_dentry, new_dentry);
2621 } 2616 }
@@ -2655,11 +2650,9 @@ int vfs_rename(struct inode *old_dir, struct dentry *old_dentry,
2655 error = vfs_rename_dir(old_dir,old_dentry,new_dir,new_dentry); 2650 error = vfs_rename_dir(old_dir,old_dentry,new_dir,new_dentry);
2656 else 2651 else
2657 error = vfs_rename_other(old_dir,old_dentry,new_dir,new_dentry); 2652 error = vfs_rename_other(old_dir,old_dentry,new_dir,new_dentry);
2658 if (!error) { 2653 if (!error)
2659 const char *new_name = old_dentry->d_name.name; 2654 fsnotify_move(old_dir, new_dir, old_name, is_dir,
2660 fsnotify_move(old_dir, new_dir, old_name, new_name, is_dir,
2661 new_dentry->d_inode, old_dentry); 2655 new_dentry->d_inode, old_dentry);
2662 }
2663 fsnotify_oldname_free(old_name); 2656 fsnotify_oldname_free(old_name);
2664 2657
2665 return error; 2658 return error;
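
The acc_mode computation in do_filp_open() above depends on ACC_MODE() being applied to the raw O_* flags. For reference, the definition from this era's include/linux/fs.h maps the two O_ACCMODE bits straight to MAY_* permission bits (quoted here only to make the mapping visible; verify against your tree):

	/* O_RDONLY (0) -> MAY_READ, O_WRONLY (1) -> MAY_WRITE,
	 * O_RDWR (2) -> MAY_READ | MAY_WRITE */
	#define ACC_MODE(x) ("\004\002\006\006"[(x)&O_ACCMODE])
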
diff --git a/fs/namespace.c b/fs/namespace.c
index c768f733c8d6..8174c8ab5c70 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -573,7 +573,7 @@ static struct vfsmount *clone_mnt(struct vfsmount *old, struct dentry *root,
573 mnt->mnt_master = old; 573 mnt->mnt_master = old;
574 CLEAR_MNT_SHARED(mnt); 574 CLEAR_MNT_SHARED(mnt);
575 } else if (!(flag & CL_PRIVATE)) { 575 } else if (!(flag & CL_PRIVATE)) {
576 if ((flag & CL_PROPAGATION) || IS_MNT_SHARED(old)) 576 if ((flag & CL_MAKE_SHARED) || IS_MNT_SHARED(old))
577 list_add(&mnt->mnt_share, &old->mnt_share); 577 list_add(&mnt->mnt_share, &old->mnt_share);
578 if (IS_MNT_SLAVE(old)) 578 if (IS_MNT_SLAVE(old))
579 list_add(&mnt->mnt_slave, &old->mnt_slave); 579 list_add(&mnt->mnt_slave, &old->mnt_slave);
@@ -737,6 +737,21 @@ static void m_stop(struct seq_file *m, void *v)
737 up_read(&namespace_sem); 737 up_read(&namespace_sem);
738} 738}
739 739
740int mnt_had_events(struct proc_mounts *p)
741{
742 struct mnt_namespace *ns = p->ns;
743 int res = 0;
744
745 spin_lock(&vfsmount_lock);
746 if (p->event != ns->event) {
747 p->event = ns->event;
748 res = 1;
749 }
750 spin_unlock(&vfsmount_lock);
751
752 return res;
753}
754
740struct proc_fs_info { 755struct proc_fs_info {
741 int flag; 756 int flag;
742 const char *str; 757 const char *str;
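
mnt_had_events() exists so the /proc mounts files can report namespace changes through poll(). A sketch of the intended consumer, modelled on this era's mounts_poll() (details are illustrative, not part of this hunk):

	static unsigned int mounts_poll(struct file *file, poll_table *wait)
	{
		struct proc_mounts *p = file->private_data;
		unsigned int res = POLLIN | POLLRDNORM;

		poll_wait(file, &p->ns->poll, wait);
		if (mnt_had_events(p))
			res |= POLLERR | POLLPRI;
		return res;
	}
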
@@ -1121,8 +1136,15 @@ SYSCALL_DEFINE2(umount, char __user *, name, int, flags)
1121{ 1136{
1122 struct path path; 1137 struct path path;
1123 int retval; 1138 int retval;
1139 int lookup_flags = 0;
1124 1140
1125 retval = user_path(name, &path); 1141 if (flags & ~(MNT_FORCE | MNT_DETACH | MNT_EXPIRE | UMOUNT_NOFOLLOW))
1142 return -EINVAL;
1143
1144 if (!(flags & UMOUNT_NOFOLLOW))
1145 lookup_flags |= LOOKUP_FOLLOW;
1146
1147 retval = user_path_at(AT_FDCWD, name, lookup_flags, &path);
1126 if (retval) 1148 if (retval)
1127 goto out; 1149 goto out;
1128 retval = -EINVAL; 1150 retval = -EINVAL;
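
From userspace the umount() change is visible in two ways: undefined flag bits now fail with EINVAL instead of being silently ignored, and the new UMOUNT_NOFOLLOW flag refuses to dereference a trailing symlink. A hypothetical caller (the flag value matches the fs.h addition in this series; the guard covers older headers):

	#include <sys/mount.h>

	#ifndef UMOUNT_NOFOLLOW
	#define UMOUNT_NOFOLLOW	0x00000008	/* added by this series */
	#endif

	/* don't let an untrusted symlink redirect the umount */
	if (umount2("/mnt/guest", UMOUNT_NOFOLLOW) < 0)
		perror("umount2");
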
@@ -1246,6 +1268,21 @@ void drop_collected_mounts(struct vfsmount *mnt)
1246 release_mounts(&umount_list); 1268 release_mounts(&umount_list);
1247} 1269}
1248 1270
1271int iterate_mounts(int (*f)(struct vfsmount *, void *), void *arg,
1272 struct vfsmount *root)
1273{
1274 struct vfsmount *mnt;
1275 int res = f(root, arg);
1276 if (res)
1277 return res;
1278 list_for_each_entry(mnt, &root->mnt_list, mnt_list) {
1279 res = f(mnt, arg);
1280 if (res)
1281 return res;
1282 }
1283 return 0;
1284}
1285
1249static void cleanup_group_ids(struct vfsmount *mnt, struct vfsmount *end) 1286static void cleanup_group_ids(struct vfsmount *mnt, struct vfsmount *end)
1250{ 1287{
1251 struct vfsmount *p; 1288 struct vfsmount *p;
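
iterate_mounts() applies a callback to a mount and to every mount in the collected tree under it, aborting on the first nonzero return; the in-tree consumer is the audit tree code. A sketch of a caller (count_mnt() and collected_root are made up for illustration):

	static int count_mnt(struct vfsmount *mnt, void *arg)
	{
		(*(int *)arg)++;
		return 0;	/* nonzero would stop the walk */
	}

	int n = 0;
	iterate_mounts(count_mnt, &n, collected_root);
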
@@ -1538,7 +1575,7 @@ static int do_remount(struct path *path, int flags, int mnt_flags,
1538 err = do_remount_sb(sb, flags, data, 0); 1575 err = do_remount_sb(sb, flags, data, 0);
1539 if (!err) { 1576 if (!err) {
1540 spin_lock(&vfsmount_lock); 1577 spin_lock(&vfsmount_lock);
1541 mnt_flags |= path->mnt->mnt_flags & MNT_PNODE_MASK; 1578 mnt_flags |= path->mnt->mnt_flags & MNT_PROPAGATION_MASK;
1542 path->mnt->mnt_flags = mnt_flags; 1579 path->mnt->mnt_flags = mnt_flags;
1543 spin_unlock(&vfsmount_lock); 1580 spin_unlock(&vfsmount_lock);
1544 } 1581 }
@@ -1671,7 +1708,7 @@ int do_add_mount(struct vfsmount *newmnt, struct path *path,
1671{ 1708{
1672 int err; 1709 int err;
1673 1710
1674 mnt_flags &= ~(MNT_SHARED | MNT_WRITE_HOLD); 1711 mnt_flags &= ~(MNT_SHARED | MNT_WRITE_HOLD | MNT_INTERNAL);
1675 1712
1676 down_write(&namespace_sem); 1713 down_write(&namespace_sem);
1677 /* Something was mounted here while we slept */ 1714 /* Something was mounted here while we slept */
@@ -2314,17 +2351,13 @@ void __init mnt_init(void)
2314 2351
2315void put_mnt_ns(struct mnt_namespace *ns) 2352void put_mnt_ns(struct mnt_namespace *ns)
2316{ 2353{
2317 struct vfsmount *root;
2318 LIST_HEAD(umount_list); 2354 LIST_HEAD(umount_list);
2319 2355
2320 if (!atomic_dec_and_lock(&ns->count, &vfsmount_lock)) 2356 if (!atomic_dec_and_test(&ns->count))
2321 return; 2357 return;
2322 root = ns->root;
2323 ns->root = NULL;
2324 spin_unlock(&vfsmount_lock);
2325 down_write(&namespace_sem); 2358 down_write(&namespace_sem);
2326 spin_lock(&vfsmount_lock); 2359 spin_lock(&vfsmount_lock);
2327 umount_tree(root, 0, &umount_list); 2360 umount_tree(ns->root, 0, &umount_list);
2328 spin_unlock(&vfsmount_lock); 2361 spin_unlock(&vfsmount_lock);
2329 up_write(&namespace_sem); 2362 up_write(&namespace_sem);
2330 release_mounts(&umount_list); 2363 release_mounts(&umount_list);
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index f141bde7756a..7f9ecc46f3fb 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -97,16 +97,12 @@ u64 nfs_compat_user_ino64(u64 fileid)
97 return ino; 97 return ino;
98} 98}
99 99
100int nfs_write_inode(struct inode *inode, int sync) 100int nfs_write_inode(struct inode *inode, struct writeback_control *wbc)
101{ 101{
102 int ret; 102 int ret;
103 103
104 if (sync) { 104 ret = nfs_commit_inode(inode,
105 ret = filemap_fdatawait(inode->i_mapping); 105 wbc->sync_mode == WB_SYNC_ALL ? FLUSH_SYNC : 0);
106 if (ret == 0)
107 ret = nfs_commit_inode(inode, FLUSH_SYNC);
108 } else
109 ret = nfs_commit_inode(inode, 0);
110 if (ret >= 0) 106 if (ret >= 0)
111 return 0; 107 return 0;
112 __mark_inode_dirty(inode, I_DIRTY_DATASYNC); 108 __mark_inode_dirty(inode, I_DIRTY_DATASYNC);
@@ -574,14 +570,14 @@ void nfs_close_context(struct nfs_open_context *ctx, int is_sync)
574 nfs_revalidate_inode(server, inode); 570 nfs_revalidate_inode(server, inode);
575} 571}
576 572
577static struct nfs_open_context *alloc_nfs_open_context(struct vfsmount *mnt, struct dentry *dentry, struct rpc_cred *cred) 573static struct nfs_open_context *alloc_nfs_open_context(struct path *path, struct rpc_cred *cred)
578{ 574{
579 struct nfs_open_context *ctx; 575 struct nfs_open_context *ctx;
580 576
581 ctx = kmalloc(sizeof(*ctx), GFP_KERNEL); 577 ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
582 if (ctx != NULL) { 578 if (ctx != NULL) {
583 ctx->path.dentry = dget(dentry); 579 ctx->path = *path;
584 ctx->path.mnt = mntget(mnt); 580 path_get(&ctx->path);
585 ctx->cred = get_rpccred(cred); 581 ctx->cred = get_rpccred(cred);
586 ctx->state = NULL; 582 ctx->state = NULL;
587 ctx->lockowner = current->files; 583 ctx->lockowner = current->files;
@@ -686,7 +682,7 @@ int nfs_open(struct inode *inode, struct file *filp)
686 cred = rpc_lookup_cred(); 682 cred = rpc_lookup_cred();
687 if (IS_ERR(cred)) 683 if (IS_ERR(cred))
688 return PTR_ERR(cred); 684 return PTR_ERR(cred);
689 ctx = alloc_nfs_open_context(filp->f_path.mnt, filp->f_path.dentry, cred); 685 ctx = alloc_nfs_open_context(&filp->f_path, cred);
690 put_rpccred(cred); 686 put_rpccred(cred);
691 if (ctx == NULL) 687 if (ctx == NULL)
692 return -ENOMEM; 688 return -ENOMEM;
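
The open-context hunks replace hand-rolled mntget()/dget() pairs with a struct path copy plus path_get(). For reference, path_get() of this era is exactly the bundled form of those two reference counts (quoted from fs/namei.c to make the equivalence explicit):

	void path_get(struct path *path)
	{
		mntget(path->mnt);
		dget(path->dentry);
	}
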
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index 29e464d23b32..11f82f03c5de 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -211,7 +211,7 @@ extern int nfs_access_cache_shrinker(int nr_to_scan, gfp_t gfp_mask);
211extern struct workqueue_struct *nfsiod_workqueue; 211extern struct workqueue_struct *nfsiod_workqueue;
212extern struct inode *nfs_alloc_inode(struct super_block *sb); 212extern struct inode *nfs_alloc_inode(struct super_block *sb);
213extern void nfs_destroy_inode(struct inode *); 213extern void nfs_destroy_inode(struct inode *);
214extern int nfs_write_inode(struct inode *,int); 214extern int nfs_write_inode(struct inode *, struct writeback_control *);
215extern void nfs_clear_inode(struct inode *); 215extern void nfs_clear_inode(struct inode *);
216#ifdef CONFIG_NFS_V4 216#ifdef CONFIG_NFS_V4
217extern void nfs4_clear_inode(struct inode *); 217extern void nfs4_clear_inode(struct inode *);
diff --git a/fs/nfs/iostat.h b/fs/nfs/iostat.h
index 46d779abafd3..1d8d5c813b01 100644
--- a/fs/nfs/iostat.h
+++ b/fs/nfs/iostat.h
@@ -57,12 +57,12 @@ static inline void nfs_add_fscache_stats(struct inode *inode,
57} 57}
58#endif 58#endif
59 59
60static inline struct nfs_iostats *nfs_alloc_iostats(void) 60static inline struct nfs_iostats __percpu *nfs_alloc_iostats(void)
61{ 61{
62 return alloc_percpu(struct nfs_iostats); 62 return alloc_percpu(struct nfs_iostats);
63} 63}
64 64
65static inline void nfs_free_iostats(struct nfs_iostats *stats) 65static inline void nfs_free_iostats(struct nfs_iostats __percpu *stats)
66{ 66{
67 if (stats != NULL) 67 if (stats != NULL)
68 free_percpu(stats); 68 free_percpu(stats);
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 375f0fae2c6a..84d83be25a98 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -724,8 +724,8 @@ static struct nfs4_opendata *nfs4_opendata_alloc(struct path *path,
724 p->o_arg.seqid = nfs_alloc_seqid(&sp->so_seqid); 724 p->o_arg.seqid = nfs_alloc_seqid(&sp->so_seqid);
725 if (p->o_arg.seqid == NULL) 725 if (p->o_arg.seqid == NULL)
726 goto err_free; 726 goto err_free;
727 p->path.mnt = mntget(path->mnt); 727 path_get(path);
728 p->path.dentry = dget(path->dentry); 728 p->path = *path;
729 p->dir = parent; 729 p->dir = parent;
730 p->owner = sp; 730 p->owner = sp;
731 atomic_inc(&sp->so_count); 731 atomic_inc(&sp->so_count);
@@ -1944,8 +1944,8 @@ int nfs4_do_close(struct path *path, struct nfs4_state *state, int wait)
1944 calldata->res.seqid = calldata->arg.seqid; 1944 calldata->res.seqid = calldata->arg.seqid;
1945 calldata->res.server = server; 1945 calldata->res.server = server;
1946 calldata->res.seq_res.sr_slotid = NFS4_MAX_SLOT_TABLE; 1946 calldata->res.seq_res.sr_slotid = NFS4_MAX_SLOT_TABLE;
1947 calldata->path.mnt = mntget(path->mnt); 1947 path_get(path);
1948 calldata->path.dentry = dget(path->dentry); 1948 calldata->path = *path;
1949 1949
1950 msg.rpc_argp = &calldata->arg, 1950 msg.rpc_argp = &calldata->arg,
1951 msg.rpc_resp = &calldata->res, 1951 msg.rpc_resp = &calldata->res,
diff --git a/fs/nfsctl.c b/fs/nfsctl.c
index d3854d94b7cf..bf9cbd242ddd 100644
--- a/fs/nfsctl.c
+++ b/fs/nfsctl.c
@@ -36,10 +36,9 @@ static struct file *do_open(char *name, int flags)
36 return ERR_PTR(error); 36 return ERR_PTR(error);
37 37
38 if (flags == O_RDWR) 38 if (flags == O_RDWR)
39 error = may_open(&nd.path, MAY_READ|MAY_WRITE, 39 error = may_open(&nd.path, MAY_READ|MAY_WRITE, flags);
40 FMODE_READ|FMODE_WRITE);
41 else 40 else
42 error = may_open(&nd.path, MAY_WRITE, FMODE_WRITE); 41 error = may_open(&nd.path, MAY_WRITE, flags);
43 42
44 if (!error) 43 if (!error)
45 return dentry_open(nd.path.dentry, nd.path.mnt, flags, 44 return dentry_open(nd.path.dentry, nd.path.mnt, flags,
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index a8587e90fd5a..bbf72d8f9fc0 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -2121,9 +2121,15 @@ out_acl:
2121 * and this is the root of a cross-mounted filesystem. 2121 * and this is the root of a cross-mounted filesystem.
2122 */ 2122 */
2123 if (ignore_crossmnt == 0 && 2123 if (ignore_crossmnt == 0 &&
2124 exp->ex_path.mnt->mnt_root->d_inode == dentry->d_inode) { 2124 dentry == exp->ex_path.mnt->mnt_root) {
2125 err = vfs_getattr(exp->ex_path.mnt->mnt_parent, 2125 struct path path = exp->ex_path;
2126 exp->ex_path.mnt->mnt_mountpoint, &stat); 2126 path_get(&path);
2127 while (follow_up(&path)) {
2128 if (path.dentry != path.mnt->mnt_root)
2129 break;
2130 }
2131 err = vfs_getattr(path.mnt, path.dentry, &stat);
2132 path_put(&path);
2127 if (err) 2133 if (err)
2128 goto out_nfserr; 2134 goto out_nfserr;
2129 } 2135 }
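
The getattr fix climbs through arbitrarily deep mount stacks instead of assuming a single mnt_parent hop. Restated as a sketch with editorial comments:

	struct path path = exp->ex_path;
	path_get(&path);
	/* keep climbing while this location is itself the root of a
	 * mount, i.e. while another mount may be stacked on top of it */
	while (follow_up(&path)) {
		if (path.dentry != path.mnt->mnt_root)
			break;	/* reached the covering mountpoint */
	}
	err = vfs_getattr(path.mnt, path.dentry, &stat);
	path_put(&path);
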
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index 09e9fc043600..8eca17df4f63 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -360,7 +360,7 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap,
360 * If we are changing the size of the file, then 360 * If we are changing the size of the file, then
361 * we need to break all leases. 361 * we need to break all leases.
362 */ 362 */
363 host_err = break_lease(inode, FMODE_WRITE | O_NONBLOCK); 363 host_err = break_lease(inode, O_WRONLY | O_NONBLOCK);
364 if (host_err == -EWOULDBLOCK) 364 if (host_err == -EWOULDBLOCK)
365 host_err = -ETIMEDOUT; 365 host_err = -ETIMEDOUT;
366 if (host_err) /* ENOMEM or EWOULDBLOCK */ 366 if (host_err) /* ENOMEM or EWOULDBLOCK */
@@ -732,7 +732,7 @@ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, int type,
732 * Check to see if there are any leases on this file. 732 * Check to see if there are any leases on this file.
733 * This may block while leases are broken. 733 * This may block while leases are broken.
734 */ 734 */
735 host_err = break_lease(inode, O_NONBLOCK | ((access & NFSD_MAY_WRITE) ? FMODE_WRITE : 0)); 735 host_err = break_lease(inode, O_NONBLOCK | ((access & NFSD_MAY_WRITE) ? O_WRONLY : 0));
736 if (host_err == -EWOULDBLOCK) 736 if (host_err == -EWOULDBLOCK)
737 host_err = -ETIMEDOUT; 737 host_err = -ETIMEDOUT;
738 if (host_err) /* NOMEM or WOULDBLOCK */ 738 if (host_err) /* NOMEM or WOULDBLOCK */
diff --git a/fs/nilfs2/dat.c b/fs/nilfs2/dat.c
index 187dd07ba86c..9d1e5de91afb 100644
--- a/fs/nilfs2/dat.c
+++ b/fs/nilfs2/dat.c
@@ -388,8 +388,7 @@ int nilfs_dat_translate(struct inode *dat, __u64 vblocknr, sector_t *blocknrp)
388 ret = -ENOENT; 388 ret = -ENOENT;
389 goto out; 389 goto out;
390 } 390 }
391 if (blocknrp != NULL) 391 *blocknrp = blocknr;
392 *blocknrp = blocknr;
393 392
394 out: 393 out:
395 kunmap_atomic(kaddr, KM_USER0); 394 kunmap_atomic(kaddr, KM_USER0);
diff --git a/fs/nilfs2/dir.c b/fs/nilfs2/dir.c
index 76d803e060a9..0092840492ee 100644
--- a/fs/nilfs2/dir.c
+++ b/fs/nilfs2/dir.c
@@ -224,7 +224,7 @@ fail:
224 * len <= NILFS_NAME_LEN and de != NULL are guaranteed by caller. 224 * len <= NILFS_NAME_LEN and de != NULL are guaranteed by caller.
225 */ 225 */
226static int 226static int
227nilfs_match(int len, const char * const name, struct nilfs_dir_entry *de) 227nilfs_match(int len, const unsigned char *name, struct nilfs_dir_entry *de)
228{ 228{
229 if (len != de->name_len) 229 if (len != de->name_len)
230 return 0; 230 return 0;
@@ -349,11 +349,11 @@ done:
349 * Entry is guaranteed to be valid. 349 * Entry is guaranteed to be valid.
350 */ 350 */
351struct nilfs_dir_entry * 351struct nilfs_dir_entry *
352nilfs_find_entry(struct inode *dir, struct dentry *dentry, 352nilfs_find_entry(struct inode *dir, const struct qstr *qstr,
353 struct page **res_page) 353 struct page **res_page)
354{ 354{
355 const char *name = dentry->d_name.name; 355 const unsigned char *name = qstr->name;
356 int namelen = dentry->d_name.len; 356 int namelen = qstr->len;
357 unsigned reclen = NILFS_DIR_REC_LEN(namelen); 357 unsigned reclen = NILFS_DIR_REC_LEN(namelen);
358 unsigned long start, n; 358 unsigned long start, n;
359 unsigned long npages = dir_pages(dir); 359 unsigned long npages = dir_pages(dir);
@@ -424,13 +424,13 @@ struct nilfs_dir_entry *nilfs_dotdot(struct inode *dir, struct page **p)
424 return de; 424 return de;
425} 425}
426 426
427ino_t nilfs_inode_by_name(struct inode *dir, struct dentry *dentry) 427ino_t nilfs_inode_by_name(struct inode *dir, const struct qstr *qstr)
428{ 428{
429 ino_t res = 0; 429 ino_t res = 0;
430 struct nilfs_dir_entry *de; 430 struct nilfs_dir_entry *de;
431 struct page *page; 431 struct page *page;
432 432
433 de = nilfs_find_entry(dir, dentry, &page); 433 de = nilfs_find_entry(dir, qstr, &page);
434 if (de) { 434 if (de) {
435 res = le64_to_cpu(de->inode); 435 res = le64_to_cpu(de->inode);
436 kunmap(page); 436 kunmap(page);
@@ -465,7 +465,7 @@ void nilfs_set_link(struct inode *dir, struct nilfs_dir_entry *de,
465int nilfs_add_link(struct dentry *dentry, struct inode *inode) 465int nilfs_add_link(struct dentry *dentry, struct inode *inode)
466{ 466{
467 struct inode *dir = dentry->d_parent->d_inode; 467 struct inode *dir = dentry->d_parent->d_inode;
468 const char *name = dentry->d_name.name; 468 const unsigned char *name = dentry->d_name.name;
469 int namelen = dentry->d_name.len; 469 int namelen = dentry->d_name.len;
470 unsigned chunk_size = nilfs_chunk_size(dir); 470 unsigned chunk_size = nilfs_chunk_size(dir);
471 unsigned reclen = NILFS_DIR_REC_LEN(namelen); 471 unsigned reclen = NILFS_DIR_REC_LEN(namelen);
diff --git a/fs/nilfs2/ioctl.c b/fs/nilfs2/ioctl.c
index d6b2b83de363..313d0a21da48 100644
--- a/fs/nilfs2/ioctl.c
+++ b/fs/nilfs2/ioctl.c
@@ -26,6 +26,7 @@
26#include <linux/capability.h> /* capable() */ 26#include <linux/capability.h> /* capable() */
27#include <linux/uaccess.h> /* copy_from_user(), copy_to_user() */ 27#include <linux/uaccess.h> /* copy_from_user(), copy_to_user() */
28#include <linux/vmalloc.h> 28#include <linux/vmalloc.h>
29#include <linux/mount.h> /* mnt_want_write(), mnt_drop_write() */
29#include <linux/nilfs2_fs.h> 30#include <linux/nilfs2_fs.h>
30#include "nilfs.h" 31#include "nilfs.h"
31#include "segment.h" 32#include "segment.h"
@@ -107,20 +108,28 @@ static int nilfs_ioctl_change_cpmode(struct inode *inode, struct file *filp,
107 108
108 if (!capable(CAP_SYS_ADMIN)) 109 if (!capable(CAP_SYS_ADMIN))
109 return -EPERM; 110 return -EPERM;
111
112 ret = mnt_want_write(filp->f_path.mnt);
113 if (ret)
114 return ret;
115
116 ret = -EFAULT;
110 if (copy_from_user(&cpmode, argp, sizeof(cpmode))) 117 if (copy_from_user(&cpmode, argp, sizeof(cpmode)))
111 return -EFAULT; 118 goto out;
112 119
113 mutex_lock(&nilfs->ns_mount_mutex); 120 mutex_lock(&nilfs->ns_mount_mutex);
121
114 nilfs_transaction_begin(inode->i_sb, &ti, 0); 122 nilfs_transaction_begin(inode->i_sb, &ti, 0);
115 ret = nilfs_cpfile_change_cpmode( 123 ret = nilfs_cpfile_change_cpmode(
116 cpfile, cpmode.cm_cno, cpmode.cm_mode); 124 cpfile, cpmode.cm_cno, cpmode.cm_mode);
117 if (unlikely(ret < 0)) { 125 if (unlikely(ret < 0))
118 nilfs_transaction_abort(inode->i_sb); 126 nilfs_transaction_abort(inode->i_sb);
119 mutex_unlock(&nilfs->ns_mount_mutex); 127 else
120 return ret; 128 nilfs_transaction_commit(inode->i_sb); /* never fails */
121 } 129
122 nilfs_transaction_commit(inode->i_sb); /* never fails */
123 mutex_unlock(&nilfs->ns_mount_mutex); 130 mutex_unlock(&nilfs->ns_mount_mutex);
131out:
132 mnt_drop_write(filp->f_path.mnt);
124 return ret; 133 return ret;
125} 134}
126 135
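
The modifying nilfs2 ioctls now share one pattern: pin write access to the mount for the whole operation so a read-only remount cannot race in after the capability check. The skeleton (illustrative; the error labels differ between the ioctls):

	ret = mnt_want_write(filp->f_path.mnt);
	if (ret)
		return ret;
	/* ... copy in arguments, run the transaction ... */
	mnt_drop_write(filp->f_path.mnt);
	return ret;
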
@@ -135,16 +144,23 @@ nilfs_ioctl_delete_checkpoint(struct inode *inode, struct file *filp,
135 144
136 if (!capable(CAP_SYS_ADMIN)) 145 if (!capable(CAP_SYS_ADMIN))
137 return -EPERM; 146 return -EPERM;
147
148 ret = mnt_want_write(filp->f_path.mnt);
149 if (ret)
150 return ret;
151
152 ret = -EFAULT;
138 if (copy_from_user(&cno, argp, sizeof(cno))) 153 if (copy_from_user(&cno, argp, sizeof(cno)))
139 return -EFAULT; 154 goto out;
140 155
141 nilfs_transaction_begin(inode->i_sb, &ti, 0); 156 nilfs_transaction_begin(inode->i_sb, &ti, 0);
142 ret = nilfs_cpfile_delete_checkpoint(cpfile, cno); 157 ret = nilfs_cpfile_delete_checkpoint(cpfile, cno);
143 if (unlikely(ret < 0)) { 158 if (unlikely(ret < 0))
144 nilfs_transaction_abort(inode->i_sb); 159 nilfs_transaction_abort(inode->i_sb);
145 return ret; 160 else
146 } 161 nilfs_transaction_commit(inode->i_sb); /* never fails */
147 nilfs_transaction_commit(inode->i_sb); /* never fails */ 162out:
163 mnt_drop_write(filp->f_path.mnt);
148 return ret; 164 return ret;
149} 165}
150 166
@@ -496,12 +512,19 @@ static int nilfs_ioctl_clean_segments(struct inode *inode, struct file *filp,
496 if (!capable(CAP_SYS_ADMIN)) 512 if (!capable(CAP_SYS_ADMIN))
497 return -EPERM; 513 return -EPERM;
498 514
515 ret = mnt_want_write(filp->f_path.mnt);
516 if (ret)
517 return ret;
518
519 ret = -EFAULT;
499 if (copy_from_user(argv, argp, sizeof(argv))) 520 if (copy_from_user(argv, argp, sizeof(argv)))
500 return -EFAULT; 521 goto out;
501 522
523 ret = -EINVAL;
502 nsegs = argv[4].v_nmembs; 524 nsegs = argv[4].v_nmembs;
503 if (argv[4].v_size != argsz[4]) 525 if (argv[4].v_size != argsz[4])
504 return -EINVAL; 526 goto out;
527
505 /* 528 /*
506 * argv[4] points to segment numbers this ioctl cleans. We 529 * argv[4] points to segment numbers this ioctl cleans. We
507 * use kmalloc() for its buffer because memory used for the 530 * use kmalloc() for its buffer because memory used for the
@@ -509,9 +532,10 @@ static int nilfs_ioctl_clean_segments(struct inode *inode, struct file *filp,
509 */ 532 */
510 kbufs[4] = memdup_user((void __user *)(unsigned long)argv[4].v_base, 533 kbufs[4] = memdup_user((void __user *)(unsigned long)argv[4].v_base,
511 nsegs * sizeof(__u64)); 534 nsegs * sizeof(__u64));
512 if (IS_ERR(kbufs[4])) 535 if (IS_ERR(kbufs[4])) {
513 return PTR_ERR(kbufs[4]); 536 ret = PTR_ERR(kbufs[4]);
514 537 goto out;
538 }
515 nilfs = NILFS_SB(inode->i_sb)->s_nilfs; 539 nilfs = NILFS_SB(inode->i_sb)->s_nilfs;
516 540
517 for (n = 0; n < 4; n++) { 541 for (n = 0; n < 4; n++) {
@@ -563,10 +587,12 @@ static int nilfs_ioctl_clean_segments(struct inode *inode, struct file *filp,
563 nilfs_remove_all_gcinode(nilfs); 587 nilfs_remove_all_gcinode(nilfs);
564 clear_nilfs_gc_running(nilfs); 588 clear_nilfs_gc_running(nilfs);
565 589
566 out_free: 590out_free:
567 while (--n >= 0) 591 while (--n >= 0)
568 vfree(kbufs[n]); 592 vfree(kbufs[n]);
569 kfree(kbufs[4]); 593 kfree(kbufs[4]);
594out:
595 mnt_drop_write(filp->f_path.mnt);
570 return ret; 596 return ret;
571} 597}
572 598
@@ -575,13 +601,17 @@ static int nilfs_ioctl_sync(struct inode *inode, struct file *filp,
575{ 601{
576 __u64 cno; 602 __u64 cno;
577 int ret; 603 int ret;
604 struct the_nilfs *nilfs;
578 605
579 ret = nilfs_construct_segment(inode->i_sb); 606 ret = nilfs_construct_segment(inode->i_sb);
580 if (ret < 0) 607 if (ret < 0)
581 return ret; 608 return ret;
582 609
583 if (argp != NULL) { 610 if (argp != NULL) {
584 cno = NILFS_SB(inode->i_sb)->s_nilfs->ns_cno - 1; 611 nilfs = NILFS_SB(inode->i_sb)->s_nilfs;
612 down_read(&nilfs->ns_segctor_sem);
613 cno = nilfs->ns_cno - 1;
614 up_read(&nilfs->ns_segctor_sem);
585 if (copy_to_user(argp, &cno, sizeof(cno))) 615 if (copy_to_user(argp, &cno, sizeof(cno)))
586 return -EFAULT; 616 return -EFAULT;
587 } 617 }
diff --git a/fs/nilfs2/namei.c b/fs/nilfs2/namei.c
index 07ba838ef089..ad6ed2cf19b4 100644
--- a/fs/nilfs2/namei.c
+++ b/fs/nilfs2/namei.c
@@ -67,7 +67,7 @@ nilfs_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
67 if (dentry->d_name.len > NILFS_NAME_LEN) 67 if (dentry->d_name.len > NILFS_NAME_LEN)
68 return ERR_PTR(-ENAMETOOLONG); 68 return ERR_PTR(-ENAMETOOLONG);
69 69
70 ino = nilfs_inode_by_name(dir, dentry); 70 ino = nilfs_inode_by_name(dir, &dentry->d_name);
71 inode = NULL; 71 inode = NULL;
72 if (ino) { 72 if (ino) {
73 inode = nilfs_iget(dir->i_sb, ino); 73 inode = nilfs_iget(dir->i_sb, ino);
@@ -81,10 +81,7 @@ struct dentry *nilfs_get_parent(struct dentry *child)
81{ 81{
82 unsigned long ino; 82 unsigned long ino;
83 struct inode *inode; 83 struct inode *inode;
84 struct dentry dotdot; 84 struct qstr dotdot = {.name = "..", .len = 2};
85
86 dotdot.d_name.name = "..";
87 dotdot.d_name.len = 2;
88 85
89 ino = nilfs_inode_by_name(child->d_inode, &dotdot); 86 ino = nilfs_inode_by_name(child->d_inode, &dotdot);
90 if (!ino) 87 if (!ino)
@@ -296,7 +293,7 @@ static int nilfs_do_unlink(struct inode *dir, struct dentry *dentry)
296 int err; 293 int err;
297 294
298 err = -ENOENT; 295 err = -ENOENT;
299 de = nilfs_find_entry(dir, dentry, &page); 296 de = nilfs_find_entry(dir, &dentry->d_name, &page);
300 if (!de) 297 if (!de)
301 goto out; 298 goto out;
302 299
@@ -389,7 +386,7 @@ static int nilfs_rename(struct inode *old_dir, struct dentry *old_dentry,
389 return err; 386 return err;
390 387
391 err = -ENOENT; 388 err = -ENOENT;
392 old_de = nilfs_find_entry(old_dir, old_dentry, &old_page); 389 old_de = nilfs_find_entry(old_dir, &old_dentry->d_name, &old_page);
393 if (!old_de) 390 if (!old_de)
394 goto out; 391 goto out;
395 392
@@ -409,7 +406,7 @@ static int nilfs_rename(struct inode *old_dir, struct dentry *old_dentry,
409 goto out_dir; 406 goto out_dir;
410 407
411 err = -ENOENT; 408 err = -ENOENT;
412 new_de = nilfs_find_entry(new_dir, new_dentry, &new_page); 409 new_de = nilfs_find_entry(new_dir, &new_dentry->d_name, &new_page);
413 if (!new_de) 410 if (!new_de)
414 goto out_dir; 411 goto out_dir;
415 inc_nlink(old_inode); 412 inc_nlink(old_inode);
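
Taking a qstr instead of a dentry lets callers that have no dentry build the name on the stack, which is exactly what the nilfs_get_parent() hunk above now does:

	struct qstr dotdot = { .name = "..", .len = 2 };
	ino_t ino = nilfs_inode_by_name(child->d_inode, &dotdot);
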
diff --git a/fs/nilfs2/nilfs.h b/fs/nilfs2/nilfs.h
index 4da6f67e9a91..8723e5bfd071 100644
--- a/fs/nilfs2/nilfs.h
+++ b/fs/nilfs2/nilfs.h
@@ -217,10 +217,10 @@ static inline int nilfs_init_acl(struct inode *inode, struct inode *dir)
217 217
218/* dir.c */ 218/* dir.c */
219extern int nilfs_add_link(struct dentry *, struct inode *); 219extern int nilfs_add_link(struct dentry *, struct inode *);
220extern ino_t nilfs_inode_by_name(struct inode *, struct dentry *); 220extern ino_t nilfs_inode_by_name(struct inode *, const struct qstr *);
221extern int nilfs_make_empty(struct inode *, struct inode *); 221extern int nilfs_make_empty(struct inode *, struct inode *);
222extern struct nilfs_dir_entry * 222extern struct nilfs_dir_entry *
223nilfs_find_entry(struct inode *, struct dentry *, struct page **); 223nilfs_find_entry(struct inode *, const struct qstr *, struct page **);
224extern int nilfs_delete_entry(struct nilfs_dir_entry *, struct page *); 224extern int nilfs_delete_entry(struct nilfs_dir_entry *, struct page *);
225extern int nilfs_empty_dir(struct inode *); 225extern int nilfs_empty_dir(struct inode *);
226extern struct nilfs_dir_entry *nilfs_dotdot(struct inode *, struct page **); 226extern struct nilfs_dir_entry *nilfs_dotdot(struct inode *, struct page **);
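
With the qstr-based prototypes above, callers name directory entries directly instead of fabricating a throwaway struct dentry, which is exactly what the nilfs_get_parent() hunk exploits. A sketch of a ".." lookup under the new signature (the function name is hypothetical):

    static ino_t example_parent_ino(struct inode *dir)
    {
            /* a bare qstr is enough to name the entry now */
            struct qstr dotdot = { .name = "..", .len = 2 };

            return nilfs_inode_by_name(dir, &dotdot);
    }
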
diff --git a/fs/nilfs2/recovery.c b/fs/nilfs2/recovery.c
index c9c96c7825dc..017bedc761a0 100644
--- a/fs/nilfs2/recovery.c
+++ b/fs/nilfs2/recovery.c
@@ -39,7 +39,6 @@ enum {
39 NILFS_SEG_FAIL_IO, 39 NILFS_SEG_FAIL_IO,
40 NILFS_SEG_FAIL_MAGIC, 40 NILFS_SEG_FAIL_MAGIC,
41 NILFS_SEG_FAIL_SEQ, 41 NILFS_SEG_FAIL_SEQ,
42 NILFS_SEG_FAIL_CHECKSUM_SEGSUM,
43 NILFS_SEG_FAIL_CHECKSUM_SUPER_ROOT, 42 NILFS_SEG_FAIL_CHECKSUM_SUPER_ROOT,
44 NILFS_SEG_FAIL_CHECKSUM_FULL, 43 NILFS_SEG_FAIL_CHECKSUM_FULL,
45 NILFS_SEG_FAIL_CONSISTENCY, 44 NILFS_SEG_FAIL_CONSISTENCY,
@@ -71,10 +70,6 @@ static int nilfs_warn_segment_error(int err)
71 printk(KERN_WARNING 70 printk(KERN_WARNING
72 "NILFS warning: Sequence number mismatch\n"); 71 "NILFS warning: Sequence number mismatch\n");
73 break; 72 break;
74 case NILFS_SEG_FAIL_CHECKSUM_SEGSUM:
75 printk(KERN_WARNING
76 "NILFS warning: Checksum error in segment summary\n");
77 break;
78 case NILFS_SEG_FAIL_CHECKSUM_SUPER_ROOT: 73 case NILFS_SEG_FAIL_CHECKSUM_SUPER_ROOT:
79 printk(KERN_WARNING 74 printk(KERN_WARNING
80 "NILFS warning: Checksum error in super root\n"); 75 "NILFS warning: Checksum error in super root\n");
@@ -206,19 +201,15 @@ int nilfs_read_super_root_block(struct super_block *sb, sector_t sr_block,
206 * @pseg_start: start disk block number of partial segment 201 * @pseg_start: start disk block number of partial segment
207 * @seg_seq: sequence number requested 202 * @seg_seq: sequence number requested
208 * @ssi: pointer to nilfs_segsum_info struct to store information 203 * @ssi: pointer to nilfs_segsum_info struct to store information
209 * @full_check: full check flag
210 * (0: only checks segment summary CRC, 1: data CRC)
211 */ 204 */
212static int 205static int
213load_segment_summary(struct nilfs_sb_info *sbi, sector_t pseg_start, 206load_segment_summary(struct nilfs_sb_info *sbi, sector_t pseg_start,
214 u64 seg_seq, struct nilfs_segsum_info *ssi, 207 u64 seg_seq, struct nilfs_segsum_info *ssi)
215 int full_check)
216{ 208{
217 struct buffer_head *bh_sum; 209 struct buffer_head *bh_sum;
218 struct nilfs_segment_summary *sum; 210 struct nilfs_segment_summary *sum;
219 unsigned long offset, nblock; 211 unsigned long nblock;
220 u64 check_bytes; 212 u32 crc;
221 u32 crc, crc_sum;
222 int ret = NILFS_SEG_FAIL_IO; 213 int ret = NILFS_SEG_FAIL_IO;
223 214
224 bh_sum = sb_bread(sbi->s_super, pseg_start); 215 bh_sum = sb_bread(sbi->s_super, pseg_start);
@@ -237,34 +228,24 @@ load_segment_summary(struct nilfs_sb_info *sbi, sector_t pseg_start,
237 ret = NILFS_SEG_FAIL_SEQ; 228 ret = NILFS_SEG_FAIL_SEQ;
238 goto failed; 229 goto failed;
239 } 230 }
240 if (full_check) {
241 offset = sizeof(sum->ss_datasum);
242 check_bytes =
243 ((u64)ssi->nblocks << sbi->s_super->s_blocksize_bits);
244 nblock = ssi->nblocks;
245 crc_sum = le32_to_cpu(sum->ss_datasum);
246 ret = NILFS_SEG_FAIL_CHECKSUM_FULL;
247 } else { /* only checks segment summary */
248 offset = sizeof(sum->ss_datasum) + sizeof(sum->ss_sumsum);
249 check_bytes = ssi->sumbytes;
250 nblock = ssi->nsumblk;
251 crc_sum = le32_to_cpu(sum->ss_sumsum);
252 ret = NILFS_SEG_FAIL_CHECKSUM_SEGSUM;
253 }
254 231
232 nblock = ssi->nblocks;
255 if (unlikely(nblock == 0 || 233 if (unlikely(nblock == 0 ||
256 nblock > sbi->s_nilfs->ns_blocks_per_segment)) { 234 nblock > sbi->s_nilfs->ns_blocks_per_segment)) {
257 /* This limits the number of blocks read in the CRC check */ 235 /* This limits the number of blocks read in the CRC check */
258 ret = NILFS_SEG_FAIL_CONSISTENCY; 236 ret = NILFS_SEG_FAIL_CONSISTENCY;
259 goto failed; 237 goto failed;
260 } 238 }
261 if (calc_crc_cont(sbi, bh_sum, &crc, offset, check_bytes, 239 if (calc_crc_cont(sbi, bh_sum, &crc, sizeof(sum->ss_datasum),
240 ((u64)nblock << sbi->s_super->s_blocksize_bits),
262 pseg_start, nblock)) { 241 pseg_start, nblock)) {
263 ret = NILFS_SEG_FAIL_IO; 242 ret = NILFS_SEG_FAIL_IO;
264 goto failed; 243 goto failed;
265 } 244 }
266 if (crc == crc_sum) 245 if (crc == le32_to_cpu(sum->ss_datasum))
267 ret = 0; 246 ret = 0;
247 else
248 ret = NILFS_SEG_FAIL_CHECKSUM_FULL;
268 failed: 249 failed:
269 brelse(bh_sum); 250 brelse(bh_sum);
270 out: 251 out:
@@ -598,7 +579,7 @@ static int nilfs_do_roll_forward(struct the_nilfs *nilfs,
598 579
599 while (segnum != ri->ri_segnum || pseg_start <= ri->ri_pseg_start) { 580 while (segnum != ri->ri_segnum || pseg_start <= ri->ri_pseg_start) {
600 581
601 ret = load_segment_summary(sbi, pseg_start, seg_seq, &ssi, 1); 582 ret = load_segment_summary(sbi, pseg_start, seg_seq, &ssi);
602 if (ret) { 583 if (ret) {
603 if (ret == NILFS_SEG_FAIL_IO) { 584 if (ret == NILFS_SEG_FAIL_IO) {
604 err = -EIO; 585 err = -EIO;
@@ -821,7 +802,7 @@ int nilfs_search_super_root(struct the_nilfs *nilfs, struct nilfs_sb_info *sbi,
821 802
822 for (;;) { 803 for (;;) {
823 /* Load segment summary */ 804 /* Load segment summary */
824 ret = load_segment_summary(sbi, pseg_start, seg_seq, &ssi, 1); 805 ret = load_segment_summary(sbi, pseg_start, seg_seq, &ssi);
825 if (ret) { 806 if (ret) {
826 if (ret == NILFS_SEG_FAIL_IO) 807 if (ret == NILFS_SEG_FAIL_IO)
827 goto failed; 808 goto failed;
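
Both call sites above interpret the helper's result codes the same way: only an I/O failure is fatal, while checksum and sequence mismatches merely end the scan. A condensed sketch of that mapping (hypothetical wrapper, for illustration only):

    static int example_load_pseg(struct nilfs_sb_info *sbi, sector_t pseg_start,
                                 u64 seg_seq, struct nilfs_segsum_info *ssi)
    {
            int ret = load_segment_summary(sbi, pseg_start, seg_seq, ssi);

            if (ret == NILFS_SEG_FAIL_IO)
                    return -EIO;    /* hard I/O error: abort recovery */
            return ret;             /* any other code just stops the scan */
    }
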
diff --git a/fs/nilfs2/segbuf.c b/fs/nilfs2/segbuf.c
index 645c78656aa0..ab56fe44e377 100644
--- a/fs/nilfs2/segbuf.c
+++ b/fs/nilfs2/segbuf.c
@@ -40,6 +40,11 @@ struct nilfs_write_info {
40}; 40};
41 41
42 42
43static int nilfs_segbuf_write(struct nilfs_segment_buffer *segbuf,
44 struct the_nilfs *nilfs);
45static int nilfs_segbuf_wait(struct nilfs_segment_buffer *segbuf);
46
47
43static struct kmem_cache *nilfs_segbuf_cachep; 48static struct kmem_cache *nilfs_segbuf_cachep;
44 49
45static void nilfs_segbuf_init_once(void *obj) 50static void nilfs_segbuf_init_once(void *obj)
@@ -302,6 +307,19 @@ void nilfs_truncate_logs(struct list_head *logs,
302 } 307 }
303} 308}
304 309
310int nilfs_write_logs(struct list_head *logs, struct the_nilfs *nilfs)
311{
312 struct nilfs_segment_buffer *segbuf;
313 int ret = 0;
314
315 list_for_each_entry(segbuf, logs, sb_list) {
316 ret = nilfs_segbuf_write(segbuf, nilfs);
317 if (ret)
318 break;
319 }
320 return ret;
321}
322
305int nilfs_wait_on_logs(struct list_head *logs) 323int nilfs_wait_on_logs(struct list_head *logs)
306{ 324{
307 struct nilfs_segment_buffer *segbuf; 325 struct nilfs_segment_buffer *segbuf;
diff --git a/fs/nilfs2/segbuf.h b/fs/nilfs2/segbuf.h
index 6af1630fb401..94dfd3517bc0 100644
--- a/fs/nilfs2/segbuf.h
+++ b/fs/nilfs2/segbuf.h
@@ -166,13 +166,10 @@ nilfs_segbuf_add_file_buffer(struct nilfs_segment_buffer *segbuf,
166 segbuf->sb_sum.nfileblk++; 166 segbuf->sb_sum.nfileblk++;
167} 167}
168 168
169int nilfs_segbuf_write(struct nilfs_segment_buffer *segbuf,
170 struct the_nilfs *nilfs);
171int nilfs_segbuf_wait(struct nilfs_segment_buffer *segbuf);
172
173void nilfs_clear_logs(struct list_head *logs); 169void nilfs_clear_logs(struct list_head *logs);
174void nilfs_truncate_logs(struct list_head *logs, 170void nilfs_truncate_logs(struct list_head *logs,
175 struct nilfs_segment_buffer *last); 171 struct nilfs_segment_buffer *last);
172int nilfs_write_logs(struct list_head *logs, struct the_nilfs *nilfs);
176int nilfs_wait_on_logs(struct list_head *logs); 173int nilfs_wait_on_logs(struct list_head *logs);
177 174
178static inline void nilfs_destroy_logs(struct list_head *logs) 175static inline void nilfs_destroy_logs(struct list_head *logs)
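
A usage sketch of the pairing these declarations suggest: submit a list of logs, then reap the completions with the existing wait helper. Hypothetical fragment, assuming only the prototypes above:

    static int example_flush_logs(struct list_head *logs,
                                  struct the_nilfs *nilfs)
    {
            int err = nilfs_write_logs(logs, nilfs);

            if (!err)
                    err = nilfs_wait_on_logs(logs); /* reap submitted BIOs */
            return err;
    }
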
diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
index 105b508b47a8..ada2f1b947a3 100644
--- a/fs/nilfs2/segment.c
+++ b/fs/nilfs2/segment.c
@@ -1764,14 +1764,9 @@ static int nilfs_segctor_prepare_write(struct nilfs_sc_info *sci,
1764static int nilfs_segctor_write(struct nilfs_sc_info *sci, 1764static int nilfs_segctor_write(struct nilfs_sc_info *sci,
1765 struct the_nilfs *nilfs) 1765 struct the_nilfs *nilfs)
1766{ 1766{
1767 struct nilfs_segment_buffer *segbuf; 1767 int ret;
1768 int ret = 0;
1769 1768
1770 list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) { 1769 ret = nilfs_write_logs(&sci->sc_segbufs, nilfs);
1771 ret = nilfs_segbuf_write(segbuf, nilfs);
1772 if (ret)
1773 break;
1774 }
1775 list_splice_tail_init(&sci->sc_segbufs, &sci->sc_write_logs); 1770 list_splice_tail_init(&sci->sc_segbufs, &sci->sc_write_logs);
1776 return ret; 1771 return ret;
1777} 1772}
@@ -1937,8 +1932,7 @@ static void nilfs_segctor_complete_write(struct nilfs_sc_info *sci)
1937{ 1932{
1938 struct nilfs_segment_buffer *segbuf; 1933 struct nilfs_segment_buffer *segbuf;
1939 struct page *bd_page = NULL, *fs_page = NULL; 1934 struct page *bd_page = NULL, *fs_page = NULL;
1940 struct nilfs_sb_info *sbi = sci->sc_sbi; 1935 struct the_nilfs *nilfs = sci->sc_sbi->s_nilfs;
1941 struct the_nilfs *nilfs = sbi->s_nilfs;
1942 int update_sr = (sci->sc_super_root != NULL); 1936 int update_sr = (sci->sc_super_root != NULL);
1943 1937
1944 list_for_each_entry(segbuf, &sci->sc_write_logs, sb_list) { 1938 list_for_each_entry(segbuf, &sci->sc_write_logs, sb_list) {
@@ -2020,7 +2014,7 @@ static void nilfs_segctor_complete_write(struct nilfs_sc_info *sci)
2020 if (update_sr) { 2014 if (update_sr) {
2021 nilfs_set_last_segment(nilfs, segbuf->sb_pseg_start, 2015 nilfs_set_last_segment(nilfs, segbuf->sb_pseg_start,
2022 segbuf->sb_sum.seg_seq, nilfs->ns_cno++); 2016 segbuf->sb_sum.seg_seq, nilfs->ns_cno++);
2023 sbi->s_super->s_dirt = 1; 2017 set_nilfs_sb_dirty(nilfs);
2024 2018
2025 clear_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags); 2019 clear_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags);
2026 clear_bit(NILFS_SC_DIRTY, &sci->sc_flags); 2020 clear_bit(NILFS_SC_DIRTY, &sci->sc_flags);
@@ -2425,43 +2419,43 @@ int nilfs_construct_dsync_segment(struct super_block *sb, struct inode *inode,
2425 return err; 2419 return err;
2426} 2420}
2427 2421
2428struct nilfs_segctor_req {
2429 int mode;
2430 __u32 seq_accepted;
2431 int sc_err; /* construction failure */
2432 int sb_err; /* super block writeback failure */
2433};
2434
2435#define FLUSH_FILE_BIT (0x1) /* data file only */ 2422#define FLUSH_FILE_BIT (0x1) /* data file only */
2436#define FLUSH_DAT_BIT (1 << NILFS_DAT_INO) /* DAT only */ 2423#define FLUSH_DAT_BIT (1 << NILFS_DAT_INO) /* DAT only */
2437 2424
2438static void nilfs_segctor_accept(struct nilfs_sc_info *sci, 2425/**
2439 struct nilfs_segctor_req *req) 2426 * nilfs_segctor_accept - record accepted sequence count of log-write requests
2427 * @sci: segment constructor object
2428 */
2429static void nilfs_segctor_accept(struct nilfs_sc_info *sci)
2440{ 2430{
2441 req->sc_err = req->sb_err = 0;
2442 spin_lock(&sci->sc_state_lock); 2431 spin_lock(&sci->sc_state_lock);
2443 req->seq_accepted = sci->sc_seq_request; 2432 sci->sc_seq_accepted = sci->sc_seq_request;
2444 spin_unlock(&sci->sc_state_lock); 2433 spin_unlock(&sci->sc_state_lock);
2445 2434
2446 if (sci->sc_timer) 2435 if (sci->sc_timer)
2447 del_timer_sync(sci->sc_timer); 2436 del_timer_sync(sci->sc_timer);
2448} 2437}
2449 2438
2450static void nilfs_segctor_notify(struct nilfs_sc_info *sci, 2439/**
2451 struct nilfs_segctor_req *req) 2440 * nilfs_segctor_notify - notify the result of request to caller threads
2441 * @sci: segment constructor object
2442 * @mode: mode of log forming
2443 * @err: error code to be notified
2444 */
2445static void nilfs_segctor_notify(struct nilfs_sc_info *sci, int mode, int err)
2452{ 2446{
2453 /* Clear requests (even when the construction failed) */ 2447 /* Clear requests (even when the construction failed) */
2454 spin_lock(&sci->sc_state_lock); 2448 spin_lock(&sci->sc_state_lock);
2455 2449
2456 if (req->mode == SC_LSEG_SR) { 2450 if (mode == SC_LSEG_SR) {
2457 sci->sc_state &= ~NILFS_SEGCTOR_COMMIT; 2451 sci->sc_state &= ~NILFS_SEGCTOR_COMMIT;
2458 sci->sc_seq_done = req->seq_accepted; 2452 sci->sc_seq_done = sci->sc_seq_accepted;
2459 nilfs_segctor_wakeup(sci, req->sc_err ? : req->sb_err); 2453 nilfs_segctor_wakeup(sci, err);
2460 sci->sc_flush_request = 0; 2454 sci->sc_flush_request = 0;
2461 } else { 2455 } else {
2462 if (req->mode == SC_FLUSH_FILE) 2456 if (mode == SC_FLUSH_FILE)
2463 sci->sc_flush_request &= ~FLUSH_FILE_BIT; 2457 sci->sc_flush_request &= ~FLUSH_FILE_BIT;
2464 else if (req->mode == SC_FLUSH_DAT) 2458 else if (mode == SC_FLUSH_DAT)
2465 sci->sc_flush_request &= ~FLUSH_DAT_BIT; 2459 sci->sc_flush_request &= ~FLUSH_DAT_BIT;
2466 2460
2467 /* re-enable timer if checkpoint creation was not done */ 2461 /* re-enable timer if checkpoint creation was not done */
@@ -2472,30 +2466,37 @@ static void nilfs_segctor_notify(struct nilfs_sc_info *sci,
2472 spin_unlock(&sci->sc_state_lock); 2466 spin_unlock(&sci->sc_state_lock);
2473} 2467}
2474 2468
2475static int nilfs_segctor_construct(struct nilfs_sc_info *sci, 2469/**
2476 struct nilfs_segctor_req *req) 2470 * nilfs_segctor_construct - form logs and write them to disk
2471 * @sci: segment constructor object
2472 * @mode: mode of log forming
2473 */
2474static int nilfs_segctor_construct(struct nilfs_sc_info *sci, int mode)
2477{ 2475{
2478 struct nilfs_sb_info *sbi = sci->sc_sbi; 2476 struct nilfs_sb_info *sbi = sci->sc_sbi;
2479 struct the_nilfs *nilfs = sbi->s_nilfs; 2477 struct the_nilfs *nilfs = sbi->s_nilfs;
2480 int err = 0; 2478 int err = 0;
2481 2479
2480 nilfs_segctor_accept(sci);
2481
2482 if (nilfs_discontinued(nilfs)) 2482 if (nilfs_discontinued(nilfs))
2483 req->mode = SC_LSEG_SR; 2483 mode = SC_LSEG_SR;
2484 if (!nilfs_segctor_confirm(sci)) { 2484 if (!nilfs_segctor_confirm(sci))
2485 err = nilfs_segctor_do_construct(sci, req->mode); 2485 err = nilfs_segctor_do_construct(sci, mode);
2486 req->sc_err = err; 2486
2487 }
2488 if (likely(!err)) { 2487 if (likely(!err)) {
2489 if (req->mode != SC_FLUSH_DAT) 2488 if (mode != SC_FLUSH_DAT)
2490 atomic_set(&nilfs->ns_ndirtyblks, 0); 2489 atomic_set(&nilfs->ns_ndirtyblks, 0);
2491 if (test_bit(NILFS_SC_SUPER_ROOT, &sci->sc_flags) && 2490 if (test_bit(NILFS_SC_SUPER_ROOT, &sci->sc_flags) &&
2492 nilfs_discontinued(nilfs)) { 2491 nilfs_discontinued(nilfs)) {
2493 down_write(&nilfs->ns_sem); 2492 down_write(&nilfs->ns_sem);
2494 req->sb_err = nilfs_commit_super(sbi, 2493 err = nilfs_commit_super(
2495 nilfs_altsb_need_update(nilfs)); 2494 sbi, nilfs_altsb_need_update(nilfs));
2496 up_write(&nilfs->ns_sem); 2495 up_write(&nilfs->ns_sem);
2497 } 2496 }
2498 } 2497 }
2498
2499 nilfs_segctor_notify(sci, mode, err);
2499 return err; 2500 return err;
2500} 2501}
2501 2502
@@ -2526,7 +2527,6 @@ int nilfs_clean_segments(struct super_block *sb, struct nilfs_argv *argv,
2526 struct nilfs_sc_info *sci = NILFS_SC(sbi); 2527 struct nilfs_sc_info *sci = NILFS_SC(sbi);
2527 struct the_nilfs *nilfs = sbi->s_nilfs; 2528 struct the_nilfs *nilfs = sbi->s_nilfs;
2528 struct nilfs_transaction_info ti; 2529 struct nilfs_transaction_info ti;
2529 struct nilfs_segctor_req req = { .mode = SC_LSEG_SR };
2530 int err; 2530 int err;
2531 2531
2532 if (unlikely(!sci)) 2532 if (unlikely(!sci))
@@ -2547,10 +2547,8 @@ int nilfs_clean_segments(struct super_block *sb, struct nilfs_argv *argv,
2547 list_splice_tail_init(&nilfs->ns_gc_inodes, &sci->sc_gc_inodes); 2547 list_splice_tail_init(&nilfs->ns_gc_inodes, &sci->sc_gc_inodes);
2548 2548
2549 for (;;) { 2549 for (;;) {
2550 nilfs_segctor_accept(sci, &req); 2550 err = nilfs_segctor_construct(sci, SC_LSEG_SR);
2551 err = nilfs_segctor_construct(sci, &req);
2552 nilfs_remove_written_gcinodes(nilfs, &sci->sc_gc_inodes); 2551 nilfs_remove_written_gcinodes(nilfs, &sci->sc_gc_inodes);
2553 nilfs_segctor_notify(sci, &req);
2554 2552
2555 if (likely(!err)) 2553 if (likely(!err))
2556 break; 2554 break;
@@ -2560,6 +2558,16 @@ int nilfs_clean_segments(struct super_block *sb, struct nilfs_argv *argv,
2560 set_current_state(TASK_INTERRUPTIBLE); 2558 set_current_state(TASK_INTERRUPTIBLE);
2561 schedule_timeout(sci->sc_interval); 2559 schedule_timeout(sci->sc_interval);
2562 } 2560 }
2561 if (nilfs_test_opt(sbi, DISCARD)) {
2562 int ret = nilfs_discard_segments(nilfs, sci->sc_freesegs,
2563 sci->sc_nfreesegs);
2564 if (ret) {
2565 printk(KERN_WARNING
2566 "NILFS warning: error %d on discard request, "
2567 "turning discards off for the device\n", ret);
2568 nilfs_clear_opt(sbi, DISCARD);
2569 }
2570 }
2563 2571
2564 out_unlock: 2572 out_unlock:
2565 sci->sc_freesegs = NULL; 2573 sci->sc_freesegs = NULL;
@@ -2573,13 +2581,9 @@ static void nilfs_segctor_thread_construct(struct nilfs_sc_info *sci, int mode)
2573{ 2581{
2574 struct nilfs_sb_info *sbi = sci->sc_sbi; 2582 struct nilfs_sb_info *sbi = sci->sc_sbi;
2575 struct nilfs_transaction_info ti; 2583 struct nilfs_transaction_info ti;
2576 struct nilfs_segctor_req req = { .mode = mode };
2577 2584
2578 nilfs_transaction_lock(sbi, &ti, 0); 2585 nilfs_transaction_lock(sbi, &ti, 0);
2579 2586 nilfs_segctor_construct(sci, mode);
2580 nilfs_segctor_accept(sci, &req);
2581 nilfs_segctor_construct(sci, &req);
2582 nilfs_segctor_notify(sci, &req);
2583 2587
2584 /* 2588 /*
2585 * Unclosed segment should be retried. We do this using sc_timer. 2589 * Unclosed segment should be retried. We do this using sc_timer.
@@ -2635,6 +2639,7 @@ static int nilfs_segctor_flush_mode(struct nilfs_sc_info *sci)
2635static int nilfs_segctor_thread(void *arg) 2639static int nilfs_segctor_thread(void *arg)
2636{ 2640{
2637 struct nilfs_sc_info *sci = (struct nilfs_sc_info *)arg; 2641 struct nilfs_sc_info *sci = (struct nilfs_sc_info *)arg;
2642 struct the_nilfs *nilfs = sci->sc_sbi->s_nilfs;
2638 struct timer_list timer; 2643 struct timer_list timer;
2639 int timeout = 0; 2644 int timeout = 0;
2640 2645
@@ -2680,7 +2685,6 @@ static int nilfs_segctor_thread(void *arg)
2680 } else { 2685 } else {
2681 DEFINE_WAIT(wait); 2686 DEFINE_WAIT(wait);
2682 int should_sleep = 1; 2687 int should_sleep = 1;
2683 struct the_nilfs *nilfs;
2684 2688
2685 prepare_to_wait(&sci->sc_wait_daemon, &wait, 2689 prepare_to_wait(&sci->sc_wait_daemon, &wait,
2686 TASK_INTERRUPTIBLE); 2690 TASK_INTERRUPTIBLE);
@@ -2701,8 +2705,8 @@ static int nilfs_segctor_thread(void *arg)
2701 finish_wait(&sci->sc_wait_daemon, &wait); 2705 finish_wait(&sci->sc_wait_daemon, &wait);
2702 timeout = ((sci->sc_state & NILFS_SEGCTOR_COMMIT) && 2706 timeout = ((sci->sc_state & NILFS_SEGCTOR_COMMIT) &&
2703 time_after_eq(jiffies, sci->sc_timer->expires)); 2707 time_after_eq(jiffies, sci->sc_timer->expires));
2704 nilfs = sci->sc_sbi->s_nilfs; 2708
2705 if (sci->sc_super->s_dirt && nilfs_sb_need_update(nilfs)) 2709 if (nilfs_sb_dirty(nilfs) && nilfs_sb_need_update(nilfs))
2706 set_nilfs_discontinued(nilfs); 2710 set_nilfs_discontinued(nilfs);
2707 } 2711 }
2708 goto loop; 2712 goto loop;
@@ -2797,12 +2801,9 @@ static void nilfs_segctor_write_out(struct nilfs_sc_info *sci)
2797 do { 2801 do {
2798 struct nilfs_sb_info *sbi = sci->sc_sbi; 2802 struct nilfs_sb_info *sbi = sci->sc_sbi;
2799 struct nilfs_transaction_info ti; 2803 struct nilfs_transaction_info ti;
2800 struct nilfs_segctor_req req = { .mode = SC_LSEG_SR };
2801 2804
2802 nilfs_transaction_lock(sbi, &ti, 0); 2805 nilfs_transaction_lock(sbi, &ti, 0);
2803 nilfs_segctor_accept(sci, &req); 2806 ret = nilfs_segctor_construct(sci, SC_LSEG_SR);
2804 ret = nilfs_segctor_construct(sci, &req);
2805 nilfs_segctor_notify(sci, &req);
2806 nilfs_transaction_unlock(sbi); 2807 nilfs_transaction_unlock(sbi);
2807 2808
2808 } while (ret && retrycount-- > 0); 2809 } while (ret && retrycount-- > 0);
@@ -2865,8 +2866,15 @@ int nilfs_attach_segment_constructor(struct nilfs_sb_info *sbi)
2865 struct the_nilfs *nilfs = sbi->s_nilfs; 2866 struct the_nilfs *nilfs = sbi->s_nilfs;
2866 int err; 2867 int err;
2867 2868
2868 /* Each field of nilfs_segctor is cleared through the initialization 2869 if (NILFS_SC(sbi)) {
2869 of super-block info */ 2870 /*
2871 * This happens if the filesystem was remounted
2872 * read/write after nilfs_error degenerated it into a
2873 * read-only mount.
2874 */
2875 nilfs_detach_segment_constructor(sbi);
2876 }
2877
2870 sbi->s_sc_info = nilfs_segctor_new(sbi); 2878 sbi->s_sc_info = nilfs_segctor_new(sbi);
2871 if (!sbi->s_sc_info) 2879 if (!sbi->s_sc_info)
2872 return -ENOMEM; 2880 return -ENOMEM;
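
After this refactor the accept/construct/notify triple collapses into one call and the nilfs_segctor_req structure disappears; callers pass only the mode. A sketch of the new calling convention, mirroring nilfs_segctor_write_out() above (the wrapper name is hypothetical):

    static int example_sync_construct(struct nilfs_sc_info *sci,
                                      struct nilfs_sb_info *sbi)
    {
            struct nilfs_transaction_info ti;
            int ret;

            nilfs_transaction_lock(sbi, &ti, 0);
            /* accept and notify bookkeeping now happen inside construct */
            ret = nilfs_segctor_construct(sci, SC_LSEG_SR);
            nilfs_transaction_unlock(sbi);
            return ret;
    }
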
diff --git a/fs/nilfs2/segment.h b/fs/nilfs2/segment.h
index 3d3ab2f9864c..3155e0c7f415 100644
--- a/fs/nilfs2/segment.h
+++ b/fs/nilfs2/segment.h
@@ -116,6 +116,7 @@ struct nilfs_segsum_pointer {
116 * @sc_wait_daemon: Daemon wait queue 116 * @sc_wait_daemon: Daemon wait queue
117 * @sc_wait_task: Start/end wait queue to control segctord task 117 * @sc_wait_task: Start/end wait queue to control segctord task
118 * @sc_seq_request: Request counter 118 * @sc_seq_request: Request counter
119 * @sc_seq_accepted: Accepted request count
119 * @sc_seq_done: Completion counter 120 * @sc_seq_done: Completion counter
120 * @sc_sync: Request of explicit sync operation 121 * @sc_sync: Request of explicit sync operation
121 * @sc_interval: Timeout value of background construction 122 * @sc_interval: Timeout value of background construction
@@ -169,6 +170,7 @@ struct nilfs_sc_info {
169 wait_queue_head_t sc_wait_task; 170 wait_queue_head_t sc_wait_task;
170 171
171 __u32 sc_seq_request; 172 __u32 sc_seq_request;
173 __u32 sc_seq_accepted;
172 __u32 sc_seq_done; 174 __u32 sc_seq_done;
173 175
174 int sc_sync; 176 int sc_sync;
diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c
index 8173faee31e6..92579cc4c935 100644
--- a/fs/nilfs2/super.c
+++ b/fs/nilfs2/super.c
@@ -96,9 +96,6 @@ void nilfs_error(struct super_block *sb, const char *function,
96 if (!(sb->s_flags & MS_RDONLY)) { 96 if (!(sb->s_flags & MS_RDONLY)) {
97 struct the_nilfs *nilfs = sbi->s_nilfs; 97 struct the_nilfs *nilfs = sbi->s_nilfs;
98 98
99 if (!nilfs_test_opt(sbi, ERRORS_CONT))
100 nilfs_detach_segment_constructor(sbi);
101
102 down_write(&nilfs->ns_sem); 99 down_write(&nilfs->ns_sem);
103 if (!(nilfs->ns_mount_state & NILFS_ERROR_FS)) { 100 if (!(nilfs->ns_mount_state & NILFS_ERROR_FS)) {
104 nilfs->ns_mount_state |= NILFS_ERROR_FS; 101 nilfs->ns_mount_state |= NILFS_ERROR_FS;
@@ -301,7 +298,7 @@ int nilfs_commit_super(struct nilfs_sb_info *sbi, int dupsb)
301 memcpy(sbp[1], sbp[0], nilfs->ns_sbsize); 298 memcpy(sbp[1], sbp[0], nilfs->ns_sbsize);
302 nilfs->ns_sbwtime[1] = t; 299 nilfs->ns_sbwtime[1] = t;
303 } 300 }
304 sbi->s_super->s_dirt = 0; 301 clear_nilfs_sb_dirty(nilfs);
305 return nilfs_sync_super(sbi, dupsb); 302 return nilfs_sync_super(sbi, dupsb);
306} 303}
307 304
@@ -345,7 +342,7 @@ static int nilfs_sync_fs(struct super_block *sb, int wait)
345 err = nilfs_construct_segment(sb); 342 err = nilfs_construct_segment(sb);
346 343
347 down_write(&nilfs->ns_sem); 344 down_write(&nilfs->ns_sem);
348 if (sb->s_dirt) 345 if (nilfs_sb_dirty(nilfs))
349 nilfs_commit_super(sbi, 1); 346 nilfs_commit_super(sbi, 1);
350 up_write(&nilfs->ns_sem); 347 up_write(&nilfs->ns_sem);
351 348
@@ -481,6 +478,8 @@ static int nilfs_show_options(struct seq_file *seq, struct vfsmount *vfs)
481 seq_printf(seq, ",order=strict"); 478 seq_printf(seq, ",order=strict");
482 if (nilfs_test_opt(sbi, NORECOVERY)) 479 if (nilfs_test_opt(sbi, NORECOVERY))
483 seq_printf(seq, ",norecovery"); 480 seq_printf(seq, ",norecovery");
481 if (nilfs_test_opt(sbi, DISCARD))
482 seq_printf(seq, ",discard");
484 483
485 return 0; 484 return 0;
486} 485}
@@ -550,7 +549,7 @@ static const struct export_operations nilfs_export_ops = {
550enum { 549enum {
551 Opt_err_cont, Opt_err_panic, Opt_err_ro, 550 Opt_err_cont, Opt_err_panic, Opt_err_ro,
552 Opt_nobarrier, Opt_snapshot, Opt_order, Opt_norecovery, 551 Opt_nobarrier, Opt_snapshot, Opt_order, Opt_norecovery,
553 Opt_err, 552 Opt_discard, Opt_err,
554}; 553};
555 554
556static match_table_t tokens = { 555static match_table_t tokens = {
@@ -561,6 +560,7 @@ static match_table_t tokens = {
561 {Opt_snapshot, "cp=%u"}, 560 {Opt_snapshot, "cp=%u"},
562 {Opt_order, "order=%s"}, 561 {Opt_order, "order=%s"},
563 {Opt_norecovery, "norecovery"}, 562 {Opt_norecovery, "norecovery"},
563 {Opt_discard, "discard"},
564 {Opt_err, NULL} 564 {Opt_err, NULL}
565}; 565};
566 566
@@ -614,6 +614,9 @@ static int parse_options(char *options, struct super_block *sb)
614 case Opt_norecovery: 614 case Opt_norecovery:
615 nilfs_set_opt(sbi, NORECOVERY); 615 nilfs_set_opt(sbi, NORECOVERY);
616 break; 616 break;
617 case Opt_discard:
618 nilfs_set_opt(sbi, DISCARD);
619 break;
617 default: 620 default:
618 printk(KERN_ERR 621 printk(KERN_ERR
619 "NILFS: Unrecognized mount option \"%s\"\n", p); 622 "NILFS: Unrecognized mount option \"%s\"\n", p);
diff --git a/fs/nilfs2/the_nilfs.c b/fs/nilfs2/the_nilfs.c
index 6241e1722efc..92733d5651d2 100644
--- a/fs/nilfs2/the_nilfs.c
+++ b/fs/nilfs2/the_nilfs.c
@@ -646,6 +646,44 @@ int init_nilfs(struct the_nilfs *nilfs, struct nilfs_sb_info *sbi, char *data)
646 goto out; 646 goto out;
647} 647}
648 648
649int nilfs_discard_segments(struct the_nilfs *nilfs, __u64 *segnump,
650 size_t nsegs)
651{
652 sector_t seg_start, seg_end;
653 sector_t start = 0, nblocks = 0;
654 unsigned int sects_per_block;
655 __u64 *sn;
656 int ret = 0;
657
658 sects_per_block = (1 << nilfs->ns_blocksize_bits) /
659 bdev_logical_block_size(nilfs->ns_bdev);
660 for (sn = segnump; sn < segnump + nsegs; sn++) {
661 nilfs_get_segment_range(nilfs, *sn, &seg_start, &seg_end);
662
663 if (!nblocks) {
664 start = seg_start;
665 nblocks = seg_end - seg_start + 1;
666 } else if (start + nblocks == seg_start) {
667 nblocks += seg_end - seg_start + 1;
668 } else {
669 ret = blkdev_issue_discard(nilfs->ns_bdev,
670 start * sects_per_block,
671 nblocks * sects_per_block,
672 GFP_NOFS,
673 DISCARD_FL_BARRIER);
674 if (ret < 0)
675 return ret;
676 nblocks = 0;
677 }
678 }
679 if (nblocks)
680 ret = blkdev_issue_discard(nilfs->ns_bdev,
681 start * sects_per_block,
682 nblocks * sects_per_block,
683 GFP_NOFS, DISCARD_FL_BARRIER);
684 return ret;
685}
686
649int nilfs_count_free_blocks(struct the_nilfs *nilfs, sector_t *nblocks) 687int nilfs_count_free_blocks(struct the_nilfs *nilfs, sector_t *nblocks)
650{ 688{
651 struct inode *dat = nilfs_dat_inode(nilfs); 689 struct inode *dat = nilfs_dat_inode(nilfs);
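
A worked example of the coalescing in nilfs_discard_segments() above, under assumed geometry (segments start at block 0, 8 blocks per segment, 4 KiB blocks on a 512-byte-sector device, so sects_per_block = 8): discarding segments {3, 4, 7} issues two requests rather than three.

    /*
     * segment 3 -> blocks 24..31 \  contiguous: one discard of sectors
     * segment 4 -> blocks 32..39 /  192..319 (start 192, length 128)
     * segment 7 -> blocks 56..63 -> second discard, sectors 448..511
     */
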
diff --git a/fs/nilfs2/the_nilfs.h b/fs/nilfs2/the_nilfs.h
index 589786e33464..e9795f1724d7 100644
--- a/fs/nilfs2/the_nilfs.h
+++ b/fs/nilfs2/the_nilfs.h
@@ -38,6 +38,7 @@ enum {
38 the latest checkpoint was loaded */ 38 the latest checkpoint was loaded */
39 THE_NILFS_DISCONTINUED, /* 'next' pointer chain has broken */ 39 THE_NILFS_DISCONTINUED, /* 'next' pointer chain has broken */
40 THE_NILFS_GC_RUNNING, /* gc process is running */ 40 THE_NILFS_GC_RUNNING, /* gc process is running */
41 THE_NILFS_SB_DIRTY, /* super block is dirty */
41}; 42};
42 43
43/** 44/**
@@ -197,6 +198,7 @@ THE_NILFS_FNS(INIT, init)
197THE_NILFS_FNS(LOADED, loaded) 198THE_NILFS_FNS(LOADED, loaded)
198THE_NILFS_FNS(DISCONTINUED, discontinued) 199THE_NILFS_FNS(DISCONTINUED, discontinued)
199THE_NILFS_FNS(GC_RUNNING, gc_running) 200THE_NILFS_FNS(GC_RUNNING, gc_running)
201THE_NILFS_FNS(SB_DIRTY, sb_dirty)
200 202
201/* Minimum interval of periodical update of superblocks (in seconds) */ 203/* Minimum interval of periodical update of superblocks (in seconds) */
202#define NILFS_SB_FREQ 10 204#define NILFS_SB_FREQ 10
@@ -221,6 +223,7 @@ struct the_nilfs *find_or_create_nilfs(struct block_device *);
221void put_nilfs(struct the_nilfs *); 223void put_nilfs(struct the_nilfs *);
222int init_nilfs(struct the_nilfs *, struct nilfs_sb_info *, char *); 224int init_nilfs(struct the_nilfs *, struct nilfs_sb_info *, char *);
223int load_nilfs(struct the_nilfs *, struct nilfs_sb_info *); 225int load_nilfs(struct the_nilfs *, struct nilfs_sb_info *);
226int nilfs_discard_segments(struct the_nilfs *, __u64 *, size_t);
224int nilfs_count_free_blocks(struct the_nilfs *, sector_t *); 227int nilfs_count_free_blocks(struct the_nilfs *, sector_t *);
225struct nilfs_sb_info *nilfs_find_sbinfo(struct the_nilfs *, int, __u64); 228struct nilfs_sb_info *nilfs_find_sbinfo(struct the_nilfs *, int, __u64);
226int nilfs_checkpoint_is_mounted(struct the_nilfs *, __u64, int); 229int nilfs_checkpoint_is_mounted(struct the_nilfs *, __u64, int);
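
THE_NILFS_FNS(SB_DIRTY, sb_dirty) generates the accessors this patch uses in segment.c and super.c. In sketch form (assuming the macro operates on the ns_flags word, as its siblings do), the expansion is roughly:

    static inline void set_nilfs_sb_dirty(struct the_nilfs *nilfs)
    {
            set_bit(THE_NILFS_SB_DIRTY, &nilfs->ns_flags);
    }

    static inline void clear_nilfs_sb_dirty(struct the_nilfs *nilfs)
    {
            clear_bit(THE_NILFS_SB_DIRTY, &nilfs->ns_flags);
    }

    static inline int nilfs_sb_dirty(struct the_nilfs *nilfs)
    {
            return test_bit(THE_NILFS_SB_DIRTY, &nilfs->ns_flags);
    }
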
diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c
index a94e8bd8eb1f..472cdf29ef82 100644
--- a/fs/notify/inotify/inotify_user.c
+++ b/fs/notify/inotify/inotify_user.c
@@ -29,14 +29,12 @@
29#include <linux/init.h> /* module_init */ 29#include <linux/init.h> /* module_init */
30#include <linux/inotify.h> 30#include <linux/inotify.h>
31#include <linux/kernel.h> /* roundup() */ 31#include <linux/kernel.h> /* roundup() */
32#include <linux/magic.h> /* superblock magic number */
33#include <linux/mount.h> /* mntget */
34#include <linux/namei.h> /* LOOKUP_FOLLOW */ 32#include <linux/namei.h> /* LOOKUP_FOLLOW */
35#include <linux/path.h> /* struct path */
36#include <linux/sched.h> /* struct user */ 33#include <linux/sched.h> /* struct user */
37#include <linux/slab.h> /* struct kmem_cache */ 34#include <linux/slab.h> /* struct kmem_cache */
38#include <linux/syscalls.h> 35#include <linux/syscalls.h>
39#include <linux/types.h> 36#include <linux/types.h>
37#include <linux/anon_inodes.h>
40#include <linux/uaccess.h> 38#include <linux/uaccess.h>
41#include <linux/poll.h> 39#include <linux/poll.h>
42#include <linux/wait.h> 40#include <linux/wait.h>
@@ -45,8 +43,6 @@
45 43
46#include <asm/ioctls.h> 44#include <asm/ioctls.h>
47 45
48static struct vfsmount *inotify_mnt __read_mostly;
49
50/* these are configurable via /proc/sys/fs/inotify/ */ 46/* these are configurable via /proc/sys/fs/inotify/ */
51static int inotify_max_user_instances __read_mostly; 47static int inotify_max_user_instances __read_mostly;
52static int inotify_max_queued_events __read_mostly; 48static int inotify_max_queued_events __read_mostly;
@@ -645,9 +641,7 @@ SYSCALL_DEFINE1(inotify_init1, int, flags)
645{ 641{
646 struct fsnotify_group *group; 642 struct fsnotify_group *group;
647 struct user_struct *user; 643 struct user_struct *user;
648 struct file *filp; 644 int ret;
649 struct path path;
650 int fd, ret;
651 645
652 /* Check the IN_* constants for consistency. */ 646 /* Check the IN_* constants for consistency. */
653 BUILD_BUG_ON(IN_CLOEXEC != O_CLOEXEC); 647 BUILD_BUG_ON(IN_CLOEXEC != O_CLOEXEC);
@@ -656,10 +650,6 @@ SYSCALL_DEFINE1(inotify_init1, int, flags)
656 if (flags & ~(IN_CLOEXEC | IN_NONBLOCK)) 650 if (flags & ~(IN_CLOEXEC | IN_NONBLOCK))
657 return -EINVAL; 651 return -EINVAL;
658 652
659 fd = get_unused_fd_flags(flags & O_CLOEXEC);
660 if (fd < 0)
661 return fd;
662
663 user = get_current_user(); 653 user = get_current_user();
664 if (unlikely(atomic_read(&user->inotify_devs) >= 654 if (unlikely(atomic_read(&user->inotify_devs) >=
665 inotify_max_user_instances)) { 655 inotify_max_user_instances)) {
@@ -676,27 +666,14 @@ SYSCALL_DEFINE1(inotify_init1, int, flags)
676 666
677 atomic_inc(&user->inotify_devs); 667 atomic_inc(&user->inotify_devs);
678 668
679 path.mnt = inotify_mnt; 669 ret = anon_inode_getfd("inotify", &inotify_fops, group,
680 path.dentry = inotify_mnt->mnt_root; 670 O_RDONLY | flags);
681 path_get(&path); 671 if (ret >= 0)
682 filp = alloc_file(&path, FMODE_READ, &inotify_fops); 672 return ret;
683 if (!filp)
684 goto Enfile;
685 673
686 filp->f_flags = O_RDONLY | (flags & O_NONBLOCK);
687 filp->private_data = group;
688
689 fd_install(fd, filp);
690
691 return fd;
692
693Enfile:
694 ret = -ENFILE;
695 path_put(&path);
696 atomic_dec(&user->inotify_devs); 674 atomic_dec(&user->inotify_devs);
697out_free_uid: 675out_free_uid:
698 free_uid(user); 676 free_uid(user);
699 put_unused_fd(fd);
700 return ret; 677 return ret;
701} 678}
702 679
@@ -783,20 +760,6 @@ out:
783 return ret; 760 return ret;
784} 761}
785 762
786static int
787inotify_get_sb(struct file_system_type *fs_type, int flags,
788 const char *dev_name, void *data, struct vfsmount *mnt)
789{
790 return get_sb_pseudo(fs_type, "inotify", NULL,
791 INOTIFYFS_SUPER_MAGIC, mnt);
792}
793
794static struct file_system_type inotify_fs_type = {
795 .name = "inotifyfs",
796 .get_sb = inotify_get_sb,
797 .kill_sb = kill_anon_super,
798};
799
800/* 763/*
801 * inotify_user_setup - Our initialization function. Note that we cannot return 764
802 * error because we have compiled-in VFS hooks. So an (unlikely) failure here 765 * error because we have compiled-in VFS hooks. So an (unlikely) failure here
@@ -804,16 +767,6 @@ static struct file_system_type inotify_fs_type = {
804 */ 767 */
805static int __init inotify_user_setup(void) 768static int __init inotify_user_setup(void)
806{ 769{
807 int ret;
808
809 ret = register_filesystem(&inotify_fs_type);
810 if (unlikely(ret))
811 panic("inotify: register_filesystem returned %d!\n", ret);
812
813 inotify_mnt = kern_mount(&inotify_fs_type);
814 if (IS_ERR(inotify_mnt))
815 panic("inotify: kern_mount ret %ld!\n", PTR_ERR(inotify_mnt));
816
817 inotify_inode_mark_cachep = KMEM_CACHE(inotify_inode_mark_entry, SLAB_PANIC); 770 inotify_inode_mark_cachep = KMEM_CACHE(inotify_inode_mark_entry, SLAB_PANIC);
818 event_priv_cachep = KMEM_CACHE(inotify_event_private_data, SLAB_PANIC); 771 event_priv_cachep = KMEM_CACHE(inotify_event_private_data, SLAB_PANIC);
819 772
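
The conversion above trades a private pseudo-filesystem for the shared anonymous-inode machinery: one anon_inode_getfd() call now covers what previously took several steps plus mount bookkeeping. For comparison, the pre-patch sequence it subsumes, in sketch form (stripped of the pseudo-mount path handling):

    /*
     *  fd = get_unused_fd_flags(flags & O_CLOEXEC);
     *  filp = alloc_file(&path, FMODE_READ, &inotify_fops);
     *  filp->f_flags = O_RDONLY | (flags & O_NONBLOCK);
     *  filp->private_data = group;
     *  fd_install(fd, filp);
     *  return fd;
     */
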
diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c
index 5a9e34475e37..9173e82a45d1 100644
--- a/fs/ntfs/dir.c
+++ b/fs/ntfs/dir.c
@@ -1545,7 +1545,7 @@ static int ntfs_dir_fsync(struct file *filp, struct dentry *dentry,
1545 write_inode_now(bmp_vi, !datasync); 1545 write_inode_now(bmp_vi, !datasync);
1546 iput(bmp_vi); 1546 iput(bmp_vi);
1547 } 1547 }
1548 ret = ntfs_write_inode(vi, 1); 1548 ret = __ntfs_write_inode(vi, 1);
1549 write_inode_now(vi, !datasync); 1549 write_inode_now(vi, !datasync);
1550 err = sync_blockdev(vi->i_sb->s_bdev); 1550 err = sync_blockdev(vi->i_sb->s_bdev);
1551 if (unlikely(err && !ret)) 1551 if (unlikely(err && !ret))
diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
index 43179ddd336f..b681c71d7069 100644
--- a/fs/ntfs/file.c
+++ b/fs/ntfs/file.c
@@ -2182,7 +2182,7 @@ static int ntfs_file_fsync(struct file *filp, struct dentry *dentry,
2182 ntfs_debug("Entering for inode 0x%lx.", vi->i_ino); 2182 ntfs_debug("Entering for inode 0x%lx.", vi->i_ino);
2183 BUG_ON(S_ISDIR(vi->i_mode)); 2183 BUG_ON(S_ISDIR(vi->i_mode));
2184 if (!datasync || !NInoNonResident(NTFS_I(vi))) 2184 if (!datasync || !NInoNonResident(NTFS_I(vi)))
2185 ret = ntfs_write_inode(vi, 1); 2185 ret = __ntfs_write_inode(vi, 1);
2186 write_inode_now(vi, !datasync); 2186 write_inode_now(vi, !datasync);
2187 /* 2187 /*
2188 * NOTE: If we were to use mapping->private_list (see ext2 and 2188 * NOTE: If we were to use mapping->private_list (see ext2 and
diff --git a/fs/ntfs/inode.c b/fs/ntfs/inode.c
index dc2505abb6d7..4b57fb1eac2a 100644
--- a/fs/ntfs/inode.c
+++ b/fs/ntfs/inode.c
@@ -2957,7 +2957,7 @@ out:
2957 * 2957 *
2958 * Return 0 on success and -errno on error. 2958 * Return 0 on success and -errno on error.
2959 */ 2959 */
2960int ntfs_write_inode(struct inode *vi, int sync) 2960int __ntfs_write_inode(struct inode *vi, int sync)
2961{ 2961{
2962 sle64 nt; 2962 sle64 nt;
2963 ntfs_inode *ni = NTFS_I(vi); 2963 ntfs_inode *ni = NTFS_I(vi);
diff --git a/fs/ntfs/inode.h b/fs/ntfs/inode.h
index 117eaf8032a3..9a113544605d 100644
--- a/fs/ntfs/inode.h
+++ b/fs/ntfs/inode.h
@@ -307,12 +307,12 @@ extern void ntfs_truncate_vfs(struct inode *vi);
307 307
308extern int ntfs_setattr(struct dentry *dentry, struct iattr *attr); 308extern int ntfs_setattr(struct dentry *dentry, struct iattr *attr);
309 309
310extern int ntfs_write_inode(struct inode *vi, int sync); 310extern int __ntfs_write_inode(struct inode *vi, int sync);
311 311
312static inline void ntfs_commit_inode(struct inode *vi) 312static inline void ntfs_commit_inode(struct inode *vi)
313{ 313{
314 if (!is_bad_inode(vi)) 314 if (!is_bad_inode(vi))
315 ntfs_write_inode(vi, 1); 315 __ntfs_write_inode(vi, 1);
316 return; 316 return;
317} 317}
318 318
diff --git a/fs/ntfs/super.c b/fs/ntfs/super.c
index 80b04770e8e9..1cf39dfaee7a 100644
--- a/fs/ntfs/super.c
+++ b/fs/ntfs/super.c
@@ -39,6 +39,7 @@
39#include "dir.h" 39#include "dir.h"
40#include "debug.h" 40#include "debug.h"
41#include "index.h" 41#include "index.h"
42#include "inode.h"
42#include "aops.h" 43#include "aops.h"
43#include "layout.h" 44#include "layout.h"
44#include "malloc.h" 45#include "malloc.h"
@@ -2662,6 +2663,13 @@ static int ntfs_statfs(struct dentry *dentry, struct kstatfs *sfs)
2662 return 0; 2663 return 0;
2663} 2664}
2664 2665
2666#ifdef NTFS_RW
2667static int ntfs_write_inode(struct inode *vi, struct writeback_control *wbc)
2668{
2669 return __ntfs_write_inode(vi, wbc->sync_mode == WB_SYNC_ALL);
2670}
2671#endif
2672
2665/** 2673/**
2666 * The complete super operations. 2674 * The complete super operations.
2667 */ 2675 */
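
Illustrative only: the VFS now hands ->write_inode a struct writeback_control, and the wrapper above folds its sync_mode back into the old boolean. Plugged into an operations table it looks like the following (example_sops is hypothetical; the real table follows in super.c):

    static const struct super_operations example_sops = {
            /* WB_SYNC_ALL means the caller requires a synchronous write */
            .write_inode    = ntfs_write_inode,
    };
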
diff --git a/fs/ocfs2/Makefile b/fs/ocfs2/Makefile
index 600d2d2ade11..791c0886c060 100644
--- a/fs/ocfs2/Makefile
+++ b/fs/ocfs2/Makefile
@@ -46,6 +46,7 @@ ocfs2_stackglue-objs := stackglue.o
46ocfs2_stack_o2cb-objs := stack_o2cb.o 46ocfs2_stack_o2cb-objs := stack_o2cb.o
47ocfs2_stack_user-objs := stack_user.o 47ocfs2_stack_user-objs := stack_user.o
48 48
49obj-$(CONFIG_OCFS2_FS) += dlmfs/
49# cluster/ is always needed when OCFS2_FS for masklog support 50# cluster/ is always needed when OCFS2_FS for masklog support
50obj-$(CONFIG_OCFS2_FS) += cluster/ 51obj-$(CONFIG_OCFS2_FS) += cluster/
51obj-$(CONFIG_OCFS2_FS_O2CB) += dlm/ 52obj-$(CONFIG_OCFS2_FS_O2CB) += dlm/
diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c
index 20538dd832a4..9f8bd913c51e 100644
--- a/fs/ocfs2/alloc.c
+++ b/fs/ocfs2/alloc.c
@@ -1050,7 +1050,8 @@ static int ocfs2_create_new_meta_bhs(handle_t *handle,
1050 strcpy(eb->h_signature, OCFS2_EXTENT_BLOCK_SIGNATURE); 1050 strcpy(eb->h_signature, OCFS2_EXTENT_BLOCK_SIGNATURE);
1051 eb->h_blkno = cpu_to_le64(first_blkno); 1051 eb->h_blkno = cpu_to_le64(first_blkno);
1052 eb->h_fs_generation = cpu_to_le32(osb->fs_generation); 1052 eb->h_fs_generation = cpu_to_le32(osb->fs_generation);
1053 eb->h_suballoc_slot = cpu_to_le16(osb->slot_num); 1053 eb->h_suballoc_slot =
1054 cpu_to_le16(meta_ac->ac_alloc_slot);
1054 eb->h_suballoc_bit = cpu_to_le16(suballoc_bit_start); 1055 eb->h_suballoc_bit = cpu_to_le16(suballoc_bit_start);
1055 eb->h_list.l_count = 1056 eb->h_list.l_count =
1056 cpu_to_le16(ocfs2_extent_recs_per_eb(osb->sb)); 1057 cpu_to_le16(ocfs2_extent_recs_per_eb(osb->sb));
@@ -6037,7 +6038,7 @@ static void ocfs2_truncate_log_worker(struct work_struct *work)
6037 if (status < 0) 6038 if (status < 0)
6038 mlog_errno(status); 6039 mlog_errno(status);
6039 else 6040 else
6040 ocfs2_init_inode_steal_slot(osb); 6041 ocfs2_init_steal_slots(osb);
6041 6042
6042 mlog_exit(status); 6043 mlog_exit(status);
6043} 6044}
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index 7d04c171567d..21441ddb5506 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -577,8 +577,9 @@ static int ocfs2_direct_IO_get_blocks(struct inode *inode, sector_t iblock,
577 goto bail; 577 goto bail;
578 } 578 }
579 579
580 /* We should already CoW the refcounted extent. */ 580 /* We should already CoW the refcounted extent in case of create. */
581 BUG_ON(ext_flags & OCFS2_EXT_REFCOUNTED); 581 BUG_ON(create && (ext_flags & OCFS2_EXT_REFCOUNTED));
582
582 /* 583 /*
583 * get_more_blocks() expects us to describe a hole by clearing 584 * get_more_blocks() expects us to describe a hole by clearing
584 * the mapped bit on bh_result(). 585 * the mapped bit on bh_result().
diff --git a/fs/ocfs2/cluster/masklog.c b/fs/ocfs2/cluster/masklog.c
index 1cd2934de615..b39da877b12f 100644
--- a/fs/ocfs2/cluster/masklog.c
+++ b/fs/ocfs2/cluster/masklog.c
@@ -112,6 +112,7 @@ static struct mlog_attribute mlog_attrs[MLOG_MAX_BITS] = {
112 define_mask(XATTR), 112 define_mask(XATTR),
113 define_mask(QUOTA), 113 define_mask(QUOTA),
114 define_mask(REFCOUNT), 114 define_mask(REFCOUNT),
115 define_mask(BASTS),
115 define_mask(ERROR), 116 define_mask(ERROR),
116 define_mask(NOTICE), 117 define_mask(NOTICE),
117 define_mask(KTHREAD), 118 define_mask(KTHREAD),
diff --git a/fs/ocfs2/cluster/masklog.h b/fs/ocfs2/cluster/masklog.h
index 9b4d11726cf2..3dfddbec32f2 100644
--- a/fs/ocfs2/cluster/masklog.h
+++ b/fs/ocfs2/cluster/masklog.h
@@ -114,6 +114,7 @@
114#define ML_XATTR 0x0000000020000000ULL /* ocfs2 extended attributes */ 114#define ML_XATTR 0x0000000020000000ULL /* ocfs2 extended attributes */
115#define ML_QUOTA 0x0000000040000000ULL /* ocfs2 quota operations */ 115#define ML_QUOTA 0x0000000040000000ULL /* ocfs2 quota operations */
116#define ML_REFCOUNT 0x0000000080000000ULL /* refcount tree operations */ 116#define ML_REFCOUNT 0x0000000080000000ULL /* refcount tree operations */
117#define ML_BASTS 0x0000001000000000ULL /* dlmglue asts and basts */
117/* bits that are infrequently given and frequently matched in the high word */ 118/* bits that are infrequently given and frequently matched in the high word */
118#define ML_ERROR 0x0000000100000000ULL /* sent to KERN_ERR */ 119#define ML_ERROR 0x0000000100000000ULL /* sent to KERN_ERR */
119#define ML_NOTICE 0x0000000200000000ULL /* sent to KERN_NOTICE */ 120#define ML_NOTICE 0x0000000200000000ULL /* sent to KERN_NOTICE */
@@ -194,9 +195,9 @@ extern struct mlog_bits mlog_and_bits, mlog_not_bits;
194 * previous token if args expands to nothing. 195 * previous token if args expands to nothing.
195 */ 196 */
196#define __mlog_printk(level, fmt, args...) \ 197#define __mlog_printk(level, fmt, args...) \
197 printk(level "(%u,%lu):%s:%d " fmt, task_pid_nr(current), \ 198 printk(level "(%s,%u,%lu):%s:%d " fmt, current->comm, \
198 __mlog_cpu_guess, __PRETTY_FUNCTION__, __LINE__ , \ 199 task_pid_nr(current), __mlog_cpu_guess, \
199 ##args) 200 __PRETTY_FUNCTION__, __LINE__ , ##args)
200 201
201#define mlog(mask, fmt, args...) do { \ 202#define mlog(mask, fmt, args...) do { \
202 u64 __m = MLOG_MASK_PREFIX | (mask); \ 203 u64 __m = MLOG_MASK_PREFIX | (mask); \
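
With current->comm added to the prefix, a line produced by __mlog_printk() now reads as follows (hypothetical values):

    /*
     *   (dlm_reco_thread,2312,1):dlm_do_recovery:523 ...
     *
     * versus the old "(2312,1):dlm_do_recovery:523 ..." form, which gave
     * the pid but not which kernel thread was logging.
     */
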
diff --git a/fs/ocfs2/dir.c b/fs/ocfs2/dir.c
index a63ea4d74e67..efd77d071c80 100644
--- a/fs/ocfs2/dir.c
+++ b/fs/ocfs2/dir.c
@@ -2439,7 +2439,7 @@ static int ocfs2_dx_dir_attach_index(struct ocfs2_super *osb,
2439 dx_root = (struct ocfs2_dx_root_block *)dx_root_bh->b_data; 2439 dx_root = (struct ocfs2_dx_root_block *)dx_root_bh->b_data;
2440 memset(dx_root, 0, osb->sb->s_blocksize); 2440 memset(dx_root, 0, osb->sb->s_blocksize);
2441 strcpy(dx_root->dr_signature, OCFS2_DX_ROOT_SIGNATURE); 2441 strcpy(dx_root->dr_signature, OCFS2_DX_ROOT_SIGNATURE);
2442 dx_root->dr_suballoc_slot = cpu_to_le16(osb->slot_num); 2442 dx_root->dr_suballoc_slot = cpu_to_le16(meta_ac->ac_alloc_slot);
2443 dx_root->dr_suballoc_bit = cpu_to_le16(dr_suballoc_bit); 2443 dx_root->dr_suballoc_bit = cpu_to_le16(dr_suballoc_bit);
2444 dx_root->dr_fs_generation = cpu_to_le32(osb->fs_generation); 2444 dx_root->dr_fs_generation = cpu_to_le32(osb->fs_generation);
2445 dx_root->dr_blkno = cpu_to_le64(dr_blkno); 2445 dx_root->dr_blkno = cpu_to_le64(dr_blkno);
diff --git a/fs/ocfs2/dlm/Makefile b/fs/ocfs2/dlm/Makefile
index 190361375700..dcebf0d920fa 100644
--- a/fs/ocfs2/dlm/Makefile
+++ b/fs/ocfs2/dlm/Makefile
@@ -1,8 +1,7 @@
1EXTRA_CFLAGS += -Ifs/ocfs2 1EXTRA_CFLAGS += -Ifs/ocfs2
2 2
3obj-$(CONFIG_OCFS2_FS_O2CB) += ocfs2_dlm.o ocfs2_dlmfs.o 3obj-$(CONFIG_OCFS2_FS_O2CB) += ocfs2_dlm.o
4 4
5ocfs2_dlm-objs := dlmdomain.o dlmdebug.o dlmthread.o dlmrecovery.o \ 5ocfs2_dlm-objs := dlmdomain.o dlmdebug.o dlmthread.o dlmrecovery.o \
6 dlmmaster.o dlmast.o dlmconvert.o dlmlock.o dlmunlock.o dlmver.o 6 dlmmaster.o dlmast.o dlmconvert.o dlmlock.o dlmunlock.o dlmver.o
7 7
8ocfs2_dlmfs-objs := userdlm.o dlmfs.o dlmfsver.o
diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c
index 344bcf90cbf4..b4f99de2caf3 100644
--- a/fs/ocfs2/dlm/dlmrecovery.c
+++ b/fs/ocfs2/dlm/dlmrecovery.c
@@ -310,7 +310,7 @@ static int dlm_recovery_thread(void *data)
310 mlog(0, "dlm thread running for %s...\n", dlm->name); 310 mlog(0, "dlm thread running for %s...\n", dlm->name);
311 311
312 while (!kthread_should_stop()) { 312 while (!kthread_should_stop()) {
313 if (dlm_joined(dlm)) { 313 if (dlm_domain_fully_joined(dlm)) {
314 status = dlm_do_recovery(dlm); 314 status = dlm_do_recovery(dlm);
315 if (status == -EAGAIN) { 315 if (status == -EAGAIN) {
316 /* do not sleep, recheck immediately. */ 316 /* do not sleep, recheck immediately. */
diff --git a/fs/ocfs2/dlmfs/Makefile b/fs/ocfs2/dlmfs/Makefile
new file mode 100644
index 000000000000..df69b4856d0d
--- /dev/null
+++ b/fs/ocfs2/dlmfs/Makefile
@@ -0,0 +1,5 @@
1EXTRA_CFLAGS += -Ifs/ocfs2
2
3obj-$(CONFIG_OCFS2_FS) += ocfs2_dlmfs.o
4
5ocfs2_dlmfs-objs := userdlm.o dlmfs.o dlmfsver.o
diff --git a/fs/ocfs2/dlm/dlmfs.c b/fs/ocfs2/dlmfs/dlmfs.c
index 02bf17808bdc..1b0de157a08c 100644
--- a/fs/ocfs2/dlm/dlmfs.c
+++ b/fs/ocfs2/dlmfs/dlmfs.c
@@ -43,24 +43,17 @@
43#include <linux/init.h> 43#include <linux/init.h>
44#include <linux/string.h> 44#include <linux/string.h>
45#include <linux/backing-dev.h> 45#include <linux/backing-dev.h>
46#include <linux/poll.h>
46 47
47#include <asm/uaccess.h> 48#include <asm/uaccess.h>
48 49
49 50#include "stackglue.h"
50#include "cluster/nodemanager.h"
51#include "cluster/heartbeat.h"
52#include "cluster/tcp.h"
53
54#include "dlmapi.h"
55
56#include "userdlm.h" 51#include "userdlm.h"
57
58#include "dlmfsver.h" 52#include "dlmfsver.h"
59 53
60#define MLOG_MASK_PREFIX ML_DLMFS 54#define MLOG_MASK_PREFIX ML_DLMFS
61#include "cluster/masklog.h" 55#include "cluster/masklog.h"
62 56
63#include "ocfs2_lockingver.h"
64 57
65static const struct super_operations dlmfs_ops; 58static const struct super_operations dlmfs_ops;
66static const struct file_operations dlmfs_file_operations; 59static const struct file_operations dlmfs_file_operations;
@@ -71,15 +64,46 @@ static struct kmem_cache *dlmfs_inode_cache;
71 64
72struct workqueue_struct *user_dlm_worker; 65struct workqueue_struct *user_dlm_worker;
73 66
67
68
74/* 69/*
75 * This is the userdlmfs locking protocol version. 70 * These are the ABI capabilities of dlmfs.
71 *
72 * Over time, dlmfs has added some features that were not part of the
73 * initial ABI. Unfortunately, some of these features are not detectable
74 * via standard usage. For example, Linux's default poll always returns
75 * POLLIN, so there is no way for a caller of poll(2) to know when dlmfs
76 * added poll support. Instead, we provide this list of new capabilities.
77 *
78 * Capabilities is a read-only attribute. We do it as a module parameter
79 * so we can discover it whether dlmfs is built in, loaded, or even not
80 * loaded.
76 * 81 *
77 * See fs/ocfs2/dlmglue.c for more details on locking versions. 82 * The ABI features are local to this machine's dlmfs mount. This is
83 * distinct from the locking protocol, which is concerned with inter-node
84 * interaction.
85 *
86 * Capabilities:
87 * - bast : POLLIN against the file descriptor of a held lock
88 * signifies a bast fired on the lock.
78 */ 89 */
79static const struct dlm_protocol_version user_locking_protocol = { 90#define DLMFS_CAPABILITIES "bast stackglue"
80 .pv_major = OCFS2_LOCKING_PROTOCOL_MAJOR, 91extern int param_set_dlmfs_capabilities(const char *val,
81 .pv_minor = OCFS2_LOCKING_PROTOCOL_MINOR, 92 struct kernel_param *kp)
82}; 93{
94 printk(KERN_ERR "%s: readonly parameter\n", kp->name);
95 return -EINVAL;
96}
97static int param_get_dlmfs_capabilities(char *buffer,
98 struct kernel_param *kp)
99{
100 return strlcpy(buffer, DLMFS_CAPABILITIES,
101 strlen(DLMFS_CAPABILITIES) + 1);
102}
103module_param_call(capabilities, param_set_dlmfs_capabilities,
104 param_get_dlmfs_capabilities, NULL, 0444);
105MODULE_PARM_DESC(capabilities, DLMFS_CAPABILITIES);
106
83 107
84/* 108/*
85 * decodes a set of open flags into a valid lock level and a set of flags. 109 * decodes a set of open flags into a valid lock level and a set of flags.
@@ -179,13 +203,46 @@ static int dlmfs_file_release(struct inode *inode,
179 return 0; 203 return 0;
180} 204}
181 205
206/*
207 * We do ->setattr() just to override size changes. Our size is the size
208 * of the LVB and nothing else.
209 */
210static int dlmfs_file_setattr(struct dentry *dentry, struct iattr *attr)
211{
212 int error;
213 struct inode *inode = dentry->d_inode;
214
215 attr->ia_valid &= ~ATTR_SIZE;
216 error = inode_change_ok(inode, attr);
217 if (!error)
218 error = inode_setattr(inode, attr);
219
220 return error;
221}
222
223static unsigned int dlmfs_file_poll(struct file *file, poll_table *wait)
224{
225 int event = 0;
226 struct inode *inode = file->f_path.dentry->d_inode;
227 struct dlmfs_inode_private *ip = DLMFS_I(inode);
228
229 poll_wait(file, &ip->ip_lockres.l_event, wait);
230
231 spin_lock(&ip->ip_lockres.l_lock);
232 if (ip->ip_lockres.l_flags & USER_LOCK_BLOCKED)
233 event = POLLIN | POLLRDNORM;
234 spin_unlock(&ip->ip_lockres.l_lock);
235
236 return event;
237}
238
182static ssize_t dlmfs_file_read(struct file *filp, 239static ssize_t dlmfs_file_read(struct file *filp,
183 char __user *buf, 240 char __user *buf,
184 size_t count, 241 size_t count,
185 loff_t *ppos) 242 loff_t *ppos)
186{ 243{
187 int bytes_left; 244 int bytes_left;
188 ssize_t readlen; 245 ssize_t readlen, got;
189 char *lvb_buf; 246 char *lvb_buf;
190 struct inode *inode = filp->f_path.dentry->d_inode; 247 struct inode *inode = filp->f_path.dentry->d_inode;
191 248
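
The bast capability implemented by dlmfs_file_poll() above is consumed from userspace with plain poll(2). A minimal sketch, assuming lockfd is an already-open dlmfs lock file held at some level:

    #include <poll.h>
    #include <unistd.h>

    static void wait_for_bast(int lockfd)
    {
            struct pollfd pfd = { .fd = lockfd, .events = POLLIN };

            /* POLLIN maps to USER_LOCK_BLOCKED: another node wants the lock */
            if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
                    close(lockfd);  /* closing the fd drops our lock */
    }
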
@@ -211,9 +268,13 @@ static ssize_t dlmfs_file_read(struct file *filp,
211 if (!lvb_buf) 268 if (!lvb_buf)
212 return -ENOMEM; 269 return -ENOMEM;
213 270
214 user_dlm_read_lvb(inode, lvb_buf, readlen); 271 got = user_dlm_read_lvb(inode, lvb_buf, readlen);
215 bytes_left = __copy_to_user(buf, lvb_buf, readlen); 272 if (got) {
216 readlen -= bytes_left; 273 BUG_ON(got != readlen);
274 bytes_left = __copy_to_user(buf, lvb_buf, readlen);
275 readlen -= bytes_left;
276 } else
277 readlen = 0;
217 278
218 kfree(lvb_buf); 279 kfree(lvb_buf);
219 280
@@ -272,7 +333,7 @@ static void dlmfs_init_once(void *foo)
272 struct dlmfs_inode_private *ip = 333 struct dlmfs_inode_private *ip =
273 (struct dlmfs_inode_private *) foo; 334 (struct dlmfs_inode_private *) foo;
274 335
275 ip->ip_dlm = NULL; 336 ip->ip_conn = NULL;
276 ip->ip_parent = NULL; 337 ip->ip_parent = NULL;
277 338
278 inode_init_once(&ip->ip_vfs_inode); 339 inode_init_once(&ip->ip_vfs_inode);
@@ -314,14 +375,14 @@ static void dlmfs_clear_inode(struct inode *inode)
314 goto clear_fields; 375 goto clear_fields;
315 } 376 }
316 377
317 mlog(0, "we're a directory, ip->ip_dlm = 0x%p\n", ip->ip_dlm); 378 mlog(0, "we're a directory, ip->ip_conn = 0x%p\n", ip->ip_conn);
318 /* we must be a directory. If required, lets unregister the 379 /* we must be a directory. If required, lets unregister the
319 * dlm context now. */ 380 * dlm context now. */
320 if (ip->ip_dlm) 381 if (ip->ip_conn)
321 user_dlm_unregister_context(ip->ip_dlm); 382 user_dlm_unregister(ip->ip_conn);
322clear_fields: 383clear_fields:
323 ip->ip_parent = NULL; 384 ip->ip_parent = NULL;
324 ip->ip_dlm = NULL; 385 ip->ip_conn = NULL;
325} 386}
326 387
327static struct backing_dev_info dlmfs_backing_dev_info = { 388static struct backing_dev_info dlmfs_backing_dev_info = {
@@ -371,7 +432,7 @@ static struct inode *dlmfs_get_inode(struct inode *parent,
371 inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; 432 inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
372 433
373 ip = DLMFS_I(inode); 434 ip = DLMFS_I(inode);
374 ip->ip_dlm = DLMFS_I(parent)->ip_dlm; 435 ip->ip_conn = DLMFS_I(parent)->ip_conn;
375 436
376 switch (mode & S_IFMT) { 437 switch (mode & S_IFMT) {
377 default: 438 default:
@@ -425,13 +486,12 @@ static int dlmfs_mkdir(struct inode * dir,
425 struct inode *inode = NULL; 486 struct inode *inode = NULL;
426 struct qstr *domain = &dentry->d_name; 487 struct qstr *domain = &dentry->d_name;
427 struct dlmfs_inode_private *ip; 488 struct dlmfs_inode_private *ip;
428 struct dlm_ctxt *dlm; 489 struct ocfs2_cluster_connection *conn;
429 struct dlm_protocol_version proto = user_locking_protocol;
430 490
431 mlog(0, "mkdir %.*s\n", domain->len, domain->name); 491 mlog(0, "mkdir %.*s\n", domain->len, domain->name);
432 492
433 /* verify that we have a proper domain */ 493 /* verify that we have a proper domain */
434 if (domain->len >= O2NM_MAX_NAME_LEN) { 494 if (domain->len >= GROUP_NAME_MAX) {
435 status = -EINVAL; 495 status = -EINVAL;
436 mlog(ML_ERROR, "invalid domain name for directory.\n"); 496 mlog(ML_ERROR, "invalid domain name for directory.\n");
437 goto bail; 497 goto bail;
@@ -446,14 +506,14 @@ static int dlmfs_mkdir(struct inode * dir,
446 506
447 ip = DLMFS_I(inode); 507 ip = DLMFS_I(inode);
448 508
449 dlm = user_dlm_register_context(domain, &proto); 509 conn = user_dlm_register(domain);
450 if (IS_ERR(dlm)) { 510 if (IS_ERR(conn)) {
451 status = PTR_ERR(dlm); 511 status = PTR_ERR(conn);
452 mlog(ML_ERROR, "Error %d could not register domain \"%.*s\"\n", 512 mlog(ML_ERROR, "Error %d could not register domain \"%.*s\"\n",
453 status, domain->len, domain->name); 513 status, domain->len, domain->name);
454 goto bail; 514 goto bail;
455 } 515 }
456 ip->ip_dlm = dlm; 516 ip->ip_conn = conn;
457 517
458 inc_nlink(dir); 518 inc_nlink(dir);
459 d_instantiate(dentry, inode); 519 d_instantiate(dentry, inode);
@@ -549,6 +609,7 @@ static int dlmfs_fill_super(struct super_block * sb,
549static const struct file_operations dlmfs_file_operations = { 609static const struct file_operations dlmfs_file_operations = {
550 .open = dlmfs_file_open, 610 .open = dlmfs_file_open,
551 .release = dlmfs_file_release, 611 .release = dlmfs_file_release,
612 .poll = dlmfs_file_poll,
552 .read = dlmfs_file_read, 613 .read = dlmfs_file_read,
553 .write = dlmfs_file_write, 614 .write = dlmfs_file_write,
554}; 615};
@@ -576,6 +637,7 @@ static const struct super_operations dlmfs_ops = {
576 637
577static const struct inode_operations dlmfs_file_inode_operations = { 638static const struct inode_operations dlmfs_file_inode_operations = {
578 .getattr = simple_getattr, 639 .getattr = simple_getattr,
640 .setattr = dlmfs_file_setattr,
579}; 641};
580 642
581static int dlmfs_get_sb(struct file_system_type *fs_type, 643static int dlmfs_get_sb(struct file_system_type *fs_type,
@@ -620,6 +682,7 @@ static int __init init_dlmfs_fs(void)
620 } 682 }
621 cleanup_worker = 1; 683 cleanup_worker = 1;
622 684
685 user_dlm_set_locking_protocol();
623 status = register_filesystem(&dlmfs_fs_type); 686 status = register_filesystem(&dlmfs_fs_type);
624bail: 687bail:
625 if (status) { 688 if (status) {
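
Userspace probes the capabilities string registered above through the module-parameter sysfs file, whether dlmfs is built in or loaded as a module (illustrative path and output):

    /*
     *   $ cat /sys/module/ocfs2_dlmfs/parameters/capabilities
     *   bast stackglue
     */
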
diff --git a/fs/ocfs2/dlm/dlmfsver.c b/fs/ocfs2/dlmfs/dlmfsver.c
index a733b3321f83..a733b3321f83 100644
--- a/fs/ocfs2/dlm/dlmfsver.c
+++ b/fs/ocfs2/dlmfs/dlmfsver.c
diff --git a/fs/ocfs2/dlm/dlmfsver.h b/fs/ocfs2/dlmfs/dlmfsver.h
index f35eadbed25c..f35eadbed25c 100644
--- a/fs/ocfs2/dlm/dlmfsver.h
+++ b/fs/ocfs2/dlmfs/dlmfsver.h
diff --git a/fs/ocfs2/dlm/userdlm.c b/fs/ocfs2/dlmfs/userdlm.c
index 4cb1d3dae250..0499e3fb7bdb 100644
--- a/fs/ocfs2/dlm/userdlm.c
+++ b/fs/ocfs2/dlmfs/userdlm.c
@@ -34,18 +34,19 @@
34#include <linux/types.h> 34#include <linux/types.h>
35#include <linux/crc32.h> 35#include <linux/crc32.h>
36 36
37 37#include "ocfs2_lockingver.h"
38#include "cluster/nodemanager.h" 38#include "stackglue.h"
39#include "cluster/heartbeat.h"
40#include "cluster/tcp.h"
41
42#include "dlmapi.h"
43
44#include "userdlm.h" 39#include "userdlm.h"
45 40
46#define MLOG_MASK_PREFIX ML_DLMFS 41#define MLOG_MASK_PREFIX ML_DLMFS
47#include "cluster/masklog.h" 42#include "cluster/masklog.h"
48 43
44
45static inline struct user_lock_res *user_lksb_to_lock_res(struct ocfs2_dlm_lksb *lksb)
46{
47 return container_of(lksb, struct user_lock_res, l_lksb);
48}
49
49static inline int user_check_wait_flag(struct user_lock_res *lockres, 50static inline int user_check_wait_flag(struct user_lock_res *lockres,
50 int flag) 51 int flag)
51{ 52{
@@ -73,15 +74,15 @@ static inline void user_wait_on_blocked_lock(struct user_lock_res *lockres)
73} 74}
74 75
75/* I heart container_of... */ 76/* I heart container_of... */
76static inline struct dlm_ctxt * 77static inline struct ocfs2_cluster_connection *
77dlm_ctxt_from_user_lockres(struct user_lock_res *lockres) 78cluster_connection_from_user_lockres(struct user_lock_res *lockres)
78{ 79{
79 struct dlmfs_inode_private *ip; 80 struct dlmfs_inode_private *ip;
80 81
81 ip = container_of(lockres, 82 ip = container_of(lockres,
82 struct dlmfs_inode_private, 83 struct dlmfs_inode_private,
83 ip_lockres); 84 ip_lockres);
84 return ip->ip_dlm; 85 return ip->ip_conn;
85} 86}
86 87
87static struct inode * 88static struct inode *
@@ -103,9 +104,9 @@ static inline void user_recover_from_dlm_error(struct user_lock_res *lockres)
103} 104}
104 105
105#define user_log_dlm_error(_func, _stat, _lockres) do { \ 106#define user_log_dlm_error(_func, _stat, _lockres) do { \
106 mlog(ML_ERROR, "Dlm error \"%s\" while calling %s on " \ 107 mlog(ML_ERROR, "Dlm error %d while calling %s on " \
107 "resource %.*s: %s\n", dlm_errname(_stat), _func, \ 108 "resource %.*s\n", _stat, _func, \
108 _lockres->l_namelen, _lockres->l_name, dlm_errmsg(_stat)); \ 109 _lockres->l_namelen, _lockres->l_name); \
109} while (0) 110} while (0)
110 111
111/* WARNING: This function lives in a world where the only three lock 112/* WARNING: This function lives in a world where the only three lock
@@ -113,34 +114,35 @@ static inline void user_recover_from_dlm_error(struct user_lock_res *lockres)
113 * lock types are added. */ 114 * lock types are added. */
114static inline int user_highest_compat_lock_level(int level) 115static inline int user_highest_compat_lock_level(int level)
115{ 116{
116 int new_level = LKM_EXMODE; 117 int new_level = DLM_LOCK_EX;
117 118
118 if (level == LKM_EXMODE) 119 if (level == DLM_LOCK_EX)
119 new_level = LKM_NLMODE; 120 new_level = DLM_LOCK_NL;
120 else if (level == LKM_PRMODE) 121 else if (level == DLM_LOCK_PR)
121 new_level = LKM_PRMODE; 122 new_level = DLM_LOCK_PR;
122 return new_level; 123 return new_level;
123} 124}
124 125
125static void user_ast(void *opaque) 126static void user_ast(struct ocfs2_dlm_lksb *lksb)
126{ 127{
127 struct user_lock_res *lockres = opaque; 128 struct user_lock_res *lockres = user_lksb_to_lock_res(lksb);
128 struct dlm_lockstatus *lksb; 129 int status;
129 130
130 mlog(0, "AST fired for lockres %.*s\n", lockres->l_namelen, 131 mlog(ML_BASTS, "AST fired for lockres %.*s, level %d => %d\n",
131 lockres->l_name); 132 lockres->l_namelen, lockres->l_name, lockres->l_level,
133 lockres->l_requested);
132 134
133 spin_lock(&lockres->l_lock); 135 spin_lock(&lockres->l_lock);
134 136
135 lksb = &(lockres->l_lksb); 137 status = ocfs2_dlm_lock_status(&lockres->l_lksb);
136 if (lksb->status != DLM_NORMAL) { 138 if (status) {
137 mlog(ML_ERROR, "lksb status value of %u on lockres %.*s\n", 139 mlog(ML_ERROR, "lksb status value of %u on lockres %.*s\n",
138 lksb->status, lockres->l_namelen, lockres->l_name); 140 status, lockres->l_namelen, lockres->l_name);
139 spin_unlock(&lockres->l_lock); 141 spin_unlock(&lockres->l_lock);
140 return; 142 return;
141 } 143 }
142 144
143 mlog_bug_on_msg(lockres->l_requested == LKM_IVMODE, 145 mlog_bug_on_msg(lockres->l_requested == DLM_LOCK_IV,
144 "Lockres %.*s, requested ivmode. flags 0x%x\n", 146 "Lockres %.*s, requested ivmode. flags 0x%x\n",
145 lockres->l_namelen, lockres->l_name, lockres->l_flags); 147 lockres->l_namelen, lockres->l_name, lockres->l_flags);
146 148
@@ -148,13 +150,13 @@ static void user_ast(void *opaque)
148 if (lockres->l_requested < lockres->l_level) { 150 if (lockres->l_requested < lockres->l_level) {
149 if (lockres->l_requested <= 151 if (lockres->l_requested <=
150 user_highest_compat_lock_level(lockres->l_blocking)) { 152 user_highest_compat_lock_level(lockres->l_blocking)) {
151 lockres->l_blocking = LKM_NLMODE; 153 lockres->l_blocking = DLM_LOCK_NL;
152 lockres->l_flags &= ~USER_LOCK_BLOCKED; 154 lockres->l_flags &= ~USER_LOCK_BLOCKED;
153 } 155 }
154 } 156 }
155 157
156 lockres->l_level = lockres->l_requested; 158 lockres->l_level = lockres->l_requested;
157 lockres->l_requested = LKM_IVMODE; 159 lockres->l_requested = DLM_LOCK_IV;
158 lockres->l_flags |= USER_LOCK_ATTACHED; 160 lockres->l_flags |= USER_LOCK_ATTACHED;
159 lockres->l_flags &= ~USER_LOCK_BUSY; 161 lockres->l_flags &= ~USER_LOCK_BUSY;
160 162
@@ -193,11 +195,11 @@ static void __user_dlm_cond_queue_lockres(struct user_lock_res *lockres)
193 return; 195 return;
194 196
195 switch (lockres->l_blocking) { 197 switch (lockres->l_blocking) {
196 case LKM_EXMODE: 198 case DLM_LOCK_EX:
197 if (!lockres->l_ex_holders && !lockres->l_ro_holders) 199 if (!lockres->l_ex_holders && !lockres->l_ro_holders)
198 queue = 1; 200 queue = 1;
199 break; 201 break;
200 case LKM_PRMODE: 202 case DLM_LOCK_PR:
201 if (!lockres->l_ex_holders) 203 if (!lockres->l_ex_holders)
202 queue = 1; 204 queue = 1;
203 break; 205 break;
@@ -209,12 +211,12 @@ static void __user_dlm_cond_queue_lockres(struct user_lock_res *lockres)
209 __user_dlm_queue_lockres(lockres); 211 __user_dlm_queue_lockres(lockres);
210} 212}
211 213
212static void user_bast(void *opaque, int level) 214static void user_bast(struct ocfs2_dlm_lksb *lksb, int level)
213{ 215{
214 struct user_lock_res *lockres = opaque; 216 struct user_lock_res *lockres = user_lksb_to_lock_res(lksb);
215 217
216 mlog(0, "Blocking AST fired for lockres %.*s. Blocking level %d\n", 218 mlog(ML_BASTS, "BAST fired for lockres %.*s, blocking %d, level %d\n",
217 lockres->l_namelen, lockres->l_name, level); 219 lockres->l_namelen, lockres->l_name, level, lockres->l_level);
218 220
219 spin_lock(&lockres->l_lock); 221 spin_lock(&lockres->l_lock);
220 lockres->l_flags |= USER_LOCK_BLOCKED; 222 lockres->l_flags |= USER_LOCK_BLOCKED;
@@ -227,15 +229,15 @@ static void user_bast(void *opaque, int level)
227 wake_up(&lockres->l_event); 229 wake_up(&lockres->l_event);
228} 230}
229 231
230static void user_unlock_ast(void *opaque, enum dlm_status status) 232static void user_unlock_ast(struct ocfs2_dlm_lksb *lksb, int status)
231{ 233{
232 struct user_lock_res *lockres = opaque; 234 struct user_lock_res *lockres = user_lksb_to_lock_res(lksb);
233 235
234 mlog(0, "UNLOCK AST called on lock %.*s\n", lockres->l_namelen, 236 mlog(ML_BASTS, "UNLOCK AST fired for lockres %.*s, flags 0x%x\n",
235 lockres->l_name); 237 lockres->l_namelen, lockres->l_name, lockres->l_flags);
236 238
237 if (status != DLM_NORMAL && status != DLM_CANCELGRANT) 239 if (status)
238 mlog(ML_ERROR, "Dlm returns status %d\n", status); 240 mlog(ML_ERROR, "dlm returns status %d\n", status);
239 241
240 spin_lock(&lockres->l_lock); 242 spin_lock(&lockres->l_lock);
241 /* The teardown flag gets set early during the unlock process, 243 /* The teardown flag gets set early during the unlock process,
@@ -243,7 +245,7 @@ static void user_unlock_ast(void *opaque, enum dlm_status status)
243 * for a concurrent cancel. */ 245 * for a concurrent cancel. */
244 if (lockres->l_flags & USER_LOCK_IN_TEARDOWN 246 if (lockres->l_flags & USER_LOCK_IN_TEARDOWN
245 && !(lockres->l_flags & USER_LOCK_IN_CANCEL)) { 247 && !(lockres->l_flags & USER_LOCK_IN_CANCEL)) {
246 lockres->l_level = LKM_IVMODE; 248 lockres->l_level = DLM_LOCK_IV;
247 } else if (status == DLM_CANCELGRANT) { 249 } else if (status == DLM_CANCELGRANT) {
248 /* We tried to cancel a convert request, but it was 250 /* We tried to cancel a convert request, but it was
249 * already granted. Don't clear the busy flag - the 251 * already granted. Don't clear the busy flag - the
@@ -254,7 +256,7 @@ static void user_unlock_ast(void *opaque, enum dlm_status status)
254 } else { 256 } else {
255 BUG_ON(!(lockres->l_flags & USER_LOCK_IN_CANCEL)); 257 BUG_ON(!(lockres->l_flags & USER_LOCK_IN_CANCEL));
256 /* Cancel succeeded, we want to re-queue */ 258 /* Cancel succeeded, we want to re-queue */
257 lockres->l_requested = LKM_IVMODE; /* cancel an 259 lockres->l_requested = DLM_LOCK_IV; /* cancel an
258 * upconvert 260 * upconvert
259 * request. */ 261 * request. */
260 lockres->l_flags &= ~USER_LOCK_IN_CANCEL; 262 lockres->l_flags &= ~USER_LOCK_IN_CANCEL;
@@ -271,6 +273,21 @@ out_noclear:
271 wake_up(&lockres->l_event); 273 wake_up(&lockres->l_event);
272} 274}
273 275
276/*
277 * This is the userdlmfs locking protocol version.
278 *
279 * See fs/ocfs2/dlmglue.c for more details on locking versions.
280 */
281static struct ocfs2_locking_protocol user_dlm_lproto = {
282 .lp_max_version = {
283 .pv_major = OCFS2_LOCKING_PROTOCOL_MAJOR,
284 .pv_minor = OCFS2_LOCKING_PROTOCOL_MINOR,
285 },
286 .lp_lock_ast = user_ast,
287 .lp_blocking_ast = user_bast,
288 .lp_unlock_ast = user_unlock_ast,
289};
290
274static inline void user_dlm_drop_inode_ref(struct user_lock_res *lockres) 291static inline void user_dlm_drop_inode_ref(struct user_lock_res *lockres)
275{ 292{
276 struct inode *inode; 293 struct inode *inode;
@@ -283,10 +300,10 @@ static void user_dlm_unblock_lock(struct work_struct *work)
283 int new_level, status; 300 int new_level, status;
284 struct user_lock_res *lockres = 301 struct user_lock_res *lockres =
285 container_of(work, struct user_lock_res, l_work); 302 container_of(work, struct user_lock_res, l_work);
286 struct dlm_ctxt *dlm = dlm_ctxt_from_user_lockres(lockres); 303 struct ocfs2_cluster_connection *conn =
304 cluster_connection_from_user_lockres(lockres);
287 305
288 mlog(0, "processing lockres %.*s\n", lockres->l_namelen, 306 mlog(0, "lockres %.*s\n", lockres->l_namelen, lockres->l_name);
289 lockres->l_name);
290 307
291 spin_lock(&lockres->l_lock); 308 spin_lock(&lockres->l_lock);
292 309
@@ -304,17 +321,23 @@ static void user_dlm_unblock_lock(struct work_struct *work)
304 * flag, and finally we might get another bast which re-queues 321 * flag, and finally we might get another bast which re-queues
305 * us before our ast for the downconvert is called. */ 322 * us before our ast for the downconvert is called. */
306 if (!(lockres->l_flags & USER_LOCK_BLOCKED)) { 323 if (!(lockres->l_flags & USER_LOCK_BLOCKED)) {
324 mlog(ML_BASTS, "lockres %.*s USER_LOCK_BLOCKED\n",
325 lockres->l_namelen, lockres->l_name);
307 spin_unlock(&lockres->l_lock); 326 spin_unlock(&lockres->l_lock);
308 goto drop_ref; 327 goto drop_ref;
309 } 328 }
310 329
311 if (lockres->l_flags & USER_LOCK_IN_TEARDOWN) { 330 if (lockres->l_flags & USER_LOCK_IN_TEARDOWN) {
331 mlog(ML_BASTS, "lockres %.*s USER_LOCK_IN_TEARDOWN\n",
332 lockres->l_namelen, lockres->l_name);
312 spin_unlock(&lockres->l_lock); 333 spin_unlock(&lockres->l_lock);
313 goto drop_ref; 334 goto drop_ref;
314 } 335 }
315 336
316 if (lockres->l_flags & USER_LOCK_BUSY) { 337 if (lockres->l_flags & USER_LOCK_BUSY) {
317 if (lockres->l_flags & USER_LOCK_IN_CANCEL) { 338 if (lockres->l_flags & USER_LOCK_IN_CANCEL) {
339 mlog(ML_BASTS, "lockres %.*s USER_LOCK_IN_CANCEL\n",
340 lockres->l_namelen, lockres->l_name);
318 spin_unlock(&lockres->l_lock); 341 spin_unlock(&lockres->l_lock);
319 goto drop_ref; 342 goto drop_ref;
320 } 343 }
@@ -322,32 +345,31 @@ static void user_dlm_unblock_lock(struct work_struct *work)
322 lockres->l_flags |= USER_LOCK_IN_CANCEL; 345 lockres->l_flags |= USER_LOCK_IN_CANCEL;
323 spin_unlock(&lockres->l_lock); 346 spin_unlock(&lockres->l_lock);
324 347
325 status = dlmunlock(dlm, 348 status = ocfs2_dlm_unlock(conn, &lockres->l_lksb,
326 &lockres->l_lksb, 349 DLM_LKF_CANCEL);
327 LKM_CANCEL, 350 if (status)
328 user_unlock_ast, 351 user_log_dlm_error("ocfs2_dlm_unlock", status, lockres);
329 lockres);
330 if (status != DLM_NORMAL)
331 user_log_dlm_error("dlmunlock", status, lockres);
332 goto drop_ref; 352 goto drop_ref;
333 } 353 }
334 354
335 /* If there are still incompat holders, we can exit safely 355 /* If there are still incompat holders, we can exit safely
336 * without worrying about re-queueing this lock as that will 356 * without worrying about re-queueing this lock as that will
337 * happen on the last call to user_cluster_unlock. */ 357 * happen on the last call to user_cluster_unlock. */
338 if ((lockres->l_blocking == LKM_EXMODE) 358 if ((lockres->l_blocking == DLM_LOCK_EX)
339 && (lockres->l_ex_holders || lockres->l_ro_holders)) { 359 && (lockres->l_ex_holders || lockres->l_ro_holders)) {
340 spin_unlock(&lockres->l_lock); 360 spin_unlock(&lockres->l_lock);
341 mlog(0, "can't downconvert for ex: ro = %u, ex = %u\n", 361 mlog(ML_BASTS, "lockres %.*s, EX/PR Holders %u,%u\n",
342 lockres->l_ro_holders, lockres->l_ex_holders); 362 lockres->l_namelen, lockres->l_name,
363 lockres->l_ex_holders, lockres->l_ro_holders);
343 goto drop_ref; 364 goto drop_ref;
344 } 365 }
345 366
346 if ((lockres->l_blocking == LKM_PRMODE) 367 if ((lockres->l_blocking == DLM_LOCK_PR)
347 && lockres->l_ex_holders) { 368 && lockres->l_ex_holders) {
348 spin_unlock(&lockres->l_lock); 369 spin_unlock(&lockres->l_lock);
349 mlog(0, "can't downconvert for pr: ex = %u\n", 370 mlog(ML_BASTS, "lockres %.*s, EX Holders %u\n",
350 lockres->l_ex_holders); 371 lockres->l_namelen, lockres->l_name,
372 lockres->l_ex_holders);
351 goto drop_ref; 373 goto drop_ref;
352 } 374 }
353 375
@@ -355,22 +377,17 @@ static void user_dlm_unblock_lock(struct work_struct *work)
355 new_level = user_highest_compat_lock_level(lockres->l_blocking); 377 new_level = user_highest_compat_lock_level(lockres->l_blocking);
356 lockres->l_requested = new_level; 378 lockres->l_requested = new_level;
357 lockres->l_flags |= USER_LOCK_BUSY; 379 lockres->l_flags |= USER_LOCK_BUSY;
358 mlog(0, "Downconvert lock from %d to %d\n", 380 mlog(ML_BASTS, "lockres %.*s, downconvert %d => %d\n",
359 lockres->l_level, new_level); 381 lockres->l_namelen, lockres->l_name, lockres->l_level, new_level);
360 spin_unlock(&lockres->l_lock); 382 spin_unlock(&lockres->l_lock);
361 383
362 /* need lock downconvert request now... */ 384 /* need lock downconvert request now... */
363 status = dlmlock(dlm, 385 status = ocfs2_dlm_lock(conn, new_level, &lockres->l_lksb,
364 new_level, 386 DLM_LKF_CONVERT|DLM_LKF_VALBLK,
365 &lockres->l_lksb, 387 lockres->l_name,
366 LKM_CONVERT|LKM_VALBLK, 388 lockres->l_namelen);
367 lockres->l_name, 389 if (status) {
368 lockres->l_namelen, 390 user_log_dlm_error("ocfs2_dlm_lock", status, lockres);
369 user_ast,
370 lockres,
371 user_bast);
372 if (status != DLM_NORMAL) {
373 user_log_dlm_error("dlmlock", status, lockres);
374 user_recover_from_dlm_error(lockres); 391 user_recover_from_dlm_error(lockres);
375 } 392 }
376 393
@@ -382,10 +399,10 @@ static inline void user_dlm_inc_holders(struct user_lock_res *lockres,
382 int level) 399 int level)
383{ 400{
384 switch(level) { 401 switch(level) {
385 case LKM_EXMODE: 402 case DLM_LOCK_EX:
386 lockres->l_ex_holders++; 403 lockres->l_ex_holders++;
387 break; 404 break;
388 case LKM_PRMODE: 405 case DLM_LOCK_PR:
389 lockres->l_ro_holders++; 406 lockres->l_ro_holders++;
390 break; 407 break;
391 default: 408 default:
@@ -410,20 +427,19 @@ int user_dlm_cluster_lock(struct user_lock_res *lockres,
410 int lkm_flags) 427 int lkm_flags)
411{ 428{
412 int status, local_flags; 429 int status, local_flags;
413 struct dlm_ctxt *dlm = dlm_ctxt_from_user_lockres(lockres); 430 struct ocfs2_cluster_connection *conn =
431 cluster_connection_from_user_lockres(lockres);
414 432
415 if (level != LKM_EXMODE && 433 if (level != DLM_LOCK_EX &&
416 level != LKM_PRMODE) { 434 level != DLM_LOCK_PR) {
417 mlog(ML_ERROR, "lockres %.*s: invalid request!\n", 435 mlog(ML_ERROR, "lockres %.*s: invalid request!\n",
418 lockres->l_namelen, lockres->l_name); 436 lockres->l_namelen, lockres->l_name);
419 status = -EINVAL; 437 status = -EINVAL;
420 goto bail; 438 goto bail;
421 } 439 }
422 440
423 mlog(0, "lockres %.*s: asking for %s lock, passed flags = 0x%x\n", 441 mlog(ML_BASTS, "lockres %.*s, level %d, flags = 0x%x\n",
424 lockres->l_namelen, lockres->l_name, 442 lockres->l_namelen, lockres->l_name, level, lkm_flags);
425 (level == LKM_EXMODE) ? "LKM_EXMODE" : "LKM_PRMODE",
426 lkm_flags);
427 443
428again: 444again:
429 if (signal_pending(current)) { 445 if (signal_pending(current)) {
@@ -457,35 +473,26 @@ again:
457 } 473 }
458 474
459 if (level > lockres->l_level) { 475 if (level > lockres->l_level) {
460 local_flags = lkm_flags | LKM_VALBLK; 476 local_flags = lkm_flags | DLM_LKF_VALBLK;
461 if (lockres->l_level != LKM_IVMODE) 477 if (lockres->l_level != DLM_LOCK_IV)
462 local_flags |= LKM_CONVERT; 478 local_flags |= DLM_LKF_CONVERT;
463 479
464 lockres->l_requested = level; 480 lockres->l_requested = level;
465 lockres->l_flags |= USER_LOCK_BUSY; 481 lockres->l_flags |= USER_LOCK_BUSY;
466 spin_unlock(&lockres->l_lock); 482 spin_unlock(&lockres->l_lock);
467 483
468 BUG_ON(level == LKM_IVMODE); 484 BUG_ON(level == DLM_LOCK_IV);
469 BUG_ON(level == LKM_NLMODE); 485 BUG_ON(level == DLM_LOCK_NL);
470 486
471 /* call dlm_lock to upgrade lock now */ 487 /* call dlm_lock to upgrade lock now */
472 status = dlmlock(dlm, 488 status = ocfs2_dlm_lock(conn, level, &lockres->l_lksb,
473 level, 489 local_flags, lockres->l_name,
474 &lockres->l_lksb, 490 lockres->l_namelen);
475 local_flags, 491 if (status) {
476 lockres->l_name, 492 if ((lkm_flags & DLM_LKF_NOQUEUE) &&
477 lockres->l_namelen, 493 (status != -EAGAIN))
478 user_ast, 494 user_log_dlm_error("ocfs2_dlm_lock",
479 lockres, 495 status, lockres);
480 user_bast);
481 if (status != DLM_NORMAL) {
482 if ((lkm_flags & LKM_NOQUEUE) &&
483 (status == DLM_NOTQUEUED))
484 status = -EAGAIN;
485 else {
486 user_log_dlm_error("dlmlock", status, lockres);
487 status = -EINVAL;
488 }
489 user_recover_from_dlm_error(lockres); 496 user_recover_from_dlm_error(lockres);
490 goto bail; 497 goto bail;
491 } 498 }
@@ -506,11 +513,11 @@ static inline void user_dlm_dec_holders(struct user_lock_res *lockres,
506 int level) 513 int level)
507{ 514{
508 switch(level) { 515 switch(level) {
509 case LKM_EXMODE: 516 case DLM_LOCK_EX:
510 BUG_ON(!lockres->l_ex_holders); 517 BUG_ON(!lockres->l_ex_holders);
511 lockres->l_ex_holders--; 518 lockres->l_ex_holders--;
512 break; 519 break;
513 case LKM_PRMODE: 520 case DLM_LOCK_PR:
514 BUG_ON(!lockres->l_ro_holders); 521 BUG_ON(!lockres->l_ro_holders);
515 lockres->l_ro_holders--; 522 lockres->l_ro_holders--;
516 break; 523 break;
@@ -522,8 +529,8 @@ static inline void user_dlm_dec_holders(struct user_lock_res *lockres,
522void user_dlm_cluster_unlock(struct user_lock_res *lockres, 529void user_dlm_cluster_unlock(struct user_lock_res *lockres,
523 int level) 530 int level)
524{ 531{
525 if (level != LKM_EXMODE && 532 if (level != DLM_LOCK_EX &&
526 level != LKM_PRMODE) { 533 level != DLM_LOCK_PR) {
527 mlog(ML_ERROR, "lockres %.*s: invalid request!\n", 534 mlog(ML_ERROR, "lockres %.*s: invalid request!\n",
528 lockres->l_namelen, lockres->l_name); 535 lockres->l_namelen, lockres->l_name);
529 return; 536 return;
@@ -540,33 +547,40 @@ void user_dlm_write_lvb(struct inode *inode,
540 unsigned int len) 547 unsigned int len)
541{ 548{
542 struct user_lock_res *lockres = &DLMFS_I(inode)->ip_lockres; 549 struct user_lock_res *lockres = &DLMFS_I(inode)->ip_lockres;
543 char *lvb = lockres->l_lksb.lvb; 550 char *lvb;
544 551
545 BUG_ON(len > DLM_LVB_LEN); 552 BUG_ON(len > DLM_LVB_LEN);
546 553
547 spin_lock(&lockres->l_lock); 554 spin_lock(&lockres->l_lock);
548 555
549 BUG_ON(lockres->l_level < LKM_EXMODE); 556 BUG_ON(lockres->l_level < DLM_LOCK_EX);
557 lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
550 memcpy(lvb, val, len); 558 memcpy(lvb, val, len);
551 559
552 spin_unlock(&lockres->l_lock); 560 spin_unlock(&lockres->l_lock);
553} 561}
554 562
555void user_dlm_read_lvb(struct inode *inode, 563ssize_t user_dlm_read_lvb(struct inode *inode,
556 char *val, 564 char *val,
557 unsigned int len) 565 unsigned int len)
558{ 566{
559 struct user_lock_res *lockres = &DLMFS_I(inode)->ip_lockres; 567 struct user_lock_res *lockres = &DLMFS_I(inode)->ip_lockres;
560 char *lvb = lockres->l_lksb.lvb; 568 char *lvb;
569 ssize_t ret = len;
561 570
562 BUG_ON(len > DLM_LVB_LEN); 571 BUG_ON(len > DLM_LVB_LEN);
563 572
564 spin_lock(&lockres->l_lock); 573 spin_lock(&lockres->l_lock);
565 574
566 BUG_ON(lockres->l_level < LKM_PRMODE); 575 BUG_ON(lockres->l_level < DLM_LOCK_PR);
567 memcpy(val, lvb, len); 576 if (ocfs2_dlm_lvb_valid(&lockres->l_lksb)) {
577 lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
578 memcpy(val, lvb, len);
579 } else
580 ret = 0;
568 581
569 spin_unlock(&lockres->l_lock); 582 spin_unlock(&lockres->l_lock);
583 return ret;
570} 584}
571 585
572void user_dlm_lock_res_init(struct user_lock_res *lockres, 586void user_dlm_lock_res_init(struct user_lock_res *lockres,
@@ -576,9 +590,9 @@ void user_dlm_lock_res_init(struct user_lock_res *lockres,
576 590
577 spin_lock_init(&lockres->l_lock); 591 spin_lock_init(&lockres->l_lock);
578 init_waitqueue_head(&lockres->l_event); 592 init_waitqueue_head(&lockres->l_event);
579 lockres->l_level = LKM_IVMODE; 593 lockres->l_level = DLM_LOCK_IV;
580 lockres->l_requested = LKM_IVMODE; 594 lockres->l_requested = DLM_LOCK_IV;
581 lockres->l_blocking = LKM_IVMODE; 595 lockres->l_blocking = DLM_LOCK_IV;
582 596
583 /* should have been checked before getting here. */ 597 /* should have been checked before getting here. */
584 BUG_ON(dentry->d_name.len >= USER_DLM_LOCK_ID_MAX_LEN); 598 BUG_ON(dentry->d_name.len >= USER_DLM_LOCK_ID_MAX_LEN);
@@ -592,9 +606,10 @@ void user_dlm_lock_res_init(struct user_lock_res *lockres,
592int user_dlm_destroy_lock(struct user_lock_res *lockres) 606int user_dlm_destroy_lock(struct user_lock_res *lockres)
593{ 607{
594 int status = -EBUSY; 608 int status = -EBUSY;
595 struct dlm_ctxt *dlm = dlm_ctxt_from_user_lockres(lockres); 609 struct ocfs2_cluster_connection *conn =
610 cluster_connection_from_user_lockres(lockres);
596 611
597 mlog(0, "asked to destroy %.*s\n", lockres->l_namelen, lockres->l_name); 612 mlog(ML_BASTS, "lockres %.*s\n", lockres->l_namelen, lockres->l_name);
598 613
599 spin_lock(&lockres->l_lock); 614 spin_lock(&lockres->l_lock);
600 if (lockres->l_flags & USER_LOCK_IN_TEARDOWN) { 615 if (lockres->l_flags & USER_LOCK_IN_TEARDOWN) {
@@ -627,14 +642,9 @@ int user_dlm_destroy_lock(struct user_lock_res *lockres)
627 lockres->l_flags |= USER_LOCK_BUSY; 642 lockres->l_flags |= USER_LOCK_BUSY;
628 spin_unlock(&lockres->l_lock); 643 spin_unlock(&lockres->l_lock);
629 644
630 status = dlmunlock(dlm, 645 status = ocfs2_dlm_unlock(conn, &lockres->l_lksb, DLM_LKF_VALBLK);
631 &lockres->l_lksb, 646 if (status) {
632 LKM_VALBLK, 647 user_log_dlm_error("ocfs2_dlm_unlock", status, lockres);
633 user_unlock_ast,
634 lockres);
635 if (status != DLM_NORMAL) {
636 user_log_dlm_error("dlmunlock", status, lockres);
637 status = -EINVAL;
638 goto bail; 648 goto bail;
639 } 649 }
640 650
@@ -645,32 +655,34 @@ bail:
645 return status; 655 return status;
646} 656}
647 657
648struct dlm_ctxt *user_dlm_register_context(struct qstr *name, 658static void user_dlm_recovery_handler_noop(int node_num,
649 struct dlm_protocol_version *proto) 659 void *recovery_data)
650{ 660{
651 struct dlm_ctxt *dlm; 661 /* We ignore recovery events */
652 u32 dlm_key; 662 return;
653 char *domain; 663}
654
655 domain = kmalloc(name->len + 1, GFP_NOFS);
656 if (!domain) {
657 mlog_errno(-ENOMEM);
658 return ERR_PTR(-ENOMEM);
659 }
660 664
661 dlm_key = crc32_le(0, name->name, name->len); 665void user_dlm_set_locking_protocol(void)
666{
667 ocfs2_stack_glue_set_max_proto_version(&user_dlm_lproto.lp_max_version);
668}
662 669
663 snprintf(domain, name->len + 1, "%.*s", name->len, name->name); 670struct ocfs2_cluster_connection *user_dlm_register(struct qstr *name)
671{
672 int rc;
673 struct ocfs2_cluster_connection *conn;
664 674
665 dlm = dlm_register_domain(domain, dlm_key, proto); 675 rc = ocfs2_cluster_connect_agnostic(name->name, name->len,
666 if (IS_ERR(dlm)) 676 &user_dlm_lproto,
667 mlog_errno(PTR_ERR(dlm)); 677 user_dlm_recovery_handler_noop,
678 NULL, &conn);
679 if (rc)
680 mlog_errno(rc);
668 681
669 kfree(domain); 682 return rc ? ERR_PTR(rc) : conn;
670 return dlm;
671} 683}
672 684
673void user_dlm_unregister_context(struct dlm_ctxt *dlm) 685void user_dlm_unregister(struct ocfs2_cluster_connection *conn)
674{ 686{
675 dlm_unregister_domain(dlm); 687 ocfs2_cluster_disconnect(conn, 0);
676} 688}
diff --git a/fs/ocfs2/dlm/userdlm.h b/fs/ocfs2/dlmfs/userdlm.h
index 0c3cc03c61fa..3b42d79531d7 100644
--- a/fs/ocfs2/dlm/userdlm.h
+++ b/fs/ocfs2/dlmfs/userdlm.h
@@ -57,7 +57,7 @@ struct user_lock_res {
57 int l_level; 57 int l_level;
58 unsigned int l_ro_holders; 58 unsigned int l_ro_holders;
59 unsigned int l_ex_holders; 59 unsigned int l_ex_holders;
60 struct dlm_lockstatus l_lksb; 60 struct ocfs2_dlm_lksb l_lksb;
61 61
62 int l_requested; 62 int l_requested;
63 int l_blocking; 63 int l_blocking;
@@ -80,15 +80,15 @@ void user_dlm_cluster_unlock(struct user_lock_res *lockres,
80void user_dlm_write_lvb(struct inode *inode, 80void user_dlm_write_lvb(struct inode *inode,
81 const char *val, 81 const char *val,
82 unsigned int len); 82 unsigned int len);
83void user_dlm_read_lvb(struct inode *inode, 83ssize_t user_dlm_read_lvb(struct inode *inode,
84 char *val, 84 char *val,
85 unsigned int len); 85 unsigned int len);
86struct dlm_ctxt *user_dlm_register_context(struct qstr *name, 86struct ocfs2_cluster_connection *user_dlm_register(struct qstr *name);
87 struct dlm_protocol_version *proto); 87void user_dlm_unregister(struct ocfs2_cluster_connection *conn);
88void user_dlm_unregister_context(struct dlm_ctxt *dlm); 88void user_dlm_set_locking_protocol(void);
89 89
90struct dlmfs_inode_private { 90struct dlmfs_inode_private {
91 struct dlm_ctxt *ip_dlm; 91 struct ocfs2_cluster_connection *ip_conn;
92 92
93 struct user_lock_res ip_lockres; /* unused for directories. */ 93 struct user_lock_res ip_lockres; /* unused for directories. */
94 struct inode *ip_parent; 94 struct inode *ip_parent;
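
Note the prototype change above: user_dlm_read_lvb() now returns ssize_t and yields 0 when ocfs2_dlm_lvb_valid() reports the value block unusable, so callers must cope with an empty read. A hypothetical caller, sketched under that assumption (not a function in this diff):

    /* Sketch: copy the lock value block out, tolerating an invalid LVB. */
    static ssize_t read_lock_value(struct inode *inode, char *buf)
    {
            char lvb[DLM_LVB_LEN];
            ssize_t got;

            got = user_dlm_read_lvb(inode, lvb, DLM_LVB_LEN);
            if (got == 0)
                    return 0;       /* LVB not valid: report no data */

            memcpy(buf, lvb, got);
            return got;
    }
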
diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
index e044019cb3b1..8298608d4165 100644
--- a/fs/ocfs2/dlmglue.c
+++ b/fs/ocfs2/dlmglue.c
@@ -297,6 +297,11 @@ static inline int ocfs2_is_inode_lock(struct ocfs2_lock_res *lockres)
297 lockres->l_type == OCFS2_LOCK_TYPE_OPEN; 297 lockres->l_type == OCFS2_LOCK_TYPE_OPEN;
298} 298}
299 299
300static inline struct ocfs2_lock_res *ocfs2_lksb_to_lock_res(struct ocfs2_dlm_lksb *lksb)
301{
302 return container_of(lksb, struct ocfs2_lock_res, l_lksb);
303}
304
300static inline struct inode *ocfs2_lock_res_inode(struct ocfs2_lock_res *lockres) 305static inline struct inode *ocfs2_lock_res_inode(struct ocfs2_lock_res *lockres)
301{ 306{
302 BUG_ON(!ocfs2_is_inode_lock(lockres)); 307 BUG_ON(!ocfs2_is_inode_lock(lockres));
@@ -927,6 +932,10 @@ static int ocfs2_generic_handle_bast(struct ocfs2_lock_res *lockres,
927 lockres->l_blocking = level; 932 lockres->l_blocking = level;
928 } 933 }
929 934
935 mlog(ML_BASTS, "lockres %s, block %d, level %d, l_block %d, dwn %d\n",
936 lockres->l_name, level, lockres->l_level, lockres->l_blocking,
937 needs_downconvert);
938
930 if (needs_downconvert) 939 if (needs_downconvert)
931 lockres_or_flags(lockres, OCFS2_LOCK_BLOCKED); 940 lockres_or_flags(lockres, OCFS2_LOCK_BLOCKED);
932 941
@@ -1040,18 +1049,17 @@ static unsigned int lockres_set_pending(struct ocfs2_lock_res *lockres)
1040 return lockres->l_pending_gen; 1049 return lockres->l_pending_gen;
1041} 1050}
1042 1051
1043 1052static void ocfs2_blocking_ast(struct ocfs2_dlm_lksb *lksb, int level)
1044static void ocfs2_blocking_ast(void *opaque, int level)
1045{ 1053{
1046 struct ocfs2_lock_res *lockres = opaque; 1054 struct ocfs2_lock_res *lockres = ocfs2_lksb_to_lock_res(lksb);
1047 struct ocfs2_super *osb = ocfs2_get_lockres_osb(lockres); 1055 struct ocfs2_super *osb = ocfs2_get_lockres_osb(lockres);
1048 int needs_downconvert; 1056 int needs_downconvert;
1049 unsigned long flags; 1057 unsigned long flags;
1050 1058
1051 BUG_ON(level <= DLM_LOCK_NL); 1059 BUG_ON(level <= DLM_LOCK_NL);
1052 1060
1053 mlog(0, "BAST fired for lockres %s, blocking %d, level %d type %s\n", 1061 mlog(ML_BASTS, "BAST fired for lockres %s, blocking %d, level %d, "
1054 lockres->l_name, level, lockres->l_level, 1062 "type %s\n", lockres->l_name, level, lockres->l_level,
1055 ocfs2_lock_type_string(lockres->l_type)); 1063 ocfs2_lock_type_string(lockres->l_type));
1056 1064
1057 /* 1065 /*
@@ -1072,9 +1080,9 @@ static void ocfs2_blocking_ast(void *opaque, int level)
1072 ocfs2_wake_downconvert_thread(osb); 1080 ocfs2_wake_downconvert_thread(osb);
1073} 1081}
1074 1082
1075static void ocfs2_locking_ast(void *opaque) 1083static void ocfs2_locking_ast(struct ocfs2_dlm_lksb *lksb)
1076{ 1084{
1077 struct ocfs2_lock_res *lockres = opaque; 1085 struct ocfs2_lock_res *lockres = ocfs2_lksb_to_lock_res(lksb);
1078 struct ocfs2_super *osb = ocfs2_get_lockres_osb(lockres); 1086 struct ocfs2_super *osb = ocfs2_get_lockres_osb(lockres);
1079 unsigned long flags; 1087 unsigned long flags;
1080 int status; 1088 int status;
@@ -1095,6 +1103,10 @@ static void ocfs2_locking_ast(void *opaque)
1095 return; 1103 return;
1096 } 1104 }
1097 1105
1106 mlog(ML_BASTS, "AST fired for lockres %s, action %d, unlock %d, "
1107 "level %d => %d\n", lockres->l_name, lockres->l_action,
1108 lockres->l_unlock_action, lockres->l_level, lockres->l_requested);
1109
1098 switch(lockres->l_action) { 1110 switch(lockres->l_action) {
1099 case OCFS2_AST_ATTACH: 1111 case OCFS2_AST_ATTACH:
1100 ocfs2_generic_handle_attach_action(lockres); 1112 ocfs2_generic_handle_attach_action(lockres);
@@ -1107,8 +1119,8 @@ static void ocfs2_locking_ast(void *opaque)
1107 ocfs2_generic_handle_downconvert_action(lockres); 1119 ocfs2_generic_handle_downconvert_action(lockres);
1108 break; 1120 break;
1109 default: 1121 default:
1110 mlog(ML_ERROR, "lockres %s: ast fired with invalid action: %u " 1122 mlog(ML_ERROR, "lockres %s: AST fired with invalid action: %u, "
1111 "lockres flags = 0x%lx, unlock action: %u\n", 1123 "flags 0x%lx, unlock: %u\n",
1112 lockres->l_name, lockres->l_action, lockres->l_flags, 1124 lockres->l_name, lockres->l_action, lockres->l_flags,
1113 lockres->l_unlock_action); 1125 lockres->l_unlock_action);
1114 BUG(); 1126 BUG();
@@ -1134,6 +1146,88 @@ out:
1134 spin_unlock_irqrestore(&lockres->l_lock, flags); 1146 spin_unlock_irqrestore(&lockres->l_lock, flags);
1135} 1147}
1136 1148
1149static void ocfs2_unlock_ast(struct ocfs2_dlm_lksb *lksb, int error)
1150{
1151 struct ocfs2_lock_res *lockres = ocfs2_lksb_to_lock_res(lksb);
1152 unsigned long flags;
1153
1154 mlog_entry_void();
1155
1156 mlog(ML_BASTS, "UNLOCK AST fired for lockres %s, action = %d\n",
1157 lockres->l_name, lockres->l_unlock_action);
1158
1159 spin_lock_irqsave(&lockres->l_lock, flags);
1160 if (error) {
1161 mlog(ML_ERROR, "Dlm passes error %d for lock %s, "
1162 "unlock_action %d\n", error, lockres->l_name,
1163 lockres->l_unlock_action);
1164 spin_unlock_irqrestore(&lockres->l_lock, flags);
1165 mlog_exit_void();
1166 return;
1167 }
1168
1169 switch(lockres->l_unlock_action) {
1170 case OCFS2_UNLOCK_CANCEL_CONVERT:
1171 mlog(0, "Cancel convert success for %s\n", lockres->l_name);
1172 lockres->l_action = OCFS2_AST_INVALID;
1173 /* Downconvert thread may have requeued this lock, we
1174 * need to wake it. */
1175 if (lockres->l_flags & OCFS2_LOCK_BLOCKED)
1176 ocfs2_wake_downconvert_thread(ocfs2_get_lockres_osb(lockres));
1177 break;
1178 case OCFS2_UNLOCK_DROP_LOCK:
1179 lockres->l_level = DLM_LOCK_IV;
1180 break;
1181 default:
1182 BUG();
1183 }
1184
1185 lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
1186 lockres->l_unlock_action = OCFS2_UNLOCK_INVALID;
1187 wake_up(&lockres->l_event);
1188 spin_unlock_irqrestore(&lockres->l_lock, flags);
1189
1190 mlog_exit_void();
1191}
1192
1193/*
1194 * This is the filesystem locking protocol. It provides the lock handling
1195 * hooks for the underlying DLM. It has a maximum version number.
1196 * The version number allows interoperability with systems running at
1197 * the same major number and an equal or smaller minor number.
1198 *
1199 * Whenever the filesystem does new things with locks (adds or removes a
1200 * lock, orders them differently, does different things underneath a lock),
1201 * the version must be changed. The protocol is negotiated when joining
1202 * the dlm domain. A node may join the domain if its major version is
1203 * identical to all other nodes and its minor version is greater than
1204 * or equal to all other nodes. When its minor version is greater than
1205 * the other nodes, it will run at the minor version specified by the
1206 * other nodes.
1207 *
1208 * If a locking change is made that will not be compatible with older
1209 * versions, the major number must be increased and the minor version set
1210 * to zero. If a change merely adds a behavior that can be disabled when
1211 * speaking to older versions, the minor version must be increased. If a
1212 * change adds a fully backwards compatible change (eg, LVB changes that
1213 * are just ignored by older versions), the version does not need to be
1214 * updated.
1215 */
1216static struct ocfs2_locking_protocol lproto = {
1217 .lp_max_version = {
1218 .pv_major = OCFS2_LOCKING_PROTOCOL_MAJOR,
1219 .pv_minor = OCFS2_LOCKING_PROTOCOL_MINOR,
1220 },
1221 .lp_lock_ast = ocfs2_locking_ast,
1222 .lp_blocking_ast = ocfs2_blocking_ast,
1223 .lp_unlock_ast = ocfs2_unlock_ast,
1224};
1225
1226void ocfs2_set_locking_protocol(void)
1227{
1228 ocfs2_stack_glue_set_max_proto_version(&lproto.lp_max_version);
1229}
1230
1137static inline void ocfs2_recover_from_dlm_error(struct ocfs2_lock_res *lockres, 1231static inline void ocfs2_recover_from_dlm_error(struct ocfs2_lock_res *lockres,
1138 int convert) 1232 int convert)
1139{ 1233{
@@ -1189,8 +1283,7 @@ static int ocfs2_lock_create(struct ocfs2_super *osb,
1189 &lockres->l_lksb, 1283 &lockres->l_lksb,
1190 dlm_flags, 1284 dlm_flags,
1191 lockres->l_name, 1285 lockres->l_name,
1192 OCFS2_LOCK_ID_MAX_LEN - 1, 1286 OCFS2_LOCK_ID_MAX_LEN - 1);
1193 lockres);
1194 lockres_clear_pending(lockres, gen, osb); 1287 lockres_clear_pending(lockres, gen, osb);
1195 if (ret) { 1288 if (ret) {
1196 ocfs2_log_dlm_error("ocfs2_dlm_lock", ret, lockres); 1289 ocfs2_log_dlm_error("ocfs2_dlm_lock", ret, lockres);
@@ -1412,7 +1505,7 @@ again:
1412 BUG_ON(level == DLM_LOCK_IV); 1505 BUG_ON(level == DLM_LOCK_IV);
1413 BUG_ON(level == DLM_LOCK_NL); 1506 BUG_ON(level == DLM_LOCK_NL);
1414 1507
1415 mlog(0, "lock %s, convert from %d to level = %d\n", 1508 mlog(ML_BASTS, "lockres %s, convert from %d to %d\n",
1416 lockres->l_name, lockres->l_level, level); 1509 lockres->l_name, lockres->l_level, level);
1417 1510
1418 /* call dlm_lock to upgrade lock now */ 1511 /* call dlm_lock to upgrade lock now */
@@ -1421,8 +1514,7 @@ again:
1421 &lockres->l_lksb, 1514 &lockres->l_lksb,
1422 lkm_flags, 1515 lkm_flags,
1423 lockres->l_name, 1516 lockres->l_name,
1424 OCFS2_LOCK_ID_MAX_LEN - 1, 1517 OCFS2_LOCK_ID_MAX_LEN - 1);
1425 lockres);
1426 lockres_clear_pending(lockres, gen, osb); 1518 lockres_clear_pending(lockres, gen, osb);
1427 if (ret) { 1519 if (ret) {
1428 if (!(lkm_flags & DLM_LKF_NOQUEUE) || 1520 if (!(lkm_flags & DLM_LKF_NOQUEUE) ||
@@ -1859,8 +1951,7 @@ int ocfs2_file_lock(struct file *file, int ex, int trylock)
1859 spin_unlock_irqrestore(&lockres->l_lock, flags); 1951 spin_unlock_irqrestore(&lockres->l_lock, flags);
1860 1952
1861 ret = ocfs2_dlm_lock(osb->cconn, level, &lockres->l_lksb, lkm_flags, 1953 ret = ocfs2_dlm_lock(osb->cconn, level, &lockres->l_lksb, lkm_flags,
1862 lockres->l_name, OCFS2_LOCK_ID_MAX_LEN - 1, 1954 lockres->l_name, OCFS2_LOCK_ID_MAX_LEN - 1);
1863 lockres);
1864 if (ret) { 1955 if (ret) {
1865 if (!trylock || (ret != -EAGAIN)) { 1956 if (!trylock || (ret != -EAGAIN)) {
1866 ocfs2_log_dlm_error("ocfs2_dlm_lock", ret, lockres); 1957 ocfs2_log_dlm_error("ocfs2_dlm_lock", ret, lockres);
@@ -2989,7 +3080,7 @@ int ocfs2_dlm_init(struct ocfs2_super *osb)
2989 status = ocfs2_cluster_connect(osb->osb_cluster_stack, 3080 status = ocfs2_cluster_connect(osb->osb_cluster_stack,
2990 osb->uuid_str, 3081 osb->uuid_str,
2991 strlen(osb->uuid_str), 3082 strlen(osb->uuid_str),
2992 ocfs2_do_node_down, osb, 3083 &lproto, ocfs2_do_node_down, osb,
2993 &conn); 3084 &conn);
2994 if (status) { 3085 if (status) {
2995 mlog_errno(status); 3086 mlog_errno(status);
@@ -3056,50 +3147,6 @@ void ocfs2_dlm_shutdown(struct ocfs2_super *osb,
3056 mlog_exit_void(); 3147 mlog_exit_void();
3057} 3148}
3058 3149
3059static void ocfs2_unlock_ast(void *opaque, int error)
3060{
3061 struct ocfs2_lock_res *lockres = opaque;
3062 unsigned long flags;
3063
3064 mlog_entry_void();
3065
3066 mlog(0, "UNLOCK AST called on lock %s, action = %d\n", lockres->l_name,
3067 lockres->l_unlock_action);
3068
3069 spin_lock_irqsave(&lockres->l_lock, flags);
3070 if (error) {
3071 mlog(ML_ERROR, "Dlm passes error %d for lock %s, "
3072 "unlock_action %d\n", error, lockres->l_name,
3073 lockres->l_unlock_action);
3074 spin_unlock_irqrestore(&lockres->l_lock, flags);
3075 mlog_exit_void();
3076 return;
3077 }
3078
3079 switch(lockres->l_unlock_action) {
3080 case OCFS2_UNLOCK_CANCEL_CONVERT:
3081 mlog(0, "Cancel convert success for %s\n", lockres->l_name);
3082 lockres->l_action = OCFS2_AST_INVALID;
3083 /* Downconvert thread may have requeued this lock, we
3084 * need to wake it. */
3085 if (lockres->l_flags & OCFS2_LOCK_BLOCKED)
3086 ocfs2_wake_downconvert_thread(ocfs2_get_lockres_osb(lockres));
3087 break;
3088 case OCFS2_UNLOCK_DROP_LOCK:
3089 lockres->l_level = DLM_LOCK_IV;
3090 break;
3091 default:
3092 BUG();
3093 }
3094
3095 lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
3096 lockres->l_unlock_action = OCFS2_UNLOCK_INVALID;
3097 wake_up(&lockres->l_event);
3098 spin_unlock_irqrestore(&lockres->l_lock, flags);
3099
3100 mlog_exit_void();
3101}
3102
3103static int ocfs2_drop_lock(struct ocfs2_super *osb, 3150static int ocfs2_drop_lock(struct ocfs2_super *osb,
3104 struct ocfs2_lock_res *lockres) 3151 struct ocfs2_lock_res *lockres)
3105{ 3152{
@@ -3167,8 +3214,7 @@ static int ocfs2_drop_lock(struct ocfs2_super *osb,
3167 3214
3168 mlog(0, "lock %s\n", lockres->l_name); 3215 mlog(0, "lock %s\n", lockres->l_name);
3169 3216
3170 ret = ocfs2_dlm_unlock(osb->cconn, &lockres->l_lksb, lkm_flags, 3217 ret = ocfs2_dlm_unlock(osb->cconn, &lockres->l_lksb, lkm_flags);
3171 lockres);
3172 if (ret) { 3218 if (ret) {
3173 ocfs2_log_dlm_error("ocfs2_dlm_unlock", ret, lockres); 3219 ocfs2_log_dlm_error("ocfs2_dlm_unlock", ret, lockres);
3174 mlog(ML_ERROR, "lockres flags: %lu\n", lockres->l_flags); 3220 mlog(ML_ERROR, "lockres flags: %lu\n", lockres->l_flags);
@@ -3276,13 +3322,20 @@ static unsigned int ocfs2_prepare_downconvert(struct ocfs2_lock_res *lockres,
3276 BUG_ON(lockres->l_blocking <= DLM_LOCK_NL); 3322 BUG_ON(lockres->l_blocking <= DLM_LOCK_NL);
3277 3323
3278 if (lockres->l_level <= new_level) { 3324 if (lockres->l_level <= new_level) {
3279 mlog(ML_ERROR, "lockres->l_level (%d) <= new_level (%d)\n", 3325 mlog(ML_ERROR, "lockres %s, lvl %d <= %d, blcklst %d, mask %d, "
3280 lockres->l_level, new_level); 3326 "type %d, flags 0x%lx, hold %d %d, act %d %d, req %d, "
3327 "block %d, pgen %d\n", lockres->l_name, lockres->l_level,
3328 new_level, list_empty(&lockres->l_blocked_list),
3329 list_empty(&lockres->l_mask_waiters), lockres->l_type,
3330 lockres->l_flags, lockres->l_ro_holders,
3331 lockres->l_ex_holders, lockres->l_action,
3332 lockres->l_unlock_action, lockres->l_requested,
3333 lockres->l_blocking, lockres->l_pending_gen);
3281 BUG(); 3334 BUG();
3282 } 3335 }
3283 3336
3284 mlog(0, "lock %s, new_level = %d, l_blocking = %d\n", 3337 mlog(ML_BASTS, "lockres %s, level %d => %d, blocking %d\n",
3285 lockres->l_name, new_level, lockres->l_blocking); 3338 lockres->l_name, lockres->l_level, new_level, lockres->l_blocking);
3286 3339
3287 lockres->l_action = OCFS2_AST_DOWNCONVERT; 3340 lockres->l_action = OCFS2_AST_DOWNCONVERT;
3288 lockres->l_requested = new_level; 3341 lockres->l_requested = new_level;
@@ -3301,6 +3354,9 @@ static int ocfs2_downconvert_lock(struct ocfs2_super *osb,
3301 3354
3302 mlog_entry_void(); 3355 mlog_entry_void();
3303 3356
3357 mlog(ML_BASTS, "lockres %s, level %d => %d\n", lockres->l_name,
3358 lockres->l_level, new_level);
3359
3304 if (lvb) 3360 if (lvb)
3305 dlm_flags |= DLM_LKF_VALBLK; 3361 dlm_flags |= DLM_LKF_VALBLK;
3306 3362
@@ -3309,8 +3365,7 @@ static int ocfs2_downconvert_lock(struct ocfs2_super *osb,
3309 &lockres->l_lksb, 3365 &lockres->l_lksb,
3310 dlm_flags, 3366 dlm_flags,
3311 lockres->l_name, 3367 lockres->l_name,
3312 OCFS2_LOCK_ID_MAX_LEN - 1, 3368 OCFS2_LOCK_ID_MAX_LEN - 1);
3313 lockres);
3314 lockres_clear_pending(lockres, generation, osb); 3369 lockres_clear_pending(lockres, generation, osb);
3315 if (ret) { 3370 if (ret) {
3316 ocfs2_log_dlm_error("ocfs2_dlm_lock", ret, lockres); 3371 ocfs2_log_dlm_error("ocfs2_dlm_lock", ret, lockres);
@@ -3331,14 +3386,12 @@ static int ocfs2_prepare_cancel_convert(struct ocfs2_super *osb,
3331 assert_spin_locked(&lockres->l_lock); 3386 assert_spin_locked(&lockres->l_lock);
3332 3387
3333 mlog_entry_void(); 3388 mlog_entry_void();
3334 mlog(0, "lock %s\n", lockres->l_name);
3335 3389
3336 if (lockres->l_unlock_action == OCFS2_UNLOCK_CANCEL_CONVERT) { 3390 if (lockres->l_unlock_action == OCFS2_UNLOCK_CANCEL_CONVERT) {
3337 /* If we're already trying to cancel a lock conversion 3391 /* If we're already trying to cancel a lock conversion
3338 * then just drop the spinlock and allow the caller to 3392 * then just drop the spinlock and allow the caller to
3339 * requeue this lock. */ 3393 * requeue this lock. */
3340 3394 mlog(ML_BASTS, "lockres %s, skip convert\n", lockres->l_name);
3341 mlog(0, "Lockres %s, skip convert\n", lockres->l_name);
3342 return 0; 3395 return 0;
3343 } 3396 }
3344 3397
@@ -3353,6 +3406,8 @@ static int ocfs2_prepare_cancel_convert(struct ocfs2_super *osb,
3353 "lock %s, invalid flags: 0x%lx\n", 3406 "lock %s, invalid flags: 0x%lx\n",
3354 lockres->l_name, lockres->l_flags); 3407 lockres->l_name, lockres->l_flags);
3355 3408
3409 mlog(ML_BASTS, "lockres %s\n", lockres->l_name);
3410
3356 return 1; 3411 return 1;
3357} 3412}
3358 3413
@@ -3362,16 +3417,15 @@ static int ocfs2_cancel_convert(struct ocfs2_super *osb,
3362 int ret; 3417 int ret;
3363 3418
3364 mlog_entry_void(); 3419 mlog_entry_void();
3365 mlog(0, "lock %s\n", lockres->l_name);
3366 3420
3367 ret = ocfs2_dlm_unlock(osb->cconn, &lockres->l_lksb, 3421 ret = ocfs2_dlm_unlock(osb->cconn, &lockres->l_lksb,
3368 DLM_LKF_CANCEL, lockres); 3422 DLM_LKF_CANCEL);
3369 if (ret) { 3423 if (ret) {
3370 ocfs2_log_dlm_error("ocfs2_dlm_unlock", ret, lockres); 3424 ocfs2_log_dlm_error("ocfs2_dlm_unlock", ret, lockres);
3371 ocfs2_recover_from_dlm_error(lockres, 0); 3425 ocfs2_recover_from_dlm_error(lockres, 0);
3372 } 3426 }
3373 3427
3374 mlog(0, "lock %s return from ocfs2_dlm_unlock\n", lockres->l_name); 3428 mlog(ML_BASTS, "lockres %s\n", lockres->l_name);
3375 3429
3376 mlog_exit(ret); 3430 mlog_exit(ret);
3377 return ret; 3431 return ret;
@@ -3428,8 +3482,11 @@ recheck:
3428 * at the same time they set OCFS2_DLM_BUSY. They must 3482 * at the same time they set OCFS2_DLM_BUSY. They must
3429 * clear OCFS2_DLM_PENDING after dlm_lock() returns. 3483 * clear OCFS2_DLM_PENDING after dlm_lock() returns.
3430 */ 3484 */
3431 if (lockres->l_flags & OCFS2_LOCK_PENDING) 3485 if (lockres->l_flags & OCFS2_LOCK_PENDING) {
3486 mlog(ML_BASTS, "lockres %s, ReQ: Pending\n",
3487 lockres->l_name);
3432 goto leave_requeue; 3488 goto leave_requeue;
3489 }
3433 3490
3434 ctl->requeue = 1; 3491 ctl->requeue = 1;
3435 ret = ocfs2_prepare_cancel_convert(osb, lockres); 3492 ret = ocfs2_prepare_cancel_convert(osb, lockres);
@@ -3461,6 +3518,7 @@ recheck:
3461 */ 3518 */
3462 if (lockres->l_level == DLM_LOCK_NL) { 3519 if (lockres->l_level == DLM_LOCK_NL) {
3463 BUG_ON(lockres->l_ex_holders || lockres->l_ro_holders); 3520 BUG_ON(lockres->l_ex_holders || lockres->l_ro_holders);
3521 mlog(ML_BASTS, "lockres %s, Aborting dc\n", lockres->l_name);
3464 lockres->l_blocking = DLM_LOCK_NL; 3522 lockres->l_blocking = DLM_LOCK_NL;
3465 lockres_clear_flags(lockres, OCFS2_LOCK_BLOCKED); 3523 lockres_clear_flags(lockres, OCFS2_LOCK_BLOCKED);
3466 spin_unlock_irqrestore(&lockres->l_lock, flags); 3524 spin_unlock_irqrestore(&lockres->l_lock, flags);
@@ -3470,28 +3528,41 @@ recheck:
3470 /* if we're blocking an exclusive and we have *any* holders, 3528 /* if we're blocking an exclusive and we have *any* holders,
3471 * then requeue. */ 3529 * then requeue. */
3472 if ((lockres->l_blocking == DLM_LOCK_EX) 3530 if ((lockres->l_blocking == DLM_LOCK_EX)
3473 && (lockres->l_ex_holders || lockres->l_ro_holders)) 3531 && (lockres->l_ex_holders || lockres->l_ro_holders)) {
3532 mlog(ML_BASTS, "lockres %s, ReQ: EX/PR Holders %u,%u\n",
3533 lockres->l_name, lockres->l_ex_holders,
3534 lockres->l_ro_holders);
3474 goto leave_requeue; 3535 goto leave_requeue;
3536 }
3475 3537
3476 /* If it's a PR we're blocking, then only 3538 /* If it's a PR we're blocking, then only
3477 * requeue if we've got any EX holders */ 3539 * requeue if we've got any EX holders */
3478 if (lockres->l_blocking == DLM_LOCK_PR && 3540 if (lockres->l_blocking == DLM_LOCK_PR &&
3479 lockres->l_ex_holders) 3541 lockres->l_ex_holders) {
3542 mlog(ML_BASTS, "lockres %s, ReQ: EX Holders %u\n",
3543 lockres->l_name, lockres->l_ex_holders);
3480 goto leave_requeue; 3544 goto leave_requeue;
3545 }
3481 3546
3482 /* 3547 /*
3483 * Can we get a lock in this state if the holder counts are 3548 * Can we get a lock in this state if the holder counts are
3484 * zero? The meta data unblock code used to check this. 3549 * zero? The meta data unblock code used to check this.
3485 */ 3550 */
3486 if ((lockres->l_ops->flags & LOCK_TYPE_REQUIRES_REFRESH) 3551 if ((lockres->l_ops->flags & LOCK_TYPE_REQUIRES_REFRESH)
3487 && (lockres->l_flags & OCFS2_LOCK_REFRESHING)) 3552 && (lockres->l_flags & OCFS2_LOCK_REFRESHING)) {
3553 mlog(ML_BASTS, "lockres %s, ReQ: Lock Refreshing\n",
3554 lockres->l_name);
3488 goto leave_requeue; 3555 goto leave_requeue;
3556 }
3489 3557
3490 new_level = ocfs2_highest_compat_lock_level(lockres->l_blocking); 3558 new_level = ocfs2_highest_compat_lock_level(lockres->l_blocking);
3491 3559
3492 if (lockres->l_ops->check_downconvert 3560 if (lockres->l_ops->check_downconvert
3493 && !lockres->l_ops->check_downconvert(lockres, new_level)) 3561 && !lockres->l_ops->check_downconvert(lockres, new_level)) {
3562 mlog(ML_BASTS, "lockres %s, ReQ: Checkpointing\n",
3563 lockres->l_name);
3494 goto leave_requeue; 3564 goto leave_requeue;
3565 }
3495 3566
3496 /* If we get here, then we know that there are no more 3567 /* If we get here, then we know that there are no more
3497 * incompatible holders (and anyone asking for an incompatible 3568 * incompatible holders (and anyone asking for an incompatible
@@ -3509,13 +3580,19 @@ recheck:
3509 3580
3510 ctl->unblock_action = lockres->l_ops->downconvert_worker(lockres, blocking); 3581 ctl->unblock_action = lockres->l_ops->downconvert_worker(lockres, blocking);
3511 3582
3512 if (ctl->unblock_action == UNBLOCK_STOP_POST) 3583 if (ctl->unblock_action == UNBLOCK_STOP_POST) {
3584 mlog(ML_BASTS, "lockres %s, UNBLOCK_STOP_POST\n",
3585 lockres->l_name);
3513 goto leave; 3586 goto leave;
3587 }
3514 3588
3515 spin_lock_irqsave(&lockres->l_lock, flags); 3589 spin_lock_irqsave(&lockres->l_lock, flags);
3516 if ((blocking != lockres->l_blocking) || (level != lockres->l_level)) { 3590 if ((blocking != lockres->l_blocking) || (level != lockres->l_level)) {
3517 /* If this changed underneath us, then we can't drop 3591 /* If this changed underneath us, then we can't drop
3518 * it just yet. */ 3592 * it just yet. */
3593 mlog(ML_BASTS, "lockres %s, block=%d:%d, level=%d:%d, "
3594 "Recheck\n", lockres->l_name, blocking,
3595 lockres->l_blocking, level, lockres->l_level);
3519 goto recheck; 3596 goto recheck;
3520 } 3597 }
3521 3598
@@ -3910,45 +3987,6 @@ void ocfs2_refcount_unlock(struct ocfs2_refcount_tree *ref_tree, int ex)
3910 ocfs2_cluster_unlock(osb, lockres, level); 3987 ocfs2_cluster_unlock(osb, lockres, level);
3911} 3988}
3912 3989
3913/*
3914 * This is the filesystem locking protocol. It provides the lock handling
3915 * hooks for the underlying DLM. It has a maximum version number.
3916 * The version number allows interoperability with systems running at
3917 * the same major number and an equal or smaller minor number.
3918 *
3919 * Whenever the filesystem does new things with locks (adds or removes a
3920 * lock, orders them differently, does different things underneath a lock),
3921 * the version must be changed. The protocol is negotiated when joining
3922 * the dlm domain. A node may join the domain if its major version is
3923 * identical to all other nodes and its minor version is greater than
3924 * or equal to all other nodes. When its minor version is greater than
3925 * the other nodes, it will run at the minor version specified by the
3926 * other nodes.
3927 *
3928 * If a locking change is made that will not be compatible with older
3929 * versions, the major number must be increased and the minor version set
3930 * to zero. If a change merely adds a behavior that can be disabled when
3931 * speaking to older versions, the minor version must be increased. If a
3932 * change adds a fully backwards compatible change (eg, LVB changes that
3933 * are just ignored by older versions), the version does not need to be
3934 * updated.
3935 */
3936static struct ocfs2_locking_protocol lproto = {
3937 .lp_max_version = {
3938 .pv_major = OCFS2_LOCKING_PROTOCOL_MAJOR,
3939 .pv_minor = OCFS2_LOCKING_PROTOCOL_MINOR,
3940 },
3941 .lp_lock_ast = ocfs2_locking_ast,
3942 .lp_blocking_ast = ocfs2_blocking_ast,
3943 .lp_unlock_ast = ocfs2_unlock_ast,
3944};
3945
3946void ocfs2_set_locking_protocol(void)
3947{
3948 ocfs2_stack_glue_set_locking_protocol(&lproto);
3949}
3950
3951
3952static void ocfs2_process_blocked_lock(struct ocfs2_super *osb, 3990static void ocfs2_process_blocked_lock(struct ocfs2_super *osb,
3953 struct ocfs2_lock_res *lockres) 3991 struct ocfs2_lock_res *lockres)
3954{ 3992{
@@ -3965,7 +4003,7 @@ static void ocfs2_process_blocked_lock(struct ocfs2_super *osb,
3965 BUG_ON(!lockres); 4003 BUG_ON(!lockres);
3966 BUG_ON(!lockres->l_ops); 4004 BUG_ON(!lockres->l_ops);
3967 4005
3968 mlog(0, "lockres %s blocked.\n", lockres->l_name); 4006 mlog(ML_BASTS, "lockres %s blocked\n", lockres->l_name);
3969 4007
3970 /* Detect whether a lock has been marked as going away while 4008 /* Detect whether a lock has been marked as going away while
3971 * the downconvert thread was processing other things. A lock can 4009 * the downconvert thread was processing other things. A lock can
@@ -3988,7 +4026,7 @@ unqueue:
3988 } else 4026 } else
3989 ocfs2_schedule_blocked_lock(osb, lockres); 4027 ocfs2_schedule_blocked_lock(osb, lockres);
3990 4028
3991 mlog(0, "lockres %s, requeue = %s.\n", lockres->l_name, 4029 mlog(ML_BASTS, "lockres %s, requeue = %s.\n", lockres->l_name,
3992 ctl.requeue ? "yes" : "no"); 4030 ctl.requeue ? "yes" : "no");
3993 spin_unlock_irqrestore(&lockres->l_lock, flags); 4031 spin_unlock_irqrestore(&lockres->l_lock, flags);
3994 4032
@@ -4010,7 +4048,7 @@ static void ocfs2_schedule_blocked_lock(struct ocfs2_super *osb,
4010 /* Do not schedule a lock for downconvert when it's on 4048 /* Do not schedule a lock for downconvert when it's on
4011 * the way to destruction - any nodes wanting access 4049 * the way to destruction - any nodes wanting access
4012 * to the resource will get it soon. */ 4050 * to the resource will get it soon. */
4013 mlog(0, "Lockres %s won't be scheduled: flags 0x%lx\n", 4051 mlog(ML_BASTS, "lockres %s won't be scheduled: flags 0x%lx\n",
4014 lockres->l_name, lockres->l_flags); 4052 lockres->l_name, lockres->l_flags);
4015 return; 4053 return;
4016 } 4054 }
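
The protocol-version comment block introduced in the dlmglue.c hunk above (and deleted from its old location later in the same file) states the join rule tersely; as a worked example, the rule reduces to the check below (check_join() and the struct name are illustrative, not stackglue API):

    #include <stdio.h>

    struct proto_version { unsigned char pv_major, pv_minor; };

    /*
     * A node may join only with an identical major version and a minor
     * no lower than the domain's; if its minor is higher, it runs at
     * the domain's (lower) minor.
     */
    static int check_join(const struct proto_version *domain,
                          const struct proto_version *node)
    {
            if (node->pv_major != domain->pv_major)
                    return -1;                  /* incompatible: refused */
            if (node->pv_minor < domain->pv_minor)
                    return -1;                  /* too old: refused */
            return 0;                           /* joins at domain minor */
    }

    int main(void)
    {
            struct proto_version domain = { 1, 0 };
            struct proto_version newer  = { 1, 2 };
            struct proto_version older  = { 0, 9 };

            /* prints: 0 -1 (newer joins and runs at 1.0; older is refused) */
            printf("%d %d\n", check_join(&domain, &newer),
                              check_join(&domain, &older));
            return 0;
    }
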
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index 364105291282..17947dc8341e 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -997,10 +997,9 @@ int ocfs2_setattr(struct dentry *dentry, struct iattr *attr)
997 } 997 }
998 998
999 if (size_change && attr->ia_size != i_size_read(inode)) { 999 if (size_change && attr->ia_size != i_size_read(inode)) {
1000 if (attr->ia_size > sb->s_maxbytes) { 1000 status = inode_newsize_ok(inode, attr->ia_size);
1001 status = -EFBIG; 1001 if (status)
1002 goto bail_unlock; 1002 goto bail_unlock;
1003 }
1004 1003
1005 if (i_size_read(inode) > attr->ia_size) { 1004 if (i_size_read(inode) > attr->ia_size) {
1006 if (ocfs2_should_order_data(inode)) { 1005 if (ocfs2_should_order_data(inode)) {
@@ -1840,6 +1839,8 @@ static int ocfs2_prepare_inode_for_write(struct dentry *dentry,
1840 &meta_level); 1839 &meta_level);
1841 if (has_refcount) 1840 if (has_refcount)
1842 *has_refcount = 1; 1841 *has_refcount = 1;
1842 if (direct_io)
1843 *direct_io = 0;
1843 } 1844 }
1844 1845
1845 if (ret < 0) { 1846 if (ret < 0) {
@@ -1863,10 +1864,6 @@ static int ocfs2_prepare_inode_for_write(struct dentry *dentry,
1863 break; 1864 break;
1864 } 1865 }
1865 1866
1866 if (has_refcount && *has_refcount == 1) {
1867 *direct_io = 0;
1868 break;
1869 }
1870 /* 1867 /*
1871 * Allowing concurrent direct writes means 1868 * Allowing concurrent direct writes means
1872 * i_size changes wouldn't be synchronized, so 1869 * i_size changes wouldn't be synchronized, so
@@ -2047,7 +2044,7 @@ out_dio:
2047 * async dio is going to do it in the future or an end_io after an 2044 * async dio is going to do it in the future or an end_io after an
2048 * error has already done it. 2045 * error has already done it.
2049 */ 2046 */
2050 if (ret == -EIOCBQUEUED || !ocfs2_iocb_is_rw_locked(iocb)) { 2047 if ((ret == -EIOCBQUEUED) || (!ocfs2_iocb_is_rw_locked(iocb))) {
2051 rw_level = -1; 2048 rw_level = -1;
2052 have_alloc_sem = 0; 2049 have_alloc_sem = 0;
2053 } 2050 }
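
The ocfs2_setattr() hunk above replaces an open-coded sb->s_maxbytes test with inode_newsize_ok(), which performs the same s_maxbytes check and additionally honors the caller's RLIMIT_FSIZE (raising SIGXFSZ when exceeded). The call-site pattern, sketched outside the full function:

    /* Sketch of the call-site pattern; not a function in this diff. */
    static int example_check_new_size(struct inode *inode, loff_t new_size)
    {
            int status = inode_newsize_ok(inode, new_size);
            if (status)
                    return status;  /* typically -EFBIG */

            /* ...safe to proceed with the truncate/extend path... */
            return 0;
    }
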
diff --git a/fs/ocfs2/ioctl.h b/fs/ocfs2/ioctl.h
index cf9a5ee30fef..0cd5323bd3f0 100644
--- a/fs/ocfs2/ioctl.h
+++ b/fs/ocfs2/ioctl.h
@@ -7,10 +7,10 @@
7 * 7 *
8 */ 8 */
9 9
10#ifndef OCFS2_IOCTL_H 10#ifndef OCFS2_IOCTL_PROTO_H
11#define OCFS2_IOCTL_H 11#define OCFS2_IOCTL_PROTO_H
12 12
13long ocfs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg); 13long ocfs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
14long ocfs2_compat_ioctl(struct file *file, unsigned cmd, unsigned long arg); 14long ocfs2_compat_ioctl(struct file *file, unsigned cmd, unsigned long arg);
15 15
16#endif /* OCFS2_IOCTL_H */ 16#endif /* OCFS2_IOCTL_PROTO_H */
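
The guard rename above avoids a collision: the new fs/ocfs2/ocfs2_ioctl.h (added at the end of this diff) claims the OCFS2_IOCTL_H guard for the shared ioctl definitions, so the prototypes-only header takes OCFS2_IOCTL_PROTO_H. With distinct guards the two headers can be included together (compact sketch; the forward declaration is added here only to keep it self-contained):

    struct file;                                 /* for the sketch only */

    #ifndef OCFS2_IOCTL_PROTO_H                  /* fs/ocfs2/ioctl.h */
    #define OCFS2_IOCTL_PROTO_H
    long ocfs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
    #endif

    #ifndef OCFS2_IOCTL_H                        /* fs/ocfs2/ocfs2_ioctl.h */
    #define OCFS2_IOCTL_H
    #define OCFS2_IOC_GETFLAGS      _IOR('f', 1, long)
    #endif
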
diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
index ac10f83edb95..ca992d91f511 100644
--- a/fs/ocfs2/localalloc.c
+++ b/fs/ocfs2/localalloc.c
@@ -476,7 +476,7 @@ out_mutex:
476 476
477out: 477out:
478 if (!status) 478 if (!status)
479 ocfs2_init_inode_steal_slot(osb); 479 ocfs2_init_steal_slots(osb);
480 mlog_exit(status); 480 mlog_exit(status);
481 return status; 481 return status;
482} 482}
diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
index 740f448041e2..1238b491db90 100644
--- a/fs/ocfs2/ocfs2.h
+++ b/fs/ocfs2/ocfs2.h
@@ -42,6 +42,7 @@
42 42
43#include "ocfs2_fs.h" 43#include "ocfs2_fs.h"
44#include "ocfs2_lockid.h" 44#include "ocfs2_lockid.h"
45#include "ocfs2_ioctl.h"
45 46
46/* For struct ocfs2_blockcheck_stats */ 47/* For struct ocfs2_blockcheck_stats */
47#include "blockcheck.h" 48#include "blockcheck.h"
@@ -159,7 +160,7 @@ struct ocfs2_lock_res {
159 int l_level; 160 int l_level;
160 unsigned int l_ro_holders; 161 unsigned int l_ro_holders;
161 unsigned int l_ex_holders; 162 unsigned int l_ex_holders;
162 union ocfs2_dlm_lksb l_lksb; 163 struct ocfs2_dlm_lksb l_lksb;
163 164
164 /* used from AST/BAST funcs. */ 165 /* used from AST/BAST funcs. */
165 enum ocfs2_ast_action l_action; 166 enum ocfs2_ast_action l_action;
@@ -305,7 +306,9 @@ struct ocfs2_super
305 u32 s_next_generation; 306 u32 s_next_generation;
306 unsigned long osb_flags; 307 unsigned long osb_flags;
307 s16 s_inode_steal_slot; 308 s16 s_inode_steal_slot;
309 s16 s_meta_steal_slot;
308 atomic_t s_num_inodes_stolen; 310 atomic_t s_num_inodes_stolen;
311 atomic_t s_num_meta_stolen;
309 312
310 unsigned long s_mount_opt; 313 unsigned long s_mount_opt;
311 unsigned int s_atime_quantum; 314 unsigned int s_atime_quantum;
@@ -760,33 +763,6 @@ static inline unsigned int ocfs2_megabytes_to_clusters(struct super_block *sb,
760 return megs << (20 - OCFS2_SB(sb)->s_clustersize_bits); 763 return megs << (20 - OCFS2_SB(sb)->s_clustersize_bits);
761} 764}
762 765
763static inline void ocfs2_init_inode_steal_slot(struct ocfs2_super *osb)
764{
765 spin_lock(&osb->osb_lock);
766 osb->s_inode_steal_slot = OCFS2_INVALID_SLOT;
767 spin_unlock(&osb->osb_lock);
768 atomic_set(&osb->s_num_inodes_stolen, 0);
769}
770
771static inline void ocfs2_set_inode_steal_slot(struct ocfs2_super *osb,
772 s16 slot)
773{
774 spin_lock(&osb->osb_lock);
775 osb->s_inode_steal_slot = slot;
776 spin_unlock(&osb->osb_lock);
777}
778
779static inline s16 ocfs2_get_inode_steal_slot(struct ocfs2_super *osb)
780{
781 s16 slot;
782
783 spin_lock(&osb->osb_lock);
784 slot = osb->s_inode_steal_slot;
785 spin_unlock(&osb->osb_lock);
786
787 return slot;
788}
789
790#define ocfs2_set_bit ext2_set_bit 766#define ocfs2_set_bit ext2_set_bit
791#define ocfs2_clear_bit ext2_clear_bit 767#define ocfs2_clear_bit ext2_clear_bit
792#define ocfs2_test_bit ext2_test_bit 768#define ocfs2_test_bit ext2_test_bit
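
The ocfs2.h hunks remove the three per-inode steal-slot inlines and add parallel s_meta_steal_slot / s_num_meta_stolen fields, while localalloc.c (above) switches to ocfs2_init_steal_slots(). The body of that replacement is outside this excerpt; an assumed shape, modeled directly on the deleted ocfs2_init_inode_steal_slot():

    /* Assumption: likely shape of the generalized initializer. */
    void ocfs2_init_steal_slots(struct ocfs2_super *osb)
    {
            spin_lock(&osb->osb_lock);
            osb->s_inode_steal_slot = OCFS2_INVALID_SLOT;
            osb->s_meta_steal_slot = OCFS2_INVALID_SLOT;
            spin_unlock(&osb->osb_lock);
            atomic_set(&osb->s_num_inodes_stolen, 0);
            atomic_set(&osb->s_num_meta_stolen, 0);
    }
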
diff --git a/fs/ocfs2/ocfs2_fs.h b/fs/ocfs2/ocfs2_fs.h
index 7638a38c32bc..bb37218a7978 100644
--- a/fs/ocfs2/ocfs2_fs.h
+++ b/fs/ocfs2/ocfs2_fs.h
@@ -254,63 +254,6 @@
  * refcount tree */
 
 /*
- * ioctl commands
- */
-#define OCFS2_IOC_GETFLAGS	_IOR('f', 1, long)
-#define OCFS2_IOC_SETFLAGS	_IOW('f', 2, long)
-#define OCFS2_IOC32_GETFLAGS	_IOR('f', 1, int)
-#define OCFS2_IOC32_SETFLAGS	_IOW('f', 2, int)
-
-/*
- * Space reservation / allocation / free ioctls and argument structure
- * are designed to be compatible with XFS.
- *
- * ALLOCSP* and FREESP* are not and will never be supported, but are
- * included here for completeness.
- */
-struct ocfs2_space_resv {
-	__s16		l_type;
-	__s16		l_whence;
-	__s64		l_start;
-	__s64		l_len;		/* len == 0 means until end of file */
-	__s32		l_sysid;
-	__u32		l_pid;
-	__s32		l_pad[4];	/* reserve area */
-};
-
-#define OCFS2_IOC_ALLOCSP	_IOW ('X', 10, struct ocfs2_space_resv)
-#define OCFS2_IOC_FREESP	_IOW ('X', 11, struct ocfs2_space_resv)
-#define OCFS2_IOC_RESVSP	_IOW ('X', 40, struct ocfs2_space_resv)
-#define OCFS2_IOC_UNRESVSP	_IOW ('X', 41, struct ocfs2_space_resv)
-#define OCFS2_IOC_ALLOCSP64	_IOW ('X', 36, struct ocfs2_space_resv)
-#define OCFS2_IOC_FREESP64	_IOW ('X', 37, struct ocfs2_space_resv)
-#define OCFS2_IOC_RESVSP64	_IOW ('X', 42, struct ocfs2_space_resv)
-#define OCFS2_IOC_UNRESVSP64	_IOW ('X', 43, struct ocfs2_space_resv)
-
-/* Used to pass group descriptor data when online resize is done */
-struct ocfs2_new_group_input {
-	__u64 group;		/* Group descriptor's blkno. */
-	__u32 clusters;		/* Total number of clusters in this group */
-	__u32 frees;		/* Total free clusters in this group */
-	__u16 chain;		/* Chain for this group */
-	__u16 reserved1;
-	__u32 reserved2;
-};
-
-#define OCFS2_IOC_GROUP_EXTEND	_IOW('o', 1, int)
-#define OCFS2_IOC_GROUP_ADD	_IOW('o', 2,struct ocfs2_new_group_input)
-#define OCFS2_IOC_GROUP_ADD64	_IOW('o', 3,struct ocfs2_new_group_input)
-
-/* Used to pass 2 file names to reflink. */
-struct reflink_arguments {
-	__u64 old_path;
-	__u64 new_path;
-	__u64 preserve;
-};
-#define OCFS2_IOC_REFLINK	_IOW('o', 4, struct reflink_arguments)
-
-
-/*
  * Journal Flags (ocfs2_dinode.id1.journal1.i_flags)
  */
 #define OCFS2_JOURNAL_DIRTY_FL	(0x00000001)	/* Journal needs recovery */
diff --git a/fs/ocfs2/ocfs2_ioctl.h b/fs/ocfs2/ocfs2_ioctl.h
new file mode 100644
index 000000000000..2d3420af1a83
--- /dev/null
+++ b/fs/ocfs2/ocfs2_ioctl.h
@@ -0,0 +1,79 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * ocfs2_ioctl.h
+ *
+ * Defines OCFS2 ioctls.
+ *
+ * Copyright (C) 2010 Oracle. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License, version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#ifndef OCFS2_IOCTL_H
+#define OCFS2_IOCTL_H
+
+/*
+ * ioctl commands
+ */
+#define OCFS2_IOC_GETFLAGS	_IOR('f', 1, long)
+#define OCFS2_IOC_SETFLAGS	_IOW('f', 2, long)
+#define OCFS2_IOC32_GETFLAGS	_IOR('f', 1, int)
+#define OCFS2_IOC32_SETFLAGS	_IOW('f', 2, int)
+
+/*
+ * Space reservation / allocation / free ioctls and argument structure
+ * are designed to be compatible with XFS.
+ *
+ * ALLOCSP* and FREESP* are not and will never be supported, but are
+ * included here for completeness.
+ */
+struct ocfs2_space_resv {
+	__s16		l_type;
+	__s16		l_whence;
+	__s64		l_start;
+	__s64		l_len;		/* len == 0 means until end of file */
+	__s32		l_sysid;
+	__u32		l_pid;
+	__s32		l_pad[4];	/* reserve area */
+};
+
+#define OCFS2_IOC_ALLOCSP	_IOW ('X', 10, struct ocfs2_space_resv)
+#define OCFS2_IOC_FREESP	_IOW ('X', 11, struct ocfs2_space_resv)
+#define OCFS2_IOC_RESVSP	_IOW ('X', 40, struct ocfs2_space_resv)
+#define OCFS2_IOC_UNRESVSP	_IOW ('X', 41, struct ocfs2_space_resv)
+#define OCFS2_IOC_ALLOCSP64	_IOW ('X', 36, struct ocfs2_space_resv)
+#define OCFS2_IOC_FREESP64	_IOW ('X', 37, struct ocfs2_space_resv)
+#define OCFS2_IOC_RESVSP64	_IOW ('X', 42, struct ocfs2_space_resv)
+#define OCFS2_IOC_UNRESVSP64	_IOW ('X', 43, struct ocfs2_space_resv)
+
+/* Used to pass group descriptor data when online resize is done */
+struct ocfs2_new_group_input {
+	__u64 group;		/* Group descriptor's blkno. */
+	__u32 clusters;		/* Total number of clusters in this group */
+	__u32 frees;		/* Total free clusters in this group */
+	__u16 chain;		/* Chain for this group */
+	__u16 reserved1;
+	__u32 reserved2;
+};
+
+#define OCFS2_IOC_GROUP_EXTEND	_IOW('o', 1, int)
+#define OCFS2_IOC_GROUP_ADD	_IOW('o', 2,struct ocfs2_new_group_input)
+#define OCFS2_IOC_GROUP_ADD64	_IOW('o', 3,struct ocfs2_new_group_input)
+
+/* Used to pass 2 file names to reflink. */
+struct reflink_arguments {
+	__u64 old_path;
+	__u64 new_path;
+	__u64 preserve;
+};
+#define OCFS2_IOC_REFLINK	_IOW('o', 4, struct reflink_arguments)
+
+#endif /* OCFS2_IOCTL_H */
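The hunks above move the ocfs2 ioctl definitions verbatim out of ocfs2_fs.h into a new header meant for userspace consumption (the earlier guard rename in ioctl.h frees the OCFS2_IOCTL_H name for this file). As a hedged illustration of how these commands are consumed, the sketch below preallocates space through the XFS-compatible OCFS2_IOC_RESVSP64 command; the struct layout and command number are copied from the header above, while the mount path and sizes are made up for the example.

/* Hedged userspace sketch; assumes an ocfs2 filesystem mounted at /mnt/ocfs2. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/types.h>
#include <linux/ioctl.h>

struct ocfs2_space_resv {
	__s16		l_type;
	__s16		l_whence;
	__s64		l_start;
	__s64		l_len;		/* len == 0 means until end of file */
	__s32		l_sysid;
	__u32		l_pid;
	__s32		l_pad[4];	/* reserve area */
};

#define OCFS2_IOC_RESVSP64	_IOW ('X', 42, struct ocfs2_space_resv)

int main(void)
{
	struct ocfs2_space_resv sr;
	int fd = open("/mnt/ocfs2/testfile", O_RDWR | O_CREAT, 0644);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(&sr, 0, sizeof(sr));
	sr.l_whence = 0;		/* 0 == SEEK_SET: l_start is absolute */
	sr.l_start = 0;
	sr.l_len = 16 * 1024 * 1024;	/* reserve 16MB without writing data */

	if (ioctl(fd, OCFS2_IOC_RESVSP64, &sr) < 0)
		perror("OCFS2_IOC_RESVSP64");

	close(fd);
	return 0;
}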
diff --git a/fs/ocfs2/ocfs2_lockingver.h b/fs/ocfs2/ocfs2_lockingver.h
index 82d5eeac0fff..2e45c8d2ea7e 100644
--- a/fs/ocfs2/ocfs2_lockingver.h
+++ b/fs/ocfs2/ocfs2_lockingver.h
@@ -23,6 +23,8 @@
 /*
  * The protocol version for ocfs2 cluster locking. See dlmglue.c for
  * more details.
+ *
+ * 1.0 - Initial locking version from ocfs2 1.4.
  */
 #define OCFS2_LOCKING_PROTOCOL_MAJOR 1
 #define OCFS2_LOCKING_PROTOCOL_MINOR 0
diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c
index f3ae10cde841..9e96921dffda 100644
--- a/fs/ocfs2/refcounttree.c
+++ b/fs/ocfs2/refcounttree.c
@@ -626,7 +626,7 @@ static int ocfs2_create_refcount_tree(struct inode *inode,
 	rb = (struct ocfs2_refcount_block *)new_bh->b_data;
 	memset(rb, 0, inode->i_sb->s_blocksize);
 	strcpy((void *)rb, OCFS2_REFCOUNT_BLOCK_SIGNATURE);
-	rb->rf_suballoc_slot = cpu_to_le16(osb->slot_num);
+	rb->rf_suballoc_slot = cpu_to_le16(meta_ac->ac_alloc_slot);
 	rb->rf_suballoc_bit = cpu_to_le16(suballoc_bit_start);
 	rb->rf_fs_generation = cpu_to_le32(osb->fs_generation);
 	rb->rf_blkno = cpu_to_le64(first_blkno);
@@ -1330,7 +1330,7 @@ static int ocfs2_expand_inline_ref_root(handle_t *handle,
 	memcpy(new_bh->b_data, ref_root_bh->b_data, sb->s_blocksize);
 
 	new_rb = (struct ocfs2_refcount_block *)new_bh->b_data;
-	new_rb->rf_suballoc_slot = cpu_to_le16(OCFS2_SB(sb)->slot_num);
+	new_rb->rf_suballoc_slot = cpu_to_le16(meta_ac->ac_alloc_slot);
 	new_rb->rf_suballoc_bit = cpu_to_le16(suballoc_bit_start);
 	new_rb->rf_blkno = cpu_to_le64(blkno);
 	new_rb->rf_cpos = cpu_to_le32(0);
@@ -1576,7 +1576,7 @@ static int ocfs2_new_leaf_refcount_block(handle_t *handle,
 	new_rb = (struct ocfs2_refcount_block *)new_bh->b_data;
 	memset(new_rb, 0, sb->s_blocksize);
 	strcpy((void *)new_rb, OCFS2_REFCOUNT_BLOCK_SIGNATURE);
-	new_rb->rf_suballoc_slot = cpu_to_le16(OCFS2_SB(sb)->slot_num);
+	new_rb->rf_suballoc_slot = cpu_to_le16(meta_ac->ac_alloc_slot);
 	new_rb->rf_suballoc_bit = cpu_to_le16(suballoc_bit_start);
 	new_rb->rf_fs_generation = cpu_to_le32(OCFS2_SB(sb)->fs_generation);
 	new_rb->rf_blkno = cpu_to_le64(blkno);
diff --git a/fs/ocfs2/stack_o2cb.c b/fs/ocfs2/stack_o2cb.c
index 3038c92af493..7020e1253ffa 100644
--- a/fs/ocfs2/stack_o2cb.c
+++ b/fs/ocfs2/stack_o2cb.c
@@ -161,24 +161,23 @@ static int dlm_status_to_errno(enum dlm_status status)
 
 static void o2dlm_lock_ast_wrapper(void *astarg)
 {
-	BUG_ON(o2cb_stack.sp_proto == NULL);
+	struct ocfs2_dlm_lksb *lksb = astarg;
 
-	o2cb_stack.sp_proto->lp_lock_ast(astarg);
+	lksb->lksb_conn->cc_proto->lp_lock_ast(lksb);
 }
 
 static void o2dlm_blocking_ast_wrapper(void *astarg, int level)
 {
-	BUG_ON(o2cb_stack.sp_proto == NULL);
+	struct ocfs2_dlm_lksb *lksb = astarg;
 
-	o2cb_stack.sp_proto->lp_blocking_ast(astarg, level);
+	lksb->lksb_conn->cc_proto->lp_blocking_ast(lksb, level);
 }
 
 static void o2dlm_unlock_ast_wrapper(void *astarg, enum dlm_status status)
 {
+	struct ocfs2_dlm_lksb *lksb = astarg;
 	int error = dlm_status_to_errno(status);
 
-	BUG_ON(o2cb_stack.sp_proto == NULL);
-
 	/*
 	 * In o2dlm, you can get both the lock_ast() for the lock being
 	 * granted and the unlock_ast() for the CANCEL failing. A
@@ -193,16 +192,15 @@ static void o2dlm_unlock_ast_wrapper(void *astarg, enum dlm_status status)
 	if (status == DLM_CANCELGRANT)
 		return;
 
-	o2cb_stack.sp_proto->lp_unlock_ast(astarg, error);
+	lksb->lksb_conn->cc_proto->lp_unlock_ast(lksb, error);
 }
 
 static int o2cb_dlm_lock(struct ocfs2_cluster_connection *conn,
 			 int mode,
-			 union ocfs2_dlm_lksb *lksb,
+			 struct ocfs2_dlm_lksb *lksb,
 			 u32 flags,
 			 void *name,
-			 unsigned int namelen,
-			 void *astarg)
+			 unsigned int namelen)
 {
 	enum dlm_status status;
 	int o2dlm_mode = mode_to_o2dlm(mode);
@@ -211,28 +209,27 @@ static int o2cb_dlm_lock(struct ocfs2_cluster_connection *conn,
 
 	status = dlmlock(conn->cc_lockspace, o2dlm_mode, &lksb->lksb_o2dlm,
 			 o2dlm_flags, name, namelen,
-			 o2dlm_lock_ast_wrapper, astarg,
+			 o2dlm_lock_ast_wrapper, lksb,
 			 o2dlm_blocking_ast_wrapper);
 	ret = dlm_status_to_errno(status);
 	return ret;
 }
 
 static int o2cb_dlm_unlock(struct ocfs2_cluster_connection *conn,
-			   union ocfs2_dlm_lksb *lksb,
-			   u32 flags,
-			   void *astarg)
+			   struct ocfs2_dlm_lksb *lksb,
+			   u32 flags)
 {
 	enum dlm_status status;
 	int o2dlm_flags = flags_to_o2dlm(flags);
 	int ret;
 
 	status = dlmunlock(conn->cc_lockspace, &lksb->lksb_o2dlm,
-			   o2dlm_flags, o2dlm_unlock_ast_wrapper, astarg);
+			   o2dlm_flags, o2dlm_unlock_ast_wrapper, lksb);
 	ret = dlm_status_to_errno(status);
 	return ret;
 }
 
-static int o2cb_dlm_lock_status(union ocfs2_dlm_lksb *lksb)
+static int o2cb_dlm_lock_status(struct ocfs2_dlm_lksb *lksb)
 {
 	return dlm_status_to_errno(lksb->lksb_o2dlm.status);
 }
@@ -242,17 +239,17 @@ static int o2cb_dlm_lock_status(union ocfs2_dlm_lksb *lksb)
  * contents, it will zero out the LVB. Thus the caller can always trust
  * the contents.
  */
-static int o2cb_dlm_lvb_valid(union ocfs2_dlm_lksb *lksb)
+static int o2cb_dlm_lvb_valid(struct ocfs2_dlm_lksb *lksb)
 {
 	return 1;
 }
 
-static void *o2cb_dlm_lvb(union ocfs2_dlm_lksb *lksb)
+static void *o2cb_dlm_lvb(struct ocfs2_dlm_lksb *lksb)
 {
 	return (void *)(lksb->lksb_o2dlm.lvb);
 }
 
-static void o2cb_dump_lksb(union ocfs2_dlm_lksb *lksb)
+static void o2cb_dump_lksb(struct ocfs2_dlm_lksb *lksb)
 {
 	dlm_print_one_lock(lksb->lksb_o2dlm.lockid);
 }
@@ -280,7 +277,7 @@ static int o2cb_cluster_connect(struct ocfs2_cluster_connection *conn)
 	struct dlm_protocol_version fs_version;
 
 	BUG_ON(conn == NULL);
-	BUG_ON(o2cb_stack.sp_proto == NULL);
+	BUG_ON(conn->cc_proto == NULL);
 
 	/* for now we only have one cluster/node, make sure we see it
 	 * in the heartbeat universe */
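The wrappers above no longer reach through the single global o2cb_stack.sp_proto; the lksb itself is now the astarg, and the protocol handlers are found via lksb->lksb_conn->cc_proto. Because the lksb is embedded in the caller's lock resource, an ast handler can recover its enclosing object with offsetof() arithmetic. Below is a minimal standalone sketch of that pattern; the demo types only mirror the kernel structs and are redefined here so the example compiles on its own.

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct demo_dlm_lksb {
	int status;			/* stands in for the union + lksb_conn */
};

struct demo_lock_res {			/* mirrors ocfs2_lock_res */
	const char *l_name;
	struct demo_dlm_lksb l_lksb;	/* embedded, like l_lksb in ocfs2 */
};

/* What an lp_lock_ast handler can now do with only the lksb pointer: */
static void demo_lock_ast(struct demo_dlm_lksb *lksb)
{
	struct demo_lock_res *res =
		container_of(lksb, struct demo_lock_res, l_lksb);

	printf("ast for lock %s, status %d\n", res->l_name, lksb->status);
}

int main(void)
{
	struct demo_lock_res res = { .l_name = "demo_lock", .l_lksb = { 0 } };

	demo_lock_ast(&res.l_lksb);	/* the stack would invoke this via cc_proto */
	return 0;
}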
diff --git a/fs/ocfs2/stack_user.c b/fs/ocfs2/stack_user.c
index da78a2a334fd..5ae8812b2864 100644
--- a/fs/ocfs2/stack_user.c
+++ b/fs/ocfs2/stack_user.c
@@ -25,7 +25,6 @@
 #include <linux/reboot.h>
 #include <asm/uaccess.h>
 
-#include "ocfs2.h" /* For struct ocfs2_lock_res */
 #include "stackglue.h"
 
 #include <linux/dlm_plock.h>
@@ -63,8 +62,8 @@
  * negotiated by the client. The client negotiates based on the maximum
  * version advertised in /sys/fs/ocfs2/max_locking_protocol. The major
  * number from the "SETV" message must match
- * ocfs2_user_plugin.sp_proto->lp_max_version.pv_major, and the minor number
- * must be less than or equal to ...->lp_max_version.pv_minor.
+ * ocfs2_user_plugin.sp_max_proto.pv_major, and the minor number
+ * must be less than or equal to ...sp_max_version.pv_minor.
  *
  * Once this information has been set, mounts will be allowed. From this
  * point on, the "DOWN" message can be sent for node down notification.
@@ -401,7 +400,7 @@ static int ocfs2_control_do_setversion_msg(struct file *file,
 	char *ptr = NULL;
 	struct ocfs2_control_private *p = file->private_data;
 	struct ocfs2_protocol_version *max =
-		&ocfs2_user_plugin.sp_proto->lp_max_version;
+		&ocfs2_user_plugin.sp_max_proto;
 
 	if (ocfs2_control_get_handshake_state(file) !=
 	    OCFS2_CONTROL_HANDSHAKE_PROTOCOL)
@@ -664,18 +663,10 @@ static void ocfs2_control_exit(void)
 		       -rc);
 }
 
-static struct dlm_lksb *fsdlm_astarg_to_lksb(void *astarg)
-{
-	struct ocfs2_lock_res *res = astarg;
-	return &res->l_lksb.lksb_fsdlm;
-}
-
 static void fsdlm_lock_ast_wrapper(void *astarg)
 {
-	struct dlm_lksb *lksb = fsdlm_astarg_to_lksb(astarg);
-	int status = lksb->sb_status;
-
-	BUG_ON(ocfs2_user_plugin.sp_proto == NULL);
+	struct ocfs2_dlm_lksb *lksb = astarg;
+	int status = lksb->lksb_fsdlm.sb_status;
 
 	/*
 	 * For now we're punting on the issue of other non-standard errors
@@ -688,25 +679,24 @@ static void fsdlm_lock_ast_wrapper(void *astarg)
 	 */
 
 	if (status == -DLM_EUNLOCK || status == -DLM_ECANCEL)
-		ocfs2_user_plugin.sp_proto->lp_unlock_ast(astarg, 0);
+		lksb->lksb_conn->cc_proto->lp_unlock_ast(lksb, 0);
 	else
-		ocfs2_user_plugin.sp_proto->lp_lock_ast(astarg);
+		lksb->lksb_conn->cc_proto->lp_lock_ast(lksb);
 }
 
 static void fsdlm_blocking_ast_wrapper(void *astarg, int level)
 {
-	BUG_ON(ocfs2_user_plugin.sp_proto == NULL);
+	struct ocfs2_dlm_lksb *lksb = astarg;
 
-	ocfs2_user_plugin.sp_proto->lp_blocking_ast(astarg, level);
+	lksb->lksb_conn->cc_proto->lp_blocking_ast(lksb, level);
 }
 
 static int user_dlm_lock(struct ocfs2_cluster_connection *conn,
 			 int mode,
-			 union ocfs2_dlm_lksb *lksb,
+			 struct ocfs2_dlm_lksb *lksb,
 			 u32 flags,
 			 void *name,
-			 unsigned int namelen,
-			 void *astarg)
+			 unsigned int namelen)
 {
 	int ret;
 
@@ -716,36 +706,35 @@ static int user_dlm_lock(struct ocfs2_cluster_connection *conn,
 
 	ret = dlm_lock(conn->cc_lockspace, mode, &lksb->lksb_fsdlm,
 		       flags|DLM_LKF_NODLCKWT, name, namelen, 0,
-		       fsdlm_lock_ast_wrapper, astarg,
+		       fsdlm_lock_ast_wrapper, lksb,
 		       fsdlm_blocking_ast_wrapper);
 	return ret;
 }
 
 static int user_dlm_unlock(struct ocfs2_cluster_connection *conn,
-			   union ocfs2_dlm_lksb *lksb,
-			   u32 flags,
-			   void *astarg)
+			   struct ocfs2_dlm_lksb *lksb,
+			   u32 flags)
 {
 	int ret;
 
 	ret = dlm_unlock(conn->cc_lockspace, lksb->lksb_fsdlm.sb_lkid,
-			 flags, &lksb->lksb_fsdlm, astarg);
+			 flags, &lksb->lksb_fsdlm, lksb);
 	return ret;
 }
 
-static int user_dlm_lock_status(union ocfs2_dlm_lksb *lksb)
+static int user_dlm_lock_status(struct ocfs2_dlm_lksb *lksb)
 {
 	return lksb->lksb_fsdlm.sb_status;
 }
 
-static int user_dlm_lvb_valid(union ocfs2_dlm_lksb *lksb)
+static int user_dlm_lvb_valid(struct ocfs2_dlm_lksb *lksb)
 {
 	int invalid = lksb->lksb_fsdlm.sb_flags & DLM_SBF_VALNOTVALID;
 
 	return !invalid;
 }
 
-static void *user_dlm_lvb(union ocfs2_dlm_lksb *lksb)
+static void *user_dlm_lvb(struct ocfs2_dlm_lksb *lksb)
 {
 	if (!lksb->lksb_fsdlm.sb_lvbptr)
 		lksb->lksb_fsdlm.sb_lvbptr = (char *)lksb +
@@ -753,7 +742,7 @@ static void *user_dlm_lvb(union ocfs2_dlm_lksb *lksb)
 	return (void *)(lksb->lksb_fsdlm.sb_lvbptr);
 }
 
-static void user_dlm_dump_lksb(union ocfs2_dlm_lksb *lksb)
+static void user_dlm_dump_lksb(struct ocfs2_dlm_lksb *lksb)
 {
 }
 
diff --git a/fs/ocfs2/stackglue.c b/fs/ocfs2/stackglue.c
index f3df0baa9a48..39abf89697ed 100644
--- a/fs/ocfs2/stackglue.c
+++ b/fs/ocfs2/stackglue.c
@@ -36,7 +36,7 @@
 #define OCFS2_STACK_PLUGIN_USER		"user"
 #define OCFS2_MAX_HB_CTL_PATH		256
 
-static struct ocfs2_locking_protocol *lproto;
+static struct ocfs2_protocol_version locking_max_version;
 static DEFINE_SPINLOCK(ocfs2_stack_lock);
 static LIST_HEAD(ocfs2_stack_list);
 static char cluster_stack_name[OCFS2_STACK_LABEL_LEN + 1];
@@ -176,7 +176,7 @@ int ocfs2_stack_glue_register(struct ocfs2_stack_plugin *plugin)
 	spin_lock(&ocfs2_stack_lock);
 	if (!ocfs2_stack_lookup(plugin->sp_name)) {
 		plugin->sp_count = 0;
-		plugin->sp_proto = lproto;
+		plugin->sp_max_proto = locking_max_version;
 		list_add(&plugin->sp_list, &ocfs2_stack_list);
 		printk(KERN_INFO "ocfs2: Registered cluster interface %s\n",
 		       plugin->sp_name);
@@ -213,77 +213,76 @@ void ocfs2_stack_glue_unregister(struct ocfs2_stack_plugin *plugin)
 }
 EXPORT_SYMBOL_GPL(ocfs2_stack_glue_unregister);
 
-void ocfs2_stack_glue_set_locking_protocol(struct ocfs2_locking_protocol *proto)
+void ocfs2_stack_glue_set_max_proto_version(struct ocfs2_protocol_version *max_proto)
 {
 	struct ocfs2_stack_plugin *p;
 
-	BUG_ON(proto == NULL);
-
 	spin_lock(&ocfs2_stack_lock);
-	BUG_ON(active_stack != NULL);
+	if (memcmp(max_proto, &locking_max_version,
+		   sizeof(struct ocfs2_protocol_version))) {
+		BUG_ON(locking_max_version.pv_major != 0);
 
-	lproto = proto;
-	list_for_each_entry(p, &ocfs2_stack_list, sp_list) {
-		p->sp_proto = lproto;
+		locking_max_version = *max_proto;
+		list_for_each_entry(p, &ocfs2_stack_list, sp_list) {
+			p->sp_max_proto = locking_max_version;
+		}
 	}
-
 	spin_unlock(&ocfs2_stack_lock);
 }
-EXPORT_SYMBOL_GPL(ocfs2_stack_glue_set_locking_protocol);
+EXPORT_SYMBOL_GPL(ocfs2_stack_glue_set_max_proto_version);
 
 
 /*
- * The ocfs2_dlm_lock() and ocfs2_dlm_unlock() functions take
- * "struct ocfs2_lock_res *astarg" instead of "void *astarg" because the
- * underlying stack plugins need to pilfer the lksb off of the lock_res.
- * If some other structure needs to be passed as an astarg, the plugins
- * will need to be given a different avenue to the lksb.
+ * The ocfs2_dlm_lock() and ocfs2_dlm_unlock() functions take no argument
+ * for the ast and bast functions. They will pass the lksb to the ast
+ * and bast. The caller can wrap the lksb with their own structure to
+ * get more information.
  */
 int ocfs2_dlm_lock(struct ocfs2_cluster_connection *conn,
 		   int mode,
-		   union ocfs2_dlm_lksb *lksb,
+		   struct ocfs2_dlm_lksb *lksb,
 		   u32 flags,
 		   void *name,
-		   unsigned int namelen,
-		   struct ocfs2_lock_res *astarg)
+		   unsigned int namelen)
 {
-	BUG_ON(lproto == NULL);
-
+	if (!lksb->lksb_conn)
+		lksb->lksb_conn = conn;
+	else
+		BUG_ON(lksb->lksb_conn != conn);
 	return active_stack->sp_ops->dlm_lock(conn, mode, lksb, flags,
-					      name, namelen, astarg);
+					      name, namelen);
 }
 EXPORT_SYMBOL_GPL(ocfs2_dlm_lock);
 
 int ocfs2_dlm_unlock(struct ocfs2_cluster_connection *conn,
-		     union ocfs2_dlm_lksb *lksb,
-		     u32 flags,
-		     struct ocfs2_lock_res *astarg)
+		     struct ocfs2_dlm_lksb *lksb,
+		     u32 flags)
 {
-	BUG_ON(lproto == NULL);
+	BUG_ON(lksb->lksb_conn == NULL);
 
-	return active_stack->sp_ops->dlm_unlock(conn, lksb, flags, astarg);
+	return active_stack->sp_ops->dlm_unlock(conn, lksb, flags);
 }
 EXPORT_SYMBOL_GPL(ocfs2_dlm_unlock);
 
-int ocfs2_dlm_lock_status(union ocfs2_dlm_lksb *lksb)
+int ocfs2_dlm_lock_status(struct ocfs2_dlm_lksb *lksb)
 {
 	return active_stack->sp_ops->lock_status(lksb);
 }
 EXPORT_SYMBOL_GPL(ocfs2_dlm_lock_status);
 
-int ocfs2_dlm_lvb_valid(union ocfs2_dlm_lksb *lksb)
+int ocfs2_dlm_lvb_valid(struct ocfs2_dlm_lksb *lksb)
 {
 	return active_stack->sp_ops->lvb_valid(lksb);
 }
 EXPORT_SYMBOL_GPL(ocfs2_dlm_lvb_valid);
 
-void *ocfs2_dlm_lvb(union ocfs2_dlm_lksb *lksb)
+void *ocfs2_dlm_lvb(struct ocfs2_dlm_lksb *lksb)
 {
 	return active_stack->sp_ops->lock_lvb(lksb);
 }
 EXPORT_SYMBOL_GPL(ocfs2_dlm_lvb);
 
-void ocfs2_dlm_dump_lksb(union ocfs2_dlm_lksb *lksb)
+void ocfs2_dlm_dump_lksb(struct ocfs2_dlm_lksb *lksb)
 {
 	active_stack->sp_ops->dump_lksb(lksb);
 }
@@ -312,6 +311,7 @@ EXPORT_SYMBOL_GPL(ocfs2_plock);
 int ocfs2_cluster_connect(const char *stack_name,
 			  const char *group,
 			  int grouplen,
+			  struct ocfs2_locking_protocol *lproto,
 			  void (*recovery_handler)(int node_num,
 						   void *recovery_data),
 			  void *recovery_data,
@@ -329,6 +329,12 @@ int ocfs2_cluster_connect(const char *stack_name,
 		goto out;
 	}
 
+	if (memcmp(&lproto->lp_max_version, &locking_max_version,
+		   sizeof(struct ocfs2_protocol_version))) {
+		rc = -EINVAL;
+		goto out;
+	}
+
 	new_conn = kzalloc(sizeof(struct ocfs2_cluster_connection),
 			   GFP_KERNEL);
 	if (!new_conn) {
@@ -341,6 +347,7 @@ int ocfs2_cluster_connect(const char *stack_name,
 	new_conn->cc_recovery_handler = recovery_handler;
 	new_conn->cc_recovery_data = recovery_data;
 
+	new_conn->cc_proto = lproto;
 	/* Start the new connection at our maximum compatibility level */
 	new_conn->cc_version = lproto->lp_max_version;
 
@@ -366,6 +373,24 @@ out:
 }
 EXPORT_SYMBOL_GPL(ocfs2_cluster_connect);
 
+/* The caller will ensure all nodes have the same cluster stack */
+int ocfs2_cluster_connect_agnostic(const char *group,
+				   int grouplen,
+				   struct ocfs2_locking_protocol *lproto,
+				   void (*recovery_handler)(int node_num,
+							    void *recovery_data),
+				   void *recovery_data,
+				   struct ocfs2_cluster_connection **conn)
+{
+	char *stack_name = NULL;
+
+	if (cluster_stack_name[0])
+		stack_name = cluster_stack_name;
+	return ocfs2_cluster_connect(stack_name, group, grouplen, lproto,
+				     recovery_handler, recovery_data, conn);
+}
+EXPORT_SYMBOL_GPL(ocfs2_cluster_connect_agnostic);
+
 /* If hangup_pending is 0, the stack driver will be dropped */
 int ocfs2_cluster_disconnect(struct ocfs2_cluster_connection *conn,
 			     int hangup_pending)
@@ -453,10 +478,10 @@ static ssize_t ocfs2_max_locking_protocol_show(struct kobject *kobj,
 	ssize_t ret = 0;
 
 	spin_lock(&ocfs2_stack_lock);
-	if (lproto)
+	if (locking_max_version.pv_major)
 		ret = snprintf(buf, PAGE_SIZE, "%u.%u\n",
-			       lproto->lp_max_version.pv_major,
-			       lproto->lp_max_version.pv_minor);
+			       locking_max_version.pv_major,
+			       locking_max_version.pv_minor);
 	spin_unlock(&ocfs2_stack_lock);
 
 	return ret;
@@ -685,7 +710,10 @@ static int __init ocfs2_stack_glue_init(void)
 
 static void __exit ocfs2_stack_glue_exit(void)
 {
-	lproto = NULL;
+	memset(&locking_max_version, 0,
+	       sizeof(struct ocfs2_protocol_version));
+	locking_max_version.pv_major = 0;
+	locking_max_version.pv_minor = 0;
 	ocfs2_sysfs_exit();
 	if (ocfs2_table_header)
 		unregister_sysctl_table(ocfs2_table_header);
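With this change, ocfs2_cluster_connect() takes the caller's ocfs2_locking_protocol directly and refuses the connection with -EINVAL when its lp_max_version does not match the version previously registered through ocfs2_stack_glue_set_max_proto_version(). A standalone sketch of just that comparison follows; the types are local stand-ins, not the kernel API.

#include <stdio.h>
#include <string.h>
#include <errno.h>

struct proto_version { unsigned char pv_major, pv_minor; };

static struct proto_version locking_max_version;	/* set at register time */

static int demo_connect(const struct proto_version *lp_max_version)
{
	/* same memcmp() shape as the new check in ocfs2_cluster_connect() */
	if (memcmp(lp_max_version, &locking_max_version,
		   sizeof(struct proto_version)))
		return -EINVAL;	/* stack and caller disagree on the protocol */
	return 0;
}

int main(void)
{
	struct proto_version fs_max = { 1, 0 };

	locking_max_version = (struct proto_version){ 1, 0 };
	printf("matching caller: %d\n", demo_connect(&fs_max));	/* prints 0 */

	fs_max.pv_minor = 1;
	printf("mismatched caller: %d\n", demo_connect(&fs_max));	/* prints -22 */
	return 0;
}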
diff --git a/fs/ocfs2/stackglue.h b/fs/ocfs2/stackglue.h
index 03a44d60eac9..8ce7398ae1d2 100644
--- a/fs/ocfs2/stackglue.h
+++ b/fs/ocfs2/stackglue.h
@@ -56,17 +56,6 @@ struct ocfs2_protocol_version {
 };
 
 /*
- * The ocfs2_locking_protocol defines the handlers called on ocfs2's behalf.
- */
-struct ocfs2_locking_protocol {
-	struct ocfs2_protocol_version lp_max_version;
-	void (*lp_lock_ast)(void *astarg);
-	void (*lp_blocking_ast)(void *astarg, int level);
-	void (*lp_unlock_ast)(void *astarg, int error);
-};
-
-
-/*
  * The dlm_lockstatus struct includes lvb space, but the dlm_lksb struct only
  * has a pointer to separately allocated lvb space. This struct exists only to
  * include in the lksb union to make space for a combined dlm_lksb and lvb.
@@ -81,12 +70,27 @@ struct fsdlm_lksb_plus_lvb {
  * size of the union is known. Lock status structures are embedded in
  * ocfs2 inodes.
  */
-union ocfs2_dlm_lksb {
-	struct dlm_lockstatus lksb_o2dlm;
-	struct dlm_lksb lksb_fsdlm;
-	struct fsdlm_lksb_plus_lvb padding;
+struct ocfs2_cluster_connection;
+struct ocfs2_dlm_lksb {
+	union {
+		struct dlm_lockstatus lksb_o2dlm;
+		struct dlm_lksb lksb_fsdlm;
+		struct fsdlm_lksb_plus_lvb padding;
+	};
+	struct ocfs2_cluster_connection *lksb_conn;
+};
+
+/*
+ * The ocfs2_locking_protocol defines the handlers called on ocfs2's behalf.
+ */
+struct ocfs2_locking_protocol {
+	struct ocfs2_protocol_version lp_max_version;
+	void (*lp_lock_ast)(struct ocfs2_dlm_lksb *lksb);
+	void (*lp_blocking_ast)(struct ocfs2_dlm_lksb *lksb, int level);
+	void (*lp_unlock_ast)(struct ocfs2_dlm_lksb *lksb, int error);
 };
 
+
 /*
  * A cluster connection. Mostly opaque to ocfs2, the connection holds
  * state for the underlying stack. ocfs2 does use cc_version to determine
@@ -96,6 +100,7 @@ struct ocfs2_cluster_connection {
 	char cc_name[GROUP_NAME_MAX];
 	int cc_namelen;
 	struct ocfs2_protocol_version cc_version;
+	struct ocfs2_locking_protocol *cc_proto;
 	void (*cc_recovery_handler)(int node_num, void *recovery_data);
 	void *cc_recovery_data;
 	void *cc_lockspace;
@@ -155,27 +160,29 @@ struct ocfs2_stack_operations {
 	 *
 	 * ast and bast functions are not part of the call because the
 	 * stack will likely want to wrap ast and bast calls before passing
-	 * them to stack->sp_proto.
+	 * them to stack->sp_proto. There is no astarg. The lksb will
+	 * be passed back to the ast and bast functions. The caller can
+	 * use this to find their object.
	 */
 	int (*dlm_lock)(struct ocfs2_cluster_connection *conn,
 			int mode,
-			union ocfs2_dlm_lksb *lksb,
+			struct ocfs2_dlm_lksb *lksb,
 			u32 flags,
 			void *name,
-			unsigned int namelen,
-			void *astarg);
+			unsigned int namelen);
 
 	/*
 	 * Call the underlying dlm unlock function. The ->dlm_unlock()
 	 * function should convert the flags as appropriate.
 	 *
 	 * The unlock ast is not passed, as the stack will want to wrap
-	 * it before calling stack->sp_proto->lp_unlock_ast().
+	 * it before calling stack->sp_proto->lp_unlock_ast(). There is
+	 * no astarg. The lksb will be passed back to the unlock ast
+	 * function. The caller can use this to find their object.
	 */
 	int (*dlm_unlock)(struct ocfs2_cluster_connection *conn,
-			  union ocfs2_dlm_lksb *lksb,
-			  u32 flags,
-			  void *astarg);
+			  struct ocfs2_dlm_lksb *lksb,
+			  u32 flags);
 
 	/*
 	 * Return the status of the current lock status block. The fs
@@ -183,17 +190,17 @@ struct ocfs2_stack_operations {
 	 * callback pulls out the stack-specific lksb, converts the status
 	 * to a proper errno, and returns it.
 	 */
-	int (*lock_status)(union ocfs2_dlm_lksb *lksb);
+	int (*lock_status)(struct ocfs2_dlm_lksb *lksb);
 
 	/*
 	 * Return non-zero if the LVB is valid.
 	 */
-	int (*lvb_valid)(union ocfs2_dlm_lksb *lksb);
+	int (*lvb_valid)(struct ocfs2_dlm_lksb *lksb);
 
 	/*
 	 * Pull the lvb pointer off of the stack-specific lksb.
 	 */
-	void *(*lock_lvb)(union ocfs2_dlm_lksb *lksb);
+	void *(*lock_lvb)(struct ocfs2_dlm_lksb *lksb);
 
 	/*
 	 * Cluster-aware posix locks
@@ -210,7 +217,7 @@ struct ocfs2_stack_operations {
 	 * This is an optoinal debugging hook. If provided, the
 	 * stack can dump debugging information about this lock.
 	 */
-	void (*dump_lksb)(union ocfs2_dlm_lksb *lksb);
+	void (*dump_lksb)(struct ocfs2_dlm_lksb *lksb);
 };
 
 /*
@@ -226,7 +233,7 @@ struct ocfs2_stack_plugin {
 	/* These are managed by the stackglue code. */
 	struct list_head sp_list;
 	unsigned int sp_count;
-	struct ocfs2_locking_protocol *sp_proto;
+	struct ocfs2_protocol_version sp_max_proto;
 };
 
 
@@ -234,10 +241,22 @@ struct ocfs2_stack_plugin {
 int ocfs2_cluster_connect(const char *stack_name,
 			  const char *group,
 			  int grouplen,
+			  struct ocfs2_locking_protocol *lproto,
 			  void (*recovery_handler)(int node_num,
 						   void *recovery_data),
 			  void *recovery_data,
 			  struct ocfs2_cluster_connection **conn);
+/*
+ * Used by callers that don't store their stack name. They must ensure
+ * all nodes have the same stack.
+ */
+int ocfs2_cluster_connect_agnostic(const char *group,
+				   int grouplen,
+				   struct ocfs2_locking_protocol *lproto,
+				   void (*recovery_handler)(int node_num,
+							    void *recovery_data),
+				   void *recovery_data,
+				   struct ocfs2_cluster_connection **conn);
 int ocfs2_cluster_disconnect(struct ocfs2_cluster_connection *conn,
 			     int hangup_pending);
 void ocfs2_cluster_hangup(const char *group, int grouplen);
@@ -246,26 +265,24 @@ int ocfs2_cluster_this_node(unsigned int *node);
 struct ocfs2_lock_res;
 int ocfs2_dlm_lock(struct ocfs2_cluster_connection *conn,
 		   int mode,
-		   union ocfs2_dlm_lksb *lksb,
+		   struct ocfs2_dlm_lksb *lksb,
 		   u32 flags,
 		   void *name,
-		   unsigned int namelen,
-		   struct ocfs2_lock_res *astarg);
+		   unsigned int namelen);
 int ocfs2_dlm_unlock(struct ocfs2_cluster_connection *conn,
-		     union ocfs2_dlm_lksb *lksb,
-		     u32 flags,
-		     struct ocfs2_lock_res *astarg);
+		     struct ocfs2_dlm_lksb *lksb,
+		     u32 flags);
 
-int ocfs2_dlm_lock_status(union ocfs2_dlm_lksb *lksb);
-int ocfs2_dlm_lvb_valid(union ocfs2_dlm_lksb *lksb);
-void *ocfs2_dlm_lvb(union ocfs2_dlm_lksb *lksb);
-void ocfs2_dlm_dump_lksb(union ocfs2_dlm_lksb *lksb);
+int ocfs2_dlm_lock_status(struct ocfs2_dlm_lksb *lksb);
+int ocfs2_dlm_lvb_valid(struct ocfs2_dlm_lksb *lksb);
+void *ocfs2_dlm_lvb(struct ocfs2_dlm_lksb *lksb);
+void ocfs2_dlm_dump_lksb(struct ocfs2_dlm_lksb *lksb);
 
 int ocfs2_stack_supports_plocks(void);
 int ocfs2_plock(struct ocfs2_cluster_connection *conn, u64 ino,
 		struct file *file, int cmd, struct file_lock *fl);
 
-void ocfs2_stack_glue_set_locking_protocol(struct ocfs2_locking_protocol *proto);
+void ocfs2_stack_glue_set_max_proto_version(struct ocfs2_protocol_version *max_proto);
 
 
 /* Used by stack plugins */
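The old union ocfs2_dlm_lksb becomes a struct wrapping an anonymous union plus a connection back-pointer, so every existing lksb->lksb_o2dlm / lksb->lksb_fsdlm access keeps compiling unchanged while the stack gains a path from any lksb back to its ocfs2_cluster_connection. A minimal sketch of that layout with local stand-in types follows; the anonymous-union member access is the point here, and the kernel relies on the long-standing GNU C extension (later standardized in C11) for it.

#include <stdio.h>

struct o2dlm_status { int status; };
struct fsdlm_status { int sb_status; };

struct demo_conn;			/* opaque, like ocfs2_cluster_connection */

struct demo_dlm_lksb {
	union {				/* anonymous: members are accessed directly */
		struct o2dlm_status lksb_o2dlm;
		struct fsdlm_status lksb_fsdlm;
	};
	struct demo_conn *lksb_conn;	/* new: back-pointer to the connection */
};

int main(void)
{
	struct demo_dlm_lksb lksb = { .lksb_o2dlm = { .status = 0 } };

	lksb.lksb_fsdlm.sb_status = -5;	/* same storage as lksb_o2dlm.status */
	printf("o2dlm view: %d, conn: %p\n",
	       lksb.lksb_o2dlm.status, (void *)lksb.lksb_conn);
	return 0;
}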
diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
index c30b644d9572..c3c60bc3e072 100644
--- a/fs/ocfs2/suballoc.c
+++ b/fs/ocfs2/suballoc.c
@@ -51,7 +51,7 @@
 #define ALLOC_NEW_GROUP			0x1
 #define ALLOC_GROUPS_FROM_GLOBAL	0x2
 
-#define OCFS2_MAX_INODES_TO_STEAL	1024
+#define OCFS2_MAX_TO_STEAL		1024
 
 static inline void ocfs2_debug_bg(struct ocfs2_group_desc *bg);
 static inline void ocfs2_debug_suballoc_inode(struct ocfs2_dinode *fe);
@@ -637,12 +637,113 @@ bail:
 	return status;
 }
 
+static void ocfs2_init_inode_steal_slot(struct ocfs2_super *osb)
+{
+	spin_lock(&osb->osb_lock);
+	osb->s_inode_steal_slot = OCFS2_INVALID_SLOT;
+	spin_unlock(&osb->osb_lock);
+	atomic_set(&osb->s_num_inodes_stolen, 0);
+}
+
+static void ocfs2_init_meta_steal_slot(struct ocfs2_super *osb)
+{
+	spin_lock(&osb->osb_lock);
+	osb->s_meta_steal_slot = OCFS2_INVALID_SLOT;
+	spin_unlock(&osb->osb_lock);
+	atomic_set(&osb->s_num_meta_stolen, 0);
+}
+
+void ocfs2_init_steal_slots(struct ocfs2_super *osb)
+{
+	ocfs2_init_inode_steal_slot(osb);
+	ocfs2_init_meta_steal_slot(osb);
+}
+
+static void __ocfs2_set_steal_slot(struct ocfs2_super *osb, int slot, int type)
+{
+	spin_lock(&osb->osb_lock);
+	if (type == INODE_ALLOC_SYSTEM_INODE)
+		osb->s_inode_steal_slot = slot;
+	else if (type == EXTENT_ALLOC_SYSTEM_INODE)
+		osb->s_meta_steal_slot = slot;
+	spin_unlock(&osb->osb_lock);
+}
+
+static int __ocfs2_get_steal_slot(struct ocfs2_super *osb, int type)
+{
+	int slot = OCFS2_INVALID_SLOT;
+
+	spin_lock(&osb->osb_lock);
+	if (type == INODE_ALLOC_SYSTEM_INODE)
+		slot = osb->s_inode_steal_slot;
+	else if (type == EXTENT_ALLOC_SYSTEM_INODE)
+		slot = osb->s_meta_steal_slot;
+	spin_unlock(&osb->osb_lock);
+
+	return slot;
+}
+
+static int ocfs2_get_inode_steal_slot(struct ocfs2_super *osb)
+{
+	return __ocfs2_get_steal_slot(osb, INODE_ALLOC_SYSTEM_INODE);
+}
+
+static int ocfs2_get_meta_steal_slot(struct ocfs2_super *osb)
+{
+	return __ocfs2_get_steal_slot(osb, EXTENT_ALLOC_SYSTEM_INODE);
+}
+
+static int ocfs2_steal_resource(struct ocfs2_super *osb,
+				struct ocfs2_alloc_context *ac,
+				int type)
+{
+	int i, status = -ENOSPC;
+	int slot = __ocfs2_get_steal_slot(osb, type);
+
+	/* Start to steal resource from the first slot after ours. */
+	if (slot == OCFS2_INVALID_SLOT)
+		slot = osb->slot_num + 1;
+
+	for (i = 0; i < osb->max_slots; i++, slot++) {
+		if (slot == osb->max_slots)
+			slot = 0;
+
+		if (slot == osb->slot_num)
+			continue;
+
+		status = ocfs2_reserve_suballoc_bits(osb, ac,
+						     type,
+						     (u32)slot, NULL,
+						     NOT_ALLOC_NEW_GROUP);
+		if (status >= 0) {
+			__ocfs2_set_steal_slot(osb, slot, type);
+			break;
+		}
+
+		ocfs2_free_ac_resource(ac);
+	}
+
+	return status;
+}
+
+static int ocfs2_steal_inode(struct ocfs2_super *osb,
+			     struct ocfs2_alloc_context *ac)
+{
+	return ocfs2_steal_resource(osb, ac, INODE_ALLOC_SYSTEM_INODE);
+}
+
+static int ocfs2_steal_meta(struct ocfs2_super *osb,
+			    struct ocfs2_alloc_context *ac)
+{
+	return ocfs2_steal_resource(osb, ac, EXTENT_ALLOC_SYSTEM_INODE);
+}
+
 int ocfs2_reserve_new_metadata_blocks(struct ocfs2_super *osb,
 				      int blocks,
 				      struct ocfs2_alloc_context **ac)
 {
 	int status;
-	u32 slot;
+	int slot = ocfs2_get_meta_steal_slot(osb);
 
 	*ac = kzalloc(sizeof(struct ocfs2_alloc_context), GFP_KERNEL);
 	if (!(*ac)) {
@@ -653,12 +754,34 @@ int ocfs2_reserve_new_metadata_blocks(struct ocfs2_super *osb,
 
 	(*ac)->ac_bits_wanted = blocks;
 	(*ac)->ac_which = OCFS2_AC_USE_META;
-	slot = osb->slot_num;
 	(*ac)->ac_group_search = ocfs2_block_group_search;
 
+	if (slot != OCFS2_INVALID_SLOT &&
+	    atomic_read(&osb->s_num_meta_stolen) < OCFS2_MAX_TO_STEAL)
+		goto extent_steal;
+
+	atomic_set(&osb->s_num_meta_stolen, 0);
 	status = ocfs2_reserve_suballoc_bits(osb, (*ac),
 					     EXTENT_ALLOC_SYSTEM_INODE,
-					     slot, NULL, ALLOC_NEW_GROUP);
+					     (u32)osb->slot_num, NULL,
+					     ALLOC_NEW_GROUP);
+
+
+	if (status >= 0) {
+		status = 0;
+		if (slot != OCFS2_INVALID_SLOT)
+			ocfs2_init_meta_steal_slot(osb);
+		goto bail;
+	} else if (status < 0 && status != -ENOSPC) {
+		mlog_errno(status);
+		goto bail;
+	}
+
+	ocfs2_free_ac_resource(*ac);
+
+extent_steal:
+	status = ocfs2_steal_meta(osb, *ac);
+	atomic_inc(&osb->s_num_meta_stolen);
 	if (status < 0) {
 		if (status != -ENOSPC)
 			mlog_errno(status);
@@ -685,43 +808,11 @@ int ocfs2_reserve_new_metadata(struct ocfs2_super *osb,
 				 ac);
 }
 
-static int ocfs2_steal_inode_from_other_nodes(struct ocfs2_super *osb,
-					      struct ocfs2_alloc_context *ac)
-{
-	int i, status = -ENOSPC;
-	s16 slot = ocfs2_get_inode_steal_slot(osb);
-
-	/* Start to steal inodes from the first slot after ours. */
-	if (slot == OCFS2_INVALID_SLOT)
-		slot = osb->slot_num + 1;
-
-	for (i = 0; i < osb->max_slots; i++, slot++) {
-		if (slot == osb->max_slots)
-			slot = 0;
-
-		if (slot == osb->slot_num)
-			continue;
-
-		status = ocfs2_reserve_suballoc_bits(osb, ac,
-						     INODE_ALLOC_SYSTEM_INODE,
-						     slot, NULL,
-						     NOT_ALLOC_NEW_GROUP);
-		if (status >= 0) {
-			ocfs2_set_inode_steal_slot(osb, slot);
-			break;
-		}
-
-		ocfs2_free_ac_resource(ac);
-	}
-
-	return status;
-}
-
 int ocfs2_reserve_new_inode(struct ocfs2_super *osb,
 			    struct ocfs2_alloc_context **ac)
 {
 	int status;
-	s16 slot = ocfs2_get_inode_steal_slot(osb);
+	int slot = ocfs2_get_inode_steal_slot(osb);
 	u64 alloc_group;
 
 	*ac = kzalloc(sizeof(struct ocfs2_alloc_context), GFP_KERNEL);
@@ -754,14 +845,14 @@ int ocfs2_reserve_new_inode(struct ocfs2_super *osb,
 	 * need to check our slots to see whether there is some space for us.
 	 */
 	if (slot != OCFS2_INVALID_SLOT &&
-	    atomic_read(&osb->s_num_inodes_stolen) < OCFS2_MAX_INODES_TO_STEAL)
+	    atomic_read(&osb->s_num_inodes_stolen) < OCFS2_MAX_TO_STEAL)
 		goto inode_steal;
 
 	atomic_set(&osb->s_num_inodes_stolen, 0);
 	alloc_group = osb->osb_inode_alloc_group;
 	status = ocfs2_reserve_suballoc_bits(osb, *ac,
 					     INODE_ALLOC_SYSTEM_INODE,
-					     osb->slot_num,
+					     (u32)osb->slot_num,
 					     &alloc_group,
 					     ALLOC_NEW_GROUP |
 					     ALLOC_GROUPS_FROM_GLOBAL);
@@ -789,7 +880,7 @@ int ocfs2_reserve_new_inode(struct ocfs2_super *osb,
 	ocfs2_free_ac_resource(*ac);
 
 inode_steal:
-	status = ocfs2_steal_inode_from_other_nodes(osb, *ac);
+	status = ocfs2_steal_inode(osb, *ac);
 	atomic_inc(&osb->s_num_inodes_stolen);
 	if (status < 0) {
 		if (status != -ENOSPC)
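ocfs2_steal_resource() generalizes the old inode-only stealing: it remembers the last slot it successfully stole from and otherwise walks the other slots round-robin, always skipping its own. A standalone sketch of the resulting probe order follows; max_slots and slot_num values are illustrative assumptions, not taken from the patch.

#include <stdio.h>

#define INVALID_SLOT	(-1)

static void print_steal_order(int max_slots, int slot_num, int cached)
{
	/* first-time steal starts just past our own slot, else at the cache */
	int i, slot = (cached == INVALID_SLOT) ? slot_num + 1 : cached;

	for (i = 0; i < max_slots; i++, slot++) {
		if (slot == max_slots)
			slot = 0;		/* wrap around */
		if (slot == slot_num)
			continue;		/* never steal from ourselves */
		printf("%d ", slot);
	}
	printf("\n");
}

int main(void)
{
	print_steal_order(4, 1, INVALID_SLOT);	/* prints: 2 3 0 */
	print_steal_order(4, 1, 3);		/* prints: 3 0 2 */
	return 0;
}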
diff --git a/fs/ocfs2/suballoc.h b/fs/ocfs2/suballoc.h
index 8c9a78a43164..fa60723c43e8 100644
--- a/fs/ocfs2/suballoc.h
+++ b/fs/ocfs2/suballoc.h
@@ -56,6 +56,7 @@ struct ocfs2_alloc_context {
 					   is the same as ~0 - unlimited */
 };
 
+void ocfs2_init_steal_slots(struct ocfs2_super *osb);
 void ocfs2_free_alloc_context(struct ocfs2_alloc_context *ac);
 static inline int ocfs2_alloc_context_bits_left(struct ocfs2_alloc_context *ac)
 {
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index 755cd49a5ef3..dee03197a494 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -69,6 +69,7 @@
 #include "xattr.h"
 #include "quota.h"
 #include "refcounttree.h"
+#include "suballoc.h"
 
 #include "buffer_head_io.h"
 
@@ -301,9 +302,12 @@ static int ocfs2_osb_dump(struct ocfs2_super *osb, char *buf, int len)
 
 	spin_lock(&osb->osb_lock);
 	out += snprintf(buf + out, len - out,
-			"%10s => Slot: %d NumStolen: %d\n", "Steal",
+			"%10s => InodeSlot: %d StolenInodes: %d, "
+			"MetaSlot: %d StolenMeta: %d\n", "Steal",
 			osb->s_inode_steal_slot,
-			atomic_read(&osb->s_num_inodes_stolen));
+			atomic_read(&osb->s_num_inodes_stolen),
+			osb->s_meta_steal_slot,
+			atomic_read(&osb->s_num_meta_stolen));
 	spin_unlock(&osb->osb_lock);
 
 	out += snprintf(buf + out, len - out, "OrphanScan => ");
@@ -1997,7 +2001,7 @@ static int ocfs2_initialize_super(struct super_block *sb,
 	osb->blocked_lock_count = 0;
 	spin_lock_init(&osb->osb_lock);
 	spin_lock_init(&osb->osb_xattr_lock);
-	ocfs2_init_inode_steal_slot(osb);
+	ocfs2_init_steal_slots(osb);
 
 	atomic_set(&osb->alloc_stats.moves, 0);
 	atomic_set(&osb->alloc_stats.local_data, 0);
diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c
index 8fc6fb071c6d..d1b0d386f6d1 100644
--- a/fs/ocfs2/xattr.c
+++ b/fs/ocfs2/xattr.c
@@ -116,10 +116,11 @@ static struct xattr_handler *ocfs2_xattr_handler_map[OCFS2_XATTR_MAX] = {
 };
 
 struct ocfs2_xattr_info {
-	int name_index;
-	const char *name;
-	const void *value;
-	size_t value_len;
+	int		xi_name_index;
+	const char	*xi_name;
+	int		xi_name_len;
+	const void	*xi_value;
+	size_t		xi_value_len;
 };
 
 struct ocfs2_xattr_search {
@@ -137,6 +138,115 @@ struct ocfs2_xattr_search {
 	int not_found;
 };
 
+/* Operations on struct ocfs2_xa_entry */
+struct ocfs2_xa_loc;
+struct ocfs2_xa_loc_operations {
+	/*
+	 * Journal functions
+	 */
+	int (*xlo_journal_access)(handle_t *handle, struct ocfs2_xa_loc *loc,
+				  int type);
+	void (*xlo_journal_dirty)(handle_t *handle, struct ocfs2_xa_loc *loc);
+
+	/*
+	 * Return a pointer to the appropriate buffer in loc->xl_storage
+	 * at the given offset from loc->xl_header.
+	 */
+	void *(*xlo_offset_pointer)(struct ocfs2_xa_loc *loc, int offset);
+
+	/* Can we reuse the existing entry for the new value? */
+	int (*xlo_can_reuse)(struct ocfs2_xa_loc *loc,
+			     struct ocfs2_xattr_info *xi);
+
+	/* How much space is needed for the new value? */
+	int (*xlo_check_space)(struct ocfs2_xa_loc *loc,
+			       struct ocfs2_xattr_info *xi);
+
+	/*
+	 * Return the offset of the first name+value pair. This is
+	 * the start of our downward-filling free space.
+	 */
+	int (*xlo_get_free_start)(struct ocfs2_xa_loc *loc);
+
+	/*
+	 * Remove the name+value at this location. Do whatever is
+	 * appropriate with the remaining name+value pairs.
+	 */
+	void (*xlo_wipe_namevalue)(struct ocfs2_xa_loc *loc);
+
+	/* Fill xl_entry with a new entry */
+	void (*xlo_add_entry)(struct ocfs2_xa_loc *loc, u32 name_hash);
+
+	/* Add name+value storage to an entry */
+	void (*xlo_add_namevalue)(struct ocfs2_xa_loc *loc, int size);
+
+	/*
+	 * Initialize the value buf's access and bh fields for this entry.
+	 * ocfs2_xa_fill_value_buf() will handle the xv pointer.
+	 */
+	void (*xlo_fill_value_buf)(struct ocfs2_xa_loc *loc,
+				   struct ocfs2_xattr_value_buf *vb);
+};
+
+/*
+ * Describes an xattr entry location. This is a memory structure
+ * tracking the on-disk structure.
+ */
+struct ocfs2_xa_loc {
+	/* This xattr belongs to this inode */
+	struct inode *xl_inode;
+
+	/* The ocfs2_xattr_header inside the on-disk storage. Not NULL. */
+	struct ocfs2_xattr_header *xl_header;
+
+	/* Bytes from xl_header to the end of the storage */
+	int xl_size;
+
+	/*
+	 * The ocfs2_xattr_entry this location describes. If this is
+	 * NULL, this location describes the on-disk structure where it
+	 * would have been.
+	 */
+	struct ocfs2_xattr_entry *xl_entry;
+
+	/*
+	 * Internal housekeeping
+	 */
+
+	/* Buffer(s) containing this entry */
+	void *xl_storage;
+
+	/* Operations on the storage backing this location */
+	const struct ocfs2_xa_loc_operations *xl_ops;
+};
+
+/*
+ * Convenience functions to calculate how much space is needed for a
+ * given name+value pair
+ */
+static int namevalue_size(int name_len, uint64_t value_len)
+{
+	if (value_len > OCFS2_XATTR_INLINE_SIZE)
+		return OCFS2_XATTR_SIZE(name_len) + OCFS2_XATTR_ROOT_SIZE;
+	else
+		return OCFS2_XATTR_SIZE(name_len) + OCFS2_XATTR_SIZE(value_len);
+}
+
+static int namevalue_size_xi(struct ocfs2_xattr_info *xi)
+{
+	return namevalue_size(xi->xi_name_len, xi->xi_value_len);
+}
+
+static int namevalue_size_xe(struct ocfs2_xattr_entry *xe)
+{
+	u64 value_len = le64_to_cpu(xe->xe_value_size);
+
+	BUG_ON((value_len > OCFS2_XATTR_INLINE_SIZE) &&
+	       ocfs2_xattr_is_local(xe));
+	return namevalue_size(xe->xe_name_len, value_len);
+}
+
+
 static int ocfs2_xattr_bucket_get_name_value(struct super_block *sb,
 					     struct ocfs2_xattr_header *xh,
 					     int index,
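namevalue_size() pads both the name and an inline value to the on-disk alignment, and swaps the value for a fixed-size tree root once it exceeds the inline limit. A standalone sketch of that arithmetic follows, with the constants re-declared locally: the 4-byte padding and 80-byte inline limit match ocfs2_fs.h as far as I can tell, while the root size here is an assumption for illustration (it stands in for sizeof(struct ocfs2_xattr_def_value_root)).

#include <stdio.h>
#include <stdint.h>

#define XATTR_PAD		4
#define XATTR_SIZE(size)	(((size) + XATTR_PAD - 1) & ~(XATTR_PAD - 1))
#define XATTR_INLINE_SIZE	80
#define XATTR_ROOT_SIZE		40	/* assumption for illustration */

static int namevalue_size(int name_len, uint64_t value_len)
{
	if (value_len > XATTR_INLINE_SIZE)
		return XATTR_SIZE(name_len) + XATTR_ROOT_SIZE;
	else
		return XATTR_SIZE(name_len) + XATTR_SIZE(value_len);
}

int main(void)
{
	/* an 8-byte name with a 13-byte value: 8 + 16 = 24 bytes inline */
	printf("inline:   %d\n", namevalue_size(8, 13));
	/* same name with a 4096-byte value: stored out-of-line via a root */
	printf("external: %d\n", namevalue_size(8, 4096));
	return 0;
}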
@@ -212,14 +322,6 @@ static inline u16 ocfs2_blocks_per_xattr_bucket(struct super_block *sb)
212 return OCFS2_XATTR_BUCKET_SIZE / (1 << sb->s_blocksize_bits); 322 return OCFS2_XATTR_BUCKET_SIZE / (1 << sb->s_blocksize_bits);
213} 323}
214 324
215static inline u16 ocfs2_xattr_max_xe_in_bucket(struct super_block *sb)
216{
217 u16 len = sb->s_blocksize -
218 offsetof(struct ocfs2_xattr_header, xh_entries);
219
220 return len / sizeof(struct ocfs2_xattr_entry);
221}
222
223#define bucket_blkno(_b) ((_b)->bu_bhs[0]->b_blocknr) 325#define bucket_blkno(_b) ((_b)->bu_bhs[0]->b_blocknr)
224#define bucket_block(_b, _n) ((_b)->bu_bhs[(_n)]->b_data) 326#define bucket_block(_b, _n) ((_b)->bu_bhs[(_n)]->b_data)
225#define bucket_xh(_b) ((struct ocfs2_xattr_header *)bucket_block((_b), 0)) 327#define bucket_xh(_b) ((struct ocfs2_xattr_header *)bucket_block((_b), 0))
@@ -463,35 +565,22 @@ static u32 ocfs2_xattr_name_hash(struct inode *inode,
463 return hash; 565 return hash;
464} 566}
465 567
466/* 568static int ocfs2_xattr_entry_real_size(int name_len, size_t value_len)
467 * ocfs2_xattr_hash_entry()
468 *
469 * Compute the hash of an extended attribute.
470 */
471static void ocfs2_xattr_hash_entry(struct inode *inode,
472 struct ocfs2_xattr_header *header,
473 struct ocfs2_xattr_entry *entry)
474{ 569{
475 u32 hash = 0; 570 return namevalue_size(name_len, value_len) +
476 char *name = (char *)header + le16_to_cpu(entry->xe_name_offset); 571 sizeof(struct ocfs2_xattr_entry);
477
478 hash = ocfs2_xattr_name_hash(inode, name, entry->xe_name_len);
479 entry->xe_name_hash = cpu_to_le32(hash);
480
481 return;
482} 572}
483 573
484static int ocfs2_xattr_entry_real_size(int name_len, size_t value_len) 574static int ocfs2_xi_entry_usage(struct ocfs2_xattr_info *xi)
485{ 575{
486 int size = 0; 576 return namevalue_size_xi(xi) +
487 577 sizeof(struct ocfs2_xattr_entry);
488 if (value_len <= OCFS2_XATTR_INLINE_SIZE) 578}
489 size = OCFS2_XATTR_SIZE(name_len) + OCFS2_XATTR_SIZE(value_len);
490 else
491 size = OCFS2_XATTR_SIZE(name_len) + OCFS2_XATTR_ROOT_SIZE;
492 size += sizeof(struct ocfs2_xattr_entry);
493 579
494 return size; 580static int ocfs2_xe_entry_usage(struct ocfs2_xattr_entry *xe)
581{
582 return namevalue_size_xe(xe) +
583 sizeof(struct ocfs2_xattr_entry);
495} 584}
496 585
497int ocfs2_calc_security_init(struct inode *dir, 586int ocfs2_calc_security_init(struct inode *dir,
@@ -1308,452 +1397,897 @@ out:
1308 return ret; 1397 return ret;
1309} 1398}
1310 1399
1311static int ocfs2_xattr_cleanup(struct inode *inode, 1400static int ocfs2_xa_check_space_helper(int needed_space, int free_start,
1312 handle_t *handle, 1401 int num_entries)
1313 struct ocfs2_xattr_info *xi,
1314 struct ocfs2_xattr_search *xs,
1315 struct ocfs2_xattr_value_buf *vb,
1316 size_t offs)
1317{ 1402{
1318 int ret = 0; 1403 int free_space;
1319 size_t name_len = strlen(xi->name);
1320 void *val = xs->base + offs;
1321 size_t size = OCFS2_XATTR_SIZE(name_len) + OCFS2_XATTR_ROOT_SIZE;
1322 1404
1323 ret = vb->vb_access(handle, INODE_CACHE(inode), vb->vb_bh, 1405 if (!needed_space)
1324 OCFS2_JOURNAL_ACCESS_WRITE); 1406 return 0;
1325 if (ret) {
1326 mlog_errno(ret);
1327 goto out;
1328 }
1329 /* Decrease xattr count */
1330 le16_add_cpu(&xs->header->xh_count, -1);
1331 /* Remove the xattr entry and tree root which have already been set */
1332 memset((void *)xs->here, 0, sizeof(struct ocfs2_xattr_entry));
1333 memset(val, 0, size);
1334 1407
1335 ret = ocfs2_journal_dirty(handle, vb->vb_bh); 1408 free_space = free_start -
1336 if (ret < 0) 1409 sizeof(struct ocfs2_xattr_header) -
1337 mlog_errno(ret); 1410 (num_entries * sizeof(struct ocfs2_xattr_entry)) -
1338out: 1411 OCFS2_XATTR_HEADER_GAP;
1339 return ret; 1412 if (free_space < 0)
1413 return -EIO;
1414 if (free_space < needed_space)
1415 return -ENOSPC;
1416
1417 return 0;
1340} 1418}
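One worked example of the helper's arithmetic (the numbers are invented for illustration):

/*
 * Suppose the lowest name+value pair starts at offset 3000 and three
 * entries are in use:
 *
 *	free_space = 3000 - sizeof(struct ocfs2_xattr_header)
 *			  - 3 * sizeof(struct ocfs2_xattr_entry)
 *			  - OCFS2_XATTR_HEADER_GAP;
 *
 * A negative free_space means the structure is corrupt (-EIO);
 * 0 <= free_space < needed_space means this storage is full (-ENOSPC).
 */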
1341 1419
1342static int ocfs2_xattr_update_entry(struct inode *inode, 1420static int ocfs2_xa_journal_access(handle_t *handle, struct ocfs2_xa_loc *loc,
1343 handle_t *handle, 1421 int type)
1344 struct ocfs2_xattr_info *xi,
1345 struct ocfs2_xattr_search *xs,
1346 struct ocfs2_xattr_value_buf *vb,
1347 size_t offs)
1348{ 1422{
1349 int ret; 1423 return loc->xl_ops->xlo_journal_access(handle, loc, type);
1424}
1350 1425
1351 ret = vb->vb_access(handle, INODE_CACHE(inode), vb->vb_bh, 1426static void ocfs2_xa_journal_dirty(handle_t *handle, struct ocfs2_xa_loc *loc)
1352 OCFS2_JOURNAL_ACCESS_WRITE); 1427{
1353 if (ret) { 1428 loc->xl_ops->xlo_journal_dirty(handle, loc);
1354 mlog_errno(ret); 1429}
1355 goto out;
1356 }
1357 1430
1358 xs->here->xe_name_offset = cpu_to_le16(offs); 1431/* Give a pointer into the storage for the given offset */
1359 xs->here->xe_value_size = cpu_to_le64(xi->value_len); 1432static void *ocfs2_xa_offset_pointer(struct ocfs2_xa_loc *loc, int offset)
1360 if (xi->value_len <= OCFS2_XATTR_INLINE_SIZE) 1433{
1361 ocfs2_xattr_set_local(xs->here, 1); 1434 BUG_ON(offset >= loc->xl_size);
1362 else 1435 return loc->xl_ops->xlo_offset_pointer(loc, offset);
1363 ocfs2_xattr_set_local(xs->here, 0); 1436}
1364 ocfs2_xattr_hash_entry(inode, xs->header, xs->here);
1365 1437
1366 ret = ocfs2_journal_dirty(handle, vb->vb_bh); 1438/*
1367 if (ret < 0) 1439 * Wipe the name+value pair and allow the storage to reclaim it. This
1368 mlog_errno(ret); 1440 * must be followed by either removal of the entry or a call to
1369out: 1441 * ocfs2_xa_add_namevalue().
1370 return ret; 1442 */
1443static void ocfs2_xa_wipe_namevalue(struct ocfs2_xa_loc *loc)
1444{
1445 loc->xl_ops->xlo_wipe_namevalue(loc);
1371} 1446}
1372 1447
1373/* 1448/*
1374 * ocfs2_xattr_set_value_outside() 1449 * Find lowest offset to a name+value pair. This is the start of our
1375 * 1450 * downward-growing free space.
1376 * Set a large value in the B-tree.
1377 */ 1451 */
1378static int ocfs2_xattr_set_value_outside(struct inode *inode, 1452static int ocfs2_xa_get_free_start(struct ocfs2_xa_loc *loc)
1379 struct ocfs2_xattr_info *xi,
1380 struct ocfs2_xattr_search *xs,
1381 struct ocfs2_xattr_set_ctxt *ctxt,
1382 struct ocfs2_xattr_value_buf *vb,
1383 size_t offs)
1384{ 1453{
1385 size_t name_len = strlen(xi->name); 1454 return loc->xl_ops->xlo_get_free_start(loc);
1386 void *val = xs->base + offs; 1455}
1387 struct ocfs2_xattr_value_root *xv = NULL;
1388 size_t size = OCFS2_XATTR_SIZE(name_len) + OCFS2_XATTR_ROOT_SIZE;
1389 int ret = 0;
1390 1456
1391 memset(val, 0, size); 1457/* Can we reuse loc->xl_entry for xi? */
1392 memcpy(val, xi->name, name_len); 1458static int ocfs2_xa_can_reuse_entry(struct ocfs2_xa_loc *loc,
1393 xv = (struct ocfs2_xattr_value_root *) 1459 struct ocfs2_xattr_info *xi)
1394 (val + OCFS2_XATTR_SIZE(name_len)); 1460{
1395 xv->xr_clusters = 0; 1461 return loc->xl_ops->xlo_can_reuse(loc, xi);
1396 xv->xr_last_eb_blk = 0; 1462}
1397 xv->xr_list.l_tree_depth = 0; 1463
1398 xv->xr_list.l_count = cpu_to_le16(1); 1464/* How much free space is needed to set the new value */
1399 xv->xr_list.l_next_free_rec = 0; 1465static int ocfs2_xa_check_space(struct ocfs2_xa_loc *loc,
1400 vb->vb_xv = xv; 1466 struct ocfs2_xattr_info *xi)
1401 1467{
1402 ret = ocfs2_xattr_value_truncate(inode, vb, xi->value_len, ctxt); 1468 return loc->xl_ops->xlo_check_space(loc, xi);
1403 if (ret < 0) { 1469}
1404 mlog_errno(ret); 1470
1405 return ret; 1471static void ocfs2_xa_add_entry(struct ocfs2_xa_loc *loc, u32 name_hash)
1472{
1473 loc->xl_ops->xlo_add_entry(loc, name_hash);
1474 loc->xl_entry->xe_name_hash = cpu_to_le32(name_hash);
1475 /*
1476 * We can't leave the new entry's xe_name_offset at zero or
1477 * add_namevalue() will go nuts. We set it to the size of our
1478 * storage so that it can never be less than any other entry.
1479 */
1480 loc->xl_entry->xe_name_offset = cpu_to_le16(loc->xl_size);
1481}
1482
1483static void ocfs2_xa_add_namevalue(struct ocfs2_xa_loc *loc,
1484 struct ocfs2_xattr_info *xi)
1485{
1486 int size = namevalue_size_xi(xi);
1487 int nameval_offset;
1488 char *nameval_buf;
1489
1490 loc->xl_ops->xlo_add_namevalue(loc, size);
1491 loc->xl_entry->xe_value_size = cpu_to_le64(xi->xi_value_len);
1492 loc->xl_entry->xe_name_len = xi->xi_name_len;
1493 ocfs2_xattr_set_type(loc->xl_entry, xi->xi_name_index);
1494 ocfs2_xattr_set_local(loc->xl_entry,
1495 xi->xi_value_len <= OCFS2_XATTR_INLINE_SIZE);
1496
1497 nameval_offset = le16_to_cpu(loc->xl_entry->xe_name_offset);
1498 nameval_buf = ocfs2_xa_offset_pointer(loc, nameval_offset);
1499 memset(nameval_buf, 0, size);
1500 memcpy(nameval_buf, xi->xi_name, xi->xi_name_len);
1501}
1502
1503static void ocfs2_xa_fill_value_buf(struct ocfs2_xa_loc *loc,
1504 struct ocfs2_xattr_value_buf *vb)
1505{
1506 int nameval_offset = le16_to_cpu(loc->xl_entry->xe_name_offset);
1507 int name_size = OCFS2_XATTR_SIZE(loc->xl_entry->xe_name_len);
1508
1509 /* Value bufs are for value trees */
1510 BUG_ON(ocfs2_xattr_is_local(loc->xl_entry));
1511 BUG_ON(namevalue_size_xe(loc->xl_entry) !=
1512 (name_size + OCFS2_XATTR_ROOT_SIZE));
1513
1514 loc->xl_ops->xlo_fill_value_buf(loc, vb);
1515 vb->vb_xv =
1516 (struct ocfs2_xattr_value_root *)ocfs2_xa_offset_pointer(loc,
1517 nameval_offset +
1518 name_size);
1519}
1520
1521static int ocfs2_xa_block_journal_access(handle_t *handle,
1522 struct ocfs2_xa_loc *loc, int type)
1523{
1524 struct buffer_head *bh = loc->xl_storage;
1525 ocfs2_journal_access_func access;
1526
1527 if (loc->xl_size == (bh->b_size -
1528 offsetof(struct ocfs2_xattr_block,
1529 xb_attrs.xb_header)))
1530 access = ocfs2_journal_access_xb;
1531 else
1532 access = ocfs2_journal_access_di;
1533 return access(handle, INODE_CACHE(loc->xl_inode), bh, type);
1534}
1535
1536static void ocfs2_xa_block_journal_dirty(handle_t *handle,
1537 struct ocfs2_xa_loc *loc)
1538{
1539 struct buffer_head *bh = loc->xl_storage;
1540
1541 ocfs2_journal_dirty(handle, bh);
1542}
1543
1544static void *ocfs2_xa_block_offset_pointer(struct ocfs2_xa_loc *loc,
1545 int offset)
1546{
1547 return (char *)loc->xl_header + offset;
1548}
1549
1550static int ocfs2_xa_block_can_reuse(struct ocfs2_xa_loc *loc,
1551 struct ocfs2_xattr_info *xi)
1552{
1553 /*
1554 * Block storage is strict. If the sizes aren't exact, we will
1555 * remove the old one and reinsert the new.
1556 */
1557 return namevalue_size_xe(loc->xl_entry) ==
1558 namevalue_size_xi(xi);
1559}
1560
1561static int ocfs2_xa_block_get_free_start(struct ocfs2_xa_loc *loc)
1562{
1563 struct ocfs2_xattr_header *xh = loc->xl_header;
1564 int i, count = le16_to_cpu(xh->xh_count);
1565 int offset, free_start = loc->xl_size;
1566
1567 for (i = 0; i < count; i++) {
1568 offset = le16_to_cpu(xh->xh_entries[i].xe_name_offset);
1569 if (offset < free_start)
1570 free_start = offset;
1406 } 1571 }
1407 ret = ocfs2_xattr_update_entry(inode, ctxt->handle, xi, xs, vb, offs); 1572
1408 if (ret < 0) { 1573 return free_start;
1409 mlog_errno(ret); 1574}
1410 return ret; 1575
1576static int ocfs2_xa_block_check_space(struct ocfs2_xa_loc *loc,
1577 struct ocfs2_xattr_info *xi)
1578{
1579 int count = le16_to_cpu(loc->xl_header->xh_count);
1580 int free_start = ocfs2_xa_get_free_start(loc);
1581 int needed_space = ocfs2_xi_entry_usage(xi);
1582
1583 /*
1584 * Block storage will reclaim the original entry before inserting
1585 * the new value, so we only need the difference. If the new
1586 * entry is smaller than the old one, we don't need anything.
1587 */
1588 if (loc->xl_entry) {
1589 /* Don't need space if we're reusing! */
1590 if (ocfs2_xa_can_reuse_entry(loc, xi))
1591 needed_space = 0;
1592 else
1593 needed_space -= ocfs2_xe_entry_usage(loc->xl_entry);
1411 } 1594 }
1412 ret = __ocfs2_xattr_set_value_outside(inode, ctxt->handle, vb, 1595 if (needed_space < 0)
1413 xi->value, xi->value_len); 1596 needed_space = 0;
1414 if (ret < 0) 1597 return ocfs2_xa_check_space_helper(needed_space, free_start, count);
1415 mlog_errno(ret); 1598}
1416 1599
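For instance (hypothetical sizes):

/*
 * Overwriting an entry whose ocfs2_xe_entry_usage() is 40 bytes with a
 * value whose ocfs2_xi_entry_usage() is 64 bytes only asks the helper
 * for 24 bytes, since the old pair is compacted away first. Shrinking
 * (64 -> 40) clamps needed_space to zero rather than crediting space.
 */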
1417 return ret; 1600/*
1601 * Block storage for xattrs keeps the name+value pairs compacted. When
1602 * we remove one, we have to shift any that preceded it towards the end.
1603 */
1604static void ocfs2_xa_block_wipe_namevalue(struct ocfs2_xa_loc *loc)
1605{
1606 int i, offset;
1607 int namevalue_offset, first_namevalue_offset, namevalue_size;
1608 struct ocfs2_xattr_entry *entry = loc->xl_entry;
1609 struct ocfs2_xattr_header *xh = loc->xl_header;
1610 int count = le16_to_cpu(xh->xh_count);
1611
1612 namevalue_offset = le16_to_cpu(entry->xe_name_offset);
1613 namevalue_size = namevalue_size_xe(entry);
1614 first_namevalue_offset = ocfs2_xa_get_free_start(loc);
1615
1616 /* Shift the name+value pairs */
1617 memmove((char *)xh + first_namevalue_offset + namevalue_size,
1618 (char *)xh + first_namevalue_offset,
1619 namevalue_offset - first_namevalue_offset);
1620 memset((char *)xh + first_namevalue_offset, 0, namevalue_size);
1621
1622 /* Now tell xh->xh_entries about it */
1623 for (i = 0; i < count; i++) {
1624 offset = le16_to_cpu(xh->xh_entries[i].xe_name_offset);
1625 if (offset < namevalue_offset)
1626 le16_add_cpu(&xh->xh_entries[i].xe_name_offset,
1627 namevalue_size);
1628 }
1629
1630 /*
1631 * Note that we don't update xh_free_start or xh_name_value_len
1632 * because they're not used in block-stored xattrs.
1633 */
1634}
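The shift in pictures (offsets are illustrative only):

/*
 *	lower offsets                               higher offsets
 *	before:	[hdr|entries| hole |C|B|victim|A ...]
 *	after:	[hdr|entries|     hole    |C|B|A ...]
 *
 * C and B sit at offsets below the victim, so the memmove slides them
 * up by namevalue_size bytes and the loop bumps their xe_name_offset by
 * the same delta; A, above the victim, never moves.
 */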
1635
1636static void ocfs2_xa_block_add_entry(struct ocfs2_xa_loc *loc, u32 name_hash)
1637{
1638 int count = le16_to_cpu(loc->xl_header->xh_count);
1639 loc->xl_entry = &(loc->xl_header->xh_entries[count]);
1640 le16_add_cpu(&loc->xl_header->xh_count, 1);
1641 memset(loc->xl_entry, 0, sizeof(struct ocfs2_xattr_entry));
1642}
1643
1644static void ocfs2_xa_block_add_namevalue(struct ocfs2_xa_loc *loc, int size)
1645{
1646 int free_start = ocfs2_xa_get_free_start(loc);
1647
1648 loc->xl_entry->xe_name_offset = cpu_to_le16(free_start - size);
1649}
1650
1651static void ocfs2_xa_block_fill_value_buf(struct ocfs2_xa_loc *loc,
1652 struct ocfs2_xattr_value_buf *vb)
1653{
1654 struct buffer_head *bh = loc->xl_storage;
1655
1656 if (loc->xl_size == (bh->b_size -
1657 offsetof(struct ocfs2_xattr_block,
1658 xb_attrs.xb_header)))
1659 vb->vb_access = ocfs2_journal_access_xb;
1660 else
1661 vb->vb_access = ocfs2_journal_access_di;
1662 vb->vb_bh = bh;
1418} 1663}
1419 1664
1420/* 1665/*
1421 * ocfs2_xattr_set_entry_local() 1666 * Operations for xattrs stored in blocks. This includes inline inode
1422 * 1667 * storage and unindexed ocfs2_xattr_blocks.
1423 * Set, replace, or remove an extended attribute stored locally.
1424 */ 1668 */
1425static void ocfs2_xattr_set_entry_local(struct inode *inode, 1669static const struct ocfs2_xa_loc_operations ocfs2_xa_block_loc_ops = {
1426 struct ocfs2_xattr_info *xi, 1670 .xlo_journal_access = ocfs2_xa_block_journal_access,
1427 struct ocfs2_xattr_search *xs, 1671 .xlo_journal_dirty = ocfs2_xa_block_journal_dirty,
1428 struct ocfs2_xattr_entry *last, 1672 .xlo_offset_pointer = ocfs2_xa_block_offset_pointer,
1429 size_t min_offs) 1673 .xlo_check_space = ocfs2_xa_block_check_space,
1674 .xlo_can_reuse = ocfs2_xa_block_can_reuse,
1675 .xlo_get_free_start = ocfs2_xa_block_get_free_start,
1676 .xlo_wipe_namevalue = ocfs2_xa_block_wipe_namevalue,
1677 .xlo_add_entry = ocfs2_xa_block_add_entry,
1678 .xlo_add_namevalue = ocfs2_xa_block_add_namevalue,
1679 .xlo_fill_value_buf = ocfs2_xa_block_fill_value_buf,
1680};
1681
1682static int ocfs2_xa_bucket_journal_access(handle_t *handle,
1683 struct ocfs2_xa_loc *loc, int type)
1430{ 1684{
1431 size_t name_len = strlen(xi->name); 1685 struct ocfs2_xattr_bucket *bucket = loc->xl_storage;
1432 int i;
1433 1686
1434 if (xi->value && xs->not_found) { 1687 return ocfs2_xattr_bucket_journal_access(handle, bucket, type);
1435 /* Insert the new xattr entry. */ 1688}
1436 le16_add_cpu(&xs->header->xh_count, 1); 1689
1437 ocfs2_xattr_set_type(last, xi->name_index); 1690static void ocfs2_xa_bucket_journal_dirty(handle_t *handle,
1438 ocfs2_xattr_set_local(last, 1); 1691 struct ocfs2_xa_loc *loc)
1439 last->xe_name_len = name_len; 1692{
1440 } else { 1693 struct ocfs2_xattr_bucket *bucket = loc->xl_storage;
1441 void *first_val; 1694
1442 void *val; 1695 ocfs2_xattr_bucket_journal_dirty(handle, bucket);
1443 size_t offs, size; 1696}
1444 1697
1445 first_val = xs->base + min_offs; 1698static void *ocfs2_xa_bucket_offset_pointer(struct ocfs2_xa_loc *loc,
1446 offs = le16_to_cpu(xs->here->xe_name_offset); 1699 int offset)
1447 val = xs->base + offs; 1700{
1448 1701 struct ocfs2_xattr_bucket *bucket = loc->xl_storage;
1449 if (le64_to_cpu(xs->here->xe_value_size) > 1702 int block, block_offset;
1450 OCFS2_XATTR_INLINE_SIZE) 1703
1451 size = OCFS2_XATTR_SIZE(name_len) + 1704 /* The header is at the front of the bucket */
1452 OCFS2_XATTR_ROOT_SIZE; 1705 block = offset >> loc->xl_inode->i_sb->s_blocksize_bits;
1706 block_offset = offset % loc->xl_inode->i_sb->s_blocksize;
1707
1708 return bucket_block(bucket, block) + block_offset;
1709}
1710
1711static int ocfs2_xa_bucket_can_reuse(struct ocfs2_xa_loc *loc,
1712 struct ocfs2_xattr_info *xi)
1713{
1714 return namevalue_size_xe(loc->xl_entry) >=
1715 namevalue_size_xi(xi);
1716}
1717
1718static int ocfs2_xa_bucket_get_free_start(struct ocfs2_xa_loc *loc)
1719{
1720 struct ocfs2_xattr_bucket *bucket = loc->xl_storage;
1721 return le16_to_cpu(bucket_xh(bucket)->xh_free_start);
1722}
1723
1724static int ocfs2_bucket_align_free_start(struct super_block *sb,
1725 int free_start, int size)
1726{
1727 /*
1728 * We need to make sure that the name+value pair fits within
1729 * one block.
1730 */
1731 if (((free_start - size) >> sb->s_blocksize_bits) !=
1732 ((free_start - 1) >> sb->s_blocksize_bits))
1733 free_start -= free_start % sb->s_blocksize;
1734
1735 return free_start;
1736}
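A worked example, assuming a hypothetical 512-byte block size (s_blocksize_bits == 9):

/*
 * With free_start == 520 and size == 16, the pair would span bytes
 * 504-519 and straddle the block boundary at 512, so free_start is
 * pulled back to 512 and the pair lands wholly inside the first block
 * (bytes 496-511). If the pair already fits, free_start is untouched.
 */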
1737
1738static int ocfs2_xa_bucket_check_space(struct ocfs2_xa_loc *loc,
1739 struct ocfs2_xattr_info *xi)
1740{
1741 int rc;
1742 int count = le16_to_cpu(loc->xl_header->xh_count);
1743 int free_start = ocfs2_xa_get_free_start(loc);
1744 int needed_space = ocfs2_xi_entry_usage(xi);
1745 int size = namevalue_size_xi(xi);
1746 struct super_block *sb = loc->xl_inode->i_sb;
1747
1748 /*
1749 * Bucket storage does not reclaim name+value pairs it cannot
1750 * reuse. They live as holes until the bucket fills, and then
1751 * the bucket is defragmented. However, the bucket can reclaim
1752 * the ocfs2_xattr_entry.
1753 */
1754 if (loc->xl_entry) {
1755 /* Don't need space if we're reusing! */
1756 if (ocfs2_xa_can_reuse_entry(loc, xi))
1757 needed_space = 0;
1453 else 1758 else
1454 size = OCFS2_XATTR_SIZE(name_len) + 1759 needed_space -= sizeof(struct ocfs2_xattr_entry);
1455 OCFS2_XATTR_SIZE(le64_to_cpu(xs->here->xe_value_size)); 1760 }
1456 1761 BUG_ON(needed_space < 0);
1457 if (xi->value && size == OCFS2_XATTR_SIZE(name_len) +
1458 OCFS2_XATTR_SIZE(xi->value_len)) {
1459 /* The old and the new value have the
1460 same size. Just replace the value. */
1461 ocfs2_xattr_set_local(xs->here, 1);
1462 xs->here->xe_value_size = cpu_to_le64(xi->value_len);
1463 /* Clear value bytes. */
1464 memset(val + OCFS2_XATTR_SIZE(name_len),
1465 0,
1466 OCFS2_XATTR_SIZE(xi->value_len));
1467 memcpy(val + OCFS2_XATTR_SIZE(name_len),
1468 xi->value,
1469 xi->value_len);
1470 return;
1471 }
1472 /* Remove the old name+value. */
1473 memmove(first_val + size, first_val, val - first_val);
1474 memset(first_val, 0, size);
1475 xs->here->xe_name_hash = 0;
1476 xs->here->xe_name_offset = 0;
1477 ocfs2_xattr_set_local(xs->here, 1);
1478 xs->here->xe_value_size = 0;
1479
1480 min_offs += size;
1481
1482 /* Adjust all value offsets. */
1483 last = xs->header->xh_entries;
1484 for (i = 0 ; i < le16_to_cpu(xs->header->xh_count); i++) {
1485 size_t o = le16_to_cpu(last->xe_name_offset);
1486
1487 if (o < offs)
1488 last->xe_name_offset = cpu_to_le16(o + size);
1489 last += 1;
1490 }
1491 1762
1492 if (!xi->value) { 1763 if (free_start < size) {
1493 /* Remove the old entry. */ 1764 if (needed_space)
1494 last -= 1; 1765 return -ENOSPC;
1495 memmove(xs->here, xs->here + 1, 1766 } else {
1496 (void *)last - (void *)xs->here); 1767 /*
1497 memset(last, 0, sizeof(struct ocfs2_xattr_entry)); 1768 * First we check if it would fit in the first place.
1498 le16_add_cpu(&xs->header->xh_count, -1); 1769 * Below, we align the free start to a block. This may
1499 } 1770 * slide us below the minimum gap. By checking unaligned
1771 * first, we avoid that error.
1772 */
1773 rc = ocfs2_xa_check_space_helper(needed_space, free_start,
1774 count);
1775 if (rc)
1776 return rc;
1777 free_start = ocfs2_bucket_align_free_start(sb, free_start,
1778 size);
1500 } 1779 }
1501 if (xi->value) { 1780 return ocfs2_xa_check_space_helper(needed_space, free_start, count);
1502 /* Insert the new name+value. */ 1781}
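Contrast this with the block-storage case above (hypothetical sizes):

/*
 * If a 64-byte pair in a bucket is overwritten by one needing 96 bytes,
 * the 64-byte region becomes a hole -- only the ocfs2_xattr_entry slot
 * is reclaimed -- so needed_space shrinks by
 * sizeof(struct ocfs2_xattr_entry), not by the full 64 bytes. Holes
 * persist until ocfs2_defrag_xattr_bucket() repacks the bucket.
 */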
1503 size_t size = OCFS2_XATTR_SIZE(name_len) + 1782
1504 OCFS2_XATTR_SIZE(xi->value_len); 1783static void ocfs2_xa_bucket_wipe_namevalue(struct ocfs2_xa_loc *loc)
1505 void *val = xs->base + min_offs - size; 1784{
1785 le16_add_cpu(&loc->xl_header->xh_name_value_len,
1786 -namevalue_size_xe(loc->xl_entry));
1787}
1506 1788
1507 xs->here->xe_name_offset = cpu_to_le16(min_offs - size); 1789static void ocfs2_xa_bucket_add_entry(struct ocfs2_xa_loc *loc, u32 name_hash)
1508 memset(val, 0, size); 1790{
1509 memcpy(val, xi->name, name_len); 1791 struct ocfs2_xattr_header *xh = loc->xl_header;
1510 memcpy(val + OCFS2_XATTR_SIZE(name_len), 1792 int count = le16_to_cpu(xh->xh_count);
1511 xi->value, 1793 int low = 0, high = count - 1, tmp;
1512 xi->value_len); 1794 struct ocfs2_xattr_entry *tmp_xe;
1513 xs->here->xe_value_size = cpu_to_le64(xi->value_len); 1795
1514 ocfs2_xattr_set_local(xs->here, 1); 1796 /*
1515 ocfs2_xattr_hash_entry(inode, xs->header, xs->here); 1797 * We keep buckets sorted by name_hash, so we need to find
1798 * our insert place.
1799 */
1800 while (low <= high && count) {
1801 tmp = (low + high) / 2;
1802 tmp_xe = &xh->xh_entries[tmp];
1803
1804 if (name_hash > le32_to_cpu(tmp_xe->xe_name_hash))
1805 low = tmp + 1;
1806 else if (name_hash < le32_to_cpu(tmp_xe->xe_name_hash))
1807 high = tmp - 1;
1808 else {
1809 low = tmp;
1810 break;
1811 }
1516 } 1812 }
1517 1813
1518 return; 1814 if (low != count)
1815 memmove(&xh->xh_entries[low + 1],
1816 &xh->xh_entries[low],
1817 ((count - low) * sizeof(struct ocfs2_xattr_entry)));
1818
1819 le16_add_cpu(&xh->xh_count, 1);
1820 loc->xl_entry = &xh->xh_entries[low];
1821 memset(loc->xl_entry, 0, sizeof(struct ocfs2_xattr_entry));
1822}
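An example insertion (hashes invented for illustration):

/*
 * With xh_entries hashes {10, 20, 40, 50} and name_hash == 30, the
 * search terminates with low == 2; entries {40, 50} are shifted one
 * slot right and the new entry lands at index 2, keeping the array
 * sorted. An equal hash breaks out early and inserts before the match;
 * relative order among equal hashes does not matter here.
 */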
1823
1824static void ocfs2_xa_bucket_add_namevalue(struct ocfs2_xa_loc *loc, int size)
1825{
1826 int free_start = ocfs2_xa_get_free_start(loc);
1827 struct ocfs2_xattr_header *xh = loc->xl_header;
1828 struct super_block *sb = loc->xl_inode->i_sb;
1829 int nameval_offset;
1830
1831 free_start = ocfs2_bucket_align_free_start(sb, free_start, size);
1832 nameval_offset = free_start - size;
1833 loc->xl_entry->xe_name_offset = cpu_to_le16(nameval_offset);
1834 xh->xh_free_start = cpu_to_le16(nameval_offset);
1835 le16_add_cpu(&xh->xh_name_value_len, size);
1836
1837}
1838
1839static void ocfs2_xa_bucket_fill_value_buf(struct ocfs2_xa_loc *loc,
1840 struct ocfs2_xattr_value_buf *vb)
1841{
1842 struct ocfs2_xattr_bucket *bucket = loc->xl_storage;
1843 struct super_block *sb = loc->xl_inode->i_sb;
1844 int nameval_offset = le16_to_cpu(loc->xl_entry->xe_name_offset);
1845 int size = namevalue_size_xe(loc->xl_entry);
1846 int block_offset = nameval_offset >> sb->s_blocksize_bits;
1847
1848 /* Values are not allowed to straddle block boundaries */
1849 BUG_ON(block_offset !=
1850 ((nameval_offset + size - 1) >> sb->s_blocksize_bits));
1851 /* We expect the bucket to be filled in */
1852 BUG_ON(!bucket->bu_bhs[block_offset]);
1853
1854 vb->vb_access = ocfs2_journal_access;
1855 vb->vb_bh = bucket->bu_bhs[block_offset];
1856}
1857
1858/* Operations for xattrs stored in buckets. */
1859static const struct ocfs2_xa_loc_operations ocfs2_xa_bucket_loc_ops = {
1860 .xlo_journal_access = ocfs2_xa_bucket_journal_access,
1861 .xlo_journal_dirty = ocfs2_xa_bucket_journal_dirty,
1862 .xlo_offset_pointer = ocfs2_xa_bucket_offset_pointer,
1863 .xlo_check_space = ocfs2_xa_bucket_check_space,
1864 .xlo_can_reuse = ocfs2_xa_bucket_can_reuse,
1865 .xlo_get_free_start = ocfs2_xa_bucket_get_free_start,
1866 .xlo_wipe_namevalue = ocfs2_xa_bucket_wipe_namevalue,
1867 .xlo_add_entry = ocfs2_xa_bucket_add_entry,
1868 .xlo_add_namevalue = ocfs2_xa_bucket_add_namevalue,
1869 .xlo_fill_value_buf = ocfs2_xa_bucket_fill_value_buf,
1870};
1871
1872static unsigned int ocfs2_xa_value_clusters(struct ocfs2_xa_loc *loc)
1873{
1874 struct ocfs2_xattr_value_buf vb;
1875
1876 if (ocfs2_xattr_is_local(loc->xl_entry))
1877 return 0;
1878
1879 ocfs2_xa_fill_value_buf(loc, &vb);
1880 return le32_to_cpu(vb.vb_xv->xr_clusters);
1881}
1882
1883static int ocfs2_xa_value_truncate(struct ocfs2_xa_loc *loc, u64 bytes,
1884 struct ocfs2_xattr_set_ctxt *ctxt)
1885{
1886 int trunc_rc, access_rc;
1887 struct ocfs2_xattr_value_buf vb;
1888
1889 ocfs2_xa_fill_value_buf(loc, &vb);
1890 trunc_rc = ocfs2_xattr_value_truncate(loc->xl_inode, &vb, bytes,
1891 ctxt);
1892
1893 /*
1894 * The caller of ocfs2_xa_value_truncate() has already called
1895 * ocfs2_xa_journal_access on the loc. However, the truncate code
1896 * calls ocfs2_extend_trans(). This may commit the previous
1897 * transaction and open a new one. If this is a bucket, truncate
1898 * could leave only vb->vb_bh set up for journaling. Meanwhile,
1899 * the caller is expecting to dirty the entire bucket. So we must
1900 * reset the journal work. We do this even if truncate has failed,
1901 * as it could have failed after committing the extend.
1902 */
1903 access_rc = ocfs2_xa_journal_access(ctxt->handle, loc,
1904 OCFS2_JOURNAL_ACCESS_WRITE);
1905
1906 /* Errors in truncate take precedence */
1907 return trunc_rc ? trunc_rc : access_rc;
1908}
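The calling pattern this protects looks roughly like (a sketch; error handling elided):

	ocfs2_xa_journal_access(handle, loc, OCFS2_JOURNAL_ACCESS_WRITE);
	...
	ocfs2_xa_value_truncate(loc, bytes, ctxt); /* may restart handle */
	...					   /* header edits follow */
	ocfs2_xa_journal_dirty(handle, loc);	   /* needs the re-access */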
1909
1910static void ocfs2_xa_remove_entry(struct ocfs2_xa_loc *loc)
1911{
1912 int index, count;
1913 struct ocfs2_xattr_header *xh = loc->xl_header;
1914 struct ocfs2_xattr_entry *entry = loc->xl_entry;
1915
1916 ocfs2_xa_wipe_namevalue(loc);
1917 loc->xl_entry = NULL;
1918
1919 le16_add_cpu(&xh->xh_count, -1);
1920 count = le16_to_cpu(xh->xh_count);
1921
1922 /*
1923 * Only zero out the entry if there are more remaining. This is
1924 * important for an empty bucket, as it keeps track of the
1925 * bucket's hash value. It doesn't hurt empty block storage.
1926 */
1927 if (count) {
1928 index = ((char *)entry - (char *)&xh->xh_entries) /
1929 sizeof(struct ocfs2_xattr_entry);
1930 memmove(&xh->xh_entries[index], &xh->xh_entries[index + 1],
1931 (count - index) * sizeof(struct ocfs2_xattr_entry));
1932 memset(&xh->xh_entries[count], 0,
1933 sizeof(struct ocfs2_xattr_entry));
1934 }
1519} 1935}
1520 1936
1521/* 1937/*
1522 * ocfs2_xattr_set_entry() 1938 * If we have a problem adjusting the size of an external value during
1939 * ocfs2_xa_prepare_entry() or ocfs2_xa_remove(), we may have an xattr
1940 * in an intermediate state. For example, the value may be partially
1941 * truncated.
1942 *
1943 * If the value tree hasn't changed, the extend/truncate went nowhere.
1944 * We have nothing to do. The caller can treat it as a straight error.
1523 * 1945 *
1524 * Set extended attribute entry into inode or block. 1946 * If the value tree got partially truncated, we now have a corrupted
1947 * extended attribute. We're going to wipe its entry and leak the
1948 * clusters. Better to leak some storage than leave a corrupt entry.
1525 * 1949 *
1526 * If extended attribute value size > OCFS2_XATTR_INLINE_SIZE, 1950 * If the value tree grew, it obviously didn't grow enough for the
1527 * we first insert the tree root (ocfs2_xattr_value_root) with set_entry_local(), 1951 * new entry. We're not going to try and reclaim those clusters either.
1528 * then set the value in the B-tree with set_value_outside(). 1952 * If there was already an external value there (orig_clusters != 0),
1953 * the new clusters are attached safely and we can just leave the old
1954 * value in place. If there was no external value there, we remove
1955 * the entry.
1956 *
1957 * This way, the xattr block we store in the journal will be consistent.
1958 * If the size change broke because of the journal, no changes will hit
1959 * disk anyway.
1529 */ 1960 */
1530static int ocfs2_xattr_set_entry(struct inode *inode, 1961static void ocfs2_xa_cleanup_value_truncate(struct ocfs2_xa_loc *loc,
1531 struct ocfs2_xattr_info *xi, 1962 const char *what,
1532 struct ocfs2_xattr_search *xs, 1963 unsigned int orig_clusters)
1533 struct ocfs2_xattr_set_ctxt *ctxt, 1964{
1534 int flag) 1965 unsigned int new_clusters = ocfs2_xa_value_clusters(loc);
1535{ 1966 char *nameval_buf = ocfs2_xa_offset_pointer(loc,
1536 struct ocfs2_xattr_entry *last; 1967 le16_to_cpu(loc->xl_entry->xe_name_offset));
1537 struct ocfs2_inode_info *oi = OCFS2_I(inode); 1968
1538 struct ocfs2_dinode *di = (struct ocfs2_dinode *)xs->inode_bh->b_data; 1969 if (new_clusters < orig_clusters) {
1539 size_t min_offs = xs->end - xs->base, name_len = strlen(xi->name); 1970 mlog(ML_ERROR,
1540 size_t size_l = 0; 1971 "Partial truncate while %s xattr %.*s. Leaking "
1541 handle_t *handle = ctxt->handle; 1972 "%u clusters and removing the entry\n",
1542 int free, i, ret; 1973 what, loc->xl_entry->xe_name_len, nameval_buf,
1543 struct ocfs2_xattr_info xi_l = { 1974 orig_clusters - new_clusters);
1544 .name_index = xi->name_index, 1975 ocfs2_xa_remove_entry(loc);
1545 .name = xi->name, 1976 } else if (!orig_clusters) {
1546 .value = xi->value, 1977 mlog(ML_ERROR,
1547 .value_len = xi->value_len, 1978 "Unable to allocate an external value for xattr "
1548 }; 1979 "%.*s safely. Leaking %u clusters and removing the "
1549 struct ocfs2_xattr_value_buf vb = { 1980 "entry\n",
1550 .vb_bh = xs->xattr_bh, 1981 loc->xl_entry->xe_name_len, nameval_buf,
1551 .vb_access = ocfs2_journal_access_di, 1982 new_clusters - orig_clusters);
1552 }; 1983 ocfs2_xa_remove_entry(loc);
1984 } else if (new_clusters > orig_clusters)
1985 mlog(ML_ERROR,
1986 "Unable to grow xattr %.*s safely. %u new clusters "
1987 "have been added, but the value will not be "
1988 "modified\n",
1989 loc->xl_entry->xe_name_len, nameval_buf,
1990 new_clusters - orig_clusters);
1991}
1992
1993static int ocfs2_xa_remove(struct ocfs2_xa_loc *loc,
1994 struct ocfs2_xattr_set_ctxt *ctxt)
1995{
1996 int rc = 0;
1997 unsigned int orig_clusters;
1998
1999 if (!ocfs2_xattr_is_local(loc->xl_entry)) {
2000 orig_clusters = ocfs2_xa_value_clusters(loc);
2001 rc = ocfs2_xa_value_truncate(loc, 0, ctxt);
2002 if (rc) {
2003 mlog_errno(rc);
2004 /*
2005 * Since this is remove, we can return 0 if
2006 * ocfs2_xa_cleanup_value_truncate() is going to
2007 * wipe the entry anyway. So we check the
2008 * cluster count as well.
2009 */
2010 if (orig_clusters != ocfs2_xa_value_clusters(loc))
2011 rc = 0;
2012 ocfs2_xa_cleanup_value_truncate(loc, "removing",
2013 orig_clusters);
2014 if (rc)
2015 goto out;
2016 }
2017 }
1553 2018
1554 if (!(flag & OCFS2_INLINE_XATTR_FL)) { 2019 ocfs2_xa_remove_entry(loc);
1555 BUG_ON(xs->xattr_bh == xs->inode_bh);
1556 vb.vb_access = ocfs2_journal_access_xb;
1557 } else
1558 BUG_ON(xs->xattr_bh != xs->inode_bh);
1559 2020
1560 /* Compute min_offs, last and free space. */ 2021out:
1561 last = xs->header->xh_entries; 2022 return rc;
2023}
1562 2024
1563 for (i = 0 ; i < le16_to_cpu(xs->header->xh_count); i++) { 2025static void ocfs2_xa_install_value_root(struct ocfs2_xa_loc *loc)
1564 size_t offs = le16_to_cpu(last->xe_name_offset); 2026{
1565 if (offs < min_offs) 2027 int name_size = OCFS2_XATTR_SIZE(loc->xl_entry->xe_name_len);
1566 min_offs = offs; 2028 char *nameval_buf;
1567 last += 1;
1568 }
1569 2029
1570 free = min_offs - ((void *)last - xs->base) - OCFS2_XATTR_HEADER_GAP; 2030 nameval_buf = ocfs2_xa_offset_pointer(loc,
1571 if (free < 0) 2031 le16_to_cpu(loc->xl_entry->xe_name_offset));
1572 return -EIO; 2032 memcpy(nameval_buf + name_size, &def_xv, OCFS2_XATTR_ROOT_SIZE);
2033}
1573 2034
1574 if (!xs->not_found) { 2035/*
1575 size_t size = 0; 2036 * Take an existing entry and make it ready for the new value. This
1576 if (ocfs2_xattr_is_local(xs->here)) 2037 * won't allocate space, but it may free space. It should be ready for
1577 size = OCFS2_XATTR_SIZE(name_len) + 2038 * ocfs2_xa_prepare_entry() to finish the work.
1578 OCFS2_XATTR_SIZE(le64_to_cpu(xs->here->xe_value_size)); 2039 */
1579 else 2040static int ocfs2_xa_reuse_entry(struct ocfs2_xa_loc *loc,
1580 size = OCFS2_XATTR_SIZE(name_len) + 2041 struct ocfs2_xattr_info *xi,
1581 OCFS2_XATTR_ROOT_SIZE; 2042 struct ocfs2_xattr_set_ctxt *ctxt)
1582 free += (size + sizeof(struct ocfs2_xattr_entry)); 2043{
1583 } 2044 int rc = 0;
1584 /* Check free space in inode or block */ 2045 int name_size = OCFS2_XATTR_SIZE(xi->xi_name_len);
1585 if (xi->value && xi->value_len > OCFS2_XATTR_INLINE_SIZE) { 2046 unsigned int orig_clusters;
1586 if (free < sizeof(struct ocfs2_xattr_entry) + 2047 char *nameval_buf;
1587 OCFS2_XATTR_SIZE(name_len) + 2048 int xe_local = ocfs2_xattr_is_local(loc->xl_entry);
1588 OCFS2_XATTR_ROOT_SIZE) { 2049 int xi_local = xi->xi_value_len <= OCFS2_XATTR_INLINE_SIZE;
1589 ret = -ENOSPC; 2050
1590 goto out; 2051 BUG_ON(OCFS2_XATTR_SIZE(loc->xl_entry->xe_name_len) !=
2052 name_size);
2053
2054 nameval_buf = ocfs2_xa_offset_pointer(loc,
2055 le16_to_cpu(loc->xl_entry->xe_name_offset));
2056 if (xe_local) {
2057 memset(nameval_buf + name_size, 0,
2058 namevalue_size_xe(loc->xl_entry) - name_size);
2059 if (!xi_local)
2060 ocfs2_xa_install_value_root(loc);
2061 } else {
2062 orig_clusters = ocfs2_xa_value_clusters(loc);
2063 if (xi_local) {
2064 rc = ocfs2_xa_value_truncate(loc, 0, ctxt);
2065 if (rc < 0)
2066 mlog_errno(rc);
2067 else
2068 memset(nameval_buf + name_size, 0,
2069 namevalue_size_xe(loc->xl_entry) -
2070 name_size);
2071 } else if (le64_to_cpu(loc->xl_entry->xe_value_size) >
2072 xi->xi_value_len) {
2073 rc = ocfs2_xa_value_truncate(loc, xi->xi_value_len,
2074 ctxt);
2075 if (rc < 0)
2076 mlog_errno(rc);
1591 } 2077 }
1592 size_l = OCFS2_XATTR_SIZE(name_len) + OCFS2_XATTR_ROOT_SIZE; 2078
1593 xi_l.value = (void *)&def_xv; 2079 if (rc) {
1594 xi_l.value_len = OCFS2_XATTR_ROOT_SIZE; 2080 ocfs2_xa_cleanup_value_truncate(loc, "reusing",
1595 } else if (xi->value) { 2081 orig_clusters);
1596 if (free < sizeof(struct ocfs2_xattr_entry) +
1597 OCFS2_XATTR_SIZE(name_len) +
1598 OCFS2_XATTR_SIZE(xi->value_len)) {
1599 ret = -ENOSPC;
1600 goto out; 2082 goto out;
1601 } 2083 }
1602 } 2084 }
1603 2085
1604 if (!xs->not_found) { 2086 loc->xl_entry->xe_value_size = cpu_to_le64(xi->xi_value_len);
1605 /* For existing extended attribute */ 2087 ocfs2_xattr_set_local(loc->xl_entry, xi_local);
1606 size_t size = OCFS2_XATTR_SIZE(name_len) +
1607 OCFS2_XATTR_SIZE(le64_to_cpu(xs->here->xe_value_size));
1608 size_t offs = le16_to_cpu(xs->here->xe_name_offset);
1609 void *val = xs->base + offs;
1610 2088
1611 if (ocfs2_xattr_is_local(xs->here) && size == size_l) { 2089out:
1612 /* Replace existing local xattr with tree root */ 2090 return rc;
1613 ret = ocfs2_xattr_set_value_outside(inode, xi, xs, 2091}
1614 ctxt, &vb, offs);
1615 if (ret < 0)
1616 mlog_errno(ret);
1617 goto out;
1618 } else if (!ocfs2_xattr_is_local(xs->here)) {
1619 /* For existing xattr which has value outside */
1620 vb.vb_xv = (struct ocfs2_xattr_value_root *)
1621 (val + OCFS2_XATTR_SIZE(name_len));
1622 2092
1623 if (xi->value_len > OCFS2_XATTR_INLINE_SIZE) { 2093/*
1624 /* 2094 * Prepares loc->xl_entry to receive the new xattr. This includes
1625 * If the new value also needs to be stored outside, 2095 * properly setting up the name+value pair region. If loc->xl_entry
1626 * first truncate the old value to the new length, 2096 * already exists, it will take care of modifying it appropriately.
1627 * then set the new value with set_value_outside(). 2097 *
1628 */ 2098 * Note that this modifies the data. You did journal_access already,
1629 ret = ocfs2_xattr_value_truncate(inode, 2099 * right?
1630 &vb, 2100 */
1631 xi->value_len, 2101static int ocfs2_xa_prepare_entry(struct ocfs2_xa_loc *loc,
1632 ctxt); 2102 struct ocfs2_xattr_info *xi,
1633 if (ret < 0) { 2103 u32 name_hash,
1634 mlog_errno(ret); 2104 struct ocfs2_xattr_set_ctxt *ctxt)
1635 goto out; 2105{
1636 } 2106 int rc = 0;
2107 unsigned int orig_clusters;
2108 __le64 orig_value_size = 0;
1637 2109
1638 ret = ocfs2_xattr_update_entry(inode, 2110 rc = ocfs2_xa_check_space(loc, xi);
1639 handle, 2111 if (rc)
1640 xi, 2112 goto out;
1641 xs,
1642 &vb,
1643 offs);
1644 if (ret < 0) {
1645 mlog_errno(ret);
1646 goto out;
1647 }
1648 2113
1649 ret = __ocfs2_xattr_set_value_outside(inode, 2114 if (loc->xl_entry) {
1650 handle, 2115 if (ocfs2_xa_can_reuse_entry(loc, xi)) {
1651 &vb, 2116 orig_value_size = loc->xl_entry->xe_value_size;
1652 xi->value, 2117 rc = ocfs2_xa_reuse_entry(loc, xi, ctxt);
1653 xi->value_len); 2118 if (rc)
1654 if (ret < 0) 2119 goto out;
1655 mlog_errno(ret); 2120 goto alloc_value;
2121 }
2122
2123 if (!ocfs2_xattr_is_local(loc->xl_entry)) {
2124 orig_clusters = ocfs2_xa_value_clusters(loc);
2125 rc = ocfs2_xa_value_truncate(loc, 0, ctxt);
2126 if (rc) {
2127 mlog_errno(rc);
2128 ocfs2_xa_cleanup_value_truncate(loc,
2129 "overwriting",
2130 orig_clusters);
1656 goto out; 2131 goto out;
1657 } else {
1658 /*
1659 * If the new value is stored locally,
1660 * just truncate the old value to zero.
1661 */
1662 ret = ocfs2_xattr_value_truncate(inode,
1663 &vb,
1664 0,
1665 ctxt);
1666 if (ret < 0)
1667 mlog_errno(ret);
1668 } 2132 }
1669 } 2133 }
2134 ocfs2_xa_wipe_namevalue(loc);
2135 } else
2136 ocfs2_xa_add_entry(loc, name_hash);
2137
2138 /*
2139 * If we get here, we have a blank entry. Fill it. We grow our
2140 * name+value pair back from the end.
2141 */
2142 ocfs2_xa_add_namevalue(loc, xi);
2143 if (xi->xi_value_len > OCFS2_XATTR_INLINE_SIZE)
2144 ocfs2_xa_install_value_root(loc);
2145
2146alloc_value:
2147 if (xi->xi_value_len > OCFS2_XATTR_INLINE_SIZE) {
2148 orig_clusters = ocfs2_xa_value_clusters(loc);
2149 rc = ocfs2_xa_value_truncate(loc, xi->xi_value_len, ctxt);
2150 if (rc < 0) {
2151 /*
2152 * If we tried to grow an existing external value,
2153 * ocfs2_xa_cleanup_value_truncate() is going to
2154 * let it stand. We have to restore its original
2155 * value size.
2156 */
2157 loc->xl_entry->xe_value_size = orig_value_size;
2158 ocfs2_xa_cleanup_value_truncate(loc, "growing",
2159 orig_clusters);
2160 mlog_errno(rc);
2161 }
1670 } 2162 }
1671 2163
1672 ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), xs->inode_bh, 2164out:
2165 return rc;
2166}
2167
2168/*
2169 * Store the value portion of the name+value pair. This will skip
2170 * values that are stored externally. Their tree roots were set up
2171 * by ocfs2_xa_prepare_entry().
2172 */
2173static int ocfs2_xa_store_value(struct ocfs2_xa_loc *loc,
2174 struct ocfs2_xattr_info *xi,
2175 struct ocfs2_xattr_set_ctxt *ctxt)
2176{
2177 int rc = 0;
2178 int nameval_offset = le16_to_cpu(loc->xl_entry->xe_name_offset);
2179 int name_size = OCFS2_XATTR_SIZE(xi->xi_name_len);
2180 char *nameval_buf;
2181 struct ocfs2_xattr_value_buf vb;
2182
2183 nameval_buf = ocfs2_xa_offset_pointer(loc, nameval_offset);
2184 if (xi->xi_value_len > OCFS2_XATTR_INLINE_SIZE) {
2185 ocfs2_xa_fill_value_buf(loc, &vb);
2186 rc = __ocfs2_xattr_set_value_outside(loc->xl_inode,
2187 ctxt->handle, &vb,
2188 xi->xi_value,
2189 xi->xi_value_len);
2190 } else
2191 memcpy(nameval_buf + name_size, xi->xi_value, xi->xi_value_len);
2192
2193 return rc;
2194}
2195
2196static int ocfs2_xa_set(struct ocfs2_xa_loc *loc,
2197 struct ocfs2_xattr_info *xi,
2198 struct ocfs2_xattr_set_ctxt *ctxt)
2199{
2200 int ret;
2201 u32 name_hash = ocfs2_xattr_name_hash(loc->xl_inode, xi->xi_name,
2202 xi->xi_name_len);
2203
2204 ret = ocfs2_xa_journal_access(ctxt->handle, loc,
1673 OCFS2_JOURNAL_ACCESS_WRITE); 2205 OCFS2_JOURNAL_ACCESS_WRITE);
1674 if (ret) { 2206 if (ret) {
1675 mlog_errno(ret); 2207 mlog_errno(ret);
1676 goto out; 2208 goto out;
1677 } 2209 }
1678 2210
1679 if (!(flag & OCFS2_INLINE_XATTR_FL)) {
1680 ret = vb.vb_access(handle, INODE_CACHE(inode), vb.vb_bh,
1681 OCFS2_JOURNAL_ACCESS_WRITE);
1682 if (ret) {
1683 mlog_errno(ret);
1684 goto out;
1685 }
1686 }
1687
1688 /* 2211 /*
1689 * Set value in local, include set tree root in local. 2212 * From here on out, everything is going to modify the buffer a
1690 * This is the first step for value size >INLINE_SIZE. 2213 * little. Errors are going to leave the xattr header in a
2214 * sane state. Thus, even with errors we dirty the sucker.
1691 */ 2215 */
1692 ocfs2_xattr_set_entry_local(inode, &xi_l, xs, last, min_offs);
1693 2216
1694 if (!(flag & OCFS2_INLINE_XATTR_FL)) { 2217 /* Don't worry, we are never called with !xi_value and !xl_entry */
1695 ret = ocfs2_journal_dirty(handle, xs->xattr_bh); 2218 if (!xi->xi_value) {
1696 if (ret < 0) { 2219 ret = ocfs2_xa_remove(loc, ctxt);
1697 mlog_errno(ret); 2220 goto out_dirty;
1698 goto out;
1699 }
1700 } 2221 }
1701 2222
1702 if (!(oi->ip_dyn_features & OCFS2_INLINE_XATTR_FL) && 2223 ret = ocfs2_xa_prepare_entry(loc, xi, name_hash, ctxt);
1703 (flag & OCFS2_INLINE_XATTR_FL)) { 2224 if (ret) {
1704 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); 2225 if (ret != -ENOSPC)
1705 unsigned int xattrsize = osb->s_xattr_inline_size; 2226 mlog_errno(ret);
1706 2227 goto out_dirty;
1707 /*
1708 * Adjust extent record count or inline data size
1709 * to reserve space for extended attribute.
1710 */
1711 if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
1712 struct ocfs2_inline_data *idata = &di->id2.i_data;
1713 le16_add_cpu(&idata->id_count, -xattrsize);
1714 } else if (!(ocfs2_inode_is_fast_symlink(inode))) {
1715 struct ocfs2_extent_list *el = &di->id2.i_list;
1716 le16_add_cpu(&el->l_count, -(xattrsize /
1717 sizeof(struct ocfs2_extent_rec)));
1718 }
1719 di->i_xattr_inline_size = cpu_to_le16(xattrsize);
1720 } 2228 }
1721 /* Update xattr flag */
1722 spin_lock(&oi->ip_lock);
1723 oi->ip_dyn_features |= flag;
1724 di->i_dyn_features = cpu_to_le16(oi->ip_dyn_features);
1725 spin_unlock(&oi->ip_lock);
1726 2229
1727 ret = ocfs2_journal_dirty(handle, xs->inode_bh); 2230 ret = ocfs2_xa_store_value(loc, xi, ctxt);
1728 if (ret < 0) 2231 if (ret)
1729 mlog_errno(ret); 2232 mlog_errno(ret);
1730 2233
1731 if (!ret && xi->value_len > OCFS2_XATTR_INLINE_SIZE) { 2234out_dirty:
1732 /* 2235 ocfs2_xa_journal_dirty(ctxt->handle, loc);
1733 * Set value outside in B tree.
1734 * This is the second step for value size > INLINE_SIZE.
1735 */
1736 size_t offs = le16_to_cpu(xs->here->xe_name_offset);
1737 ret = ocfs2_xattr_set_value_outside(inode, xi, xs, ctxt,
1738 &vb, offs);
1739 if (ret < 0) {
1740 int ret2;
1741 2236
1742 mlog_errno(ret);
1743 /*
1744 * If set value outside failed, we have to clean
1745 * the junk tree root we have already set in local.
1746 */
1747 ret2 = ocfs2_xattr_cleanup(inode, ctxt->handle,
1748 xi, xs, &vb, offs);
1749 if (ret2 < 0)
1750 mlog_errno(ret2);
1751 }
1752 }
1753out: 2237out:
1754 return ret; 2238 return ret;
1755} 2239}
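For a bucket-backed loc, one ocfs2_xa_set() call fans out through the vtable roughly as follows (a summary sketch, not code from the patch):

/*
 *	xlo_journal_access -> ocfs2_xa_bucket_journal_access()
 *	xlo_check_space    -> ocfs2_xa_bucket_check_space()
 *	xlo_add_entry      -> ocfs2_xa_bucket_add_entry()      (sorted insert)
 *	xlo_add_namevalue  -> ocfs2_xa_bucket_add_namevalue()  (block-aligned)
 *	xlo_fill_value_buf -> ocfs2_xa_bucket_fill_value_buf() (external values)
 *	xlo_journal_dirty  -> ocfs2_xa_bucket_journal_dirty()
 *
 * Block-backed locs swap in the ocfs2_xa_block_loc_ops callbacks instead.
 */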
1756 2240
2241static void ocfs2_init_dinode_xa_loc(struct ocfs2_xa_loc *loc,
2242 struct inode *inode,
2243 struct buffer_head *bh,
2244 struct ocfs2_xattr_entry *entry)
2245{
2246 struct ocfs2_dinode *di = (struct ocfs2_dinode *)bh->b_data;
2247
2248 BUG_ON(!(OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_XATTR_FL));
2249
2250 loc->xl_inode = inode;
2251 loc->xl_ops = &ocfs2_xa_block_loc_ops;
2252 loc->xl_storage = bh;
2253 loc->xl_entry = entry;
2254 loc->xl_size = le16_to_cpu(di->i_xattr_inline_size);
2255 loc->xl_header =
2256 (struct ocfs2_xattr_header *)(bh->b_data + bh->b_size -
2257 loc->xl_size);
2258}
2259
2260static void ocfs2_init_xattr_block_xa_loc(struct ocfs2_xa_loc *loc,
2261 struct inode *inode,
2262 struct buffer_head *bh,
2263 struct ocfs2_xattr_entry *entry)
2264{
2265 struct ocfs2_xattr_block *xb =
2266 (struct ocfs2_xattr_block *)bh->b_data;
2267
2268 BUG_ON(le16_to_cpu(xb->xb_flags) & OCFS2_XATTR_INDEXED);
2269
2270 loc->xl_inode = inode;
2271 loc->xl_ops = &ocfs2_xa_block_loc_ops;
2272 loc->xl_storage = bh;
2273 loc->xl_header = &(xb->xb_attrs.xb_header);
2274 loc->xl_entry = entry;
2275 loc->xl_size = bh->b_size - offsetof(struct ocfs2_xattr_block,
2276 xb_attrs.xb_header);
2277}
2278
2279static void ocfs2_init_xattr_bucket_xa_loc(struct ocfs2_xa_loc *loc,
2280 struct ocfs2_xattr_bucket *bucket,
2281 struct ocfs2_xattr_entry *entry)
2282{
2283 loc->xl_inode = bucket->bu_inode;
2284 loc->xl_ops = &ocfs2_xa_bucket_loc_ops;
2285 loc->xl_storage = bucket;
2286 loc->xl_header = bucket_xh(bucket);
2287 loc->xl_entry = entry;
2288 loc->xl_size = OCFS2_XATTR_BUCKET_SIZE;
2289}
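Putting the pieces together, a caller drives the abstraction in two steps; this mirrors ocfs2_xattr_ibody_set() further down (a sketch, error handling elided):

	struct ocfs2_xa_loc loc;

	ocfs2_init_dinode_xa_loc(&loc, inode, xs->inode_bh,
				 xs->not_found ? NULL : xs->here);
	ret = ocfs2_xa_set(&loc, xi, ctxt);
	if (!ret)
		xs->here = loc.xl_entry;	/* the entry may have moved */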
2290
1757/* 2291/*
1758 * In xattr remove, if it is stored outside and refcounted, we may have 2292 * In xattr remove, if it is stored outside and refcounted, we may have
1759 * the chance to split the refcount tree. So need the allocators. 2293 * the chance to split the refcount tree. So need the allocators.
@@ -2149,6 +2683,55 @@ static int ocfs2_xattr_ibody_find(struct inode *inode,
2149 return 0; 2683 return 0;
2150} 2684}
2151 2685
2686static int ocfs2_xattr_ibody_init(struct inode *inode,
2687 struct buffer_head *di_bh,
2688 struct ocfs2_xattr_set_ctxt *ctxt)
2689{
2690 int ret;
2691 struct ocfs2_inode_info *oi = OCFS2_I(inode);
2692 struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
2693 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
2694 unsigned int xattrsize = osb->s_xattr_inline_size;
2695
2696 if (!ocfs2_xattr_has_space_inline(inode, di)) {
2697 ret = -ENOSPC;
2698 goto out;
2699 }
2700
2701 ret = ocfs2_journal_access_di(ctxt->handle, INODE_CACHE(inode), di_bh,
2702 OCFS2_JOURNAL_ACCESS_WRITE);
2703 if (ret) {
2704 mlog_errno(ret);
2705 goto out;
2706 }
2707
2708 /*
2709 * Adjust extent record count or inline data size
2710 * to reserve space for extended attribute.
2711 */
2712 if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
2713 struct ocfs2_inline_data *idata = &di->id2.i_data;
2714 le16_add_cpu(&idata->id_count, -xattrsize);
2715 } else if (!(ocfs2_inode_is_fast_symlink(inode))) {
2716 struct ocfs2_extent_list *el = &di->id2.i_list;
2717 le16_add_cpu(&el->l_count, -(xattrsize /
2718 sizeof(struct ocfs2_extent_rec)));
2719 }
2720 di->i_xattr_inline_size = cpu_to_le16(xattrsize);
2721
2722 spin_lock(&oi->ip_lock);
2723 oi->ip_dyn_features |= OCFS2_INLINE_XATTR_FL|OCFS2_HAS_XATTR_FL;
2724 di->i_dyn_features = cpu_to_le16(oi->ip_dyn_features);
2725 spin_unlock(&oi->ip_lock);
2726
2727 ret = ocfs2_journal_dirty(ctxt->handle, di_bh);
2728 if (ret < 0)
2729 mlog_errno(ret);
2730
2731out:
2732 return ret;
2733}
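A concrete sizing example (hypothetical geometry; assumes 16-byte extent records):

/*
 * With s_xattr_inline_size == 256, an extent-list inode gives up
 * 256 / sizeof(struct ocfs2_extent_rec) == 16 records from l_count,
 * while an inline-data inode shrinks id_count by the full 256 bytes.
 * Either way, the tail of the dinode block now belongs to the
 * ocfs2_xattr_header.
 */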
2734
2152/* 2735/*
2153 * ocfs2_xattr_ibody_set() 2736 * ocfs2_xattr_ibody_set()
2154 * 2737 *
@@ -2160,9 +2743,10 @@ static int ocfs2_xattr_ibody_set(struct inode *inode,
2160 struct ocfs2_xattr_search *xs, 2743 struct ocfs2_xattr_search *xs,
2161 struct ocfs2_xattr_set_ctxt *ctxt) 2744 struct ocfs2_xattr_set_ctxt *ctxt)
2162{ 2745{
2746 int ret;
2163 struct ocfs2_inode_info *oi = OCFS2_I(inode); 2747 struct ocfs2_inode_info *oi = OCFS2_I(inode);
2164 struct ocfs2_dinode *di = (struct ocfs2_dinode *)xs->inode_bh->b_data; 2748 struct ocfs2_dinode *di = (struct ocfs2_dinode *)xs->inode_bh->b_data;
2165 int ret; 2749 struct ocfs2_xa_loc loc;
2166 2750
2167 if (inode->i_sb->s_blocksize == OCFS2_MIN_BLOCKSIZE) 2751 if (inode->i_sb->s_blocksize == OCFS2_MIN_BLOCKSIZE)
2168 return -ENOSPC; 2752 return -ENOSPC;
@@ -2175,8 +2759,25 @@ static int ocfs2_xattr_ibody_set(struct inode *inode,
2175 } 2759 }
2176 } 2760 }
2177 2761
2178 ret = ocfs2_xattr_set_entry(inode, xi, xs, ctxt, 2762 if (!(oi->ip_dyn_features & OCFS2_INLINE_XATTR_FL)) {
2179 (OCFS2_INLINE_XATTR_FL | OCFS2_HAS_XATTR_FL)); 2763 ret = ocfs2_xattr_ibody_init(inode, xs->inode_bh, ctxt);
2764 if (ret) {
2765 if (ret != -ENOSPC)
2766 mlog_errno(ret);
2767 goto out;
2768 }
2769 }
2770
2771 ocfs2_init_dinode_xa_loc(&loc, inode, xs->inode_bh,
2772 xs->not_found ? NULL : xs->here);
2773 ret = ocfs2_xa_set(&loc, xi, ctxt);
2774 if (ret) {
2775 if (ret != -ENOSPC)
2776 mlog_errno(ret);
2777 goto out;
2778 }
2779 xs->here = loc.xl_entry;
2780
2180out: 2781out:
2181 up_write(&oi->ip_alloc_sem); 2782 up_write(&oi->ip_alloc_sem);
2182 2783
@@ -2236,12 +2837,11 @@ cleanup:
2236 return ret; 2837 return ret;
2237} 2838}
2238 2839
2239static int ocfs2_create_xattr_block(handle_t *handle, 2840static int ocfs2_create_xattr_block(struct inode *inode,
2240 struct inode *inode,
2241 struct buffer_head *inode_bh, 2841 struct buffer_head *inode_bh,
2242 struct ocfs2_alloc_context *meta_ac, 2842 struct ocfs2_xattr_set_ctxt *ctxt,
2243 struct buffer_head **ret_bh, 2843 int indexed,
2244 int indexed) 2844 struct buffer_head **ret_bh)
2245{ 2845{
2246 int ret; 2846 int ret;
2247 u16 suballoc_bit_start; 2847 u16 suballoc_bit_start;
@@ -2252,14 +2852,14 @@ static int ocfs2_create_xattr_block(handle_t *handle,
2252 struct buffer_head *new_bh = NULL; 2852 struct buffer_head *new_bh = NULL;
2253 struct ocfs2_xattr_block *xblk; 2853 struct ocfs2_xattr_block *xblk;
2254 2854
2255 ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), inode_bh, 2855 ret = ocfs2_journal_access_di(ctxt->handle, INODE_CACHE(inode),
2256 OCFS2_JOURNAL_ACCESS_CREATE); 2856 inode_bh, OCFS2_JOURNAL_ACCESS_CREATE);
2257 if (ret < 0) { 2857 if (ret < 0) {
2258 mlog_errno(ret); 2858 mlog_errno(ret);
2259 goto end; 2859 goto end;
2260 } 2860 }
2261 2861
2262 ret = ocfs2_claim_metadata(osb, handle, meta_ac, 1, 2862 ret = ocfs2_claim_metadata(osb, ctxt->handle, ctxt->meta_ac, 1,
2263 &suballoc_bit_start, &num_got, 2863 &suballoc_bit_start, &num_got,
2264 &first_blkno); 2864 &first_blkno);
2265 if (ret < 0) { 2865 if (ret < 0) {
@@ -2270,7 +2870,7 @@ static int ocfs2_create_xattr_block(handle_t *handle,
2270 new_bh = sb_getblk(inode->i_sb, first_blkno); 2870 new_bh = sb_getblk(inode->i_sb, first_blkno);
2271 ocfs2_set_new_buffer_uptodate(INODE_CACHE(inode), new_bh); 2871 ocfs2_set_new_buffer_uptodate(INODE_CACHE(inode), new_bh);
2272 2872
2273 ret = ocfs2_journal_access_xb(handle, INODE_CACHE(inode), 2873 ret = ocfs2_journal_access_xb(ctxt->handle, INODE_CACHE(inode),
2274 new_bh, 2874 new_bh,
2275 OCFS2_JOURNAL_ACCESS_CREATE); 2875 OCFS2_JOURNAL_ACCESS_CREATE);
2276 if (ret < 0) { 2876 if (ret < 0) {
@@ -2282,11 +2882,10 @@ static int ocfs2_create_xattr_block(handle_t *handle,
2282 xblk = (struct ocfs2_xattr_block *)new_bh->b_data; 2882 xblk = (struct ocfs2_xattr_block *)new_bh->b_data;
2283 memset(xblk, 0, inode->i_sb->s_blocksize); 2883 memset(xblk, 0, inode->i_sb->s_blocksize);
2284 strcpy((void *)xblk, OCFS2_XATTR_BLOCK_SIGNATURE); 2884 strcpy((void *)xblk, OCFS2_XATTR_BLOCK_SIGNATURE);
2285 xblk->xb_suballoc_slot = cpu_to_le16(osb->slot_num); 2885 xblk->xb_suballoc_slot = cpu_to_le16(ctxt->meta_ac->ac_alloc_slot);
2286 xblk->xb_suballoc_bit = cpu_to_le16(suballoc_bit_start); 2886 xblk->xb_suballoc_bit = cpu_to_le16(suballoc_bit_start);
2287 xblk->xb_fs_generation = cpu_to_le32(osb->fs_generation); 2887 xblk->xb_fs_generation = cpu_to_le32(osb->fs_generation);
2288 xblk->xb_blkno = cpu_to_le64(first_blkno); 2888 xblk->xb_blkno = cpu_to_le64(first_blkno);
2289
2290 if (indexed) { 2889 if (indexed) {
2291 struct ocfs2_xattr_tree_root *xr = &xblk->xb_attrs.xb_root; 2890 struct ocfs2_xattr_tree_root *xr = &xblk->xb_attrs.xb_root;
2292 xr->xt_clusters = cpu_to_le32(1); 2891 xr->xt_clusters = cpu_to_le32(1);
@@ -2297,14 +2896,17 @@ static int ocfs2_create_xattr_block(handle_t *handle,
2297 xr->xt_list.l_next_free_rec = cpu_to_le16(1); 2896 xr->xt_list.l_next_free_rec = cpu_to_le16(1);
2298 xblk->xb_flags = cpu_to_le16(OCFS2_XATTR_INDEXED); 2897 xblk->xb_flags = cpu_to_le16(OCFS2_XATTR_INDEXED);
2299 } 2898 }
2899 ocfs2_journal_dirty(ctxt->handle, new_bh);
2300 2900
2301 ret = ocfs2_journal_dirty(handle, new_bh); 2901 /* Add it to the inode */
2302 if (ret < 0) {
2303 mlog_errno(ret);
2304 goto end;
2305 }
2306 di->i_xattr_loc = cpu_to_le64(first_blkno); 2902 di->i_xattr_loc = cpu_to_le64(first_blkno);
2307 ocfs2_journal_dirty(handle, inode_bh); 2903
2904 spin_lock(&OCFS2_I(inode)->ip_lock);
2905 OCFS2_I(inode)->ip_dyn_features |= OCFS2_HAS_XATTR_FL;
2906 di->i_dyn_features = cpu_to_le16(OCFS2_I(inode)->ip_dyn_features);
2907 spin_unlock(&OCFS2_I(inode)->ip_lock);
2908
2909 ocfs2_journal_dirty(ctxt->handle, inode_bh);
2308 2910
2309 *ret_bh = new_bh; 2911 *ret_bh = new_bh;
2310 new_bh = NULL; 2912 new_bh = NULL;
@@ -2326,13 +2928,13 @@ static int ocfs2_xattr_block_set(struct inode *inode,
2326 struct ocfs2_xattr_set_ctxt *ctxt) 2928 struct ocfs2_xattr_set_ctxt *ctxt)
2327{ 2929{
2328 struct buffer_head *new_bh = NULL; 2930 struct buffer_head *new_bh = NULL;
2329 handle_t *handle = ctxt->handle;
2330 struct ocfs2_xattr_block *xblk = NULL; 2931 struct ocfs2_xattr_block *xblk = NULL;
2331 int ret; 2932 int ret;
2933 struct ocfs2_xa_loc loc;
2332 2934
2333 if (!xs->xattr_bh) { 2935 if (!xs->xattr_bh) {
2334 ret = ocfs2_create_xattr_block(handle, inode, xs->inode_bh, 2936 ret = ocfs2_create_xattr_block(inode, xs->inode_bh, ctxt,
2335 ctxt->meta_ac, &new_bh, 0); 2937 0, &new_bh);
2336 if (ret) { 2938 if (ret) {
2337 mlog_errno(ret); 2939 mlog_errno(ret);
2338 goto end; 2940 goto end;
@@ -2348,21 +2950,25 @@ static int ocfs2_xattr_block_set(struct inode *inode,
2348 xblk = (struct ocfs2_xattr_block *)xs->xattr_bh->b_data; 2950 xblk = (struct ocfs2_xattr_block *)xs->xattr_bh->b_data;
2349 2951
2350 if (!(le16_to_cpu(xblk->xb_flags) & OCFS2_XATTR_INDEXED)) { 2952 if (!(le16_to_cpu(xblk->xb_flags) & OCFS2_XATTR_INDEXED)) {
2351 /* Set extended attribute into external block */ 2953 ocfs2_init_xattr_block_xa_loc(&loc, inode, xs->xattr_bh,
2352 ret = ocfs2_xattr_set_entry(inode, xi, xs, ctxt, 2954 xs->not_found ? NULL : xs->here);
2353 OCFS2_HAS_XATTR_FL);
2354 if (!ret || ret != -ENOSPC)
2355 goto end;
2356 2955
2357 ret = ocfs2_xattr_create_index_block(inode, xs, ctxt); 2956 ret = ocfs2_xa_set(&loc, xi, ctxt);
2358 if (ret) 2957 if (!ret)
2958 xs->here = loc.xl_entry;
2959 else if (ret != -ENOSPC)
2359 goto end; 2960 goto end;
2961 else {
2962 ret = ocfs2_xattr_create_index_block(inode, xs, ctxt);
2963 if (ret)
2964 goto end;
2965 }
2360 } 2966 }
2361 2967
2362 ret = ocfs2_xattr_set_entry_index_block(inode, xi, xs, ctxt); 2968 if (le16_to_cpu(xblk->xb_flags) & OCFS2_XATTR_INDEXED)
2969 ret = ocfs2_xattr_set_entry_index_block(inode, xi, xs, ctxt);
2363 2970
2364end: 2971end:
2365
2366 return ret; 2972 return ret;
2367} 2973}
2368 2974
@@ -2371,7 +2977,6 @@ static int ocfs2_xattr_can_be_in_inode(struct inode *inode,
2371 struct ocfs2_xattr_info *xi, 2977 struct ocfs2_xattr_info *xi,
2372 struct ocfs2_xattr_search *xs) 2978 struct ocfs2_xattr_search *xs)
2373{ 2979{
2374 u64 value_size;
2375 struct ocfs2_xattr_entry *last; 2980 struct ocfs2_xattr_entry *last;
2376 int free, i; 2981 int free, i;
2377 size_t min_offs = xs->end - xs->base; 2982 size_t min_offs = xs->end - xs->base;
@@ -2394,13 +2999,7 @@ static int ocfs2_xattr_can_be_in_inode(struct inode *inode,
2394 2999
2395 BUG_ON(!xs->not_found); 3000 BUG_ON(!xs->not_found);
2396 3001
2397 if (xi->value_len > OCFS2_XATTR_INLINE_SIZE) 3002 if (free >= (sizeof(struct ocfs2_xattr_entry) + namevalue_size_xi(xi)))
2398 value_size = OCFS2_XATTR_ROOT_SIZE;
2399 else
2400 value_size = OCFS2_XATTR_SIZE(xi->value_len);
2401
2402 if (free >= sizeof(struct ocfs2_xattr_entry) +
2403 OCFS2_XATTR_SIZE(strlen(xi->name)) + value_size)
2404 return 1; 3003 return 1;
2405 3004
2406 return 0; 3005 return 0;
@@ -2424,7 +3023,7 @@ static int ocfs2_calc_xattr_set_need(struct inode *inode,
2424 char *base = NULL; 3023 char *base = NULL;
2425 int name_offset, name_len = 0; 3024 int name_offset, name_len = 0;
2426 u32 new_clusters = ocfs2_clusters_for_bytes(inode->i_sb, 3025 u32 new_clusters = ocfs2_clusters_for_bytes(inode->i_sb,
2427 xi->value_len); 3026 xi->xi_value_len);
2428 u64 value_size; 3027 u64 value_size;
2429 3028
2430 /* 3029 /*
@@ -2432,14 +3031,14 @@ static int ocfs2_calc_xattr_set_need(struct inode *inode,
2432 * No matter whether we replace an old one or add a new one, 3031 * No matter whether we replace an old one or add a new one,
2433 * we need this for writing. 3032 * we need this for writing.
2434 */ 3033 */
2435 if (xi->value_len > OCFS2_XATTR_INLINE_SIZE) 3034 if (xi->xi_value_len > OCFS2_XATTR_INLINE_SIZE)
2436 credits += new_clusters * 3035 credits += new_clusters *
2437 ocfs2_clusters_to_blocks(inode->i_sb, 1); 3036 ocfs2_clusters_to_blocks(inode->i_sb, 1);
2438 3037
2439 if (xis->not_found && xbs->not_found) { 3038 if (xis->not_found && xbs->not_found) {
2440 credits += ocfs2_blocks_per_xattr_bucket(inode->i_sb); 3039 credits += ocfs2_blocks_per_xattr_bucket(inode->i_sb);
2441 3040
2442 if (xi->value_len > OCFS2_XATTR_INLINE_SIZE) { 3041 if (xi->xi_value_len > OCFS2_XATTR_INLINE_SIZE) {
2443 clusters_add += new_clusters; 3042 clusters_add += new_clusters;
2444 credits += ocfs2_calc_extend_credits(inode->i_sb, 3043 credits += ocfs2_calc_extend_credits(inode->i_sb,
2445 &def_xv.xv.xr_list, 3044 &def_xv.xv.xr_list,
@@ -2484,7 +3083,7 @@ static int ocfs2_calc_xattr_set_need(struct inode *inode,
2484 * The credits for removing the value tree will be extended 3083 * The credits for removing the value tree will be extended
2485 * by ocfs2_remove_extent itself. 3084 * by ocfs2_remove_extent itself.
2486 */ 3085 */
2487 if (!xi->value) { 3086 if (!xi->xi_value) {
2488 if (!ocfs2_xattr_is_local(xe)) 3087 if (!ocfs2_xattr_is_local(xe))
2489 credits += ocfs2_remove_extent_credits(inode->i_sb); 3088 credits += ocfs2_remove_extent_credits(inode->i_sb);
2490 3089
@@ -2514,7 +3113,7 @@ static int ocfs2_calc_xattr_set_need(struct inode *inode,
2514 } 3113 }
2515 } 3114 }
2516 3115
2517 if (xi->value_len > OCFS2_XATTR_INLINE_SIZE) { 3116 if (xi->xi_value_len > OCFS2_XATTR_INLINE_SIZE) {
2518 /* the new values will be stored outside. */ 3117 /* the new values will be stored outside. */
2519 u32 old_clusters = 0; 3118 u32 old_clusters = 0;
2520 3119
@@ -2547,9 +3146,10 @@ static int ocfs2_calc_xattr_set_need(struct inode *inode,
2547 * value, we don't need any allocation, otherwise we have 3146 * value, we don't need any allocation, otherwise we have
2548 * to guess metadata allocation. 3147 * to guess metadata allocation.
2549 */ 3148 */
2550 if ((ocfs2_xattr_is_local(xe) && value_size >= xi->value_len) || 3149 if ((ocfs2_xattr_is_local(xe) &&
3150 (value_size >= xi->xi_value_len)) ||
2551 (!ocfs2_xattr_is_local(xe) && 3151 (!ocfs2_xattr_is_local(xe) &&
2552 OCFS2_XATTR_ROOT_SIZE >= xi->value_len)) 3152 OCFS2_XATTR_ROOT_SIZE >= xi->xi_value_len))
2553 goto out; 3153 goto out;
2554 } 3154 }
2555 3155
@@ -2639,7 +3239,7 @@ static int ocfs2_init_xattr_set_ctxt(struct inode *inode,
2639 3239
2640 meta_add += extra_meta; 3240 meta_add += extra_meta;
2641 mlog(0, "Set xattr %s, reserve meta blocks = %d, clusters = %d, " 3241 mlog(0, "Set xattr %s, reserve meta blocks = %d, clusters = %d, "
2642 "credits = %d\n", xi->name, meta_add, clusters_add, *credits); 3242 "credits = %d\n", xi->xi_name, meta_add, clusters_add, *credits);
2643 3243
2644 if (meta_add) { 3244 if (meta_add) {
2645 ret = ocfs2_reserve_new_metadata_blocks(osb, meta_add, 3245 ret = ocfs2_reserve_new_metadata_blocks(osb, meta_add,
@@ -2679,7 +3279,7 @@ static int __ocfs2_xattr_set_handle(struct inode *inode,
2679{ 3279{
2680 int ret = 0, credits, old_found; 3280 int ret = 0, credits, old_found;
2681 3281
2682 if (!xi->value) { 3282 if (!xi->xi_value) {
2683 /* Remove existing extended attribute */ 3283 /* Remove existing extended attribute */
2684 if (!xis->not_found) 3284 if (!xis->not_found)
2685 ret = ocfs2_xattr_ibody_set(inode, xi, xis, ctxt); 3285 ret = ocfs2_xattr_ibody_set(inode, xi, xis, ctxt);
@@ -2693,8 +3293,8 @@ static int __ocfs2_xattr_set_handle(struct inode *inode,
2693 * If that succeeds and the extended attribute exists in an 3293 * If that succeeds and the extended attribute exists in an
2694 * external block, then we will remove it. 3294 * external block, then we will remove it.
2695 */ 3295 */
2696 xi->value = NULL; 3296 xi->xi_value = NULL;
2697 xi->value_len = 0; 3297 xi->xi_value_len = 0;
2698 3298
2699 old_found = xis->not_found; 3299 old_found = xis->not_found;
2700 xis->not_found = -ENODATA; 3300 xis->not_found = -ENODATA;
@@ -2722,8 +3322,8 @@ static int __ocfs2_xattr_set_handle(struct inode *inode,
2722 } else if (ret == -ENOSPC) { 3322 } else if (ret == -ENOSPC) {
2723 if (di->i_xattr_loc && !xbs->xattr_bh) { 3323 if (di->i_xattr_loc && !xbs->xattr_bh) {
2724 ret = ocfs2_xattr_block_find(inode, 3324 ret = ocfs2_xattr_block_find(inode,
2725 xi->name_index, 3325 xi->xi_name_index,
2726 xi->name, xbs); 3326 xi->xi_name, xbs);
2727 if (ret) 3327 if (ret)
2728 goto out; 3328 goto out;
2729 3329
@@ -2762,8 +3362,8 @@ static int __ocfs2_xattr_set_handle(struct inode *inode,
2762 * If that succeeds and the extended attribute 3362 * If that succeeds and the extended attribute
2763 * exists in the inode, we will remove it. 3363 * exists in the inode, we will remove it.
2764 */ 3364 */
2765 xi->value = NULL; 3365 xi->xi_value = NULL;
2766 xi->value_len = 0; 3366 xi->xi_value_len = 0;
2767 xbs->not_found = -ENODATA; 3367 xbs->not_found = -ENODATA;
2768 ret = ocfs2_calc_xattr_set_need(inode, 3368 ret = ocfs2_calc_xattr_set_need(inode,
2769 di, 3369 di,
@@ -2829,10 +3429,11 @@ int ocfs2_xattr_set_handle(handle_t *handle,
2829 int ret; 3429 int ret;
2830 3430
2831 struct ocfs2_xattr_info xi = { 3431 struct ocfs2_xattr_info xi = {
2832 .name_index = name_index, 3432 .xi_name_index = name_index,
2833 .name = name, 3433 .xi_name = name,
2834 .value = value, 3434 .xi_name_len = strlen(name),
2835 .value_len = value_len, 3435 .xi_value = value,
3436 .xi_value_len = value_len,
2836 }; 3437 };
2837 3438
2838 struct ocfs2_xattr_search xis = { 3439 struct ocfs2_xattr_search xis = {
@@ -2912,10 +3513,11 @@ int ocfs2_xattr_set(struct inode *inode,
2912 struct ocfs2_refcount_tree *ref_tree = NULL; 3513 struct ocfs2_refcount_tree *ref_tree = NULL;
2913 3514
2914 struct ocfs2_xattr_info xi = { 3515 struct ocfs2_xattr_info xi = {
2915 .name_index = name_index, 3516 .xi_name_index = name_index,
2916 .name = name, 3517 .xi_name = name,
2917 .value = value, 3518 .xi_name_len = strlen(name),
2918 .value_len = value_len, 3519 .xi_value = value,
3520 .xi_value_len = value_len,
2919 }; 3521 };
2920 3522
2921 struct ocfs2_xattr_search xis = { 3523 struct ocfs2_xattr_search xis = {
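Both initializers above now use the xi_-prefixed field names and add the new xi_name_len member, precomputed with strlen(). Reconstructed from these initializers, the structure they fill is roughly the following (a sketch; the real definition, and any members not exercised here, live outside this excerpt):

	/* Sketch inferred from the initializers above -- not the actual definition. */
	struct ocfs2_xattr_info {
		int		 xi_name_index;
		const char	*xi_name;
		int		 xi_name_len;
		const void	*xi_value;
		size_t		 xi_value_len;
	};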
@@ -3759,7 +4361,7 @@ static int ocfs2_defrag_xattr_bucket(struct inode *inode,
3759 struct ocfs2_xattr_bucket *bucket) 4361 struct ocfs2_xattr_bucket *bucket)
3760{ 4362{
3761 int ret, i; 4363 int ret, i;
3762 size_t end, offset, len, value_len; 4364 size_t end, offset, len;
3763 struct ocfs2_xattr_header *xh; 4365 struct ocfs2_xattr_header *xh;
3764 char *entries, *buf, *bucket_buf = NULL; 4366 char *entries, *buf, *bucket_buf = NULL;
3765 u64 blkno = bucket_blkno(bucket); 4367 u64 blkno = bucket_blkno(bucket);
@@ -3813,12 +4415,7 @@ static int ocfs2_defrag_xattr_bucket(struct inode *inode,
3813 end = OCFS2_XATTR_BUCKET_SIZE; 4415 end = OCFS2_XATTR_BUCKET_SIZE;
3814 for (i = 0; i < le16_to_cpu(xh->xh_count); i++, xe++) { 4416 for (i = 0; i < le16_to_cpu(xh->xh_count); i++, xe++) {
3815 offset = le16_to_cpu(xe->xe_name_offset); 4417 offset = le16_to_cpu(xe->xe_name_offset);
3816 if (ocfs2_xattr_is_local(xe)) 4418 len = namevalue_size_xe(xe);
3817 value_len = OCFS2_XATTR_SIZE(
3818 le64_to_cpu(xe->xe_value_size));
3819 else
3820 value_len = OCFS2_XATTR_ROOT_SIZE;
3821 len = OCFS2_XATTR_SIZE(xe->xe_name_len) + value_len;
3822 4419
3823 /* 4420 /*
3824 * We must make sure that the name/value pair 4421 * We must make sure that the name/value pair
@@ -4007,7 +4604,7 @@ static int ocfs2_divide_xattr_bucket(struct inode *inode,
4007 int new_bucket_head) 4604 int new_bucket_head)
4008{ 4605{
4009 int ret, i; 4606 int ret, i;
4010 int count, start, len, name_value_len = 0, xe_len, name_offset = 0; 4607 int count, start, len, name_value_len = 0, name_offset = 0;
4011 struct ocfs2_xattr_bucket *s_bucket = NULL, *t_bucket = NULL; 4608 struct ocfs2_xattr_bucket *s_bucket = NULL, *t_bucket = NULL;
4012 struct ocfs2_xattr_header *xh; 4609 struct ocfs2_xattr_header *xh;
4013 struct ocfs2_xattr_entry *xe; 4610 struct ocfs2_xattr_entry *xe;
@@ -4098,13 +4695,7 @@ static int ocfs2_divide_xattr_bucket(struct inode *inode,
4098 name_value_len = 0; 4695 name_value_len = 0;
4099 for (i = 0; i < start; i++) { 4696 for (i = 0; i < start; i++) {
4100 xe = &xh->xh_entries[i]; 4697 xe = &xh->xh_entries[i];
4101 xe_len = OCFS2_XATTR_SIZE(xe->xe_name_len); 4698 name_value_len += namevalue_size_xe(xe);
4102 if (ocfs2_xattr_is_local(xe))
4103 xe_len +=
4104 OCFS2_XATTR_SIZE(le64_to_cpu(xe->xe_value_size));
4105 else
4106 xe_len += OCFS2_XATTR_ROOT_SIZE;
4107 name_value_len += xe_len;
4108 if (le16_to_cpu(xe->xe_name_offset) < name_offset) 4699 if (le16_to_cpu(xe->xe_name_offset) < name_offset)
4109 name_offset = le16_to_cpu(xe->xe_name_offset); 4700 name_offset = le16_to_cpu(xe->xe_name_offset);
4110 } 4701 }
@@ -4134,12 +4725,6 @@ static int ocfs2_divide_xattr_bucket(struct inode *inode,
4134 xh->xh_free_start = cpu_to_le16(OCFS2_XATTR_BUCKET_SIZE); 4725 xh->xh_free_start = cpu_to_le16(OCFS2_XATTR_BUCKET_SIZE);
4135 for (i = 0; i < le16_to_cpu(xh->xh_count); i++) { 4726 for (i = 0; i < le16_to_cpu(xh->xh_count); i++) {
4136 xe = &xh->xh_entries[i]; 4727 xe = &xh->xh_entries[i];
4137 xe_len = OCFS2_XATTR_SIZE(xe->xe_name_len);
4138 if (ocfs2_xattr_is_local(xe))
4139 xe_len +=
4140 OCFS2_XATTR_SIZE(le64_to_cpu(xe->xe_value_size));
4141 else
4142 xe_len += OCFS2_XATTR_ROOT_SIZE;
4143 if (le16_to_cpu(xe->xe_name_offset) < 4728 if (le16_to_cpu(xe->xe_name_offset) <
4144 le16_to_cpu(xh->xh_free_start)) 4729 le16_to_cpu(xh->xh_free_start))
4145 xh->xh_free_start = xe->xe_name_offset; 4730 xh->xh_free_start = xe->xe_name_offset;
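The hunks above replace the same open-coded size computation with the new namevalue_size_xe() helper, and simply delete it where, as in the last hunk, the result was never used. The helper is introduced earlier in the patch, outside this excerpt; matching it against the removed code, it must compute roughly:

	/* Sketch matching the removed open-coded logic above (assumed shape). */
	static int namevalue_size_xe(struct ocfs2_xattr_entry *xe)
	{
		int size = OCFS2_XATTR_SIZE(xe->xe_name_len);

		if (ocfs2_xattr_is_local(xe))
			size += OCFS2_XATTR_SIZE(le64_to_cpu(xe->xe_value_size));
		else
			size += OCFS2_XATTR_ROOT_SIZE;
		return size;
	}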
@@ -4751,195 +5336,6 @@ static inline char *ocfs2_xattr_bucket_get_val(struct inode *inode,
4751} 5336}
4752 5337
4753/* 5338/*
4754 * Handle the normal xattr set, including replace, delete and new.
4755 *
4756 * Note: "local" indicates the real data's locality. So we can't
 4757 * judge its bucket locality just by its length.
4758 */
4759static void ocfs2_xattr_set_entry_normal(struct inode *inode,
4760 struct ocfs2_xattr_info *xi,
4761 struct ocfs2_xattr_search *xs,
4762 u32 name_hash,
4763 int local)
4764{
4765 struct ocfs2_xattr_entry *last, *xe;
4766 int name_len = strlen(xi->name);
4767 struct ocfs2_xattr_header *xh = xs->header;
4768 u16 count = le16_to_cpu(xh->xh_count), start;
4769 size_t blocksize = inode->i_sb->s_blocksize;
4770 char *val;
4771 size_t offs, size, new_size;
4772
4773 last = &xh->xh_entries[count];
4774 if (!xs->not_found) {
4775 xe = xs->here;
4776 offs = le16_to_cpu(xe->xe_name_offset);
4777 if (ocfs2_xattr_is_local(xe))
4778 size = OCFS2_XATTR_SIZE(name_len) +
4779 OCFS2_XATTR_SIZE(le64_to_cpu(xe->xe_value_size));
4780 else
4781 size = OCFS2_XATTR_SIZE(name_len) +
4782 OCFS2_XATTR_SIZE(OCFS2_XATTR_ROOT_SIZE);
4783
4784 /*
4785 * If the new value will be stored outside, xi->value has been
 4786 * initialized as an empty ocfs2_xattr_value_root, and the same
4787 * goes with xi->value_len, so we can set new_size safely here.
4788 * See ocfs2_xattr_set_in_bucket.
4789 */
4790 new_size = OCFS2_XATTR_SIZE(name_len) +
4791 OCFS2_XATTR_SIZE(xi->value_len);
4792
4793 le16_add_cpu(&xh->xh_name_value_len, -size);
4794 if (xi->value) {
4795 if (new_size > size)
4796 goto set_new_name_value;
4797
4798 /* Now replace the old value with new one. */
4799 if (local)
4800 xe->xe_value_size = cpu_to_le64(xi->value_len);
4801 else
4802 xe->xe_value_size = 0;
4803
4804 val = ocfs2_xattr_bucket_get_val(inode,
4805 xs->bucket, offs);
4806 memset(val + OCFS2_XATTR_SIZE(name_len), 0,
4807 size - OCFS2_XATTR_SIZE(name_len));
4808 if (OCFS2_XATTR_SIZE(xi->value_len) > 0)
4809 memcpy(val + OCFS2_XATTR_SIZE(name_len),
4810 xi->value, xi->value_len);
4811
4812 le16_add_cpu(&xh->xh_name_value_len, new_size);
4813 ocfs2_xattr_set_local(xe, local);
4814 return;
4815 } else {
4816 /*
4817 * Remove the old entry if there is more than one.
4818 * We don't remove the last entry so that we can
4819 * use it to indicate the hash value of the empty
4820 * bucket.
4821 */
4822 last -= 1;
4823 le16_add_cpu(&xh->xh_count, -1);
4824 if (xh->xh_count) {
4825 memmove(xe, xe + 1,
4826 (void *)last - (void *)xe);
4827 memset(last, 0,
4828 sizeof(struct ocfs2_xattr_entry));
4829 } else
4830 xh->xh_free_start =
4831 cpu_to_le16(OCFS2_XATTR_BUCKET_SIZE);
4832
4833 return;
4834 }
4835 } else {
4836 /* find a new entry for insert. */
4837 int low = 0, high = count - 1, tmp;
4838 struct ocfs2_xattr_entry *tmp_xe;
4839
4840 while (low <= high && count) {
4841 tmp = (low + high) / 2;
4842 tmp_xe = &xh->xh_entries[tmp];
4843
4844 if (name_hash > le32_to_cpu(tmp_xe->xe_name_hash))
4845 low = tmp + 1;
4846 else if (name_hash <
4847 le32_to_cpu(tmp_xe->xe_name_hash))
4848 high = tmp - 1;
4849 else {
4850 low = tmp;
4851 break;
4852 }
4853 }
4854
4855 xe = &xh->xh_entries[low];
4856 if (low != count)
4857 memmove(xe + 1, xe, (void *)last - (void *)xe);
4858
4859 le16_add_cpu(&xh->xh_count, 1);
4860 memset(xe, 0, sizeof(struct ocfs2_xattr_entry));
4861 xe->xe_name_hash = cpu_to_le32(name_hash);
4862 xe->xe_name_len = name_len;
4863 ocfs2_xattr_set_type(xe, xi->name_index);
4864 }
4865
4866set_new_name_value:
4867 /* Insert the new name+value. */
4868 size = OCFS2_XATTR_SIZE(name_len) + OCFS2_XATTR_SIZE(xi->value_len);
4869
4870 /*
4871 * We must make sure that the name/value pair
4872 * exists in the same block.
4873 */
4874 offs = le16_to_cpu(xh->xh_free_start);
4875 start = offs - size;
4876
4877 if (start >> inode->i_sb->s_blocksize_bits !=
4878 (offs - 1) >> inode->i_sb->s_blocksize_bits) {
4879 offs = offs - offs % blocksize;
4880 xh->xh_free_start = cpu_to_le16(offs);
4881 }
4882
4883 val = ocfs2_xattr_bucket_get_val(inode, xs->bucket, offs - size);
4884 xe->xe_name_offset = cpu_to_le16(offs - size);
4885
4886 memset(val, 0, size);
4887 memcpy(val, xi->name, name_len);
4888 memcpy(val + OCFS2_XATTR_SIZE(name_len), xi->value, xi->value_len);
4889
4890 xe->xe_value_size = cpu_to_le64(xi->value_len);
4891 ocfs2_xattr_set_local(xe, local);
4892 xs->here = xe;
4893 le16_add_cpu(&xh->xh_free_start, -size);
4894 le16_add_cpu(&xh->xh_name_value_len, size);
4895
4896 return;
4897}
4898
4899/*
4900 * Set the xattr entry in the specified bucket.
 4901 * The bucket is indicated by xs->bucket and it should have enough
4902 * space for the xattr insertion.
4903 */
4904static int ocfs2_xattr_set_entry_in_bucket(struct inode *inode,
4905 handle_t *handle,
4906 struct ocfs2_xattr_info *xi,
4907 struct ocfs2_xattr_search *xs,
4908 u32 name_hash,
4909 int local)
4910{
4911 int ret;
4912 u64 blkno;
4913
4914 mlog(0, "Set xattr entry len = %lu index = %d in bucket %llu\n",
4915 (unsigned long)xi->value_len, xi->name_index,
4916 (unsigned long long)bucket_blkno(xs->bucket));
4917
4918 if (!xs->bucket->bu_bhs[1]) {
4919 blkno = bucket_blkno(xs->bucket);
4920 ocfs2_xattr_bucket_relse(xs->bucket);
4921 ret = ocfs2_read_xattr_bucket(xs->bucket, blkno);
4922 if (ret) {
4923 mlog_errno(ret);
4924 goto out;
4925 }
4926 }
4927
4928 ret = ocfs2_xattr_bucket_journal_access(handle, xs->bucket,
4929 OCFS2_JOURNAL_ACCESS_WRITE);
4930 if (ret < 0) {
4931 mlog_errno(ret);
4932 goto out;
4933 }
4934
4935 ocfs2_xattr_set_entry_normal(inode, xi, xs, name_hash, local);
4936 ocfs2_xattr_bucket_journal_dirty(handle, xs->bucket);
4937
4938out:
4939 return ret;
4940}
4941
4942/*
4943 * Truncate the specified xe_off entry in xattr bucket. 5339 * Truncate the specified xe_off entry in xattr bucket.
4944 * bucket is indicated by header_bh and len is the new length. 5340 * bucket is indicated by header_bh and len is the new length.
4945 * Both the ocfs2_xattr_value_root and the entry will be updated here. 5341 * Both the ocfs2_xattr_value_root and the entry will be updated here.
@@ -5009,66 +5405,6 @@ out:
5009 return ret; 5405 return ret;
5010} 5406}
5011 5407
5012static int ocfs2_xattr_bucket_value_truncate_xs(struct inode *inode,
5013 struct ocfs2_xattr_search *xs,
5014 int len,
5015 struct ocfs2_xattr_set_ctxt *ctxt)
5016{
5017 int ret, offset;
5018 struct ocfs2_xattr_entry *xe = xs->here;
5019 struct ocfs2_xattr_header *xh = (struct ocfs2_xattr_header *)xs->base;
5020
5021 BUG_ON(!xs->bucket->bu_bhs[0] || !xe || ocfs2_xattr_is_local(xe));
5022
5023 offset = xe - xh->xh_entries;
5024 ret = ocfs2_xattr_bucket_value_truncate(inode, xs->bucket,
5025 offset, len, ctxt);
5026 if (ret)
5027 mlog_errno(ret);
5028
5029 return ret;
5030}
5031
5032static int ocfs2_xattr_bucket_set_value_outside(struct inode *inode,
5033 handle_t *handle,
5034 struct ocfs2_xattr_search *xs,
5035 char *val,
5036 int value_len)
5037{
5038 int ret, offset, block_off;
5039 struct ocfs2_xattr_value_root *xv;
5040 struct ocfs2_xattr_entry *xe = xs->here;
5041 struct ocfs2_xattr_header *xh = bucket_xh(xs->bucket);
5042 void *base;
5043 struct ocfs2_xattr_value_buf vb = {
5044 .vb_access = ocfs2_journal_access,
5045 };
5046
5047 BUG_ON(!xs->base || !xe || ocfs2_xattr_is_local(xe));
5048
5049 ret = ocfs2_xattr_bucket_get_name_value(inode->i_sb, xh,
5050 xe - xh->xh_entries,
5051 &block_off,
5052 &offset);
5053 if (ret) {
5054 mlog_errno(ret);
5055 goto out;
5056 }
5057
5058 base = bucket_block(xs->bucket, block_off);
5059 xv = (struct ocfs2_xattr_value_root *)(base + offset +
5060 OCFS2_XATTR_SIZE(xe->xe_name_len));
5061
5062 vb.vb_xv = xv;
5063 vb.vb_bh = xs->bucket->bu_bhs[block_off];
5064 ret = __ocfs2_xattr_set_value_outside(inode, handle,
5065 &vb, val, value_len);
5066 if (ret)
5067 mlog_errno(ret);
5068out:
5069 return ret;
5070}
5071
5072static int ocfs2_rm_xattr_cluster(struct inode *inode, 5408static int ocfs2_rm_xattr_cluster(struct inode *inode,
5073 struct buffer_head *root_bh, 5409 struct buffer_head *root_bh,
5074 u64 blkno, 5410 u64 blkno,
@@ -5167,128 +5503,6 @@ out:
5167 return ret; 5503 return ret;
5168} 5504}
5169 5505
5170static void ocfs2_xattr_bucket_remove_xs(struct inode *inode,
5171 handle_t *handle,
5172 struct ocfs2_xattr_search *xs)
5173{
5174 struct ocfs2_xattr_header *xh = bucket_xh(xs->bucket);
5175 struct ocfs2_xattr_entry *last = &xh->xh_entries[
5176 le16_to_cpu(xh->xh_count) - 1];
5177 int ret = 0;
5178
5179 ret = ocfs2_xattr_bucket_journal_access(handle, xs->bucket,
5180 OCFS2_JOURNAL_ACCESS_WRITE);
5181 if (ret) {
5182 mlog_errno(ret);
5183 return;
5184 }
5185
5186 /* Remove the old entry. */
5187 memmove(xs->here, xs->here + 1,
5188 (void *)last - (void *)xs->here);
5189 memset(last, 0, sizeof(struct ocfs2_xattr_entry));
5190 le16_add_cpu(&xh->xh_count, -1);
5191
5192 ocfs2_xattr_bucket_journal_dirty(handle, xs->bucket);
5193}
5194
5195/*
5196 * Set the xattr name/value in the bucket specified in xs.
5197 *
5198 * As the new value in xi may be stored in the bucket or in an outside cluster,
 5199 * we divide the whole process into 4 steps:
 5200 * 1. insert the name/value in the bucket (ocfs2_xattr_set_entry_in_bucket)
 5201 * 2. truncate the outside cluster (ocfs2_xattr_bucket_value_truncate_xs)
 5202 * 3. set the value in the outside cluster (ocfs2_xattr_bucket_set_value_outside)
 5203 * 4. if the clusters for the new outside value can't be allocated, we need
 5204 * to free the xattr we allocated in step 1.
5205 */
5206static int ocfs2_xattr_set_in_bucket(struct inode *inode,
5207 struct ocfs2_xattr_info *xi,
5208 struct ocfs2_xattr_search *xs,
5209 struct ocfs2_xattr_set_ctxt *ctxt)
5210{
5211 int ret, local = 1;
5212 size_t value_len;
5213 char *val = (char *)xi->value;
5214 struct ocfs2_xattr_entry *xe = xs->here;
5215 u32 name_hash = ocfs2_xattr_name_hash(inode, xi->name,
5216 strlen(xi->name));
5217
5218 if (!xs->not_found && !ocfs2_xattr_is_local(xe)) {
5219 /*
5220 * We need to truncate the xattr storage first.
5221 *
5222 * If both the old and new value are stored to
5223 * outside block, we only need to truncate
5224 * the storage and then set the value outside.
5225 *
 5226 * If the new value should be stored within the block,
 5227 * we should free all the outside blocks first; the
 5228 * modification to the xattr block will be done
 5229 * by the following steps.
5230 */
5231 if (xi->value_len > OCFS2_XATTR_INLINE_SIZE)
5232 value_len = xi->value_len;
5233 else
5234 value_len = 0;
5235
5236 ret = ocfs2_xattr_bucket_value_truncate_xs(inode, xs,
5237 value_len,
5238 ctxt);
5239 if (ret)
5240 goto out;
5241
5242 if (value_len)
5243 goto set_value_outside;
5244 }
5245
5246 value_len = xi->value_len;
5247 /* So we have to handle the inside block change now. */
5248 if (value_len > OCFS2_XATTR_INLINE_SIZE) {
5249 /*
5250 * If the new value will be stored outside of block,
 5251 * initialize a new empty value root and insert it first.
5252 */
5253 local = 0;
5254 xi->value = &def_xv;
5255 xi->value_len = OCFS2_XATTR_ROOT_SIZE;
5256 }
5257
5258 ret = ocfs2_xattr_set_entry_in_bucket(inode, ctxt->handle, xi, xs,
5259 name_hash, local);
5260 if (ret) {
5261 mlog_errno(ret);
5262 goto out;
5263 }
5264
5265 if (value_len <= OCFS2_XATTR_INLINE_SIZE)
5266 goto out;
5267
5268 /* allocate the space now for the outside block storage. */
5269 ret = ocfs2_xattr_bucket_value_truncate_xs(inode, xs,
5270 value_len, ctxt);
5271 if (ret) {
5272 mlog_errno(ret);
5273
5274 if (xs->not_found) {
5275 /*
5276 * We can't allocate enough clusters for outside
5277 * storage and we have allocated xattr already,
5278 * so need to remove it.
5279 */
5280 ocfs2_xattr_bucket_remove_xs(inode, ctxt->handle, xs);
5281 }
5282 goto out;
5283 }
5284
5285set_value_outside:
5286 ret = ocfs2_xattr_bucket_set_value_outside(inode, ctxt->handle,
5287 xs, val, value_len);
5288out:
5289 return ret;
5290}
5291
5292/* 5506/*
5293 * check whether the xattr bucket is filled up with the same hash value. 5507 * check whether the xattr bucket is filled up with the same hash value.
5294 * If we want to insert the xattr with the same hash, return -ENOSPC. 5508 * If we want to insert the xattr with the same hash, return -ENOSPC.
@@ -5317,156 +5531,116 @@ static int ocfs2_check_xattr_bucket_collision(struct inode *inode,
5317 return 0; 5531 return 0;
5318} 5532}
5319 5533
5320static int ocfs2_xattr_set_entry_index_block(struct inode *inode, 5534/*
5321 struct ocfs2_xattr_info *xi, 5535 * Try to set the entry in the current bucket. If we fail, the caller
5322 struct ocfs2_xattr_search *xs, 5536 * will handle getting us another bucket.
5323 struct ocfs2_xattr_set_ctxt *ctxt) 5537 */
5538static int ocfs2_xattr_set_entry_bucket(struct inode *inode,
5539 struct ocfs2_xattr_info *xi,
5540 struct ocfs2_xattr_search *xs,
5541 struct ocfs2_xattr_set_ctxt *ctxt)
5324{ 5542{
5325 struct ocfs2_xattr_header *xh; 5543 int ret;
5326 struct ocfs2_xattr_entry *xe; 5544 struct ocfs2_xa_loc loc;
5327 u16 count, header_size, xh_free_start;
5328 int free, max_free, need, old;
5329 size_t value_size = 0, name_len = strlen(xi->name);
5330 size_t blocksize = inode->i_sb->s_blocksize;
5331 int ret, allocation = 0;
5332
5333 mlog_entry("Set xattr %s in xattr index block\n", xi->name);
5334
5335try_again:
5336 xh = xs->header;
5337 count = le16_to_cpu(xh->xh_count);
5338 xh_free_start = le16_to_cpu(xh->xh_free_start);
5339 header_size = sizeof(struct ocfs2_xattr_header) +
5340 count * sizeof(struct ocfs2_xattr_entry);
5341 max_free = OCFS2_XATTR_BUCKET_SIZE - header_size -
5342 le16_to_cpu(xh->xh_name_value_len) - OCFS2_XATTR_HEADER_GAP;
5343
5344 mlog_bug_on_msg(header_size > blocksize, "bucket %llu has header size "
5345 "of %u which exceed block size\n",
5346 (unsigned long long)bucket_blkno(xs->bucket),
5347 header_size);
5348 5545
5349 if (xi->value && xi->value_len > OCFS2_XATTR_INLINE_SIZE) 5546 mlog_entry("Set xattr %s in xattr bucket\n", xi->xi_name);
5350 value_size = OCFS2_XATTR_ROOT_SIZE;
5351 else if (xi->value)
5352 value_size = OCFS2_XATTR_SIZE(xi->value_len);
5353 5547
5354 if (xs->not_found) 5548 ocfs2_init_xattr_bucket_xa_loc(&loc, xs->bucket,
5355 need = sizeof(struct ocfs2_xattr_entry) + 5549 xs->not_found ? NULL : xs->here);
5356 OCFS2_XATTR_SIZE(name_len) + value_size; 5550 ret = ocfs2_xa_set(&loc, xi, ctxt);
5357 else { 5551 if (!ret) {
5358 need = value_size + OCFS2_XATTR_SIZE(name_len); 5552 xs->here = loc.xl_entry;
5553 goto out;
5554 }
5555 if (ret != -ENOSPC) {
5556 mlog_errno(ret);
5557 goto out;
5558 }
5359 5559
5360 /* 5560 /* Ok, we need space. Let's try defragmenting the bucket. */
5361 * We only replace the old value if the new length is smaller 5561 ret = ocfs2_defrag_xattr_bucket(inode, ctxt->handle,
5362 * than the old one. Otherwise we will allocate new space in the 5562 xs->bucket);
5363 * bucket to store it. 5563 if (ret) {
5364 */ 5564 mlog_errno(ret);
5365 xe = xs->here; 5565 goto out;
5366 if (ocfs2_xattr_is_local(xe)) 5566 }
5367 old = OCFS2_XATTR_SIZE(le64_to_cpu(xe->xe_value_size));
5368 else
5369 old = OCFS2_XATTR_SIZE(OCFS2_XATTR_ROOT_SIZE);
5370 5567
5371 if (old >= value_size) 5568 ret = ocfs2_xa_set(&loc, xi, ctxt);
5372 need = 0; 5569 if (!ret) {
5570 xs->here = loc.xl_entry;
5571 goto out;
5373 } 5572 }
5573 if (ret != -ENOSPC)
5574 mlog_errno(ret);
5374 5575
5375 free = xh_free_start - header_size - OCFS2_XATTR_HEADER_GAP;
5376 /*
5377 * We need to make sure the new name/value pair
5378 * can exist in the same block.
5379 */
5380 if (xh_free_start % blocksize < need)
5381 free -= xh_free_start % blocksize;
5382
5383 mlog(0, "xs->not_found = %d, in xattr bucket %llu: free = %d, "
5384 "need = %d, max_free = %d, xh_free_start = %u, xh_name_value_len ="
5385 " %u\n", xs->not_found,
5386 (unsigned long long)bucket_blkno(xs->bucket),
5387 free, need, max_free, le16_to_cpu(xh->xh_free_start),
5388 le16_to_cpu(xh->xh_name_value_len));
5389
5390 if (free < need ||
5391 (xs->not_found &&
5392 count == ocfs2_xattr_max_xe_in_bucket(inode->i_sb))) {
5393 if (need <= max_free &&
5394 count < ocfs2_xattr_max_xe_in_bucket(inode->i_sb)) {
5395 /*
 5396 * We can create the space by defragmenting. Since only the
5397 * name/value will be moved, the xe shouldn't be changed
5398 * in xs.
5399 */
5400 ret = ocfs2_defrag_xattr_bucket(inode, ctxt->handle,
5401 xs->bucket);
5402 if (ret) {
5403 mlog_errno(ret);
5404 goto out;
5405 }
5406 5576
5407 xh_free_start = le16_to_cpu(xh->xh_free_start); 5577out:
5408 free = xh_free_start - header_size 5578 mlog_exit(ret);
5409 - OCFS2_XATTR_HEADER_GAP; 5579 return ret;
5410 if (xh_free_start % blocksize < need) 5580}
5411 free -= xh_free_start % blocksize;
5412 5581
5413 if (free >= need) 5582static int ocfs2_xattr_set_entry_index_block(struct inode *inode,
5414 goto xattr_set; 5583 struct ocfs2_xattr_info *xi,
5584 struct ocfs2_xattr_search *xs,
5585 struct ocfs2_xattr_set_ctxt *ctxt)
5586{
5587 int ret;
5415 5588
5416 mlog(0, "Can't get enough space for xattr insert by " 5589 mlog_entry("Set xattr %s in xattr index block\n", xi->xi_name);
5417 "defragment. Need %u bytes, but we have %d, so "
5418 "allocate new bucket for it.\n", need, free);
5419 }
5420 5590
5421 /* 5591 ret = ocfs2_xattr_set_entry_bucket(inode, xi, xs, ctxt);
5422 * We have to add new buckets or clusters and one 5592 if (!ret)
5423 * allocation should leave us enough space for insert. 5593 goto out;
5424 */ 5594 if (ret != -ENOSPC) {
5425 BUG_ON(allocation); 5595 mlog_errno(ret);
5596 goto out;
5597 }
5426 5598
5427 /* 5599 /* Ack, need more space. Let's try to get another bucket! */
5428 * We do not allow for overlapping ranges between buckets. And
5429 * the maximum number of collisions we will allow for then is
5430 * one bucket's worth, so check it here whether we need to
5431 * add a new bucket for the insert.
5432 */
5433 ret = ocfs2_check_xattr_bucket_collision(inode,
5434 xs->bucket,
5435 xi->name);
5436 if (ret) {
5437 mlog_errno(ret);
5438 goto out;
5439 }
5440 5600
5441 ret = ocfs2_add_new_xattr_bucket(inode, 5601 /*
5442 xs->xattr_bh, 5602 * We do not allow for overlapping ranges between buckets. And
5603 * the maximum number of collisions we will allow for then is
5604 * one bucket's worth, so check it here whether we need to
5605 * add a new bucket for the insert.
5606 */
5607 ret = ocfs2_check_xattr_bucket_collision(inode,
5443 xs->bucket, 5608 xs->bucket,
5444 ctxt); 5609 xi->xi_name);
5445 if (ret) { 5610 if (ret) {
5446 mlog_errno(ret); 5611 mlog_errno(ret);
5447 goto out; 5612 goto out;
5448 } 5613 }
5449 5614
5450 /* 5615 ret = ocfs2_add_new_xattr_bucket(inode,
5451 * ocfs2_add_new_xattr_bucket() will have updated 5616 xs->xattr_bh,
5452 * xs->bucket if it moved, but it will not have updated 5617 xs->bucket,
5453 * any of the other search fields. Thus, we drop it and 5618 ctxt);
5454 * re-search. Everything should be cached, so it'll be 5619 if (ret) {
5455 * quick. 5620 mlog_errno(ret);
5456 */ 5621 goto out;
5457 ocfs2_xattr_bucket_relse(xs->bucket);
5458 ret = ocfs2_xattr_index_block_find(inode, xs->xattr_bh,
5459 xi->name_index,
5460 xi->name, xs);
5461 if (ret && ret != -ENODATA)
5462 goto out;
5463 xs->not_found = ret;
5464 allocation = 1;
5465 goto try_again;
5466 } 5622 }
5467 5623
5468xattr_set: 5624 /*
5469 ret = ocfs2_xattr_set_in_bucket(inode, xi, xs, ctxt); 5625 * ocfs2_add_new_xattr_bucket() will have updated
5626 * xs->bucket if it moved, but it will not have updated
5627 * any of the other search fields. Thus, we drop it and
5628 * re-search. Everything should be cached, so it'll be
5629 * quick.
5630 */
5631 ocfs2_xattr_bucket_relse(xs->bucket);
5632 ret = ocfs2_xattr_index_block_find(inode, xs->xattr_bh,
5633 xi->xi_name_index,
5634 xi->xi_name, xs);
5635 if (ret && ret != -ENODATA)
5636 goto out;
5637 xs->not_found = ret;
5638
5639 /* Ok, we have a new bucket, let's try again */
5640 ret = ocfs2_xattr_set_entry_bucket(inode, xi, xs, ctxt);
5641 if (ret && (ret != -ENOSPC))
5642 mlog_errno(ret);
5643
5470out: 5644out:
5471 mlog_exit(ret); 5645 mlog_exit(ret);
5472 return ret; 5646 return ret;
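Throughout the rewrite above, ocfs2_xa_set() is the single entry point for adding, replacing, or removing an entry once ocfs2_init_xattr_bucket_xa_loc() has pointed an ocfs2_xa_loc at the bucket. Both helpers are introduced earlier in this patch, outside this excerpt; inferred from these call sites alone, the contract is roughly:

	/* Assumed contract, reconstructed from the call sites above:
	 * apply xi (xi_value == NULL means remove) at loc under
	 * ctxt->handle. Returns 0 on success, -ENOSPC when the storage
	 * behind loc cannot hold the new name/value -- the callers above
	 * then defragment or allocate a new bucket and retry -- or
	 * another negative errno on failure. */
	int ocfs2_xa_set(struct ocfs2_xa_loc *loc,
			 struct ocfs2_xattr_info *xi,
			 struct ocfs2_xattr_set_ctxt *ctxt);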
@@ -5678,7 +5852,7 @@ static int ocfs2_prepare_refcount_xattr(struct inode *inode,
5678 * refcount tree, and make the original extent become 3. So we will need 5852 * refcount tree, and make the original extent become 3. So we will need
5679 * 2 * cluster more extent recs at most. 5853 * 2 * cluster more extent recs at most.
5680 */ 5854 */
5681 if (!xi->value || xi->value_len <= OCFS2_XATTR_INLINE_SIZE) { 5855 if (!xi->xi_value || xi->xi_value_len <= OCFS2_XATTR_INLINE_SIZE) {
5682 5856
5683 ret = ocfs2_refcounted_xattr_delete_need(inode, 5857 ret = ocfs2_refcounted_xattr_delete_need(inode,
5684 &(*ref_tree)->rf_ci, 5858 &(*ref_tree)->rf_ci,
@@ -6354,9 +6528,11 @@ static int ocfs2_create_empty_xattr_block(struct inode *inode,
6354 int indexed) 6528 int indexed)
6355{ 6529{
6356 int ret; 6530 int ret;
6357 handle_t *handle;
6358 struct ocfs2_alloc_context *meta_ac; 6531 struct ocfs2_alloc_context *meta_ac;
6359 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); 6532 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
6533 struct ocfs2_xattr_set_ctxt ctxt = {
6534 .meta_ac = meta_ac,
6535 };
6360 6536
6361 ret = ocfs2_reserve_new_metadata_blocks(osb, 1, &meta_ac); 6537 ret = ocfs2_reserve_new_metadata_blocks(osb, 1, &meta_ac);
6362 if (ret < 0) { 6538 if (ret < 0) {
@@ -6364,21 +6540,21 @@ static int ocfs2_create_empty_xattr_block(struct inode *inode,
6364 return ret; 6540 return ret;
6365 } 6541 }
6366 6542
6367 handle = ocfs2_start_trans(osb, OCFS2_XATTR_BLOCK_CREATE_CREDITS); 6543 ctxt.handle = ocfs2_start_trans(osb, OCFS2_XATTR_BLOCK_CREATE_CREDITS);
6368 if (IS_ERR(handle)) { 6544 if (IS_ERR(ctxt.handle)) {
6369 ret = PTR_ERR(handle); 6545 ret = PTR_ERR(ctxt.handle);
6370 mlog_errno(ret); 6546 mlog_errno(ret);
6371 goto out; 6547 goto out;
6372 } 6548 }
6373 6549
6374 mlog(0, "create new xattr block for inode %llu, index = %d\n", 6550 mlog(0, "create new xattr block for inode %llu, index = %d\n",
6375 (unsigned long long)fe_bh->b_blocknr, indexed); 6551 (unsigned long long)fe_bh->b_blocknr, indexed);
6376 ret = ocfs2_create_xattr_block(handle, inode, fe_bh, 6552 ret = ocfs2_create_xattr_block(inode, fe_bh, &ctxt, indexed,
6377 meta_ac, ret_bh, indexed); 6553 ret_bh);
6378 if (ret) 6554 if (ret)
6379 mlog_errno(ret); 6555 mlog_errno(ret);
6380 6556
6381 ocfs2_commit_trans(osb, handle); 6557 ocfs2_commit_trans(osb, ctxt.handle);
6382out: 6558out:
6383 ocfs2_free_alloc_context(meta_ac); 6559 ocfs2_free_alloc_context(meta_ac);
6384 return ret; 6560 return ret;
diff --git a/fs/omfs/inode.c b/fs/omfs/inode.c
index f3b7c1541f3a..75d9b5ba1d45 100644
--- a/fs/omfs/inode.c
+++ b/fs/omfs/inode.c
@@ -11,6 +11,7 @@
11#include <linux/parser.h> 11#include <linux/parser.h>
12#include <linux/buffer_head.h> 12#include <linux/buffer_head.h>
13#include <linux/vmalloc.h> 13#include <linux/vmalloc.h>
14#include <linux/writeback.h>
14#include <linux/crc-itu-t.h> 15#include <linux/crc-itu-t.h>
15#include "omfs.h" 16#include "omfs.h"
16 17
@@ -89,7 +90,7 @@ static void omfs_update_checksums(struct omfs_inode *oi)
89 oi->i_head.h_check_xor = xor; 90 oi->i_head.h_check_xor = xor;
90} 91}
91 92
92static int omfs_write_inode(struct inode *inode, int wait) 93static int __omfs_write_inode(struct inode *inode, int wait)
93{ 94{
94 struct omfs_inode *oi; 95 struct omfs_inode *oi;
95 struct omfs_sb_info *sbi = OMFS_SB(inode->i_sb); 96 struct omfs_sb_info *sbi = OMFS_SB(inode->i_sb);
@@ -162,9 +163,14 @@ out:
162 return ret; 163 return ret;
163} 164}
164 165
166static int omfs_write_inode(struct inode *inode, struct writeback_control *wbc)
167{
168 return __omfs_write_inode(inode, wbc->sync_mode == WB_SYNC_ALL);
169}
170
165int omfs_sync_inode(struct inode *inode) 171int omfs_sync_inode(struct inode *inode)
166{ 172{
167 return omfs_write_inode(inode, 1); 173 return __omfs_write_inode(inode, 1);
168} 174}
169 175
170/* 176/*
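This is the pattern of the tree-wide ->write_inode() conversion in this patch: the int wait argument becomes a struct writeback_control *, the old flag is recovered as wbc->sync_mode == WB_SYNC_ALL, and internal callers such as omfs_sync_inode() go through a __-prefixed helper that keeps the boolean. A hypothetical caller forcing a synchronous write under the new signature would look like:

	#include <linux/writeback.h>

	/* Hypothetical usage sketch, not part of this patch. */
	static int sync_one_inode(struct inode *inode)
	{
		struct writeback_control wbc = {
			.sync_mode = WB_SYNC_ALL,	/* wait for the write */
		};

		return inode->i_sb->s_op->write_inode(inode, &wbc);
	}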
diff --git a/fs/open.c b/fs/open.c
index b740c4244833..e17f54454b50 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -270,7 +270,7 @@ static long do_sys_truncate(const char __user *pathname, loff_t length)
270 * Make sure that there are no leases. get_write_access() protects 270 * Make sure that there are no leases. get_write_access() protects
271 * against the truncate racing with a lease-granting setlease(). 271 * against the truncate racing with a lease-granting setlease().
272 */ 272 */
273 error = break_lease(inode, FMODE_WRITE); 273 error = break_lease(inode, O_WRONLY);
274 if (error) 274 if (error)
275 goto put_write_and_out; 275 goto put_write_and_out;
276 276
diff --git a/fs/pnode.c b/fs/pnode.c
index 8d5f392ec3d3..5cc564a83149 100644
--- a/fs/pnode.c
+++ b/fs/pnode.c
@@ -86,7 +86,7 @@ static int do_make_slave(struct vfsmount *mnt)
86 86
87 /* 87 /*
88 * slave 'mnt' to a peer mount that has the 88 * slave 'mnt' to a peer mount that has the
89 * same root dentry. If none is available than 89 * same root dentry. If none is available then
90 * slave it to anything that is available. 90 * slave it to anything that is available.
91 */ 91 */
92 while ((peer_mnt = next_peer(peer_mnt)) != mnt && 92 while ((peer_mnt = next_peer(peer_mnt)) != mnt &&
@@ -147,6 +147,11 @@ void change_mnt_propagation(struct vfsmount *mnt, int type)
147 * get the next mount in the propagation tree. 147 * get the next mount in the propagation tree.
148 * @m: the mount seen last 148 * @m: the mount seen last
149 * @origin: the original mount from where the tree walk initiated 149 * @origin: the original mount from where the tree walk initiated
150 *
151 * Note that peer groups form contiguous segments of slave lists.
 152 * We rely on that in get_source() to be able to find out if a
153 * vfsmount found while iterating with propagation_next() is
154 * a peer of one we'd found earlier.
150 */ 155 */
151static struct vfsmount *propagation_next(struct vfsmount *m, 156static struct vfsmount *propagation_next(struct vfsmount *m,
152 struct vfsmount *origin) 157 struct vfsmount *origin)
@@ -186,10 +191,6 @@ static struct vfsmount *get_source(struct vfsmount *dest,
186{ 191{
187 struct vfsmount *p_last_src = NULL; 192 struct vfsmount *p_last_src = NULL;
188 struct vfsmount *p_last_dest = NULL; 193 struct vfsmount *p_last_dest = NULL;
189 *type = CL_PROPAGATION;
190
191 if (IS_MNT_SHARED(dest))
192 *type |= CL_MAKE_SHARED;
193 194
194 while (last_dest != dest->mnt_master) { 195 while (last_dest != dest->mnt_master) {
195 p_last_dest = last_dest; 196 p_last_dest = last_dest;
@@ -202,13 +203,18 @@ static struct vfsmount *get_source(struct vfsmount *dest,
202 do { 203 do {
203 p_last_dest = next_peer(p_last_dest); 204 p_last_dest = next_peer(p_last_dest);
204 } while (IS_MNT_NEW(p_last_dest)); 205 } while (IS_MNT_NEW(p_last_dest));
206 /* is that a peer of the earlier? */
207 if (dest == p_last_dest) {
208 *type = CL_MAKE_SHARED;
209 return p_last_src;
210 }
205 } 211 }
206 212 /* slave of the earlier, then */
207 if (dest != p_last_dest) { 213 *type = CL_SLAVE;
208 *type |= CL_SLAVE; 214 /* beginning of peer group among the slaves? */
209 return last_src; 215 if (IS_MNT_SHARED(dest))
210 } else 216 *type |= CL_MAKE_SHARED;
211 return p_last_src; 217 return last_src;
212} 218}
213 219
214/* 220/*
diff --git a/fs/pnode.h b/fs/pnode.h
index 958665d662af..1ea4ae1efcd3 100644
--- a/fs/pnode.h
+++ b/fs/pnode.h
@@ -21,12 +21,11 @@
21#define CL_SLAVE 0x02 21#define CL_SLAVE 0x02
22#define CL_COPY_ALL 0x04 22#define CL_COPY_ALL 0x04
23#define CL_MAKE_SHARED 0x08 23#define CL_MAKE_SHARED 0x08
24#define CL_PROPAGATION 0x10 24#define CL_PRIVATE 0x10
25#define CL_PRIVATE 0x20
26 25
27static inline void set_mnt_shared(struct vfsmount *mnt) 26static inline void set_mnt_shared(struct vfsmount *mnt)
28{ 27{
29 mnt->mnt_flags &= ~MNT_PNODE_MASK; 28 mnt->mnt_flags &= ~MNT_SHARED_MASK;
30 mnt->mnt_flags |= MNT_SHARED; 29 mnt->mnt_flags |= MNT_SHARED;
31} 30}
32 31
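With CL_PROPAGATION gone (its job is now done by the explicit *type assignments in get_source() above), the clone-type flags renumber; the block now reads as below. The CL_EXPIRE line sits above this hunk's context, so its value is an assumption:

	#define CL_EXPIRE	0x01	/* assumed: not visible in the hunk */
	#define CL_SLAVE	0x02
	#define CL_COPY_ALL	0x04
	#define CL_MAKE_SHARED	0x08
	#define CL_PRIVATE	0x10	/* renumbered from 0x20 */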
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 623e2ffb5d2b..a7310841c831 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -647,17 +647,11 @@ static int mounts_release(struct inode *inode, struct file *file)
647static unsigned mounts_poll(struct file *file, poll_table *wait) 647static unsigned mounts_poll(struct file *file, poll_table *wait)
648{ 648{
649 struct proc_mounts *p = file->private_data; 649 struct proc_mounts *p = file->private_data;
650 struct mnt_namespace *ns = p->ns;
651 unsigned res = POLLIN | POLLRDNORM; 650 unsigned res = POLLIN | POLLRDNORM;
652 651
653 poll_wait(file, &ns->poll, wait); 652 poll_wait(file, &p->ns->poll, wait);
654 653 if (mnt_had_events(p))
655 spin_lock(&vfsmount_lock);
656 if (p->event != ns->event) {
657 p->event = ns->event;
658 res |= POLLERR | POLLPRI; 654 res |= POLLERR | POLLPRI;
659 }
660 spin_unlock(&vfsmount_lock);
661 655
662 return res; 656 return res;
663} 657}
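mnt_had_events() is added elsewhere in this patch (plausibly fs/namespace.c, next to vfsmount_lock). Given the code it replaces here, a faithful sketch is:

	/* Reconstruction of the assumed helper; the real definition and
	 * its locking are outside this excerpt. */
	static inline int mnt_had_events(struct proc_mounts *p)
	{
		struct mnt_namespace *ns = p->ns;
		int res = 0;

		spin_lock(&vfsmount_lock);
		if (p->event != ns->event) {
			p->event = ns->event;
			res = 1;
		}
		spin_unlock(&vfsmount_lock);

		return res;
	}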
diff --git a/fs/proc/generic.c b/fs/proc/generic.c
index 480cb1065eec..9580abeadeb3 100644
--- a/fs/proc/generic.c
+++ b/fs/proc/generic.c
@@ -662,6 +662,7 @@ struct proc_dir_entry *proc_symlink(const char *name,
662 } 662 }
663 return ent; 663 return ent;
664} 664}
665EXPORT_SYMBOL(proc_symlink);
665 666
666struct proc_dir_entry *proc_mkdir_mode(const char *name, mode_t mode, 667struct proc_dir_entry *proc_mkdir_mode(const char *name, mode_t mode,
667 struct proc_dir_entry *parent) 668 struct proc_dir_entry *parent)
@@ -700,6 +701,7 @@ struct proc_dir_entry *proc_mkdir(const char *name,
700{ 701{
701 return proc_mkdir_mode(name, S_IRUGO | S_IXUGO, parent); 702 return proc_mkdir_mode(name, S_IRUGO | S_IXUGO, parent);
702} 703}
704EXPORT_SYMBOL(proc_mkdir);
703 705
704struct proc_dir_entry *create_proc_entry(const char *name, mode_t mode, 706struct proc_dir_entry *create_proc_entry(const char *name, mode_t mode,
705 struct proc_dir_entry *parent) 707 struct proc_dir_entry *parent)
@@ -728,6 +730,7 @@ struct proc_dir_entry *create_proc_entry(const char *name, mode_t mode,
728 } 730 }
729 return ent; 731 return ent;
730} 732}
733EXPORT_SYMBOL(create_proc_entry);
731 734
732struct proc_dir_entry *proc_create_data(const char *name, mode_t mode, 735struct proc_dir_entry *proc_create_data(const char *name, mode_t mode,
733 struct proc_dir_entry *parent, 736 struct proc_dir_entry *parent,
@@ -762,6 +765,7 @@ out_free:
762out: 765out:
763 return NULL; 766 return NULL;
764} 767}
768EXPORT_SYMBOL(proc_create_data);
765 769
766static void free_proc_entry(struct proc_dir_entry *de) 770static void free_proc_entry(struct proc_dir_entry *de)
767{ 771{
@@ -853,3 +857,4 @@ continue_removing:
853 de->parent->name, de->name, de->subdir->name); 857 de->parent->name, de->name, de->subdir->name);
854 pde_put(de); 858 pde_put(de);
855} 859}
860EXPORT_SYMBOL(remove_proc_entry);
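Each EXPORT_SYMBOL() now sits next to its definition instead of in the list removed from fs/proc/root.c below; nothing changes for users. A minimal hypothetical module exercising the exported API:

	#include <linux/module.h>
	#include <linux/proc_fs.h>

	/* Hypothetical example module, not part of this patch. */
	static const struct file_operations example_fops;	/* assumed defined elsewhere */

	static int __init example_init(void)
	{
		struct proc_dir_entry *dir;

		dir = proc_mkdir("example", NULL);
		if (!dir)
			return -ENOMEM;

		if (!proc_create_data("status", S_IRUGO, dir,
				      &example_fops, NULL)) {
			remove_proc_entry("example", NULL);
			return -ENOMEM;
		}
		return 0;
	}
	module_init(example_init);
	MODULE_LICENSE("GPL");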
diff --git a/fs/proc/kmsg.c b/fs/proc/kmsg.c
index 7ca78346d3f0..cfe90a48a6e8 100644
--- a/fs/proc/kmsg.c
+++ b/fs/proc/kmsg.c
@@ -12,37 +12,37 @@
12#include <linux/poll.h> 12#include <linux/poll.h>
13#include <linux/proc_fs.h> 13#include <linux/proc_fs.h>
14#include <linux/fs.h> 14#include <linux/fs.h>
15#include <linux/syslog.h>
15 16
16#include <asm/uaccess.h> 17#include <asm/uaccess.h>
17#include <asm/io.h> 18#include <asm/io.h>
18 19
19extern wait_queue_head_t log_wait; 20extern wait_queue_head_t log_wait;
20 21
21extern int do_syslog(int type, char __user *bug, int count);
22
23static int kmsg_open(struct inode * inode, struct file * file) 22static int kmsg_open(struct inode * inode, struct file * file)
24{ 23{
25 return do_syslog(1,NULL,0); 24 return do_syslog(SYSLOG_ACTION_OPEN, NULL, 0, SYSLOG_FROM_FILE);
26} 25}
27 26
28static int kmsg_release(struct inode * inode, struct file * file) 27static int kmsg_release(struct inode * inode, struct file * file)
29{ 28{
30 (void) do_syslog(0,NULL,0); 29 (void) do_syslog(SYSLOG_ACTION_CLOSE, NULL, 0, SYSLOG_FROM_FILE);
31 return 0; 30 return 0;
32} 31}
33 32
34static ssize_t kmsg_read(struct file *file, char __user *buf, 33static ssize_t kmsg_read(struct file *file, char __user *buf,
35 size_t count, loff_t *ppos) 34 size_t count, loff_t *ppos)
36{ 35{
37 if ((file->f_flags & O_NONBLOCK) && !do_syslog(9, NULL, 0)) 36 if ((file->f_flags & O_NONBLOCK) &&
37 !do_syslog(SYSLOG_ACTION_SIZE_UNREAD, NULL, 0, SYSLOG_FROM_FILE))
38 return -EAGAIN; 38 return -EAGAIN;
39 return do_syslog(2, buf, count); 39 return do_syslog(SYSLOG_ACTION_READ, buf, count, SYSLOG_FROM_FILE);
40} 40}
41 41
42static unsigned int kmsg_poll(struct file *file, poll_table *wait) 42static unsigned int kmsg_poll(struct file *file, poll_table *wait)
43{ 43{
44 poll_wait(file, &log_wait, wait); 44 poll_wait(file, &log_wait, wait);
45 if (do_syslog(9, NULL, 0)) 45 if (do_syslog(SYSLOG_ACTION_SIZE_UNREAD, NULL, 0, SYSLOG_FROM_FILE))
46 return POLLIN | POLLRDNORM; 46 return POLLIN | POLLRDNORM;
47 return 0; 47 return 0;
48} 48}
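The named constants come from the new <linux/syslog.h>. Mapping them against the magic numbers they replace gives the action values directly; the SYSLOG_FROM_* values are assumed:

	#define SYSLOG_ACTION_CLOSE		0	/* was do_syslog(0, ...) */
	#define SYSLOG_ACTION_OPEN		1	/* was do_syslog(1, ...) */
	#define SYSLOG_ACTION_READ		2	/* was do_syslog(2, ...) */
	#define SYSLOG_ACTION_SIZE_UNREAD	9	/* was do_syslog(9, ...) */

	#define SYSLOG_FROM_CALL	0	/* assumed: syslog(2) syscall */
	#define SYSLOG_FROM_FILE	1	/* assumed: reads via /proc/kmsg */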
diff --git a/fs/proc/root.c b/fs/proc/root.c
index b080b791d9e3..757c069f2a65 100644
--- a/fs/proc/root.c
+++ b/fs/proc/root.c
@@ -220,9 +220,3 @@ void pid_ns_release_proc(struct pid_namespace *ns)
220{ 220{
221 mntput(ns->proc_mnt); 221 mntput(ns->proc_mnt);
222} 222}
223
224EXPORT_SYMBOL(proc_symlink);
225EXPORT_SYMBOL(proc_mkdir);
226EXPORT_SYMBOL(create_proc_entry);
227EXPORT_SYMBOL(proc_create_data);
228EXPORT_SYMBOL(remove_proc_entry);
diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
index b8671a54e8ed..d1da94b82d8f 100644
--- a/fs/reiserfs/inode.c
+++ b/fs/reiserfs/inode.c
@@ -1618,7 +1618,7 @@ int reiserfs_encode_fh(struct dentry *dentry, __u32 * data, int *lenp,
1618** to properly mark inodes for datasync and such, but only actually 1618** to properly mark inodes for datasync and such, but only actually
1619** does something when called for a synchronous update. 1619** does something when called for a synchronous update.
1620*/ 1620*/
1621int reiserfs_write_inode(struct inode *inode, int do_sync) 1621int reiserfs_write_inode(struct inode *inode, struct writeback_control *wbc)
1622{ 1622{
1623 struct reiserfs_transaction_handle th; 1623 struct reiserfs_transaction_handle th;
1624 int jbegin_count = 1; 1624 int jbegin_count = 1;
@@ -1630,7 +1630,7 @@ int reiserfs_write_inode(struct inode *inode, int do_sync)
1630 ** inode needs to reach disk for safety, and they can safely be 1630 ** inode needs to reach disk for safety, and they can safely be
1631 ** ignored because the altered inode has already been logged. 1631 ** ignored because the altered inode has already been logged.
1632 */ 1632 */
1633 if (do_sync && !(current->flags & PF_MEMALLOC)) { 1633 if (wbc->sync_mode == WB_SYNC_ALL && !(current->flags & PF_MEMALLOC)) {
1634 reiserfs_write_lock(inode->i_sb); 1634 reiserfs_write_lock(inode->i_sb);
1635 if (!journal_begin(&th, inode->i_sb, jbegin_count)) { 1635 if (!journal_begin(&th, inode->i_sb, jbegin_count)) {
1636 reiserfs_update_sd(&th, inode); 1636 reiserfs_update_sd(&th, inode);
diff --git a/fs/squashfs/Makefile b/fs/squashfs/Makefile
index 70e3244fa30f..df8a19ef870d 100644
--- a/fs/squashfs/Makefile
+++ b/fs/squashfs/Makefile
@@ -4,4 +4,4 @@
4 4
5obj-$(CONFIG_SQUASHFS) += squashfs.o 5obj-$(CONFIG_SQUASHFS) += squashfs.o
6squashfs-y += block.o cache.o dir.o export.o file.o fragment.o id.o inode.o 6squashfs-y += block.o cache.o dir.o export.o file.o fragment.o id.o inode.o
7squashfs-y += namei.o super.o symlink.o 7squashfs-y += namei.o super.o symlink.o zlib_wrapper.o decompressor.o
diff --git a/fs/squashfs/block.c b/fs/squashfs/block.c
index 2a7960310349..1cb0d81b164b 100644
--- a/fs/squashfs/block.c
+++ b/fs/squashfs/block.c
@@ -29,15 +29,14 @@
29#include <linux/fs.h> 29#include <linux/fs.h>
30#include <linux/vfs.h> 30#include <linux/vfs.h>
31#include <linux/slab.h> 31#include <linux/slab.h>
32#include <linux/mutex.h>
33#include <linux/string.h> 32#include <linux/string.h>
34#include <linux/buffer_head.h> 33#include <linux/buffer_head.h>
35#include <linux/zlib.h>
36 34
37#include "squashfs_fs.h" 35#include "squashfs_fs.h"
38#include "squashfs_fs_sb.h" 36#include "squashfs_fs_sb.h"
39#include "squashfs_fs_i.h" 37#include "squashfs_fs_i.h"
40#include "squashfs.h" 38#include "squashfs.h"
39#include "decompressor.h"
41 40
42/* 41/*
43 * Read the metadata block length, this is stored in the first two 42 * Read the metadata block length, this is stored in the first two
@@ -153,72 +152,10 @@ int squashfs_read_data(struct super_block *sb, void **buffer, u64 index,
153 } 152 }
154 153
155 if (compressed) { 154 if (compressed) {
156 int zlib_err = 0, zlib_init = 0; 155 length = squashfs_decompress(msblk, buffer, bh, b, offset,
157 156 length, srclength, pages);
158 /* 157 if (length < 0)
159 * Uncompress block. 158 goto read_failure;
160 */
161
162 mutex_lock(&msblk->read_data_mutex);
163
164 msblk->stream.avail_out = 0;
165 msblk->stream.avail_in = 0;
166
167 bytes = length;
168 do {
169 if (msblk->stream.avail_in == 0 && k < b) {
170 avail = min(bytes, msblk->devblksize - offset);
171 bytes -= avail;
172 wait_on_buffer(bh[k]);
173 if (!buffer_uptodate(bh[k]))
174 goto release_mutex;
175
176 if (avail == 0) {
177 offset = 0;
178 put_bh(bh[k++]);
179 continue;
180 }
181
182 msblk->stream.next_in = bh[k]->b_data + offset;
183 msblk->stream.avail_in = avail;
184 offset = 0;
185 }
186
187 if (msblk->stream.avail_out == 0 && page < pages) {
188 msblk->stream.next_out = buffer[page++];
189 msblk->stream.avail_out = PAGE_CACHE_SIZE;
190 }
191
192 if (!zlib_init) {
193 zlib_err = zlib_inflateInit(&msblk->stream);
194 if (zlib_err != Z_OK) {
195 ERROR("zlib_inflateInit returned"
196 " unexpected result 0x%x,"
197 " srclength %d\n", zlib_err,
198 srclength);
199 goto release_mutex;
200 }
201 zlib_init = 1;
202 }
203
204 zlib_err = zlib_inflate(&msblk->stream, Z_SYNC_FLUSH);
205
206 if (msblk->stream.avail_in == 0 && k < b)
207 put_bh(bh[k++]);
208 } while (zlib_err == Z_OK);
209
210 if (zlib_err != Z_STREAM_END) {
211 ERROR("zlib_inflate error, data probably corrupt\n");
212 goto release_mutex;
213 }
214
215 zlib_err = zlib_inflateEnd(&msblk->stream);
216 if (zlib_err != Z_OK) {
217 ERROR("zlib_inflate error, data probably corrupt\n");
218 goto release_mutex;
219 }
220 length = msblk->stream.total_out;
221 mutex_unlock(&msblk->read_data_mutex);
222 } else { 159 } else {
223 /* 160 /*
224 * Block is uncompressed. 161 * Block is uncompressed.
@@ -255,9 +192,6 @@ int squashfs_read_data(struct super_block *sb, void **buffer, u64 index,
255 kfree(bh); 192 kfree(bh);
256 return length; 193 return length;
257 194
258release_mutex:
259 mutex_unlock(&msblk->read_data_mutex);
260
261block_release: 195block_release:
262 for (; k < b; k++) 196 for (; k < b; k++)
263 put_bh(bh[k]); 197 put_bh(bh[k]);
diff --git a/fs/squashfs/cache.c b/fs/squashfs/cache.c
index 40c98fa6b5d6..57314bee9059 100644
--- a/fs/squashfs/cache.c
+++ b/fs/squashfs/cache.c
@@ -51,7 +51,6 @@
51#include <linux/sched.h> 51#include <linux/sched.h>
52#include <linux/spinlock.h> 52#include <linux/spinlock.h>
53#include <linux/wait.h> 53#include <linux/wait.h>
54#include <linux/zlib.h>
55#include <linux/pagemap.h> 54#include <linux/pagemap.h>
56 55
57#include "squashfs_fs.h" 56#include "squashfs_fs.h"
diff --git a/fs/squashfs/decompressor.c b/fs/squashfs/decompressor.c
new file mode 100644
index 000000000000..157478da6ac9
--- /dev/null
+++ b/fs/squashfs/decompressor.c
@@ -0,0 +1,68 @@
1/*
2 * Squashfs - a compressed read only filesystem for Linux
3 *
4 * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009
5 * Phillip Lougher <phillip@lougher.demon.co.uk>
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version 2,
10 * or (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
20 *
21 * decompressor.c
22 */
23
24#include <linux/types.h>
25#include <linux/mutex.h>
26#include <linux/buffer_head.h>
27
28#include "squashfs_fs.h"
29#include "squashfs_fs_sb.h"
30#include "squashfs_fs_i.h"
31#include "decompressor.h"
32#include "squashfs.h"
33
34/*
35 * This file (and decompressor.h) implements a decompressor framework for
36 * Squashfs, allowing multiple decompressors to be easily supported
37 */
38
39static const struct squashfs_decompressor squashfs_lzma_unsupported_comp_ops = {
40 NULL, NULL, NULL, LZMA_COMPRESSION, "lzma", 0
41};
42
43static const struct squashfs_decompressor squashfs_lzo_unsupported_comp_ops = {
44 NULL, NULL, NULL, LZO_COMPRESSION, "lzo", 0
45};
46
47static const struct squashfs_decompressor squashfs_unknown_comp_ops = {
48 NULL, NULL, NULL, 0, "unknown", 0
49};
50
51static const struct squashfs_decompressor *decompressor[] = {
52 &squashfs_zlib_comp_ops,
53 &squashfs_lzma_unsupported_comp_ops,
54 &squashfs_lzo_unsupported_comp_ops,
55 &squashfs_unknown_comp_ops
56};
57
58
59const struct squashfs_decompressor *squashfs_lookup_decompressor(int id)
60{
61 int i;
62
63 for (i = 0; decompressor[i]->id; i++)
64 if (id == decompressor[i]->id)
65 break;
66
67 return decompressor[i];
68}
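The lookup relies on a sentinel: squashfs_unknown_comp_ops has id 0, which terminates the loop, so any unrecognised id resolves to the "unknown" stub rather than NULL. For example:

	const struct squashfs_decompressor *d;

	d = squashfs_lookup_decompressor(ZLIB_COMPRESSION);	/* zlib ops, ->supported set */
	d = squashfs_lookup_decompressor(999);			/* "unknown" stub, !->supported */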
diff --git a/fs/squashfs/decompressor.h b/fs/squashfs/decompressor.h
new file mode 100644
index 000000000000..7425f80783f6
--- /dev/null
+++ b/fs/squashfs/decompressor.h
@@ -0,0 +1,55 @@
1#ifndef DECOMPRESSOR_H
2#define DECOMPRESSOR_H
3/*
4 * Squashfs - a compressed read only filesystem for Linux
5 *
6 * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009
7 * Phillip Lougher <phillip@lougher.demon.co.uk>
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version 2,
12 * or (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
22 *
23 * decompressor.h
24 */
25
26struct squashfs_decompressor {
27 void *(*init)(struct squashfs_sb_info *);
28 void (*free)(void *);
29 int (*decompress)(struct squashfs_sb_info *, void **,
30 struct buffer_head **, int, int, int, int, int);
31 int id;
32 char *name;
33 int supported;
34};
35
36static inline void *squashfs_decompressor_init(struct squashfs_sb_info *msblk)
37{
38 return msblk->decompressor->init(msblk);
39}
40
41static inline void squashfs_decompressor_free(struct squashfs_sb_info *msblk,
42 void *s)
43{
44 if (msblk->decompressor)
45 msblk->decompressor->free(s);
46}
47
48static inline int squashfs_decompress(struct squashfs_sb_info *msblk,
49 void **buffer, struct buffer_head **bh, int b, int offset, int length,
50 int srclength, int pages)
51{
52 return msblk->decompressor->decompress(msblk, buffer, bh, b, offset,
53 length, srclength, pages);
54}
55#endif
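zlib_wrapper.c is added by this patch but not shown in this excerpt. Given the struct layout above and the ZLIB_COMPRESSION id, its ops table plausibly looks like the sketch below; the function names and bodies are assumptions:

	/* Sketch only -- the real zlib_wrapper.c is outside this excerpt. */
	static void *zlib_init(struct squashfs_sb_info *msblk);
	static void zlib_free(void *strm);
	static int zlib_uncompress(struct squashfs_sb_info *msblk, void **buffer,
		struct buffer_head **bh, int b, int offset, int length,
		int srclength, int pages);

	const struct squashfs_decompressor squashfs_zlib_comp_ops = {
		.init = zlib_init,
		.free = zlib_free,
		.decompress = zlib_uncompress,
		.id = ZLIB_COMPRESSION,
		.name = "zlib",
		.supported = 1
	};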
diff --git a/fs/squashfs/dir.c b/fs/squashfs/dir.c
index 566b0eaed868..12b933ac6585 100644
--- a/fs/squashfs/dir.c
+++ b/fs/squashfs/dir.c
@@ -30,7 +30,6 @@
30#include <linux/fs.h> 30#include <linux/fs.h>
31#include <linux/vfs.h> 31#include <linux/vfs.h>
32#include <linux/slab.h> 32#include <linux/slab.h>
33#include <linux/zlib.h>
34 33
35#include "squashfs_fs.h" 34#include "squashfs_fs.h"
36#include "squashfs_fs_sb.h" 35#include "squashfs_fs_sb.h"
diff --git a/fs/squashfs/export.c b/fs/squashfs/export.c
index 2b1b8fe5e037..7f93d5a9ee05 100644
--- a/fs/squashfs/export.c
+++ b/fs/squashfs/export.c
@@ -39,7 +39,6 @@
39#include <linux/vfs.h> 39#include <linux/vfs.h>
40#include <linux/dcache.h> 40#include <linux/dcache.h>
41#include <linux/exportfs.h> 41#include <linux/exportfs.h>
42#include <linux/zlib.h>
43#include <linux/slab.h> 42#include <linux/slab.h>
44 43
45#include "squashfs_fs.h" 44#include "squashfs_fs.h"
diff --git a/fs/squashfs/file.c b/fs/squashfs/file.c
index 717767d831df..a25c5060bdcb 100644
--- a/fs/squashfs/file.c
+++ b/fs/squashfs/file.c
@@ -47,7 +47,6 @@
47#include <linux/string.h> 47#include <linux/string.h>
48#include <linux/pagemap.h> 48#include <linux/pagemap.h>
49#include <linux/mutex.h> 49#include <linux/mutex.h>
50#include <linux/zlib.h>
51 50
52#include "squashfs_fs.h" 51#include "squashfs_fs.h"
53#include "squashfs_fs_sb.h" 52#include "squashfs_fs_sb.h"
diff --git a/fs/squashfs/fragment.c b/fs/squashfs/fragment.c
index b5a2c15bbbc7..7c90bbd6879d 100644
--- a/fs/squashfs/fragment.c
+++ b/fs/squashfs/fragment.c
@@ -36,7 +36,6 @@
36#include <linux/fs.h> 36#include <linux/fs.h>
37#include <linux/vfs.h> 37#include <linux/vfs.h>
38#include <linux/slab.h> 38#include <linux/slab.h>
39#include <linux/zlib.h>
40 39
41#include "squashfs_fs.h" 40#include "squashfs_fs.h"
42#include "squashfs_fs_sb.h" 41#include "squashfs_fs_sb.h"
diff --git a/fs/squashfs/id.c b/fs/squashfs/id.c
index 3795b837ba28..b7f64bcd2b70 100644
--- a/fs/squashfs/id.c
+++ b/fs/squashfs/id.c
@@ -34,7 +34,6 @@
34#include <linux/fs.h> 34#include <linux/fs.h>
35#include <linux/vfs.h> 35#include <linux/vfs.h>
36#include <linux/slab.h> 36#include <linux/slab.h>
37#include <linux/zlib.h>
38 37
39#include "squashfs_fs.h" 38#include "squashfs_fs.h"
40#include "squashfs_fs_sb.h" 39#include "squashfs_fs_sb.h"
diff --git a/fs/squashfs/inode.c b/fs/squashfs/inode.c
index 9101dbde39ec..49daaf669e41 100644
--- a/fs/squashfs/inode.c
+++ b/fs/squashfs/inode.c
@@ -40,7 +40,6 @@
40 40
41#include <linux/fs.h> 41#include <linux/fs.h>
42#include <linux/vfs.h> 42#include <linux/vfs.h>
43#include <linux/zlib.h>
44 43
45#include "squashfs_fs.h" 44#include "squashfs_fs.h"
46#include "squashfs_fs_sb.h" 45#include "squashfs_fs_sb.h"
diff --git a/fs/squashfs/namei.c b/fs/squashfs/namei.c
index 9e398653b22b..5266bd8ad932 100644
--- a/fs/squashfs/namei.c
+++ b/fs/squashfs/namei.c
@@ -57,7 +57,6 @@
57#include <linux/slab.h> 57#include <linux/slab.h>
58#include <linux/string.h> 58#include <linux/string.h>
59#include <linux/dcache.h> 59#include <linux/dcache.h>
60#include <linux/zlib.h>
61 60
62#include "squashfs_fs.h" 61#include "squashfs_fs.h"
63#include "squashfs_fs_sb.h" 62#include "squashfs_fs_sb.h"
diff --git a/fs/squashfs/squashfs.h b/fs/squashfs/squashfs.h
index 0e9feb6adf7e..fe2587af5512 100644
--- a/fs/squashfs/squashfs.h
+++ b/fs/squashfs/squashfs.h
@@ -51,6 +51,9 @@ extern struct squashfs_cache_entry *squashfs_get_datablock(struct super_block *,
51 u64, int); 51 u64, int);
52extern int squashfs_read_table(struct super_block *, void *, u64, int); 52extern int squashfs_read_table(struct super_block *, void *, u64, int);
53 53
54/* decompressor.c */
55extern const struct squashfs_decompressor *squashfs_lookup_decompressor(int);
56
54/* export.c */ 57/* export.c */
55extern __le64 *squashfs_read_inode_lookup_table(struct super_block *, u64, 58extern __le64 *squashfs_read_inode_lookup_table(struct super_block *, u64,
56 unsigned int); 59 unsigned int);
@@ -71,7 +74,7 @@ extern struct inode *squashfs_iget(struct super_block *, long long,
71extern int squashfs_read_inode(struct inode *, long long); 74extern int squashfs_read_inode(struct inode *, long long);
72 75
73/* 76/*
74 * Inodes and files operations 77 * Inodes, files and decompressor operations
75 */ 78 */
76 79
77/* dir.c */ 80/* dir.c */
@@ -88,3 +91,6 @@ extern const struct inode_operations squashfs_dir_inode_ops;
88 91
89/* symlink.c */ 92/* symlink.c */
90extern const struct address_space_operations squashfs_symlink_aops; 93extern const struct address_space_operations squashfs_symlink_aops;
94
95/* zlib_wrapper.c */
96extern const struct squashfs_decompressor squashfs_zlib_comp_ops;
diff --git a/fs/squashfs/squashfs_fs.h b/fs/squashfs/squashfs_fs.h
index 283daafc568e..79024245ea00 100644
--- a/fs/squashfs/squashfs_fs.h
+++ b/fs/squashfs/squashfs_fs.h
@@ -183,8 +183,6 @@
183#define SQUASHFS_MAX_FILE_SIZE (1LL << \ 183#define SQUASHFS_MAX_FILE_SIZE (1LL << \
184 (SQUASHFS_MAX_FILE_SIZE_LOG - 2)) 184 (SQUASHFS_MAX_FILE_SIZE_LOG - 2))
185 185
186#define SQUASHFS_MARKER_BYTE 0xff
187
188/* meta index cache */ 186/* meta index cache */
189#define SQUASHFS_META_INDEXES (SQUASHFS_METADATA_SIZE / sizeof(unsigned int)) 187#define SQUASHFS_META_INDEXES (SQUASHFS_METADATA_SIZE / sizeof(unsigned int))
190#define SQUASHFS_META_ENTRIES 127 188#define SQUASHFS_META_ENTRIES 127
@@ -211,7 +209,9 @@ struct meta_index {
211/* 209/*
212 * definitions for structures on disk 210 * definitions for structures on disk
213 */ 211 */
214#define ZLIB_COMPRESSION 1 212#define ZLIB_COMPRESSION 1
213#define LZMA_COMPRESSION 2
214#define LZO_COMPRESSION 3
215 215
216struct squashfs_super_block { 216struct squashfs_super_block {
217 __le32 s_magic; 217 __le32 s_magic;
diff --git a/fs/squashfs/squashfs_fs_sb.h b/fs/squashfs/squashfs_fs_sb.h
index c8c65614dd1c..2e77dc547e25 100644
--- a/fs/squashfs/squashfs_fs_sb.h
+++ b/fs/squashfs/squashfs_fs_sb.h
@@ -52,25 +52,25 @@ struct squashfs_cache_entry {
 };
 
 struct squashfs_sb_info {
-	int			devblksize;
-	int			devblksize_log2;
-	struct squashfs_cache	*block_cache;
-	struct squashfs_cache	*fragment_cache;
-	struct squashfs_cache	*read_page;
-	int			next_meta_index;
-	__le64			*id_table;
-	__le64			*fragment_index;
-	unsigned int		*fragment_index_2;
+	const struct squashfs_decompressor	*decompressor;
+	int			devblksize;
+	int			devblksize_log2;
+	struct squashfs_cache	*block_cache;
+	struct squashfs_cache	*fragment_cache;
+	struct squashfs_cache	*read_page;
+	int			next_meta_index;
+	__le64			*id_table;
+	__le64			*fragment_index;
 	struct mutex		read_data_mutex;
 	struct mutex		meta_index_mutex;
 	struct meta_index	*meta_index;
-	z_stream		stream;
+	void			*stream;
 	__le64			*inode_lookup_table;
 	u64			inode_table;
 	u64			directory_table;
 	unsigned int		block_size;
 	unsigned short		block_log;
 	long long		bytes_used;
 	unsigned int		inodes;
 };
 #endif
diff --git a/fs/squashfs/super.c b/fs/squashfs/super.c
index 6c197ef53add..3550aec2f655 100644
--- a/fs/squashfs/super.c
+++ b/fs/squashfs/super.c
@@ -35,34 +35,41 @@
 #include <linux/pagemap.h>
 #include <linux/init.h>
 #include <linux/module.h>
-#include <linux/zlib.h>
 #include <linux/magic.h>
 
 #include "squashfs_fs.h"
 #include "squashfs_fs_sb.h"
 #include "squashfs_fs_i.h"
 #include "squashfs.h"
+#include "decompressor.h"
 
 static struct file_system_type squashfs_fs_type;
 static const struct super_operations squashfs_super_ops;
 
-static int supported_squashfs_filesystem(short major, short minor, short comp)
+static const struct squashfs_decompressor *supported_squashfs_filesystem(short
+	major, short minor, short id)
 {
+	const struct squashfs_decompressor *decompressor;
+
 	if (major < SQUASHFS_MAJOR) {
 		ERROR("Major/Minor mismatch, older Squashfs %d.%d "
 			"filesystems are unsupported\n", major, minor);
-		return -EINVAL;
+		return NULL;
 	} else if (major > SQUASHFS_MAJOR || minor > SQUASHFS_MINOR) {
 		ERROR("Major/Minor mismatch, trying to mount newer "
 			"%d.%d filesystem\n", major, minor);
 		ERROR("Please update your kernel\n");
-		return -EINVAL;
+		return NULL;
 	}
 
-	if (comp != ZLIB_COMPRESSION)
-		return -EINVAL;
+	decompressor = squashfs_lookup_decompressor(id);
+	if (!decompressor->supported) {
+		ERROR("Filesystem uses \"%s\" compression. This is not "
+			"supported\n", decompressor->name);
+		return NULL;
+	}
 
-	return 0;
+	return decompressor;
 }
 
 
@@ -87,13 +94,6 @@ static int squashfs_fill_super(struct super_block *sb, void *data, int silent)
 	}
 	msblk = sb->s_fs_info;
 
-	msblk->stream.workspace = kmalloc(zlib_inflate_workspacesize(),
-		GFP_KERNEL);
-	if (msblk->stream.workspace == NULL) {
-		ERROR("Failed to allocate zlib workspace\n");
-		goto failure;
-	}
-
 	sblk = kzalloc(sizeof(*sblk), GFP_KERNEL);
 	if (sblk == NULL) {
 		ERROR("Failed to allocate squashfs_super_block\n");
@@ -120,25 +120,25 @@ static int squashfs_fill_super(struct super_block *sb, void *data, int silent)
 		goto failed_mount;
 	}
 
+	err = -EINVAL;
+
 	/* Check it is a SQUASHFS superblock */
 	sb->s_magic = le32_to_cpu(sblk->s_magic);
 	if (sb->s_magic != SQUASHFS_MAGIC) {
 		if (!silent)
 			ERROR("Can't find a SQUASHFS superblock on %s\n",
						bdevname(sb->s_bdev, b));
-		err = -EINVAL;
 		goto failed_mount;
 	}
 
-	/* Check the MAJOR & MINOR versions and compression type */
-	err = supported_squashfs_filesystem(le16_to_cpu(sblk->s_major),
+	/* Check the MAJOR & MINOR versions and lookup compression type */
+	msblk->decompressor = supported_squashfs_filesystem(
+			le16_to_cpu(sblk->s_major),
 			le16_to_cpu(sblk->s_minor),
 			le16_to_cpu(sblk->compression));
-	if (err < 0)
+	if (msblk->decompressor == NULL)
 		goto failed_mount;
 
-	err = -EINVAL;
-
 	/*
 	 * Check if there's xattrs in the filesystem.  These are not
 	 * supported in this version, so warn that they will be ignored.
@@ -205,6 +205,10 @@ static int squashfs_fill_super(struct super_block *sb, void *data, int silent)
 
 	err = -ENOMEM;
 
+	msblk->stream = squashfs_decompressor_init(msblk);
+	if (msblk->stream == NULL)
+		goto failed_mount;
+
 	msblk->block_cache = squashfs_cache_init("metadata",
 			SQUASHFS_CACHED_BLKS, SQUASHFS_METADATA_SIZE);
 	if (msblk->block_cache == NULL)
@@ -292,17 +296,16 @@ failed_mount:
 	squashfs_cache_delete(msblk->block_cache);
 	squashfs_cache_delete(msblk->fragment_cache);
 	squashfs_cache_delete(msblk->read_page);
+	squashfs_decompressor_free(msblk, msblk->stream);
 	kfree(msblk->inode_lookup_table);
 	kfree(msblk->fragment_index);
 	kfree(msblk->id_table);
-	kfree(msblk->stream.workspace);
 	kfree(sb->s_fs_info);
 	sb->s_fs_info = NULL;
 	kfree(sblk);
 	return err;
 
 failure:
-	kfree(msblk->stream.workspace);
 	kfree(sb->s_fs_info);
 	sb->s_fs_info = NULL;
 	return -ENOMEM;
@@ -346,10 +349,10 @@ static void squashfs_put_super(struct super_block *sb)
 		squashfs_cache_delete(sbi->block_cache);
 		squashfs_cache_delete(sbi->fragment_cache);
 		squashfs_cache_delete(sbi->read_page);
+		squashfs_decompressor_free(sbi, sbi->stream);
 		kfree(sbi->id_table);
 		kfree(sbi->fragment_index);
 		kfree(sbi->meta_index);
-		kfree(sbi->stream.workspace);
 		kfree(sb->s_fs_info);
 		sb->s_fs_info = NULL;
 	}
diff --git a/fs/squashfs/symlink.c b/fs/squashfs/symlink.c
index 83d87880aac8..e80be2022a7f 100644
--- a/fs/squashfs/symlink.c
+++ b/fs/squashfs/symlink.c
@@ -36,7 +36,6 @@
 #include <linux/slab.h>
 #include <linux/string.h>
 #include <linux/pagemap.h>
-#include <linux/zlib.h>
 
 #include "squashfs_fs.h"
 #include "squashfs_fs_sb.h"
diff --git a/fs/squashfs/zlib_wrapper.c b/fs/squashfs/zlib_wrapper.c
new file mode 100644
index 000000000000..4dd70e04333b
--- /dev/null
+++ b/fs/squashfs/zlib_wrapper.c
@@ -0,0 +1,150 @@
+/*
+ * Squashfs - a compressed read only filesystem for Linux
+ *
+ * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009
+ * Phillip Lougher <phillip@lougher.demon.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2,
+ * or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * zlib_wrapper.c
+ */
+
+
+#include <linux/mutex.h>
+#include <linux/buffer_head.h>
+#include <linux/zlib.h>
+
+#include "squashfs_fs.h"
+#include "squashfs_fs_sb.h"
+#include "squashfs_fs_i.h"
+#include "squashfs.h"
+#include "decompressor.h"
+
+static void *zlib_init(struct squashfs_sb_info *dummy)
+{
+	z_stream *stream = kmalloc(sizeof(z_stream), GFP_KERNEL);
+	if (stream == NULL)
+		goto failed;
+	stream->workspace = kmalloc(zlib_inflate_workspacesize(),
+		GFP_KERNEL);
+	if (stream->workspace == NULL)
+		goto failed;
+
+	return stream;
+
+failed:
+	ERROR("Failed to allocate zlib workspace\n");
+	kfree(stream);
+	return NULL;
+}
+
+
+static void zlib_free(void *strm)
+{
+	z_stream *stream = strm;
+
+	if (stream)
+		kfree(stream->workspace);
+	kfree(stream);
+}
+
+
+static int zlib_uncompress(struct squashfs_sb_info *msblk, void **buffer,
+	struct buffer_head **bh, int b, int offset, int length, int srclength,
+	int pages)
+{
+	int zlib_err = 0, zlib_init = 0;
+	int avail, bytes, k = 0, page = 0;
+	z_stream *stream = msblk->stream;
+
+	mutex_lock(&msblk->read_data_mutex);
+
+	stream->avail_out = 0;
+	stream->avail_in = 0;
+
+	bytes = length;
+	do {
+		if (stream->avail_in == 0 && k < b) {
+			avail = min(bytes, msblk->devblksize - offset);
+			bytes -= avail;
+			wait_on_buffer(bh[k]);
+			if (!buffer_uptodate(bh[k]))
+				goto release_mutex;
+
+			if (avail == 0) {
+				offset = 0;
+				put_bh(bh[k++]);
+				continue;
+			}
+
+			stream->next_in = bh[k]->b_data + offset;
+			stream->avail_in = avail;
+			offset = 0;
+		}
+
+		if (stream->avail_out == 0 && page < pages) {
+			stream->next_out = buffer[page++];
+			stream->avail_out = PAGE_CACHE_SIZE;
+		}
+
+		if (!zlib_init) {
+			zlib_err = zlib_inflateInit(stream);
+			if (zlib_err != Z_OK) {
+				ERROR("zlib_inflateInit returned unexpected "
+					"result 0x%x, srclength %d\n",
+					zlib_err, srclength);
+				goto release_mutex;
+			}
+			zlib_init = 1;
+		}
+
+		zlib_err = zlib_inflate(stream, Z_SYNC_FLUSH);
+
+		if (stream->avail_in == 0 && k < b)
+			put_bh(bh[k++]);
+	} while (zlib_err == Z_OK);
+
+	if (zlib_err != Z_STREAM_END) {
+		ERROR("zlib_inflate error, data probably corrupt\n");
+		goto release_mutex;
+	}
+
+	zlib_err = zlib_inflateEnd(stream);
+	if (zlib_err != Z_OK) {
+		ERROR("zlib_inflate error, data probably corrupt\n");
+		goto release_mutex;
+	}
+
+	mutex_unlock(&msblk->read_data_mutex);
+	return stream->total_out;
+
+release_mutex:
+	mutex_unlock(&msblk->read_data_mutex);
+
+	for (; k < b; k++)
+		put_bh(bh[k]);
+
+	return -EIO;
+}
+
+const struct squashfs_decompressor squashfs_zlib_comp_ops = {
+	.init = zlib_init,
+	.free = zlib_free,
+	.decompress = zlib_uncompress,
+	.id = ZLIB_COMPRESSION,
+	.name = "zlib",
+	.supported = 1
+};
+
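[Editor's note] Taken together with the squashfs.h, squashfs_fs.h and super.c hunks above, this new file reduces supporting another on-disk compression format to filling in one ops table and letting the superblock's compression id select it. A minimal sketch (decompressor.c itself is not part of this excerpt, so its table layout is assumed) of an entry for a format that is recognised but not yet implemented:

	/* Hypothetical stub: squashfs_lookup_decompressor(LZMA_COMPRESSION)
	 * would return this entry, and supported_squashfs_filesystem()
	 * would then refuse the mount with a "not supported" error
	 * because .supported == 0. */
	static const struct squashfs_decompressor squashfs_lzma_comp_ops = {
		.init = NULL,
		.free = NULL,
		.decompress = NULL,
		.id = LZMA_COMPRESSION,
		.name = "lzma",
		.supported = 0
	};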
diff --git a/fs/super.c b/fs/super.c
index aff046b0fe78..f35ac6022109 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -568,7 +568,7 @@ out:
 int do_remount_sb(struct super_block *sb, int flags, void *data, int force)
 {
 	int retval;
-	int remount_rw;
+	int remount_rw, remount_ro;
 
 	if (sb->s_frozen != SB_UNFROZEN)
 		return -EBUSY;
@@ -583,9 +583,12 @@ int do_remount_sb(struct super_block *sb, int flags, void *data, int force)
 	shrink_dcache_sb(sb);
 	sync_filesystem(sb);
 
+	remount_ro = (flags & MS_RDONLY) && !(sb->s_flags & MS_RDONLY);
+	remount_rw = !(flags & MS_RDONLY) && (sb->s_flags & MS_RDONLY);
+
 	/* If we are remounting RDONLY and current sb is read/write,
 	   make sure there are no rw files opened */
-	if ((flags & MS_RDONLY) && !(sb->s_flags & MS_RDONLY)) {
+	if (remount_ro) {
 		if (force)
 			mark_files_ro(sb);
 		else if (!fs_may_remount_ro(sb))
@@ -594,7 +597,6 @@ int do_remount_sb(struct super_block *sb, int flags, void *data, int force)
 		if (retval < 0 && retval != -ENOSYS)
 			return -EBUSY;
 	}
-	remount_rw = !(flags & MS_RDONLY) && (sb->s_flags & MS_RDONLY);
 
 	if (sb->s_op->remount_fs) {
 		retval = sb->s_op->remount_fs(sb, &flags, data);
@@ -604,6 +606,16 @@ int do_remount_sb(struct super_block *sb, int flags, void *data, int force)
 	sb->s_flags = (sb->s_flags & ~MS_RMT_MASK) | (flags & MS_RMT_MASK);
 	if (remount_rw)
 		vfs_dq_quota_on_remount(sb);
+	/*
+	 * Some filesystems modify their metadata via some other path than the
+	 * bdev buffer cache (eg. use a private mapping, or directories in
+	 * pagecache, etc). Also file data modifications go via their own
+	 * mappings. So If we try to mount readonly then copy the filesystem
+	 * from bdev, we could get stale data, so invalidate it to give a best
+	 * effort at coherency.
+	 */
+	if (remount_ro && sb->s_bdev)
+		invalidate_bdev(sb->s_bdev);
 	return 0;
 }
 
@@ -925,6 +937,9 @@ vfs_kern_mount(struct file_system_type *type, int flags, const char *name, void
 	if (!mnt)
 		goto out;
 
+	if (flags & MS_KERNMOUNT)
+		mnt->mnt_flags = MNT_INTERNAL;
+
 	if (data && !(type->fs_flags & FS_BINARY_MOUNTDATA)) {
 		secdata = alloc_secdata();
 		if (!secdata)
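[Editor's note] For clarity, the two predicates this hunk introduces are mutually exclusive and are both evaluated against the superblock flags as they stand before any transition side effects run:

	/* cur = sb->s_flags before remount, new = requested flags */
	remount_ro = (new & MS_RDONLY) && !(cur & MS_RDONLY);	/* rw -> ro */
	remount_rw = !(new & MS_RDONLY) && (cur & MS_RDONLY);	/* ro -> rw */

Hoisting remount_rw above the rw-files check (instead of computing it afterwards, as before) keeps both tests reading the pre-transition state; remount_ro is then reused at the end to invalidate the bdev cache, giving best-effort coherency with mappings that bypass the buffer cache.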
diff --git a/fs/sysv/inode.c b/fs/sysv/inode.c
index 9824743832a7..4573734d723d 100644
--- a/fs/sysv/inode.c
+++ b/fs/sysv/inode.c
@@ -26,6 +26,7 @@
 #include <linux/init.h>
 #include <linux/buffer_head.h>
 #include <linux/vfs.h>
+#include <linux/writeback.h>
 #include <linux/namei.h>
 #include <asm/byteorder.h>
 #include "sysv.h"
@@ -246,7 +247,7 @@ bad_inode:
 	return ERR_PTR(-EIO);
 }
 
-int sysv_write_inode(struct inode *inode, int wait)
+static int __sysv_write_inode(struct inode *inode, int wait)
 {
 	struct super_block * sb = inode->i_sb;
 	struct sysv_sb_info * sbi = SYSV_SB(sb);
@@ -296,9 +297,14 @@ int sysv_write_inode(struct inode *inode, int wait)
 	return 0;
 }
 
+int sysv_write_inode(struct inode *inode, struct writeback_control *wbc)
+{
+	return __sysv_write_inode(inode, wbc->sync_mode == WB_SYNC_ALL);
+}
+
 int sysv_sync_inode(struct inode *inode)
 {
-	return sysv_write_inode(inode, 1);
+	return __sysv_write_inode(inode, 1);
 }
 
 static void sysv_delete_inode(struct inode *inode)
diff --git a/fs/sysv/sysv.h b/fs/sysv/sysv.h
index 53786eb5cf60..94cb9b4d76c2 100644
--- a/fs/sysv/sysv.h
+++ b/fs/sysv/sysv.h
@@ -142,7 +142,7 @@ extern int __sysv_write_begin(struct file *file, struct address_space *mapping,
 
 /* inode.c */
 extern struct inode *sysv_iget(struct super_block *, unsigned int);
-extern int sysv_write_inode(struct inode *, int);
+extern int sysv_write_inode(struct inode *, struct writeback_control *wbc);
 extern int sysv_sync_inode(struct inode *);
 extern void sysv_set_inode(struct inode *, dev_t);
 extern int sysv_getattr(struct vfsmount *, struct dentry *, struct kstat *);
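[Editor's note] Every ->write_inode() conversion below (ubifs, udf, ufs, xfs) follows the same recipe as sysv here: the int wait argument becomes a struct writeback_control *, and the old flag is recovered as wbc->sync_mode == WB_SYNC_ALL. The ubifs hunks additionally pass NULL for the new argument on their internal calls, which appears safe only because ubifs_write_inode() never looks at wbc. A sketch of the pattern for a filesystem whose worker still takes the old int (the myfs names are placeholders):

	#include <linux/writeback.h>

	static int __myfs_write_inode(struct inode *inode, int wait);

	int myfs_write_inode(struct inode *inode, struct writeback_control *wbc)
	{
		/* WB_SYNC_ALL: the caller needs the inode on disk before
		 * we return, i.e. the old wait == 1 case */
		return __myfs_write_inode(inode, wbc->sync_mode == WB_SYNC_ALL);
	}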
diff --git a/fs/ubifs/dir.c b/fs/ubifs/dir.c
index 552fb0111fff..401e503d44a1 100644
--- a/fs/ubifs/dir.c
+++ b/fs/ubifs/dir.c
@@ -1120,7 +1120,7 @@ static int ubifs_rename(struct inode *old_dir, struct dentry *old_dentry,
 	if (release)
 		ubifs_release_budget(c, &ino_req);
 	if (IS_SYNC(old_inode))
-		err = old_inode->i_sb->s_op->write_inode(old_inode, 1);
+		err = old_inode->i_sb->s_op->write_inode(old_inode, NULL);
 	return err;
 
 out_cancel:
diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c
index 16a6444330ec..e26c02ab6cd5 100644
--- a/fs/ubifs/file.c
+++ b/fs/ubifs/file.c
@@ -1011,7 +1011,7 @@ static int ubifs_writepage(struct page *page, struct writeback_control *wbc)
 	/* Is the page fully inside @i_size? */
 	if (page->index < end_index) {
 		if (page->index >= synced_i_size >> PAGE_CACHE_SHIFT) {
-			err = inode->i_sb->s_op->write_inode(inode, 1);
+			err = inode->i_sb->s_op->write_inode(inode, NULL);
 			if (err)
 				goto out_unlock;
 			/*
@@ -1039,7 +1039,7 @@ static int ubifs_writepage(struct page *page, struct writeback_control *wbc)
 	kunmap_atomic(kaddr, KM_USER0);
 
 	if (i_size > synced_i_size) {
-		err = inode->i_sb->s_op->write_inode(inode, 1);
+		err = inode->i_sb->s_op->write_inode(inode, NULL);
 		if (err)
 			goto out_unlock;
 	}
@@ -1242,7 +1242,7 @@ static int do_setattr(struct ubifs_info *c, struct inode *inode,
 	if (release)
 		ubifs_release_budget(c, &req);
 	if (IS_SYNC(inode))
-		err = inode->i_sb->s_op->write_inode(inode, 1);
+		err = inode->i_sb->s_op->write_inode(inode, NULL);
 	return err;
 
 out:
@@ -1316,7 +1316,7 @@ int ubifs_fsync(struct file *file, struct dentry *dentry, int datasync)
 	 * the inode unless this is a 'datasync()' call.
 	 */
 	if (!datasync || (inode->i_state & I_DIRTY_DATASYNC)) {
-		err = inode->i_sb->s_op->write_inode(inode, 1);
+		err = inode->i_sb->s_op->write_inode(inode, NULL);
 		if (err)
 			return err;
 	}
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
index 43f9d19a6f33..4d2f2157dd3f 100644
--- a/fs/ubifs/super.c
+++ b/fs/ubifs/super.c
@@ -283,7 +283,7 @@ static void ubifs_destroy_inode(struct inode *inode)
 /*
  * Note, Linux write-back code calls this without 'i_mutex'.
  */
-static int ubifs_write_inode(struct inode *inode, int wait)
+static int ubifs_write_inode(struct inode *inode, struct writeback_control *wbc)
 {
 	int err = 0;
 	struct ubifs_info *c = inode->i_sb->s_fs_info;
diff --git a/fs/udf/balloc.c b/fs/udf/balloc.c
index e2ff180173a2..ccc3ad7242d4 100644
--- a/fs/udf/balloc.c
+++ b/fs/udf/balloc.c
@@ -551,7 +551,7 @@ static void udf_table_free_blocks(struct super_block *sb,
 	}
 
 	if (epos.offset + (2 * adsize) > sb->s_blocksize) {
-		char *sptr, *dptr;
+		unsigned char *sptr, *dptr;
 		int loffset;
 
 		brelse(oepos.bh);
diff --git a/fs/udf/dir.c b/fs/udf/dir.c
index 61d9a76a3a69..f0f2a436251e 100644
--- a/fs/udf/dir.c
+++ b/fs/udf/dir.c
@@ -45,8 +45,8 @@ static int do_udf_readdir(struct inode *dir, struct file *filp,
 	int block, iblock;
 	loff_t nf_pos = (filp->f_pos - 1) << 2;
 	int flen;
-	char *fname = NULL;
-	char *nameptr;
+	unsigned char *fname = NULL;
+	unsigned char *nameptr;
 	uint16_t liu;
 	uint8_t lfi;
 	loff_t size = udf_ext0_offset(dir) + dir->i_size;
diff --git a/fs/udf/inode.c b/fs/udf/inode.c
index c7da1a32b364..b57ab0402d89 100644
--- a/fs/udf/inode.c
+++ b/fs/udf/inode.c
@@ -1379,12 +1379,12 @@ static mode_t udf_convert_permissions(struct fileEntry *fe)
 	return mode;
 }
 
-int udf_write_inode(struct inode *inode, int sync)
+int udf_write_inode(struct inode *inode, struct writeback_control *wbc)
 {
 	int ret;
 
 	lock_kernel();
-	ret = udf_update_inode(inode, sync);
+	ret = udf_update_inode(inode, wbc->sync_mode == WB_SYNC_ALL);
 	unlock_kernel();
 
 	return ret;
@@ -1678,7 +1678,7 @@ int8_t udf_add_aext(struct inode *inode, struct extent_position *epos,
 		return -1;
 
 	if (epos->offset + (2 * adsize) > inode->i_sb->s_blocksize) {
-		char *sptr, *dptr;
+		unsigned char *sptr, *dptr;
 		struct buffer_head *nbh;
 		int err, loffset;
 		struct kernel_lb_addr obloc = epos->block;
diff --git a/fs/udf/namei.c b/fs/udf/namei.c
index 96757e3e3e04..db423ab078b1 100644
--- a/fs/udf/namei.c
+++ b/fs/udf/namei.c
@@ -34,8 +34,8 @@
 #include <linux/crc-itu-t.h>
 #include <linux/exportfs.h>
 
-static inline int udf_match(int len1, const char *name1, int len2,
-			    const char *name2)
+static inline int udf_match(int len1, const unsigned char *name1, int len2,
+			    const unsigned char *name2)
 {
 	if (len1 != len2)
 		return 0;
@@ -142,15 +142,15 @@ int udf_write_fi(struct inode *inode, struct fileIdentDesc *cfi,
 }
 
 static struct fileIdentDesc *udf_find_entry(struct inode *dir,
-					    struct qstr *child,
+					    const struct qstr *child,
 					    struct udf_fileident_bh *fibh,
 					    struct fileIdentDesc *cfi)
 {
 	struct fileIdentDesc *fi = NULL;
 	loff_t f_pos;
 	int block, flen;
-	char *fname = NULL;
-	char *nameptr;
+	unsigned char *fname = NULL;
+	unsigned char *nameptr;
 	uint8_t lfi;
 	uint16_t liu;
 	loff_t size;
@@ -308,7 +308,7 @@ static struct fileIdentDesc *udf_add_entry(struct inode *dir,
 {
 	struct super_block *sb = dir->i_sb;
 	struct fileIdentDesc *fi = NULL;
-	char *name = NULL;
+	unsigned char *name = NULL;
 	int namelen;
 	loff_t f_pos;
 	loff_t size = udf_ext0_offset(dir) + dir->i_size;
@@ -895,16 +895,16 @@ static int udf_symlink(struct inode *dir, struct dentry *dentry,
 {
 	struct inode *inode;
 	struct pathComponent *pc;
-	char *compstart;
+	const char *compstart;
 	struct udf_fileident_bh fibh;
 	struct extent_position epos = {};
 	int eoffset, elen = 0;
 	struct fileIdentDesc *fi;
 	struct fileIdentDesc cfi;
-	char *ea;
+	uint8_t *ea;
 	int err;
 	int block;
-	char *name = NULL;
+	unsigned char *name = NULL;
 	int namelen;
 	struct buffer_head *bh;
 	struct udf_inode_info *iinfo;
@@ -982,7 +982,7 @@ static int udf_symlink(struct inode *dir, struct dentry *dentry,
 
 	pc = (struct pathComponent *)(ea + elen);
 
-	compstart = (char *)symname;
+	compstart = symname;
 
 	do {
 		symname++;
diff --git a/fs/udf/symlink.c b/fs/udf/symlink.c
index c3265e1385d4..852e91845688 100644
--- a/fs/udf/symlink.c
+++ b/fs/udf/symlink.c
@@ -32,12 +32,12 @@
 #include <linux/buffer_head.h>
 #include "udf_i.h"
 
-static void udf_pc_to_char(struct super_block *sb, char *from, int fromlen,
-			   char *to)
+static void udf_pc_to_char(struct super_block *sb, unsigned char *from,
+			   int fromlen, unsigned char *to)
 {
 	struct pathComponent *pc;
 	int elen = 0;
-	char *p = to;
+	unsigned char *p = to;
 
 	while (elen < fromlen) {
 		pc = (struct pathComponent *)(from + elen);
@@ -75,9 +75,9 @@ static int udf_symlink_filler(struct file *file, struct page *page)
 {
 	struct inode *inode = page->mapping->host;
 	struct buffer_head *bh = NULL;
-	char *symlink;
+	unsigned char *symlink;
 	int err = -EIO;
-	char *p = kmap(page);
+	unsigned char *p = kmap(page);
 	struct udf_inode_info *iinfo;
 
 	lock_kernel();
diff --git a/fs/udf/udfdecl.h b/fs/udf/udfdecl.h
index 8d46f4294ee7..4223ac855da9 100644
--- a/fs/udf/udfdecl.h
+++ b/fs/udf/udfdecl.h
@@ -142,7 +142,7 @@ extern void udf_truncate(struct inode *);
 extern void udf_read_inode(struct inode *);
 extern void udf_delete_inode(struct inode *);
 extern void udf_clear_inode(struct inode *);
-extern int udf_write_inode(struct inode *, int);
+extern int udf_write_inode(struct inode *, struct writeback_control *wbc);
 extern long udf_block_map(struct inode *, sector_t);
 extern int udf_extend_file(struct inode *, struct extent_position *,
 			   struct kernel_long_ad *, sector_t);
diff --git a/fs/ufs/dir.c b/fs/ufs/dir.c
index 22af68f8b682..317a0d444f6b 100644
--- a/fs/ufs/dir.c
+++ b/fs/ufs/dir.c
@@ -31,7 +31,7 @@
  * len <= UFS_MAXNAMLEN and de != NULL are guaranteed by caller.
  */
 static inline int ufs_match(struct super_block *sb, int len,
-		const char * const name, struct ufs_dir_entry * de)
+		const unsigned char *name, struct ufs_dir_entry *de)
 {
 	if (len != ufs_get_de_namlen(sb, de))
 		return 0;
@@ -70,7 +70,7 @@ static inline unsigned long ufs_dir_pages(struct inode *inode)
 	return (inode->i_size+PAGE_CACHE_SIZE-1)>>PAGE_CACHE_SHIFT;
 }
 
-ino_t ufs_inode_by_name(struct inode *dir, struct qstr *qstr)
+ino_t ufs_inode_by_name(struct inode *dir, const struct qstr *qstr)
 {
 	ino_t res = 0;
 	struct ufs_dir_entry *de;
@@ -249,11 +249,11 @@ struct ufs_dir_entry *ufs_dotdot(struct inode *dir, struct page **p)
  * (as a parameter - res_dir). Page is returned mapped and unlocked.
  * Entry is guaranteed to be valid.
  */
-struct ufs_dir_entry *ufs_find_entry(struct inode *dir, struct qstr *qstr,
+struct ufs_dir_entry *ufs_find_entry(struct inode *dir, const struct qstr *qstr,
 				     struct page **res_page)
 {
 	struct super_block *sb = dir->i_sb;
-	const char *name = qstr->name;
+	const unsigned char *name = qstr->name;
 	int namelen = qstr->len;
 	unsigned reclen = UFS_DIR_REC_LEN(namelen);
 	unsigned long start, n;
@@ -313,7 +313,7 @@ found:
 int ufs_add_link(struct dentry *dentry, struct inode *inode)
 {
 	struct inode *dir = dentry->d_parent->d_inode;
-	const char *name = dentry->d_name.name;
+	const unsigned char *name = dentry->d_name.name;
 	int namelen = dentry->d_name.len;
 	struct super_block *sb = dir->i_sb;
 	unsigned reclen = UFS_DIR_REC_LEN(namelen);
diff --git a/fs/ufs/inode.c b/fs/ufs/inode.c
index 09aef49beedb..80b68c3702d1 100644
--- a/fs/ufs/inode.c
+++ b/fs/ufs/inode.c
@@ -36,6 +36,7 @@
 #include <linux/mm.h>
 #include <linux/smp_lock.h>
 #include <linux/buffer_head.h>
+#include <linux/writeback.h>
 #include <linux/quotaops.h>
 
 #include "ufs_fs.h"
@@ -891,11 +892,11 @@ static int ufs_update_inode(struct inode * inode, int do_sync)
 	return 0;
 }
 
-int ufs_write_inode (struct inode * inode, int wait)
+int ufs_write_inode(struct inode *inode, struct writeback_control *wbc)
 {
 	int ret;
 	lock_kernel();
-	ret = ufs_update_inode (inode, wait);
+	ret = ufs_update_inode(inode, wbc->sync_mode == WB_SYNC_ALL);
 	unlock_kernel();
 	return ret;
 }
diff --git a/fs/ufs/ufs.h b/fs/ufs/ufs.h
index 0b4c39bc0d9e..43f9f5d5670e 100644
--- a/fs/ufs/ufs.h
+++ b/fs/ufs/ufs.h
@@ -86,9 +86,9 @@ extern void ufs_put_cylinder (struct super_block *, unsigned);
 /* dir.c */
 extern const struct inode_operations ufs_dir_inode_operations;
 extern int ufs_add_link (struct dentry *, struct inode *);
-extern ino_t ufs_inode_by_name(struct inode *, struct qstr *);
+extern ino_t ufs_inode_by_name(struct inode *, const struct qstr *);
 extern int ufs_make_empty(struct inode *, struct inode *);
-extern struct ufs_dir_entry *ufs_find_entry(struct inode *, struct qstr *, struct page **);
+extern struct ufs_dir_entry *ufs_find_entry(struct inode *, const struct qstr *, struct page **);
 extern int ufs_delete_entry(struct inode *, struct ufs_dir_entry *, struct page *);
 extern int ufs_empty_dir (struct inode *);
 extern struct ufs_dir_entry *ufs_dotdot(struct inode *, struct page **);
@@ -106,7 +106,7 @@ extern struct inode * ufs_new_inode (struct inode *, int);
 
 /* inode.c */
 extern struct inode *ufs_iget(struct super_block *, unsigned long);
-extern int ufs_write_inode (struct inode *, int);
+extern int ufs_write_inode (struct inode *, struct writeback_control *);
 extern int ufs_sync_inode (struct inode *);
 extern void ufs_delete_inode (struct inode *);
 extern struct buffer_head * ufs_bread (struct inode *, unsigned, int, int *);
diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c
index 25ea2408118f..71345a370d9f 100644
--- a/fs/xfs/linux-2.6/xfs_super.c
+++ b/fs/xfs/linux-2.6/xfs_super.c
@@ -1063,7 +1063,7 @@ xfs_log_inode(
 STATIC int
 xfs_fs_write_inode(
 	struct inode		*inode,
-	int			sync)
+	struct writeback_control *wbc)
 {
 	struct xfs_inode	*ip = XFS_I(inode);
 	struct xfs_mount	*mp = ip->i_mount;
@@ -1074,11 +1074,7 @@ xfs_fs_write_inode(
 	if (XFS_FORCED_SHUTDOWN(mp))
 		return XFS_ERROR(EIO);
 
-	if (sync) {
-		error = xfs_wait_on_pages(ip, 0, -1);
-		if (error)
-			goto out;
-
+	if (wbc->sync_mode == WB_SYNC_ALL) {
 		/*
 		 * Make sure the inode has hit stable storage. By using the
 		 * log and the fsync transactions we reduce the IOs we have
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
index 70504fcf14cd..14dafd608230 100644
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -245,7 +245,7 @@ typedef struct xfs_mount {
 	struct xfs_qmops	*m_qm_ops;	/* vector of XQM ops */
 	atomic_t		m_active_trans;	/* number trans frozen */
 #ifdef HAVE_PERCPU_SB
-	xfs_icsb_cnts_t	*m_sb_cnts;	/* per-cpu superblock counters */
+	xfs_icsb_cnts_t __percpu *m_sb_cnts;	/* per-cpu superblock counters */
 	unsigned long		m_icsb_counters; /* disabled per-cpu counters */
 	struct notifier_block	m_icsb_notifier; /* hotplug cpu notifier */
 	struct mutex		m_icsb_mutex;	/* balancer sync lock */
diff --git a/include/acpi/processor.h b/include/acpi/processor.h
index 29831768c0e6..1172c27adadf 100644
--- a/include/acpi/processor.h
+++ b/include/acpi/processor.h
@@ -238,7 +238,7 @@ struct acpi_processor_errata {
 
 extern int acpi_processor_preregister_performance(struct
 						  acpi_processor_performance
-						  *performance);
+						  __percpu *performance);
 
 extern int acpi_processor_register_performance(struct acpi_processor_performance
 						*performance, unsigned int cpu);
diff --git a/include/asm-generic/local.h b/include/asm-generic/local.h
index fc218444e315..c8a5d68541d7 100644
--- a/include/asm-generic/local.h
+++ b/include/asm-generic/local.h
@@ -52,23 +52,4 @@ typedef struct
 #define __local_add(i,l)	local_set((l), local_read(l) + (i))
 #define __local_sub(i,l)	local_set((l), local_read(l) - (i))
 
-/* Use these for per-cpu local_t variables: on some archs they are
- * much more efficient than these naive implementations.  Note they take
- * a variable (eg. mystruct.foo), not an address.
- */
-#define cpu_local_read(l)	local_read(&__get_cpu_var(l))
-#define cpu_local_set(l, i)	local_set(&__get_cpu_var(l), (i))
-#define cpu_local_inc(l)	local_inc(&__get_cpu_var(l))
-#define cpu_local_dec(l)	local_dec(&__get_cpu_var(l))
-#define cpu_local_add(i, l)	local_add((i), &__get_cpu_var(l))
-#define cpu_local_sub(i, l)	local_sub((i), &__get_cpu_var(l))
-
-/* Non-atomic increments, ie. preemption disabled and won't be touched
- * in interrupt, etc.  Some archs can optimize this case well.
- */
-#define __cpu_local_inc(l)	__local_inc(&__get_cpu_var(l))
-#define __cpu_local_dec(l)	__local_dec(&__get_cpu_var(l))
-#define __cpu_local_add(i, l)	__local_add((i), &__get_cpu_var(l))
-#define __cpu_local_sub(i, l)	__local_sub((i), &__get_cpu_var(l))
-
 #endif /* _ASM_GENERIC_LOCAL_H */
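[Editor's note] With the cpu_local_* wrappers removed, any remaining callers are expected to open-code the same operation on a per-cpu local_t; the primitives kept above are sufficient, since the deleted macros were thin combinations of them. A minimal sketch of the equivalent open-coded form (the counter name is illustrative):

	#include <asm/local.h>
	#include <linux/percpu.h>

	static DEFINE_PER_CPU(local_t, hit_count);

	static void count_hit(void)
	{
		/* formerly: cpu_local_inc(hit_count); */
		local_inc(&__get_cpu_var(hit_count));
	}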
diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h
index 8087b90d4673..04f91c2d3f7b 100644
--- a/include/asm-generic/percpu.h
+++ b/include/asm-generic/percpu.h
@@ -41,7 +41,11 @@ extern unsigned long __per_cpu_offset[NR_CPUS];
  * Only S390 provides its own means of moving the pointer.
  */
 #ifndef SHIFT_PERCPU_PTR
-#define SHIFT_PERCPU_PTR(__p, __offset)	RELOC_HIDE((__p), (__offset))
+/* Weird cast keeps both GCC and sparse happy. */
+#define SHIFT_PERCPU_PTR(__p, __offset)	({				\
+	__verify_pcpu_ptr((__p));					\
+	RELOC_HIDE((typeof(*(__p)) __kernel __force *)(__p), (__offset)); \
+})
 #endif
 
 /*
@@ -50,11 +54,11 @@ extern unsigned long __per_cpu_offset[NR_CPUS];
  * offset.
  */
 #define per_cpu(var, cpu) \
-	(*SHIFT_PERCPU_PTR(&per_cpu_var(var), per_cpu_offset(cpu)))
+	(*SHIFT_PERCPU_PTR(&(var), per_cpu_offset(cpu)))
 #define __get_cpu_var(var) \
-	(*SHIFT_PERCPU_PTR(&per_cpu_var(var), my_cpu_offset))
+	(*SHIFT_PERCPU_PTR(&(var), my_cpu_offset))
 #define __raw_get_cpu_var(var) \
-	(*SHIFT_PERCPU_PTR(&per_cpu_var(var), __my_cpu_offset))
+	(*SHIFT_PERCPU_PTR(&(var), __my_cpu_offset))
 
 #define this_cpu_ptr(ptr) SHIFT_PERCPU_PTR(ptr, my_cpu_offset)
 #define __this_cpu_ptr(ptr) SHIFT_PERCPU_PTR(ptr, __my_cpu_offset)
@@ -66,9 +70,9 @@ extern void setup_per_cpu_areas(void);
 
 #else /* ! SMP */
 
-#define per_cpu(var, cpu)			(*((void)(cpu), &per_cpu_var(var)))
-#define __get_cpu_var(var)			per_cpu_var(var)
-#define __raw_get_cpu_var(var)			per_cpu_var(var)
+#define per_cpu(var, cpu)			(*((void)(cpu), &(var)))
+#define __get_cpu_var(var)			(var)
+#define __raw_get_cpu_var(var)			(var)
 #define this_cpu_ptr(ptr) per_cpu_ptr(ptr, 0)
 #define __this_cpu_ptr(ptr) this_cpu_ptr(ptr)
 
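[Editor's note] The practical effect of dropping per_cpu_var() here is that these accessors now take the per-cpu variable's real name directly, while SHIFT_PERCPU_PTR gains a sparse-friendly address-space cast plus a __verify_pcpu_ptr() sanity check. A small usage sketch (the variable name is illustrative):

	static DEFINE_PER_CPU(int, nr_widgets);

	static void widget_account(int cpu)
	{
		/* no per_cpu_var() wrapping of the name any more */
		per_cpu(nr_widgets, cpu)++;
		__get_cpu_var(nr_widgets)--;
	}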
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index ffac157fb5b2..4a3c4e441027 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -801,6 +801,7 @@ struct drm_driver {
 	 */
 	int (*gem_init_object) (struct drm_gem_object *obj);
 	void (*gem_free_object) (struct drm_gem_object *obj);
+	void (*gem_free_object_unlocked) (struct drm_gem_object *obj);
 
 	/* vga arb irq handler */
 	void (*vgaarb_irq)(struct drm_device *dev, bool state);
@@ -1427,6 +1428,7 @@ extern void drm_sysfs_connector_remove(struct drm_connector *connector);
 int drm_gem_init(struct drm_device *dev);
 void drm_gem_destroy(struct drm_device *dev);
 void drm_gem_object_free(struct kref *kref);
+void drm_gem_object_free_unlocked(struct kref *kref);
 struct drm_gem_object *drm_gem_object_alloc(struct drm_device *dev,
 					    size_t size);
 void drm_gem_object_handle_free(struct kref *kref);
@@ -1443,10 +1445,15 @@ drm_gem_object_reference(struct drm_gem_object *obj)
 static inline void
 drm_gem_object_unreference(struct drm_gem_object *obj)
 {
-	if (obj == NULL)
-		return;
+	if (obj != NULL)
+		kref_put(&obj->refcount, drm_gem_object_free);
+}
 
-	kref_put(&obj->refcount, drm_gem_object_free);
+static inline void
+drm_gem_object_unreference_unlocked(struct drm_gem_object *obj)
+{
+	if (obj != NULL)
+		kref_put(&obj->refcount, drm_gem_object_free_unlocked);
 }
 
 int drm_gem_handle_create(struct drm_file *file_priv,
@@ -1475,6 +1482,21 @@ drm_gem_object_handle_unreference(struct drm_gem_object *obj)
 	drm_gem_object_unreference(obj);
 }
 
+static inline void
+drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj)
+{
+	if (obj == NULL)
+		return;
+
+	/*
+	 * Must bump handle count first as this may be the last
+	 * ref, in which case the object would disappear before we
+	 * checked for a name
+	 */
+	kref_put(&obj->handlecount, drm_gem_object_handle_free);
+	drm_gem_object_unreference_unlocked(obj);
+}
+
 struct drm_gem_object *drm_gem_object_lookup(struct drm_device *dev,
 					     struct drm_file *filp,
 					     u32 handle);
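[Editor's note] The _unlocked variants exist for call paths that cannot, or do not want to, hold dev->struct_mutex around the final kref_put(); the free side is expected to take whatever locking it needs itself, via the driver's new gem_free_object_unlocked hook. A sketch of the intended call site (the driver and function names are illustrative):

	/* Called without dev->struct_mutex held. */
	static void mydrv_release_bo(struct drm_gem_object *obj)
	{
		drm_gem_object_unreference_unlocked(obj);
	}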
diff --git a/include/drm/drm_buffer.h b/include/drm/drm_buffer.h
new file mode 100644
index 000000000000..322dbff3f861
--- /dev/null
+++ b/include/drm/drm_buffer.h
@@ -0,0 +1,148 @@
+/**************************************************************************
+ *
+ * Copyright 2010 Pauli Nieminen.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ *
+ **************************************************************************/
+/*
+ * Multipart buffer for copying data which is larger than the page size.
+ *
+ * Authors:
+ * Pauli Nieminen <suokkos-at-gmail-dot-com>
+ */
+
+#ifndef _DRM_BUFFER_H_
+#define _DRM_BUFFER_H_
+
+#include "drmP.h"
+
+struct drm_buffer {
+	int iterator;
+	int size;
+	char *data[];
+};
+
+
+/**
+ * Return the index of the page that the buffer is currently pointing at.
+ */
+static inline int drm_buffer_page(struct drm_buffer *buf)
+{
+	return buf->iterator / PAGE_SIZE;
+}
+/**
+ * Return the index of the current byte in the page.
+ */
+static inline int drm_buffer_index(struct drm_buffer *buf)
+{
+	return buf->iterator & (PAGE_SIZE - 1);
+}
+/**
+ * Return the number of bytes that are left to process.
+ */
+static inline int drm_buffer_unprocessed(struct drm_buffer *buf)
+{
+	return buf->size - buf->iterator;
+}
+
+/**
+ * Advance the buffer iterator by the given number of bytes.
+ */
+static inline void drm_buffer_advance(struct drm_buffer *buf, int bytes)
+{
+	buf->iterator += bytes;
+}
+
+/**
+ * Allocate the drm buffer object.
+ *
+ * buf: A pointer to a pointer where the object is stored.
+ * size: The number of bytes to allocate.
+ */
+extern int drm_buffer_alloc(struct drm_buffer **buf, int size);
+
+/**
+ * Copy the user data to the beginning of the buffer and reset the
+ * processing iterator.
+ *
+ * user_data: A pointer to the data that is copied to the buffer.
+ * size: The number of bytes to copy.
+ */
+extern int drm_buffer_copy_from_user(struct drm_buffer *buf,
+		void __user *user_data, int size);
+
+/**
+ * Free the drm buffer object.
+ */
+extern void drm_buffer_free(struct drm_buffer *buf);
+
+/**
+ * Read an object from a buffer that may be split across multiple pages. If
+ * the object is not split, the function just returns a pointer to the
+ * object in the buffer. If the object is split, its data is copied to the
+ * stack object supplied by the caller.
+ *
+ * The processing location of the buffer is also advanced to the next byte
+ * after the object.
+ *
+ * objsize: The size of the object in bytes.
+ * stack_obj: A pointer to a memory location where the object can be copied.
+ */
+extern void *drm_buffer_read_object(struct drm_buffer *buf,
+		int objsize, void *stack_obj);
+
+/**
+ * Return a pointer to the dword which is offset elements away from the
+ * current processing location.
+ *
+ * The caller must make sure that the dword is not split in the buffer. This
+ * requirement is easily met if all the sizes of objects in the buffer are
+ * multiples of the dword size and PAGE_SIZE is a multiple of the dword size.
+ *
+ * A call to this function doesn't change the processing location.
+ *
+ * offset: The index of the dword relative to the internal iterator.
+ */
+static inline void *drm_buffer_pointer_to_dword(struct drm_buffer *buffer,
+		int offset)
+{
+	int iter = buffer->iterator + offset * 4;
+	return &buffer->data[iter / PAGE_SIZE][iter & (PAGE_SIZE - 1)];
+}
+/**
+ * Return a pointer to the byte which is offset elements away from the
+ * current processing location.
+ *
+ * A call to this function doesn't change the processing location.
+ *
+ * offset: The index of the byte relative to the internal iterator.
+ */
+static inline void *drm_buffer_pointer_to_byte(struct drm_buffer *buffer,
+		int offset)
+{
+	int iter = buffer->iterator + offset;
+	return &buffer->data[iter / PAGE_SIZE][iter & (PAGE_SIZE - 1)];
+}
+
+#endif
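[Editor's note] A sketch of the calling sequence the header implies, assuming cmd_user points at size bytes of userspace command data (local names are illustrative):

	static int process_cmds(void __user *cmd_user, int size)
	{
		struct drm_buffer *buf;
		uint32_t stack_word, *cmd;
		int ret;

		ret = drm_buffer_alloc(&buf, size);
		if (ret)
			return ret;
		ret = drm_buffer_copy_from_user(buf, cmd_user, size);
		if (ret) {
			drm_buffer_free(buf);
			return ret;
		}
		while (drm_buffer_unprocessed(buf) >= 4) {
			/* copies into stack_word only when the dword
			 * straddles a page boundary */
			cmd = drm_buffer_read_object(buf, 4, &stack_word);
			/* ... decode *cmd ... */
		}
		drm_buffer_free(buf);
		return 0;
	}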
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index fdf43abc36db..1347524a8e30 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -801,4 +801,6 @@ extern struct drm_display_mode *drm_gtf_mode(struct drm_device *dev,
 				bool interlaced, int margins);
 extern int drm_add_modes_noedid(struct drm_connector *connector,
 				int hdisplay, int vdisplay);
+
+extern bool drm_edid_is_valid(struct edid *edid);
 #endif /* __DRM_CRTC_H__ */
diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h
index d33c3e038606..b4209898f115 100644
--- a/include/drm/drm_edid.h
+++ b/include/drm/drm_edid.h
@@ -201,4 +201,7 @@ struct edid {
 
 #define EDID_PRODUCT_ID(e) ((e)->prod_code[0] | ((e)->prod_code[1] << 8))
 
+/* define the number of Extension EDID block */
+#define DRM_MAX_EDID_EXT_NUM 4
+
 #endif /* __DRM_EDID_H__ */
diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
index e6f3b120f51a..676104b7818c 100644
--- a/include/drm/drm_pciids.h
+++ b/include/drm/drm_pciids.h
@@ -141,6 +141,41 @@
 	{0x1002, 0x5e4c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x5e4d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x5e4f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x6880, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x6888, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x6889, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x688A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x6898, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x6899, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x689c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HEMLOCK|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x689d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HEMLOCK|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x689e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x68a0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_JUNIPER|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x68a1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_JUNIPER|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x68a8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_JUNIPER|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x68a9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_JUNIPER|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x68b0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_JUNIPER|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x68b8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_JUNIPER|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x68b9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_JUNIPER|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x68be, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_JUNIPER|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x68c0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_REDWOOD|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x68c1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_REDWOOD|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x68c8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_REDWOOD|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x68c9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_REDWOOD|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x68d8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_REDWOOD|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x68d9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_REDWOOD|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x68da, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_REDWOOD|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x68de, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_REDWOOD|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x68e0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x68e1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x68e4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x68e5, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x68e8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x68e9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x68f1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x68f8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x68f9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x68fe, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x7100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x7101, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x7102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
@@ -558,4 +593,5 @@
 	{0x8086, 0x35e8, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
 	{0x8086, 0x0042, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
 	{0x8086, 0x0046, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
+	{0x8086, 0x0102, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
 	{0, 0, 0}
diff --git a/include/drm/nouveau_drm.h b/include/drm/nouveau_drm.h
index f745948b61e4..a6a9f4af5ebd 100644
--- a/include/drm/nouveau_drm.h
+++ b/include/drm/nouveau_drm.h
@@ -25,13 +25,14 @@
 #ifndef __NOUVEAU_DRM_H__
 #define __NOUVEAU_DRM_H__
 
-#define NOUVEAU_DRM_HEADER_PATCHLEVEL 15
+#define NOUVEAU_DRM_HEADER_PATCHLEVEL 16
 
 struct drm_nouveau_channel_alloc {
 	uint32_t fb_ctxdma_handle;
 	uint32_t tt_ctxdma_handle;
 
 	int channel;
+	uint32_t pushbuf_domains;
 
 	/* Notifier memory */
 	uint32_t notifier_handle;
@@ -109,68 +110,58 @@ struct drm_nouveau_gem_new {
 	uint32_t align;
 };
 
+#define NOUVEAU_GEM_MAX_BUFFERS 1024
+struct drm_nouveau_gem_pushbuf_bo_presumed {
+	uint32_t valid;
+	uint32_t domain;
+	uint64_t offset;
+};
+
 struct drm_nouveau_gem_pushbuf_bo {
 	uint64_t user_priv;
 	uint32_t handle;
 	uint32_t read_domains;
 	uint32_t write_domains;
 	uint32_t valid_domains;
-	uint32_t presumed_ok;
-	uint32_t presumed_domain;
-	uint64_t presumed_offset;
+	struct drm_nouveau_gem_pushbuf_bo_presumed presumed;
 };
 
 #define NOUVEAU_GEM_RELOC_LOW (1 << 0)
 #define NOUVEAU_GEM_RELOC_HIGH (1 << 1)
 #define NOUVEAU_GEM_RELOC_OR (1 << 2)
+#define NOUVEAU_GEM_MAX_RELOCS 1024
 struct drm_nouveau_gem_pushbuf_reloc {
+	uint32_t reloc_bo_index;
+	uint32_t reloc_bo_offset;
 	uint32_t bo_index;
-	uint32_t reloc_index;
 	uint32_t flags;
 	uint32_t data;
 	uint32_t vor;
 	uint32_t tor;
 };
 
-#define NOUVEAU_GEM_MAX_BUFFERS 1024
-#define NOUVEAU_GEM_MAX_RELOCS 1024
+#define NOUVEAU_GEM_MAX_PUSH 512
+struct drm_nouveau_gem_pushbuf_push {
+	uint32_t bo_index;
+	uint32_t pad;
+	uint64_t offset;
+	uint64_t length;
+};
 
 struct drm_nouveau_gem_pushbuf {
 	uint32_t channel;
-	uint32_t nr_dwords;
 	uint32_t nr_buffers;
-	uint32_t nr_relocs;
-	uint64_t dwords;
 	uint64_t buffers;
-	uint64_t relocs;
-};
-
-struct drm_nouveau_gem_pushbuf_call {
-	uint32_t channel;
-	uint32_t handle;
-	uint32_t offset;
-	uint32_t nr_buffers;
 	uint32_t nr_relocs;
-	uint32_t nr_dwords;
-	uint64_t buffers;
+	uint32_t nr_push;
 	uint64_t relocs;
+	uint64_t push;
 	uint32_t suffix0;
 	uint32_t suffix1;
-	/* below only accessed for CALL2 */
 	uint64_t vram_available;
 	uint64_t gart_available;
 };
 
-struct drm_nouveau_gem_pin {
-	uint32_t handle;
-	uint32_t domain;
-	uint64_t offset;
-};
-
-struct drm_nouveau_gem_unpin {
-	uint32_t handle;
-};
-
 #define NOUVEAU_GEM_CPU_PREP_NOWAIT 0x00000001
 #define NOUVEAU_GEM_CPU_PREP_NOBLOCK 0x00000002
 #define NOUVEAU_GEM_CPU_PREP_WRITE 0x00000004
@@ -183,14 +174,6 @@ struct drm_nouveau_gem_cpu_fini {
 	uint32_t handle;
 };
 
-struct drm_nouveau_gem_tile {
-	uint32_t handle;
-	uint32_t offset;
-	uint32_t size;
-	uint32_t tile_mode;
-	uint32_t tile_flags;
-};
-
 enum nouveau_bus_type {
 	NV_AGP = 0,
 	NV_PCI = 1,
@@ -200,22 +183,17 @@ enum nouveau_bus_type {
 struct drm_nouveau_sarea {
 };
 
-#define DRM_NOUVEAU_CARD_INIT 0x00
-#define DRM_NOUVEAU_GETPARAM 0x01
-#define DRM_NOUVEAU_SETPARAM 0x02
-#define DRM_NOUVEAU_CHANNEL_ALLOC 0x03
-#define DRM_NOUVEAU_CHANNEL_FREE 0x04
-#define DRM_NOUVEAU_GROBJ_ALLOC 0x05
-#define DRM_NOUVEAU_NOTIFIEROBJ_ALLOC 0x06
-#define DRM_NOUVEAU_GPUOBJ_FREE 0x07
+#define DRM_NOUVEAU_GETPARAM 0x00
+#define DRM_NOUVEAU_SETPARAM 0x01
+#define DRM_NOUVEAU_CHANNEL_ALLOC 0x02
+#define DRM_NOUVEAU_CHANNEL_FREE 0x03
+#define DRM_NOUVEAU_GROBJ_ALLOC 0x04
+#define DRM_NOUVEAU_NOTIFIEROBJ_ALLOC 0x05
+#define DRM_NOUVEAU_GPUOBJ_FREE 0x06
 #define DRM_NOUVEAU_GEM_NEW 0x40
 #define DRM_NOUVEAU_GEM_PUSHBUF 0x41
-#define DRM_NOUVEAU_GEM_PUSHBUF_CALL 0x42
-#define DRM_NOUVEAU_GEM_PIN 0x43 /* !KMS only */
-#define DRM_NOUVEAU_GEM_UNPIN 0x44 /* !KMS only */
-#define DRM_NOUVEAU_GEM_CPU_PREP 0x45
-#define DRM_NOUVEAU_GEM_CPU_FINI 0x46
-#define DRM_NOUVEAU_GEM_INFO 0x47
-#define DRM_NOUVEAU_GEM_PUSHBUF_CALL2 0x48
+#define DRM_NOUVEAU_GEM_CPU_PREP 0x42
+#define DRM_NOUVEAU_GEM_CPU_FINI 0x43
+#define DRM_NOUVEAU_GEM_INFO 0x44
 
 #endif /* __NOUVEAU_DRM_H__ */
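The reworked submission ABI above drops the inline dword stream and the separate _CALL ioctls in favour of one pushbuf request carrying an array of push segments. A minimal userspace sketch, assuming libdrm's drmCommandWriteRead() and hypothetical fd/chan/bos/cmd_bytes values (error handling omitted):

	struct drm_nouveau_gem_pushbuf_push push = {
		.bo_index = 0,		/* index into the buffers array */
		.offset = 0,		/* byte offset of the commands inside that bo */
		.length = cmd_bytes,	/* hypothetical command stream size */
	};
	struct drm_nouveau_gem_pushbuf req = {
		.channel = chan,	/* hypothetical channel id */
		.nr_buffers = nr_bos,
		.buffers = (uintptr_t)bos,	/* array of drm_nouveau_gem_pushbuf_bo */
		.nr_push = 1,
		.push = (uintptr_t)&push,
	};

	drmCommandWriteRead(fd, DRM_NOUVEAU_GEM_PUSHBUF, &req, sizeof(req));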
diff --git a/include/drm/radeon_drm.h b/include/drm/radeon_drm.h
index 39537f3cf98a..81e614bf2dc3 100644
--- a/include/drm/radeon_drm.h
+++ b/include/drm/radeon_drm.h
@@ -808,6 +808,7 @@ struct drm_radeon_gem_create {
 #define RADEON_TILING_SWAP_32BIT 0x8
 #define RADEON_TILING_SURFACE 0x10 /* this object requires a surface
 				    * when mapped - i.e. front buffer */
+#define RADEON_TILING_MICRO_SQUARE 0x20
 
 struct drm_radeon_gem_set_tiling {
 	uint32_t handle;
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index 4c4e0f8375b3..e3f1b4a4b601 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -908,7 +908,7 @@ extern int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
  * Utility function that returns the pgprot_t that should be used for
  * setting up a PTE with the caching model indicated by @c_state.
  */
-extern pgprot_t ttm_io_prot(enum ttm_caching_state c_state, pgprot_t tmp);
+extern pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp);
 
 #if (defined(CONFIG_AGP) || (defined(CONFIG_AGP_MODULE) && defined(MODULE)))
 #define TTM_HAS_AGP
diff --git a/include/linux/audit.h b/include/linux/audit.h
index 3c7a358241a7..f391d45c8aea 100644
--- a/include/linux/audit.h
+++ b/include/linux/audit.h
@@ -424,7 +424,7 @@ extern void audit_syscall_exit(int failed, long return_code);
 extern void __audit_getname(const char *name);
 extern void audit_putname(const char *name);
 extern void __audit_inode(const char *name, const struct dentry *dentry);
-extern void __audit_inode_child(const char *dname, const struct dentry *dentry,
+extern void __audit_inode_child(const struct dentry *dentry,
 				const struct inode *parent);
 extern void __audit_ptrace(struct task_struct *t);
 
@@ -442,11 +442,10 @@ static inline void audit_inode(const char *name, const struct dentry *dentry) {
 	if (unlikely(!audit_dummy_context()))
 		__audit_inode(name, dentry);
 }
-static inline void audit_inode_child(const char *dname,
-				     const struct dentry *dentry,
+static inline void audit_inode_child(const struct dentry *dentry,
 				     const struct inode *parent) {
 	if (unlikely(!audit_dummy_context()))
-		__audit_inode_child(dname, dentry, parent);
+		__audit_inode_child(dentry, parent);
 }
 void audit_core_dumps(long signr);
 
@@ -544,9 +543,9 @@ extern int audit_signals;
 #define audit_getname(n) do { ; } while (0)
 #define audit_putname(n) do { ; } while (0)
 #define __audit_inode(n,d) do { ; } while (0)
-#define __audit_inode_child(d,i,p) do { ; } while (0)
+#define __audit_inode_child(i,p) do { ; } while (0)
 #define audit_inode(n,d) do { ; } while (0)
-#define audit_inode_child(d,i,p) do { ; } while (0)
+#define audit_inode_child(i,p) do { ; } while (0)
 #define audit_core_dumps(i) do { ; } while (0)
 #define auditsc_get_stamp(c,t,s) (0)
 #define audit_get_loginuid(t) (-1)
diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
index 3b73b9992b26..416bf62d6d46 100644
--- a/include/linux/blktrace_api.h
+++ b/include/linux/blktrace_api.h
@@ -150,8 +150,8 @@ struct blk_user_trace_setup {
 struct blk_trace {
 	int trace_state;
 	struct rchan *rchan;
-	unsigned long *sequence;
-	unsigned char *msg_data;
+	unsigned long __percpu *sequence;
+	unsigned char __percpu *msg_data;
 	u16 act_mask;
 	u64 start_lba;
 	u64 end_lba;
diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h
index b10ec49ee2dd..266ab9291232 100644
--- a/include/linux/bootmem.h
+++ b/include/linux/bootmem.h
@@ -23,6 +23,7 @@ extern unsigned long max_pfn;
 extern unsigned long saved_max_pfn;
 #endif
 
+#ifndef CONFIG_NO_BOOTMEM
 /*
  * node_bootmem_map is a map pointer - the bits represent all physical
  * memory pages (including holes) on the node.
@@ -37,6 +38,7 @@ typedef struct bootmem_data {
 } bootmem_data_t;
 
 extern bootmem_data_t bootmem_node_data[];
+#endif
 
 extern unsigned long bootmem_bootmap_pages(unsigned long);
 
@@ -46,6 +48,7 @@ extern unsigned long init_bootmem_node(pg_data_t *pgdat,
 				       unsigned long endpfn);
 extern unsigned long init_bootmem(unsigned long addr, unsigned long memend);
 
+unsigned long free_all_memory_core_early(int nodeid);
 extern unsigned long free_all_bootmem_node(pg_data_t *pgdat);
 extern unsigned long free_all_bootmem(void);
 
@@ -84,6 +87,10 @@ extern void *__alloc_bootmem_node(pg_data_t *pgdat,
 				  unsigned long size,
 				  unsigned long align,
 				  unsigned long goal);
+void *__alloc_bootmem_node_high(pg_data_t *pgdat,
+				unsigned long size,
+				unsigned long align,
+				unsigned long goal);
 extern void *__alloc_bootmem_node_nopanic(pg_data_t *pgdat,
 				  unsigned long size,
 				  unsigned long align,
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 188fcae10a99..a5a472b10746 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -5,7 +5,7 @@
 
 #ifdef __CHECKER__
 # define __user __attribute__((noderef, address_space(1)))
-# define __kernel /* default address space */
+# define __kernel __attribute__((address_space(0)))
 # define __safe __attribute__((safe))
 # define __force __attribute__((force))
 # define __nocast __attribute__((nocast))
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 78784982b33e..20ea12c86fd0 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -31,6 +31,8 @@
  * if dma_cookie_t is >0 it's a DMA request cookie, <0 it's an error code
  */
 typedef s32 dma_cookie_t;
+#define DMA_MIN_COOKIE 1
+#define DMA_MAX_COOKIE INT_MAX
 
 #define dma_submit_error(cookie) ((cookie) < 0 ? 1 : 0)
 
@@ -162,7 +164,7 @@ struct dma_chan {
 	struct dma_chan_dev *dev;
 
 	struct list_head device_node;
-	struct dma_chan_percpu *local;
+	struct dma_chan_percpu __percpu *local;
 	int client_count;
 	int table_count;
 	void *private;
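With explicit bounds on valid cookies, a driver's cookie assignment can wrap without ever producing 0 or a negative value (which dma_submit_error() treats as failure). A hedged sketch, not taken from any in-tree driver:

	static dma_cookie_t next_cookie(dma_cookie_t c)
	{
		/* stay inside [DMA_MIN_COOKIE, DMA_MAX_COOKIE] */
		return (c < DMA_MAX_COOKIE) ? c + 1 : DMA_MIN_COOKIE;
	}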
diff --git a/include/linux/early_res.h b/include/linux/early_res.h
new file mode 100644
index 000000000000..29c09f57a13c
--- /dev/null
+++ b/include/linux/early_res.h
@@ -0,0 +1,23 @@
+#ifndef _LINUX_EARLY_RES_H
+#define _LINUX_EARLY_RES_H
+#ifdef __KERNEL__
+
+extern void reserve_early(u64 start, u64 end, char *name);
+extern void reserve_early_overlap_ok(u64 start, u64 end, char *name);
+extern void free_early(u64 start, u64 end);
+void free_early_partial(u64 start, u64 end);
+extern void early_res_to_bootmem(u64 start, u64 end);
+
+void reserve_early_without_check(u64 start, u64 end, char *name);
+u64 find_early_area(u64 ei_start, u64 ei_last, u64 start, u64 end,
+			 u64 size, u64 align);
+u64 find_early_area_size(u64 ei_start, u64 ei_last, u64 start,
+			 u64 *sizep, u64 align);
+u64 find_fw_memmap_area(u64 start, u64 end, u64 size, u64 align);
+u64 get_max_mapped(void);
+#include <linux/range.h>
+int get_free_all_memory_range(struct range **rangep, int nodeid);
+
+#endif /* __KERNEL__ */
+
+#endif /* _LINUX_EARLY_RES_H */
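Typical usage, sketched with hypothetical addresses: an early-boot caller pins a firmware range before bootmem exists, copies it out, then releases it:

	reserve_early(tbl_start, tbl_start + tbl_size, "FW TABLE");
	/* ... copy or parse the table ... */
	free_early(tbl_start, tbl_start + tbl_size);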
diff --git a/include/linux/ext3_fs.h b/include/linux/ext3_fs.h
index e6590f8f0b3c..cac84b006667 100644
--- a/include/linux/ext3_fs.h
+++ b/include/linux/ext3_fs.h
@@ -894,7 +894,7 @@ int ext3_get_blocks_handle(handle_t *handle, struct inode *inode,
 	int create);
 
 extern struct inode *ext3_iget(struct super_block *, unsigned long);
-extern int ext3_write_inode (struct inode *, int);
+extern int ext3_write_inode (struct inode *, struct writeback_control *);
 extern int ext3_setattr (struct dentry *, struct iattr *);
 extern void ext3_delete_inode (struct inode *);
 extern int ext3_sync_inode (handle_t *, struct inode *);
diff --git a/include/linux/fb.h b/include/linux/fb.h
index 369767bd873e..c10163b4c40e 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -543,6 +543,8 @@ struct fb_cursor_user {
 #define FB_EVENT_GET_REQ 0x0D
 /* Unbind from the console if possible */
 #define FB_EVENT_FB_UNBIND 0x0E
+/* CONSOLE-SPECIFIC: remap all consoles to new fb - for vga switcheroo */
+#define FB_EVENT_REMAP_ALL_CONSOLE 0x0F
 
 struct fb_event {
 	struct fb_info *info;
diff --git a/include/linux/firewire-cdev.h b/include/linux/firewire-cdev.h
index 520ecf86cbb3..40b11013408e 100644
--- a/include/linux/firewire-cdev.h
+++ b/include/linux/firewire-cdev.h
@@ -248,13 +248,20 @@ union fw_cdev_event {
 #define FW_CDEV_IOC_SEND_BROADCAST_REQUEST _IOW('#', 0x12, struct fw_cdev_send_request)
 #define FW_CDEV_IOC_SEND_STREAM_PACKET _IOW('#', 0x13, struct fw_cdev_send_stream_packet)
 
+/* available since kernel version 2.6.34 */
+#define FW_CDEV_IOC_GET_CYCLE_TIMER2 _IOWR('#', 0x14, struct fw_cdev_get_cycle_timer2)
+
 /*
  * FW_CDEV_VERSION History
  *  1 (2.6.22) - initial version
  *  2 (2.6.30) - changed &fw_cdev_event_iso_interrupt.header if
  *               &fw_cdev_create_iso_context.header_size is 8 or more
+ *    (2.6.32) - added time stamp to xmit &fw_cdev_event_iso_interrupt
+ *    (2.6.33) - IR has always packet-per-buffer semantics now, not one of
+ *               dual-buffer or packet-per-buffer depending on hardware
+ *  3 (2.6.34) - made &fw_cdev_get_cycle_timer reliable
  */
-#define FW_CDEV_VERSION 2
+#define FW_CDEV_VERSION 3
 
 /**
  * struct fw_cdev_get_info - General purpose information ioctl
@@ -544,14 +551,18 @@ struct fw_cdev_stop_iso {
 /**
  * struct fw_cdev_get_cycle_timer - read cycle timer register
  * @local_time: system time, in microseconds since the Epoch
- * @cycle_timer: isochronous cycle timer, as per OHCI 1.1 clause 5.13
+ * @cycle_timer: Cycle Time register contents
  *
  * The %FW_CDEV_IOC_GET_CYCLE_TIMER ioctl reads the isochronous cycle timer
- * and also the system clock. This allows to express the receive time of an
- * isochronous packet as a system time with microsecond accuracy.
+ * and also the system clock (%CLOCK_REALTIME). This allows to express the
+ * receive time of an isochronous packet as a system time.
  *
  * @cycle_timer consists of 7 bits cycleSeconds, 13 bits cycleCount, and
- * 12 bits cycleOffset, in host byte order.
+ * 12 bits cycleOffset, in host byte order. Cf. the Cycle Time register
+ * per IEEE 1394 or Isochronous Cycle Timer register per OHCI-1394.
+ *
+ * In version 1 and 2 of the ABI, this ioctl returned unreliable (non-
+ * monotonic) @cycle_timer values on certain controllers.
  */
 struct fw_cdev_get_cycle_timer {
 	__u64 local_time;
@@ -559,6 +570,25 @@ struct fw_cdev_get_cycle_timer {
 };
 
 /**
+ * struct fw_cdev_get_cycle_timer2 - read cycle timer register
+ * @tv_sec: system time, seconds
+ * @tv_nsec: system time, sub-seconds part in nanoseconds
+ * @clk_id: input parameter, clock from which to get the system time
+ * @cycle_timer: Cycle Time register contents
+ *
+ * The %FW_CDEV_IOC_GET_CYCLE_TIMER2 works like
+ * %FW_CDEV_IOC_GET_CYCLE_TIMER but lets you choose a clock like with POSIX'
+ * clock_gettime function. Supported @clk_id values are POSIX' %CLOCK_REALTIME
+ * and %CLOCK_MONOTONIC and Linux' %CLOCK_MONOTONIC_RAW.
+ */
+struct fw_cdev_get_cycle_timer2 {
+	__s64 tv_sec;
+	__s32 tv_nsec;
+	__s32 clk_id;
+	__u32 cycle_timer;
+};
+
+/**
  * struct fw_cdev_allocate_iso_resource - (De)allocate a channel or bandwidth
  * @closure: Passed back to userspace in corresponding iso resource events
 * @channels: Isochronous channels of which one is to be (de)allocated
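From userspace the new ioctl is issued against an open /dev/fw* file descriptor; this sketch (fd hypothetical; <stdio.h>, <sys/ioctl.h>, <time.h> and <linux/firewire-cdev.h> assumed) samples the bus clock against CLOCK_MONOTONIC_RAW:

	struct fw_cdev_get_cycle_timer2 ct = { .clk_id = CLOCK_MONOTONIC_RAW };

	if (ioctl(fd, FW_CDEV_IOC_GET_CYCLE_TIMER2, &ct) == 0)
		printf("sys %lld.%09d bus 0x%08x\n",
		       (long long)ct.tv_sec, (int)ct.tv_nsec, ct.cycle_timer);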
diff --git a/include/linux/firewire.h b/include/linux/firewire.h
index a0e67150a729..4bd94bf5e739 100644
--- a/include/linux/firewire.h
+++ b/include/linux/firewire.h
@@ -65,12 +65,13 @@
 #define CSR_DIRECTORY_ID 0x20
 
 struct fw_csr_iterator {
-	u32 *p;
-	u32 *end;
+	const u32 *p;
+	const u32 *end;
 };
 
-void fw_csr_iterator_init(struct fw_csr_iterator *ci, u32 *p);
+void fw_csr_iterator_init(struct fw_csr_iterator *ci, const u32 *p);
 int fw_csr_iterator_next(struct fw_csr_iterator *ci, int *key, int *value);
+int fw_csr_string(const u32 *directory, int key, char *buf, size_t size);
 
 extern struct bus_type fw_bus_type;
 
@@ -162,7 +163,7 @@ struct fw_device {
 	struct mutex client_list_mutex;
 	struct list_head client_list;
 
-	u32 *config_rom;
+	const u32 *config_rom;
 	size_t config_rom_length;
 	int config_rom_retries;
 	unsigned is_local:1;
@@ -204,7 +205,7 @@ int fw_device_enable_phys_dma(struct fw_device *device);
  */
 struct fw_unit {
 	struct device device;
-	u32 *directory;
+	const u32 *directory;
 	struct fw_attribute_group attribute_group;
 };
 
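A unit driver could use the new helper to read a textual leaf out of its config ROM directory; a hedged sketch, assuming a negative return signals an absent key (CSR_MODEL is an existing key constant in this header, unit is the driver's struct fw_unit):

	char model[32];

	if (fw_csr_string(unit->directory, CSR_MODEL, model, sizeof(model)) >= 0)
		dev_info(&unit->device, "model: %s\n", model);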
diff --git a/include/linux/fs.h b/include/linux/fs.h
index ebb1cd5bc241..45689621a851 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1305,6 +1305,8 @@ extern int send_sigurg(struct fown_struct *fown);
 #define MNT_FORCE 0x00000001 /* Attempt to forcibly umount */
 #define MNT_DETACH 0x00000002 /* Just detach from the tree */
 #define MNT_EXPIRE 0x00000004 /* Mark for expiry */
+#define UMOUNT_NOFOLLOW 0x00000008 /* Don't follow symlink on umount */
+#define UMOUNT_UNUSED 0x80000000 /* Flag guaranteed to be unused */
 
 extern struct list_head super_blocks;
 extern spinlock_t sb_lock;
@@ -1314,9 +1316,9 @@ extern spinlock_t sb_lock;
 struct super_block {
 	struct list_head s_list; /* Keep this first */
 	dev_t s_dev; /* search index; _not_ kdev_t */
-	unsigned long s_blocksize;
-	unsigned char s_blocksize_bits;
 	unsigned char s_dirt;
+	unsigned char s_blocksize_bits;
+	unsigned long s_blocksize;
 	loff_t s_maxbytes; /* Max file size */
 	struct file_system_type *s_type;
 	const struct super_operations *s_op;
@@ -1357,16 +1359,16 @@ struct super_block {
 	void *s_fs_info; /* Filesystem private info */
 	fmode_t s_mode;
 
+	/* Granularity of c/m/atime in ns.
+	   Cannot be worse than a second */
+	u32 s_time_gran;
+
 	/*
 	 * The next field is for VFS *only*. No filesystems have any business
 	 * even looking at it. You had been warned.
 	 */
 	struct mutex s_vfs_rename_mutex; /* Kludge */
 
-	/* Granularity of c/m/atime in ns.
-	   Cannot be worse than a second */
-	u32 s_time_gran;
-
 	/*
 	 * Filesystem subtype. If non-empty the filesystem type field
 	 * in /proc/mounts will be "type.subtype"
@@ -1555,7 +1557,7 @@ struct super_operations {
 	void (*destroy_inode)(struct inode *);
 
 	void (*dirty_inode) (struct inode *);
-	int (*write_inode) (struct inode *, int);
+	int (*write_inode) (struct inode *, struct writeback_control *wbc);
 	void (*drop_inode) (struct inode *);
 	void (*delete_inode) (struct inode *);
 	void (*put_super) (struct super_block *);
@@ -1794,7 +1796,8 @@ extern int may_umount(struct vfsmount *);
 extern long do_mount(char *, char *, char *, unsigned long, void *);
 extern struct vfsmount *collect_mounts(struct path *);
 extern void drop_collected_mounts(struct vfsmount *);
-
+extern int iterate_mounts(int (*)(struct vfsmount *, void *), void *,
+			  struct vfsmount *);
 extern int vfs_statfs(struct dentry *, struct kstatfs *);
 
 extern int current_umask(void);
@@ -2058,12 +2061,6 @@ extern int invalidate_inodes(struct super_block *);
 unsigned long invalidate_mapping_pages(struct address_space *mapping,
 					pgoff_t start, pgoff_t end);
 
-static inline unsigned long __deprecated
-invalidate_inode_pages(struct address_space *mapping)
-{
-	return invalidate_mapping_pages(mapping, 0, ~0UL);
-}
-
 static inline void invalidate_remote_inode(struct inode *inode)
 {
 	if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
@@ -2132,6 +2129,7 @@ extern struct file * open_exec(const char *);
 
 /* fs/dcache.c -- generic fs support functions */
 extern int is_subdir(struct dentry *, struct dentry *);
+extern int path_is_under(struct path *, struct path *);
 extern ino_t find_inode_number(struct dentry *, struct qstr *);
 
 #include <linux/err.h>
@@ -2340,8 +2338,6 @@ extern int simple_rename(struct inode *, struct dentry *, struct inode *, struct
 extern int simple_sync_file(struct file *, struct dentry *, int);
 extern int simple_empty(struct dentry *);
 extern int simple_readpage(struct file *file, struct page *page);
-extern int simple_prepare_write(struct file *file, struct page *page,
-			unsigned offset, unsigned to);
 extern int simple_write_begin(struct file *file, struct address_space *mapping,
 			loff_t pos, unsigned len, unsigned flags,
 			struct page **pagep, void **fsdata);
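Userspace reaches the new flag through umount2(); a hedged sketch, with a fallback define mirroring the hunk above for libc headers that predate it:

	#include <sys/mount.h>

	#ifndef UMOUNT_NOFOLLOW
	#define UMOUNT_NOFOLLOW 0x00000008
	#endif

	if (umount2("/mnt/untrusted", UMOUNT_NOFOLLOW) < 0)
		perror("umount2");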
diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
index 936f9aa8bb97..df8fd9a3b214 100644
--- a/include/linux/fsnotify.h
+++ b/include/linux/fsnotify.h
@@ -65,7 +65,7 @@ static inline void fsnotify_link_count(struct inode *inode)
  * fsnotify_move - file old_name at old_dir was moved to new_name at new_dir
  */
 static inline void fsnotify_move(struct inode *old_dir, struct inode *new_dir,
-				 const char *old_name, const char *new_name,
+				 const char *old_name,
 				 int isdir, struct inode *target, struct dentry *moved)
 {
 	struct inode *source = moved->d_inode;
@@ -73,6 +73,7 @@ static inline void fsnotify_move(struct inode *old_dir, struct inode *new_dir,
 	u32 fs_cookie = fsnotify_get_cookie();
 	__u32 old_dir_mask = (FS_EVENT_ON_CHILD | FS_MOVED_FROM);
 	__u32 new_dir_mask = (FS_EVENT_ON_CHILD | FS_MOVED_TO);
+	const char *new_name = moved->d_name.name;
 
 	if (old_dir == new_dir)
 		old_dir_mask |= FS_DN_RENAME;
@@ -103,7 +104,7 @@ static inline void fsnotify_move(struct inode *old_dir, struct inode *new_dir,
 		inotify_inode_queue_event(source, IN_MOVE_SELF, 0, NULL, NULL);
 		fsnotify(source, FS_MOVE_SELF, moved->d_inode, FSNOTIFY_EVENT_INODE, NULL, 0);
 	}
-	audit_inode_child(new_name, moved, new_dir);
+	audit_inode_child(moved, new_dir);
 }
 
 /*
@@ -146,7 +147,7 @@ static inline void fsnotify_create(struct inode *inode, struct dentry *dentry)
 {
 	inotify_inode_queue_event(inode, IN_CREATE, 0, dentry->d_name.name,
 				  dentry->d_inode);
-	audit_inode_child(dentry->d_name.name, dentry, inode);
+	audit_inode_child(dentry, inode);
 
 	fsnotify(inode, FS_CREATE, dentry->d_inode, FSNOTIFY_EVENT_INODE, dentry->d_name.name, 0);
 }
@@ -161,7 +162,7 @@ static inline void fsnotify_link(struct inode *dir, struct inode *inode, struct
 	inotify_inode_queue_event(dir, IN_CREATE, 0, new_dentry->d_name.name,
 				  inode);
 	fsnotify_link_count(inode);
-	audit_inode_child(new_dentry->d_name.name, new_dentry, dir);
+	audit_inode_child(new_dentry, dir);
 
 	fsnotify(dir, FS_CREATE, inode, FSNOTIFY_EVENT_INODE, new_dentry->d_name.name, 0);
 }
@@ -175,7 +176,7 @@ static inline void fsnotify_mkdir(struct inode *inode, struct dentry *dentry)
 	struct inode *d_inode = dentry->d_inode;
 
 	inotify_inode_queue_event(inode, mask, 0, dentry->d_name.name, d_inode);
-	audit_inode_child(dentry->d_name.name, dentry, inode);
+	audit_inode_child(dentry, inode);
 
 	fsnotify(inode, mask, d_inode, FSNOTIFY_EVENT_INODE, dentry->d_name.name, 0);
 }
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index 9717081c75ad..56b50514ab25 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -101,7 +101,7 @@ struct hd_struct {
 	unsigned long stamp;
 	int in_flight[2];
 #ifdef CONFIG_SMP
-	struct disk_stats *dkstats;
+	struct disk_stats __percpu *dkstats;
 #else
 	struct disk_stats dkstats;
 #endif
diff --git a/include/linux/gfs2_ondisk.h b/include/linux/gfs2_ondisk.h
index 81f90a59cda6..4f4462974c14 100644
--- a/include/linux/gfs2_ondisk.h
+++ b/include/linux/gfs2_ondisk.h
@@ -180,33 +180,6 @@ struct gfs2_rgrp {
 };
 
 /*
- * quota linked list: user quotas and group quotas form two separate
- * singly linked lists. ll_next stores uids or gids of next quotas in the
- * linked list.
-
-Given the uid/gid, how to calculate the quota file offsets for the corresponding
-gfs2_quota structures on disk:
-
-for user quotas, given uid,
-offset = uid * sizeof(struct gfs2_quota);
-
-for group quotas, given gid,
-offset = (gid * sizeof(struct gfs2_quota)) + sizeof(struct gfs2_quota);
-
-
- uid:0 gid:0     uid:12 gid:12     uid:17 gid:17     uid:5142 gid:5142
-+-------+-------+ +-------+-------+ +-------+- - - -+ +- - - -+-------+
-| valid | valid | :: | valid | valid | :: | valid | inval | :: | inval | valid |
-+-------+-------+ +-------+-------+ +-------+- - - -+ +- - - -+-------+
-next:12 next:12 next:17 next:5142 next:NULL next:NULL
- | | | | |<-- user quota list |
- \______|___________/ \______|___________/ group quota list -->|
- | | |
- \__________________/ \_______________________________________/
-
-*/
-
-/*
  * quota structure
  */
 
@@ -214,8 +187,7 @@ struct gfs2_quota {
 	__be64 qu_limit;
 	__be64 qu_warn;
 	__be64 qu_value;
-	__be32 qu_ll_next; /* location of next quota in list */
-	__u8 qu_reserved[60];
+	__u8 qu_reserved[64];
 };
 
 /*
diff --git a/include/linux/i2c-smbus.h b/include/linux/i2c-smbus.h
new file mode 100644
index 000000000000..63f57a8c8b31
--- /dev/null
+++ b/include/linux/i2c-smbus.h
@@ -0,0 +1,50 @@
+/*
+ * i2c-smbus.h - SMBus extensions to the I2C protocol
+ *
+ * Copyright (C) 2010 Jean Delvare <khali@linux-fr.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef _LINUX_I2C_SMBUS_H
+#define _LINUX_I2C_SMBUS_H
+
+#include <linux/i2c.h>
+
+
+/**
+ * i2c_smbus_alert_setup - platform data for the smbus_alert i2c client
+ * @alert_edge_triggered: whether the alert interrupt is edge (1) or level (0)
+ *			  triggered
+ * @irq: IRQ number, if the smbus_alert driver should take care of interrupt
+ *	 handling
+ *
+ * If irq is not specified, the smbus_alert driver doesn't take care of
+ * interrupt handling. In that case it is up to the I2C bus driver to either
+ * handle the interrupts or to poll for alerts.
+ *
+ * If irq is specified then it is crucial that alert_edge_triggered is
+ * properly set.
+ */
+struct i2c_smbus_alert_setup {
+	unsigned int alert_edge_triggered:1;
+	int irq;
+};
+
+struct i2c_client *i2c_setup_smbus_alert(struct i2c_adapter *adapter,
+					 struct i2c_smbus_alert_setup *setup);
+int i2c_handle_smbus_alert(struct i2c_client *ara);
+
+#endif /* _LINUX_I2C_SMBUS_H */
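A bus driver wanting alert support would hand this platform data to the helper at adapter setup time; a sketch with a hypothetical IRQ number and adapter:

	static struct i2c_smbus_alert_setup alert_setup = {
		.alert_edge_triggered = 1,
		.irq = 17,	/* hypothetical interrupt line */
	};

	ara = i2c_setup_smbus_alert(adapter, &alert_setup);
	if (!ara)
		return -ENODEV;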
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index 02fc617782ef..0a5da639b327 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -53,6 +53,7 @@ struct i2c_board_info;
  * on a bus (or read from them). Apart from two basic transfer functions to
  * transmit one message at a time, a more complex version can be used to
  * transmit an arbitrary number of messages without interruption.
+ * @count must be less than 64k since msg.len is u16.
  */
 extern int i2c_master_send(struct i2c_client *client, const char *buf,
 			   int count);
@@ -152,6 +153,13 @@ struct i2c_driver {
 	int (*suspend)(struct i2c_client *, pm_message_t mesg);
 	int (*resume)(struct i2c_client *);
 
+	/* Alert callback, for example for the SMBus alert protocol.
+	 * The format and meaning of the data value depends on the protocol.
+	 * For the SMBus alert protocol, there is a single bit of data passed
+	 * as the alert response's low bit ("event flag").
+	 */
+	void (*alert)(struct i2c_client *, unsigned int data);
+
 	/* a ioctl like command that can be used to perform specific functions
 	 * with the device.
 	 */
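A client driver opts in by filling the new callback; when the smbus_alert helper resolves an alert to this client, the callback runs with the event flag in the low bit of data. A sketch with a hypothetical driver name:

	static void mychip_alert(struct i2c_client *client, unsigned int data)
	{
		dev_info(&client->dev, "SMBus alert, event flag %u\n", data & 1);
	}

	static struct i2c_driver mychip_driver = {
		.driver = { .name = "mychip" },
		.alert = mychip_alert,
	};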
diff --git a/include/linux/ide.h b/include/linux/ide.h
index 0ec612959042..97e6ab435184 100644
--- a/include/linux/ide.h
+++ b/include/linux/ide.h
@@ -515,6 +515,8 @@ struct ide_drive_s {
 	u8 init_speed; /* transfer rate set at boot */
 	u8 current_speed; /* current transfer rate set */
 	u8 desired_speed; /* desired transfer rate set */
+	u8 pio_mode; /* for ->set_pio_mode _only_ */
+	u8 dma_mode; /* for ->dma_pio_mode _only_ */
 	u8 dn; /* now wide spread use */
 	u8 acoustic; /* acoustic management */
 	u8 media; /* disk, cdrom, tape, floppy, ... */
@@ -622,8 +624,8 @@ extern const struct ide_tp_ops default_tp_ops;
  */
 struct ide_port_ops {
 	void (*init_dev)(ide_drive_t *);
-	void (*set_pio_mode)(ide_drive_t *, const u8);
-	void (*set_dma_mode)(ide_drive_t *, const u8);
+	void (*set_pio_mode)(struct hwif_s *, ide_drive_t *);
+	void (*set_dma_mode)(struct hwif_s *, ide_drive_t *);
 	int (*reset_poll)(ide_drive_t *);
 	void (*pre_reset)(ide_drive_t *);
 	void (*resetproc)(ide_drive_t *);
@@ -1494,7 +1496,6 @@ int ide_timing_compute(ide_drive_t *, u8, struct ide_timing *, int, int);
 #ifdef CONFIG_IDE_XFER_MODE
 int ide_scan_pio_blacklist(char *);
 const char *ide_xfer_verbose(u8);
-u8 ide_get_best_pio_mode(ide_drive_t *, u8, u8);
 int ide_pio_need_iordy(ide_drive_t *, const u8);
 int ide_set_pio_mode(ide_drive_t *, u8);
 int ide_set_dma_mode(ide_drive_t *, u8);
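A host driver converted to the new hooks reads the requested mode from the drive instead of a parameter; a sketch with a hypothetical driver body (XFER_PIO_0 is the existing base constant):

	static void mydrv_set_pio_mode(struct hwif_s *hwif, ide_drive_t *drive)
	{
		const u8 pio = drive->pio_mode - XFER_PIO_0;

		/* program hwif's timing registers for PIO mode 'pio' */
	}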
diff --git a/include/linux/irq.h b/include/linux/irq.h
index d13492df57a1..707ab122e2e6 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -400,7 +400,9 @@ static inline int irq_has_action(unsigned int irq)
 
 /* Dynamic irq helper functions */
 extern void dynamic_irq_init(unsigned int irq);
+void dynamic_irq_init_keep_chip_data(unsigned int irq);
 extern void dynamic_irq_cleanup(unsigned int irq);
+void dynamic_irq_cleanup_keep_chip_data(unsigned int irq);
 
 /* Set/get chip/data for an IRQ: */
 extern int set_irq_chip(unsigned int irq, struct irq_chip *chip);
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
index 4cf619161ed0..1ec876358180 100644
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -69,15 +69,8 @@ extern u8 jbd2_journal_enable_debug;
 #define jbd_debug(f, a...) /**/
 #endif
 
-static inline void *jbd2_alloc(size_t size, gfp_t flags)
-{
-	return (void *)__get_free_pages(flags, get_order(size));
-}
-
-static inline void jbd2_free(void *ptr, size_t size)
-{
-	free_pages((unsigned long)ptr, get_order(size));
-};
+extern void *jbd2_alloc(size_t size, gfp_t flags);
+extern void jbd2_free(void *ptr, size_t size);
 
 #define JBD2_MIN_JOURNAL_BLOCKS 1024
 
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 1221d2331a6d..7f0707463360 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -44,6 +44,16 @@ extern const char linux_proc_banner[];
 
 #define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr))
 
+/*
+ * This looks more complex than it should be. But we need to
+ * get the type for the ~ right in round_down (it needs to be
+ * as wide as the result!), and we want to evaluate the macro
+ * arguments just once each.
+ */
+#define __round_mask(x, y) ((__typeof__(x))((y)-1))
+#define round_up(x, y) ((((x)-1) | __round_mask(x, y))+1)
+#define round_down(x, y) ((x) & ~__round_mask(x, y))
+
 #define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f))
 #define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))
 #define roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y))
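Unlike roundup() just below, the new macros require y to be a power of two; a quick worked example:

	round_up(1000, 256)	/* ((1000 - 1) | 255) + 1 == 1024 */
	round_down(1000, 256)	/* 1000 & ~255 == 768 */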
diff --git a/include/linux/kexec.h b/include/linux/kexec.h
index c356b6914ffd..03e8e8dbc577 100644
--- a/include/linux/kexec.h
+++ b/include/linux/kexec.h
@@ -199,7 +199,7 @@ extern struct kimage *kexec_crash_image;
  */
 extern struct resource crashk_res;
 typedef u32 note_buf_t[KEXEC_NOTE_BYTES/4];
-extern note_buf_t *crash_notes;
+extern note_buf_t __percpu *crash_notes;
 extern u32 vmcoreinfo_note[VMCOREINFO_NOTE_SIZE/4];
 extern size_t vmcoreinfo_size;
 extern size_t vmcoreinfo_max_size;
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index 1b672f74a32f..e7d1b2e0070d 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -122,6 +122,11 @@ struct kprobe {
 /* Kprobe status flags */
 #define KPROBE_FLAG_GONE 1 /* breakpoint has already gone */
 #define KPROBE_FLAG_DISABLED 2 /* probe is temporarily disabled */
+#define KPROBE_FLAG_OPTIMIZED 4 /*
+				 * probe is really optimized.
+				 * NOTE:
+				 * this flag is only for optimized_kprobe.
+				 */
 
 /* Has this kprobe gone ? */
 static inline int kprobe_gone(struct kprobe *p)
@@ -134,6 +139,12 @@ static inline int kprobe_disabled(struct kprobe *p)
 {
 	return p->flags & (KPROBE_FLAG_DISABLED | KPROBE_FLAG_GONE);
 }
+
+/* Is this kprobe really running optimized path ? */
+static inline int kprobe_optimized(struct kprobe *p)
+{
+	return p->flags & KPROBE_FLAG_OPTIMIZED;
+}
 /*
  * Special probe type that uses setjmp-longjmp type tricks to resume
  * execution at a specified entry with a matching prototype corresponding
@@ -249,6 +260,39 @@ extern kprobe_opcode_t *get_insn_slot(void);
 extern void free_insn_slot(kprobe_opcode_t *slot, int dirty);
 extern void kprobes_inc_nmissed_count(struct kprobe *p);
 
+#ifdef CONFIG_OPTPROBES
+/*
+ * Internal structure for direct jump optimized probe
+ */
+struct optimized_kprobe {
+	struct kprobe kp;
+	struct list_head list; /* list for optimizing queue */
+	struct arch_optimized_insn optinsn;
+};
+
+/* Architecture dependent functions for direct jump optimization */
+extern int arch_prepared_optinsn(struct arch_optimized_insn *optinsn);
+extern int arch_check_optimized_kprobe(struct optimized_kprobe *op);
+extern int arch_prepare_optimized_kprobe(struct optimized_kprobe *op);
+extern void arch_remove_optimized_kprobe(struct optimized_kprobe *op);
+extern int arch_optimize_kprobe(struct optimized_kprobe *op);
+extern void arch_unoptimize_kprobe(struct optimized_kprobe *op);
+extern kprobe_opcode_t *get_optinsn_slot(void);
+extern void free_optinsn_slot(kprobe_opcode_t *slot, int dirty);
+extern int arch_within_optimized_kprobe(struct optimized_kprobe *op,
+					unsigned long addr);
+
+extern void opt_pre_handler(struct kprobe *p, struct pt_regs *regs);
+
+#ifdef CONFIG_SYSCTL
+extern int sysctl_kprobes_optimization;
+extern int proc_kprobes_optimization_handler(struct ctl_table *table,
+					     int write, void __user *buffer,
+					     size_t *length, loff_t *ppos);
+#endif
+
+#endif /* CONFIG_OPTPROBES */
+
 /* Get the kprobe at this addr (if any) - called with preemption disabled */
 struct kprobe *get_kprobe(void *addr);
 void kretprobe_hash_lock(struct task_struct *tsk,
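A module could use the new helper to report whether its probe ended up on the jump-optimized path; note that optimization is applied asynchronously, so a check made right after registration may still read false. A sketch with a hypothetical probe target:

	static struct kprobe kp = { .symbol_name = "do_fork" };

	if (register_kprobe(&kp) == 0 && kprobe_optimized(&kp))
		pr_info("kprobe at %p is jump-optimized\n", kp.addr);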
diff --git a/include/linux/kvm.h b/include/linux/kvm.h
index a24de0b1858e..60df9c84ecae 100644
--- a/include/linux/kvm.h
+++ b/include/linux/kvm.h
@@ -103,7 +103,7 @@ struct kvm_userspace_memory_region {
 
 /* for kvm_memory_region::flags */
 #define KVM_MEM_LOG_DIRTY_PAGES 1UL
-
+#define KVM_MEMSLOT_INVALID (1UL << 1)
 
 /* for KVM_IRQ_LINE */
 struct kvm_irq_level {
@@ -497,6 +497,11 @@ struct kvm_ioeventfd {
 #endif
 #define KVM_CAP_S390_PSW 42
 #define KVM_CAP_PPC_SEGSTATE 43
+#define KVM_CAP_HYPERV 44
+#define KVM_CAP_HYPERV_VAPIC 45
+#define KVM_CAP_HYPERV_SPIN 46
+#define KVM_CAP_PCI_SEGMENT 47
+#define KVM_CAP_X86_ROBUST_SINGLESTEP 51
 
 #ifdef KVM_CAP_IRQ_ROUTING
 
@@ -691,8 +696,9 @@ struct kvm_assigned_pci_dev {
 	__u32 busnr;
 	__u32 devfn;
 	__u32 flags;
+	__u32 segnr;
 	union {
-		__u32 reserved[12];
+		__u32 reserved[11];
 	};
 };
 
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index bd5a616d9373..a3fd0f91d943 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -38,6 +38,7 @@
 #define KVM_REQ_MMU_SYNC 7
 #define KVM_REQ_KVMCLOCK_UPDATE 8
 #define KVM_REQ_KICK 9
+#define KVM_REQ_DEACTIVATE_FPU 10
 
 #define KVM_USERSPACE_IRQ_SOURCE_ID 0
 
@@ -57,20 +58,20 @@ struct kvm_io_bus {
 	struct kvm_io_device *devs[NR_IOBUS_DEVS];
 };
 
-void kvm_io_bus_init(struct kvm_io_bus *bus);
-void kvm_io_bus_destroy(struct kvm_io_bus *bus);
-int kvm_io_bus_write(struct kvm_io_bus *bus, gpa_t addr, int len,
-		     const void *val);
-int kvm_io_bus_read(struct kvm_io_bus *bus, gpa_t addr, int len,
+enum kvm_bus {
+	KVM_MMIO_BUS,
+	KVM_PIO_BUS,
+	KVM_NR_BUSES
+};
+
+int kvm_io_bus_write(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
+		     int len, const void *val);
+int kvm_io_bus_read(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, int len,
 		    void *val);
-int __kvm_io_bus_register_dev(struct kvm_io_bus *bus,
-			      struct kvm_io_device *dev);
-int kvm_io_bus_register_dev(struct kvm *kvm, struct kvm_io_bus *bus,
+int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx,
 			    struct kvm_io_device *dev);
-void __kvm_io_bus_unregister_dev(struct kvm_io_bus *bus,
+int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
 			      struct kvm_io_device *dev);
-void kvm_io_bus_unregister_dev(struct kvm *kvm, struct kvm_io_bus *bus,
-			       struct kvm_io_device *dev);
 
 struct kvm_vcpu {
 	struct kvm *kvm;
@@ -83,6 +84,8 @@ struct kvm_vcpu {
 	struct kvm_run *run;
 	unsigned long requests;
 	unsigned long guest_debug;
+	int srcu_idx;
+
 	int fpu_active;
 	int guest_fpu_loaded;
 	wait_queue_head_t wq;
@@ -150,14 +153,19 @@ struct kvm_irq_routing_table {};
 
 #endif
 
-struct kvm {
-	spinlock_t mmu_lock;
-	spinlock_t requests_lock;
-	struct rw_semaphore slots_lock;
-	struct mm_struct *mm; /* userspace tied to this vm */
+struct kvm_memslots {
 	int nmemslots;
 	struct kvm_memory_slot memslots[KVM_MEMORY_SLOTS +
 					KVM_PRIVATE_MEM_SLOTS];
+};
+
+struct kvm {
+	spinlock_t mmu_lock;
+	raw_spinlock_t requests_lock;
+	struct mutex slots_lock;
+	struct mm_struct *mm; /* userspace tied to this vm */
+	struct kvm_memslots *memslots;
+	struct srcu_struct srcu;
 #ifdef CONFIG_KVM_APIC_ARCHITECTURE
 	u32 bsp_vcpu_id;
 	struct kvm_vcpu *bsp_vcpu;
@@ -166,8 +174,7 @@ struct kvm {
 	atomic_t online_vcpus;
 	struct list_head vm_list;
 	struct mutex lock;
-	struct kvm_io_bus mmio_bus;
-	struct kvm_io_bus pio_bus;
+	struct kvm_io_bus *buses[KVM_NR_BUSES];
 #ifdef CONFIG_HAVE_KVM_EVENTFD
 	struct {
 		spinlock_t lock;
@@ -249,13 +256,20 @@ int kvm_set_memory_region(struct kvm *kvm,
 int __kvm_set_memory_region(struct kvm *kvm,
 			    struct kvm_userspace_memory_region *mem,
 			    int user_alloc);
-int kvm_arch_set_memory_region(struct kvm *kvm,
+int kvm_arch_prepare_memory_region(struct kvm *kvm,
+				struct kvm_memory_slot *memslot,
+				struct kvm_memory_slot old,
+				struct kvm_userspace_memory_region *mem,
+				int user_alloc);
+void kvm_arch_commit_memory_region(struct kvm *kvm,
 				struct kvm_userspace_memory_region *mem,
 				struct kvm_memory_slot old,
 				int user_alloc);
 void kvm_disable_largepages(void);
 void kvm_arch_flush_shadow(struct kvm *kvm);
 gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn);
+gfn_t unalias_gfn_instantiation(struct kvm *kvm, gfn_t gfn);
+
 struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
 unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
 void kvm_release_page_clean(struct page *page);
@@ -264,6 +278,9 @@ void kvm_set_page_dirty(struct page *page);
 void kvm_set_page_accessed(struct page *page);
 
 pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
+pfn_t gfn_to_pfn_memslot(struct kvm *kvm,
+			 struct kvm_memory_slot *slot, gfn_t gfn);
+int memslot_id(struct kvm *kvm, gfn_t gfn);
 void kvm_release_pfn_dirty(pfn_t);
 void kvm_release_pfn_clean(pfn_t pfn);
 void kvm_set_pfn_dirty(pfn_t pfn);
@@ -283,6 +300,7 @@ int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
 int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
 struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
 int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn);
+unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn);
 void mark_page_dirty(struct kvm *kvm, gfn_t gfn);
 
 void kvm_vcpu_block(struct kvm_vcpu *vcpu);
@@ -383,6 +401,7 @@ struct kvm_assigned_dev_kernel {
 	struct work_struct interrupt_work;
 	struct list_head list;
 	int assigned_dev_id;
+	int host_segnr;
 	int host_busnr;
 	int host_devfn;
 	unsigned int entries_nr;
@@ -429,8 +448,7 @@ void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id);
 #define KVM_IOMMU_CACHE_COHERENCY 0x1
 
 #ifdef CONFIG_IOMMU_API
-int kvm_iommu_map_pages(struct kvm *kvm, gfn_t base_gfn,
-			unsigned long npages);
+int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot);
 int kvm_iommu_map_guest(struct kvm *kvm);
 int kvm_iommu_unmap_guest(struct kvm *kvm);
 int kvm_assign_device(struct kvm *kvm,
@@ -480,11 +498,6 @@ static inline void kvm_guest_exit(void)
 	current->flags &= ~PF_VCPU;
 }
 
-static inline int memslot_id(struct kvm *kvm, struct kvm_memory_slot *slot)
-{
-	return slot - kvm->memslots;
-}
-
 static inline gpa_t gfn_to_gpa(gfn_t gfn)
 {
 	return (gpa_t)gfn << PAGE_SHIFT;
@@ -532,6 +545,10 @@ static inline int mmu_notifier_retry(struct kvm_vcpu *vcpu, unsigned long mmu_se
 }
 #endif
 
+#ifndef KVM_ARCH_HAS_UNALIAS_INSTANTIATION
+#define unalias_gfn_instantiation unalias_gfn
+#endif
+
 #ifdef CONFIG_HAVE_KVM_IRQCHIP
 
 #define KVM_MAX_IRQ_ROUTES 1024
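Under the reworked API an emulated device registers against a bus index of a specific VM rather than against a bus object; a hedged sketch (dev is a hypothetical, already-initialized struct kvm_io_device, and the callers in this series hold kvm->slots_lock around registration):

	mutex_lock(&kvm->slots_lock);
	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, dev);
	mutex_unlock(&kvm->slots_lock);

	/* later, at emulation time */
	kvm_io_bus_write(kvm, KVM_MMIO_BUS, addr, len, val);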
diff --git a/include/linux/magic.h b/include/linux/magic.h
index 76285e01b39e..eb9800f05782 100644
--- a/include/linux/magic.h
+++ b/include/linux/magic.h
@@ -52,7 +52,6 @@
 #define CGROUP_SUPER_MAGIC 0x27e0eb
 
 #define FUTEXFS_SUPER_MAGIC 0xBAD1DEA
-#define INOTIFYFS_SUPER_MAGIC 0x2BAD1DEA
 
 #define STACK_END_MAGIC 0x57AC6E9D
 
diff --git a/include/linux/mfd/mc13783.h b/include/linux/mfd/mc13783.h
index 35680409b8cf..94cb51a64037 100644
--- a/include/linux/mfd/mc13783.h
+++ b/include/linux/mfd/mc13783.h
@@ -108,6 +108,8 @@ int mc13783_adc_do_conversion(struct mc13783 *mc13783, unsigned int mode,
 #define MC13783_REGU_V2 28
 #define MC13783_REGU_V3 29
 #define MC13783_REGU_V4 30
+#define MC13783_REGU_PWGT1SPI 31
+#define MC13783_REGU_PWGT2SPI 32
 
 #define MC13783_IRQ_ADCDONE 0
 #define MC13783_IRQ_ADCBISDONE 1
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 8b2fa8593c61..90957f14195c 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -12,6 +12,7 @@
 #include <linux/prio_tree.h>
 #include <linux/debug_locks.h>
 #include <linux/mm_types.h>
+#include <linux/range.h>
 
 struct mempolicy;
 struct anon_vma;
@@ -1049,6 +1050,10 @@ extern void get_pfn_range_for_nid(unsigned int nid,
 extern unsigned long find_min_pfn_with_active_regions(void);
 extern void free_bootmem_with_active_regions(int nid,
 						unsigned long max_low_pfn);
+int add_from_early_node_map(struct range *range, int az,
+				   int nr_range, int nid);
+void *__alloc_memory_core_early(int nodeid, u64 size, u64 align,
+				u64 goal, u64 limit);
 typedef int (*work_fn_t)(unsigned long, unsigned long, void *);
 extern void work_with_active_regions(int nid, work_fn_t work_fn, void *data);
 extern void sparse_memory_present_with_active_regions(int nid);
@@ -1081,11 +1086,7 @@ extern void si_meminfo(struct sysinfo * val);
 extern void si_meminfo_node(struct sysinfo *val, int nid);
 extern int after_bootmem;
 
-#ifdef CONFIG_NUMA
 extern void setup_per_cpu_pageset(void);
-#else
-static inline void setup_per_cpu_pageset(void) {}
-#endif
 
 extern void zone_pcp_update(struct zone *zone);
 
@@ -1321,12 +1322,19 @@ extern int randomize_va_space;
 const char * arch_vma_name(struct vm_area_struct *vma);
 void print_vma_addr(char *prefix, unsigned long rip);
 
+void sparse_mem_maps_populate_node(struct page **map_map,
+				   unsigned long pnum_begin,
+				   unsigned long pnum_end,
+				   unsigned long map_count,
+				   int nodeid);
+
 struct page *sparse_mem_map_populate(unsigned long pnum, int nid);
 pgd_t *vmemmap_pgd_populate(unsigned long addr, int node);
 pud_t *vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node);
 pmd_t *vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node);
 pte_t *vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node);
 void *vmemmap_alloc_block(unsigned long size, int node);
+void *vmemmap_alloc_block_buf(unsigned long size, int node);
 void vmemmap_verify(pte_t *, int, unsigned long, unsigned long);
 int vmemmap_populate_basepages(struct page *start_page,
 						unsigned long pages, int node);
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 30fe668c2542..a01a103341bd 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -184,13 +184,7 @@ struct per_cpu_pageset {
184 s8 stat_threshold; 184 s8 stat_threshold;
185 s8 vm_stat_diff[NR_VM_ZONE_STAT_ITEMS]; 185 s8 vm_stat_diff[NR_VM_ZONE_STAT_ITEMS];
186#endif 186#endif
187} ____cacheline_aligned_in_smp; 187};
188
189#ifdef CONFIG_NUMA
190#define zone_pcp(__z, __cpu) ((__z)->pageset[(__cpu)])
191#else
192#define zone_pcp(__z, __cpu) (&(__z)->pageset[(__cpu)])
193#endif
194 188
195#endif /* !__GENERATING_BOUNDS.H */ 189#endif /* !__GENERATING_BOUNDS.H */
196 190
@@ -306,10 +300,8 @@ struct zone {
306 */ 300 */
307 unsigned long min_unmapped_pages; 301 unsigned long min_unmapped_pages;
308 unsigned long min_slab_pages; 302 unsigned long min_slab_pages;
309 struct per_cpu_pageset *pageset[NR_CPUS];
310#else
311 struct per_cpu_pageset pageset[NR_CPUS];
312#endif 303#endif
304 struct per_cpu_pageset __percpu *pageset;
313 /* 305 /*
314 * free areas of different sizes 306 * free areas of different sizes
315 */ 307 */
@@ -620,7 +612,9 @@ typedef struct pglist_data {
620 struct page_cgroup *node_page_cgroup; 612 struct page_cgroup *node_page_cgroup;
621#endif 613#endif
622#endif 614#endif
615#ifndef CONFIG_NO_BOOTMEM
623 struct bootmem_data *bdata; 616 struct bootmem_data *bdata;
617#endif
624#ifdef CONFIG_MEMORY_HOTPLUG 618#ifdef CONFIG_MEMORY_HOTPLUG
625 /* 619 /*
626 * Must be held any time you expect node_start_pfn, node_present_pages 620 * Must be held any time you expect node_start_pfn, node_present_pages
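
With the zone pageset converted to a __percpu pointer above, the removed zone_pcp() accessors give way to the generic percpu API. A hedged sketch of how a caller might reach a given CPU's pageset (the helper name is illustrative):

#include <linux/mmzone.h>
#include <linux/percpu.h>

static struct per_cpu_pageset *zone_pageset(struct zone *zone, int cpu)
{
	/* per_cpu_ptr() adds the per-CPU offset to the __percpu base */
	return per_cpu_ptr(zone->pageset, cpu);
}
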
diff --git a/include/linux/mnt_namespace.h b/include/linux/mnt_namespace.h
index d74785c2393a..0b89efc6f215 100644
--- a/include/linux/mnt_namespace.h
+++ b/include/linux/mnt_namespace.h
@@ -35,6 +35,7 @@ static inline void get_mnt_ns(struct mnt_namespace *ns)
35extern const struct seq_operations mounts_op; 35extern const struct seq_operations mounts_op;
36extern const struct seq_operations mountinfo_op; 36extern const struct seq_operations mountinfo_op;
37extern const struct seq_operations mountstats_op; 37extern const struct seq_operations mountstats_op;
38extern int mnt_had_events(struct proc_mounts *);
38 39
39#endif 40#endif
40#endif 41#endif
diff --git a/include/linux/module.h b/include/linux/module.h
index 6cb1a3cab5d3..dd618eb026aa 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -17,7 +17,7 @@
17#include <linux/moduleparam.h> 17#include <linux/moduleparam.h>
18#include <linux/tracepoint.h> 18#include <linux/tracepoint.h>
19 19
20#include <asm/local.h> 20#include <linux/percpu.h>
21#include <asm/module.h> 21#include <asm/module.h>
22 22
23#include <trace/events/module.h> 23#include <trace/events/module.h>
@@ -363,11 +363,9 @@ struct module
363 /* Destruction function. */ 363 /* Destruction function. */
364 void (*exit)(void); 364 void (*exit)(void);
365 365
366#ifdef CONFIG_SMP 366 struct module_ref {
367 char *refptr; 367 int count;
368#else 368 } __percpu *refptr;
369 local_t ref;
370#endif
371#endif 369#endif
372 370
373#ifdef CONFIG_CONSTRUCTORS 371#ifdef CONFIG_CONSTRUCTORS
@@ -454,25 +452,16 @@ void __symbol_put(const char *symbol);
454#define symbol_put(x) __symbol_put(MODULE_SYMBOL_PREFIX #x) 452#define symbol_put(x) __symbol_put(MODULE_SYMBOL_PREFIX #x)
455void symbol_put_addr(void *addr); 453void symbol_put_addr(void *addr);
456 454
457static inline local_t *__module_ref_addr(struct module *mod, int cpu)
458{
459#ifdef CONFIG_SMP
460 return (local_t *) (mod->refptr + per_cpu_offset(cpu));
461#else
462 return &mod->ref;
463#endif
464}
465
466/* Sometimes we know we already have a refcount, and it's easier not 455/* Sometimes we know we already have a refcount, and it's easier not
467 to handle the error case (which only happens with rmmod --wait). */ 456 to handle the error case (which only happens with rmmod --wait). */
468static inline void __module_get(struct module *module) 457static inline void __module_get(struct module *module)
469{ 458{
470 if (module) { 459 if (module) {
471 unsigned int cpu = get_cpu(); 460 preempt_disable();
472 local_inc(__module_ref_addr(module, cpu)); 461 __this_cpu_inc(module->refptr->count);
473 trace_module_get(module, _THIS_IP_, 462 trace_module_get(module, _THIS_IP_,
474 local_read(__module_ref_addr(module, cpu))); 463 __this_cpu_read(module->refptr->count));
475 put_cpu(); 464 preempt_enable();
476 } 465 }
477} 466}
478 467
@@ -481,15 +470,17 @@ static inline int try_module_get(struct module *module)
481 int ret = 1; 470 int ret = 1;
482 471
483 if (module) { 472 if (module) {
484 unsigned int cpu = get_cpu(); 473 preempt_disable();
474
485 if (likely(module_is_live(module))) { 475 if (likely(module_is_live(module))) {
486 local_inc(__module_ref_addr(module, cpu)); 476 __this_cpu_inc(module->refptr->count);
487 trace_module_get(module, _THIS_IP_, 477 trace_module_get(module, _THIS_IP_,
488 local_read(__module_ref_addr(module, cpu))); 478 __this_cpu_read(module->refptr->count));
489 } 479 }
490 else 480 else
491 ret = 0; 481 ret = 0;
492 put_cpu(); 482
483 preempt_enable();
493 } 484 }
494 return ret; 485 return ret;
495} 486}
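
The caller-side pattern is unchanged by the percpu refcount conversion above; only the bookkeeping behind try_module_get()/module_put() moved to __this_cpu_inc(). A hedged sketch, where do_work() is a hypothetical operation performed while the module is pinned:

#include <linux/errno.h>
#include <linux/module.h>

extern int do_work(void);	/* hypothetical */

static int use_facility(struct module *owner)
{
	int err;

	if (!try_module_get(owner))	/* fails while owner is unloading */
		return -ENODEV;

	err = do_work();		/* safe: owner cannot go away here */

	module_put(owner);		/* drops the per-CPU reference */
	return err;
}
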
diff --git a/include/linux/mount.h b/include/linux/mount.h
index 5d5275364867..4bd05474d11d 100644
--- a/include/linux/mount.h
+++ b/include/linux/mount.h
@@ -34,7 +34,18 @@ struct mnt_namespace;
34 34
35#define MNT_SHARED 0x1000 /* if the vfsmount is a shared mount */ 35#define MNT_SHARED 0x1000 /* if the vfsmount is a shared mount */
36#define MNT_UNBINDABLE 0x2000 /* if the vfsmount is an unbindable mount */ 36#define MNT_UNBINDABLE 0x2000 /* if the vfsmount is an unbindable mount */ 37/*
37#define MNT_PNODE_MASK 0x3000 /* propagation flag mask */ 37/*
38 * MNT_SHARED_MASK is the set of flags that should be cleared when a
39 * mount becomes shared. Currently, this is only the flag that says a
40 * mount cannot be bind mounted, since this is how we create a mount
41 * that shares events with another mount. If you add a new MNT_*
42 * flag, consider how it interacts with shared mounts.
43 */
44#define MNT_SHARED_MASK (MNT_UNBINDABLE)
45#define MNT_PROPAGATION_MASK (MNT_SHARED | MNT_UNBINDABLE)
46
47
48#define MNT_INTERNAL 0x4000
38 49
39struct vfsmount { 50struct vfsmount {
40 struct list_head mnt_hash; 51 struct list_head mnt_hash;
@@ -66,7 +77,7 @@ struct vfsmount {
66 int mnt_pinned; 77 int mnt_pinned;
67 int mnt_ghosts; 78 int mnt_ghosts;
68#ifdef CONFIG_SMP 79#ifdef CONFIG_SMP
69 int *mnt_writers; 80 int __percpu *mnt_writers;
70#else 81#else
71 int mnt_writers; 82 int mnt_writers;
72#endif 83#endif
@@ -123,7 +134,6 @@ extern int do_add_mount(struct vfsmount *newmnt, struct path *path,
123 134
124extern void mark_mounts_for_expiry(struct list_head *mounts); 135extern void mark_mounts_for_expiry(struct list_head *mounts);
125 136
126extern spinlock_t vfsmount_lock;
127extern dev_t name_to_dev_t(char *name); 137extern dev_t name_to_dev_t(char *name);
128 138
129#endif /* _LINUX_MOUNT_H */ 139#endif /* _LINUX_MOUNT_H */
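
The new MNT_SHARED_MASK exists so that marking a mount shared also strips flags incompatible with sharing; a sketch along the lines of what fs/pnode.h does:

static inline void set_mnt_shared(struct vfsmount *mnt)
{
	mnt->mnt_flags &= ~MNT_SHARED_MASK;	/* clear MNT_UNBINDABLE */
	mnt->mnt_flags |= MNT_SHARED;
}
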
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index 34fc6be5bfcf..6a2e44fd75e2 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -105,7 +105,7 @@ struct nfs_server {
105 struct rpc_clnt * client; /* RPC client handle */ 105 struct rpc_clnt * client; /* RPC client handle */
106 struct rpc_clnt * client_acl; /* ACL RPC client handle */ 106 struct rpc_clnt * client_acl; /* ACL RPC client handle */
107 struct nlm_host *nlm_host; /* NLM client handle */ 107 struct nlm_host *nlm_host; /* NLM client handle */
108 struct nfs_iostats * io_stats; /* I/O statistics */ 108 struct nfs_iostats __percpu *io_stats; /* I/O statistics */
109 struct backing_dev_info backing_dev_info; 109 struct backing_dev_info backing_dev_info;
110 atomic_long_t writeback; /* number of writeback pages */ 110 atomic_long_t writeback; /* number of writeback pages */
111 int flags; /* various flags */ 111 int flags; /* various flags */
diff --git a/include/linux/nilfs2_fs.h b/include/linux/nilfs2_fs.h
index 3fe02cf8b65a..640702e97457 100644
--- a/include/linux/nilfs2_fs.h
+++ b/include/linux/nilfs2_fs.h
@@ -153,6 +153,7 @@ struct nilfs_super_root {
153 semantics also for data */ 153 semantics also for data */
154#define NILFS_MOUNT_NORECOVERY 0x4000 /* Disable write access during 154#define NILFS_MOUNT_NORECOVERY 0x4000 /* Disable write access during
155 mount-time recovery */ 155 mount-time recovery */
156#define NILFS_MOUNT_DISCARD 0x8000 /* Issue DISCARD requests */
156 157
157 158
158/** 159/**
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 0be824320580..9f688d243b86 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -770,7 +770,6 @@
770#define PCI_VENDOR_ID_TI 0x104c 770#define PCI_VENDOR_ID_TI 0x104c
771#define PCI_DEVICE_ID_TI_TVP4020 0x3d07 771#define PCI_DEVICE_ID_TI_TVP4020 0x3d07
772#define PCI_DEVICE_ID_TI_4450 0x8011 772#define PCI_DEVICE_ID_TI_4450 0x8011
773#define PCI_DEVICE_ID_TI_TSB43AB22 0x8023
774#define PCI_DEVICE_ID_TI_XX21_XX11 0x8031 773#define PCI_DEVICE_ID_TI_XX21_XX11 0x8031
775#define PCI_DEVICE_ID_TI_XX21_XX11_FM 0x8033 774#define PCI_DEVICE_ID_TI_XX21_XX11_FM 0x8033
776#define PCI_DEVICE_ID_TI_XX21_XX11_SD 0x8034 775#define PCI_DEVICE_ID_TI_XX21_XX11_SD 0x8034
@@ -2333,6 +2332,8 @@
2333#define PCI_VENDOR_ID_KORENIX 0x1982 2332#define PCI_VENDOR_ID_KORENIX 0x1982
2334#define PCI_DEVICE_ID_KORENIX_JETCARDF0 0x1600 2333#define PCI_DEVICE_ID_KORENIX_JETCARDF0 0x1600
2335#define PCI_DEVICE_ID_KORENIX_JETCARDF1 0x16ff 2334#define PCI_DEVICE_ID_KORENIX_JETCARDF1 0x16ff
2335#define PCI_DEVICE_ID_KORENIX_JETCARDF2 0x1700
2336#define PCI_DEVICE_ID_KORENIX_JETCARDF3 0x17ff
2336 2337
2337#define PCI_VENDOR_ID_QMI 0x1a32 2338#define PCI_VENDOR_ID_QMI 0x1a32
2338 2339
@@ -2697,6 +2698,7 @@
2697#define PCI_DEVICE_ID_NETMOS_9835 0x9835 2698#define PCI_DEVICE_ID_NETMOS_9835 0x9835
2698#define PCI_DEVICE_ID_NETMOS_9845 0x9845 2699#define PCI_DEVICE_ID_NETMOS_9845 0x9845
2699#define PCI_DEVICE_ID_NETMOS_9855 0x9855 2700#define PCI_DEVICE_ID_NETMOS_9855 0x9855
2701#define PCI_DEVICE_ID_NETMOS_9865 0x9865
2700#define PCI_DEVICE_ID_NETMOS_9901 0x9901 2702#define PCI_DEVICE_ID_NETMOS_9901 0x9901
2701 2703
2702#define PCI_VENDOR_ID_3COM_2 0xa727 2704#define PCI_VENDOR_ID_3COM_2 0xa727
diff --git a/include/linux/percpu-defs.h b/include/linux/percpu-defs.h
index 5a5d6ce4bd55..68567c0b3a5d 100644
--- a/include/linux/percpu-defs.h
+++ b/include/linux/percpu-defs.h
@@ -2,12 +2,6 @@
2#define _LINUX_PERCPU_DEFS_H 2#define _LINUX_PERCPU_DEFS_H
3 3
4/* 4/*
5 * Determine the real variable name from the name visible in the
6 * kernel sources.
7 */
8#define per_cpu_var(var) per_cpu__##var
9
10/*
11 * Base implementations of per-CPU variable declarations and definitions, where 5 * Base implementations of per-CPU variable declarations and definitions, where
12 * the section in which the variable is to be placed is provided by the 6 * the section in which the variable is to be placed is provided by the
13 * 'sec' argument. This may be used to affect the parameters governing the 7 * 'sec' argument. This may be used to affect the parameters governing the
@@ -18,13 +12,23 @@
18 * that section. 12 * that section.
19 */ 13 */
20#define __PCPU_ATTRS(sec) \ 14#define __PCPU_ATTRS(sec) \
21 __attribute__((section(PER_CPU_BASE_SECTION sec))) \ 15 __percpu __attribute__((section(PER_CPU_BASE_SECTION sec))) \
22 PER_CPU_ATTRIBUTES 16 PER_CPU_ATTRIBUTES
23 17
24#define __PCPU_DUMMY_ATTRS \ 18#define __PCPU_DUMMY_ATTRS \
25 __attribute__((section(".discard"), unused)) 19 __attribute__((section(".discard"), unused))
26 20
27/* 21/*
22 * Macro which verifies @ptr is a percpu pointer without evaluating
23 * @ptr. This is to be used in percpu accessors to verify that the
24 * input parameter is a percpu pointer.
25 */
26#define __verify_pcpu_ptr(ptr) do { \
27 const void __percpu *__vpp_verify = (typeof(ptr))NULL; \
28 (void)__vpp_verify; \
29} while (0)
30
31/*
28 * s390 and alpha modules require percpu variables to be defined as 32 * s390 and alpha modules require percpu variables to be defined as
29 * weak to force the compiler to generate GOT based external 33 * weak to force the compiler to generate GOT based external
30 * references for them. This is necessary because percpu sections 34 * references for them. This is necessary because percpu sections
@@ -56,24 +60,24 @@
56 */ 60 */
57#define DECLARE_PER_CPU_SECTION(type, name, sec) \ 61#define DECLARE_PER_CPU_SECTION(type, name, sec) \
58 extern __PCPU_DUMMY_ATTRS char __pcpu_scope_##name; \ 62 extern __PCPU_DUMMY_ATTRS char __pcpu_scope_##name; \
59 extern __PCPU_ATTRS(sec) __typeof__(type) per_cpu__##name 63 extern __PCPU_ATTRS(sec) __typeof__(type) name
60 64
61#define DEFINE_PER_CPU_SECTION(type, name, sec) \ 65#define DEFINE_PER_CPU_SECTION(type, name, sec) \
62 __PCPU_DUMMY_ATTRS char __pcpu_scope_##name; \ 66 __PCPU_DUMMY_ATTRS char __pcpu_scope_##name; \
63 extern __PCPU_DUMMY_ATTRS char __pcpu_unique_##name; \ 67 extern __PCPU_DUMMY_ATTRS char __pcpu_unique_##name; \
64 __PCPU_DUMMY_ATTRS char __pcpu_unique_##name; \ 68 __PCPU_DUMMY_ATTRS char __pcpu_unique_##name; \
65 __PCPU_ATTRS(sec) PER_CPU_DEF_ATTRIBUTES __weak \ 69 __PCPU_ATTRS(sec) PER_CPU_DEF_ATTRIBUTES __weak \
66 __typeof__(type) per_cpu__##name 70 __typeof__(type) name
67#else 71#else
68/* 72/*
69 * Normal declaration and definition macros. 73 * Normal declaration and definition macros.
70 */ 74 */
71#define DECLARE_PER_CPU_SECTION(type, name, sec) \ 75#define DECLARE_PER_CPU_SECTION(type, name, sec) \
72 extern __PCPU_ATTRS(sec) __typeof__(type) per_cpu__##name 76 extern __PCPU_ATTRS(sec) __typeof__(type) name
73 77
74#define DEFINE_PER_CPU_SECTION(type, name, sec) \ 78#define DEFINE_PER_CPU_SECTION(type, name, sec) \
75 __PCPU_ATTRS(sec) PER_CPU_DEF_ATTRIBUTES \ 79 __PCPU_ATTRS(sec) PER_CPU_DEF_ATTRIBUTES \
76 __typeof__(type) per_cpu__##name 80 __typeof__(type) name
77#endif 81#endif
78 82
79/* 83/*
@@ -135,10 +139,16 @@
135 __aligned(PAGE_SIZE) 139 __aligned(PAGE_SIZE)
136 140
137/* 141/*
138 * Intermodule exports for per-CPU variables. 142 * Intermodule exports for per-CPU variables. sparse forgets about
143 * the address space across EXPORT_SYMBOL(), so EXPORT_SYMBOL() becomes
144 * a noop under __CHECKER__.
139 */ 145 */
140#define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var) 146#ifndef __CHECKER__
141#define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var) 147#define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(var)
142 148#define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(var)
149#else
150#define EXPORT_PER_CPU_SYMBOL(var)
151#define EXPORT_PER_CPU_SYMBOL_GPL(var)
152#endif
143 153
144#endif /* _LINUX_PERCPU_DEFS_H */ 154#endif /* _LINUX_PERCPU_DEFS_H */
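
__verify_pcpu_ptr() costs nothing at runtime; its value is the sparse address-space check. A hedged illustration (demo_counter and plain_int are invented names):

#include <linux/percpu.h>

static DEFINE_PER_CPU(int, demo_counter);	/* carries __percpu */
static int plain_int;				/* ordinary variable */

static void demo(void)
{
	__verify_pcpu_ptr(&demo_counter);	/* fine */
	__verify_pcpu_ptr(&plain_int);		/* sparse: address space mismatch */
}
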
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index cf5efbcf716c..a93e5bfdccb8 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -27,10 +27,17 @@
27 * we force a syntax error here if it isn't. 27 * we force a syntax error here if it isn't.
28 */ 28 */
29#define get_cpu_var(var) (*({ \ 29#define get_cpu_var(var) (*({ \
30 extern int simple_identifier_##var(void); \
31 preempt_disable(); \ 30 preempt_disable(); \
32 &__get_cpu_var(var); })) 31 &__get_cpu_var(var); }))
33#define put_cpu_var(var) preempt_enable() 32
33/*
34 * The weird & is necessary because sparse considers (void)(var) to be
35 * a direct dereference of percpu variable (var).
36 */
37#define put_cpu_var(var) do { \
38 (void)&(var); \
39 preempt_enable(); \
40} while (0)
34 41
35#ifdef CONFIG_SMP 42#ifdef CONFIG_SMP
36 43
@@ -127,9 +134,9 @@ extern int __init pcpu_page_first_chunk(size_t reserved_size,
127 */ 134 */
128#define per_cpu_ptr(ptr, cpu) SHIFT_PERCPU_PTR((ptr), per_cpu_offset((cpu))) 135#define per_cpu_ptr(ptr, cpu) SHIFT_PERCPU_PTR((ptr), per_cpu_offset((cpu)))
129 136
130extern void *__alloc_reserved_percpu(size_t size, size_t align); 137extern void __percpu *__alloc_reserved_percpu(size_t size, size_t align);
131extern void *__alloc_percpu(size_t size, size_t align); 138extern void __percpu *__alloc_percpu(size_t size, size_t align);
132extern void free_percpu(void *__pdata); 139extern void free_percpu(void __percpu *__pdata);
133extern phys_addr_t per_cpu_ptr_to_phys(void *addr); 140extern phys_addr_t per_cpu_ptr_to_phys(void *addr);
134 141
135#ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA 142#ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA
@@ -140,7 +147,7 @@ extern void __init setup_per_cpu_areas(void);
140 147
141#define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); (ptr); }) 148#define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); (ptr); })
142 149
143static inline void *__alloc_percpu(size_t size, size_t align) 150static inline void __percpu *__alloc_percpu(size_t size, size_t align)
144{ 151{
145 /* 152 /*
146 * Can't easily make larger alignment work with kmalloc. WARN 153 * Can't easily make larger alignment work with kmalloc. WARN
@@ -151,7 +158,7 @@ static inline void *__alloc_percpu(size_t size, size_t align)
151 return kzalloc(size, GFP_KERNEL); 158 return kzalloc(size, GFP_KERNEL);
152} 159}
153 160
154static inline void free_percpu(void *p) 161static inline void free_percpu(void __percpu *p)
155{ 162{
156 kfree(p); 163 kfree(p);
157} 164}
@@ -171,7 +178,7 @@ static inline void *pcpu_lpage_remapped(void *kaddr)
171#endif /* CONFIG_SMP */ 178#endif /* CONFIG_SMP */
172 179
173#define alloc_percpu(type) \ 180#define alloc_percpu(type) \
174 (typeof(type) *)__alloc_percpu(sizeof(type), __alignof__(type)) 181 (typeof(type) __percpu *)__alloc_percpu(sizeof(type), __alignof__(type))
175 182
176/* 183/*
177 * Optional methods for optimized non-lvalue per-cpu variable access. 184 * Optional methods for optimized non-lvalue per-cpu variable access.
@@ -188,17 +195,19 @@ static inline void *pcpu_lpage_remapped(void *kaddr)
188#ifndef percpu_read 195#ifndef percpu_read
189# define percpu_read(var) \ 196# define percpu_read(var) \
190 ({ \ 197 ({ \
191 typeof(per_cpu_var(var)) __tmp_var__; \ 198 typeof(var) *pr_ptr__ = &(var); \
192 __tmp_var__ = get_cpu_var(var); \ 199 typeof(var) pr_ret__; \
193 put_cpu_var(var); \ 200 pr_ret__ = get_cpu_var(*pr_ptr__); \
194 __tmp_var__; \ 201 put_cpu_var(*pr_ptr__); \
202 pr_ret__; \
195 }) 203 })
196#endif 204#endif
197 205
198#define __percpu_generic_to_op(var, val, op) \ 206#define __percpu_generic_to_op(var, val, op) \
199do { \ 207do { \
200 get_cpu_var(var) op val; \ 208 typeof(var) *pgto_ptr__ = &(var); \
201 put_cpu_var(var); \ 209 get_cpu_var(*pgto_ptr__) op val; \
210 put_cpu_var(*pgto_ptr__); \
202} while (0) 211} while (0)
203 212
204#ifndef percpu_write 213#ifndef percpu_write
@@ -234,6 +243,7 @@ extern void __bad_size_call_parameter(void);
234 243
235#define __pcpu_size_call_return(stem, variable) \ 244#define __pcpu_size_call_return(stem, variable) \
236({ typeof(variable) pscr_ret__; \ 245({ typeof(variable) pscr_ret__; \
246 __verify_pcpu_ptr(&(variable)); \
237 switch(sizeof(variable)) { \ 247 switch(sizeof(variable)) { \
238 case 1: pscr_ret__ = stem##1(variable);break; \ 248 case 1: pscr_ret__ = stem##1(variable);break; \
239 case 2: pscr_ret__ = stem##2(variable);break; \ 249 case 2: pscr_ret__ = stem##2(variable);break; \
@@ -247,6 +257,7 @@ extern void __bad_size_call_parameter(void);
247 257
248#define __pcpu_size_call(stem, variable, ...) \ 258#define __pcpu_size_call(stem, variable, ...) \
249do { \ 259do { \
260 __verify_pcpu_ptr(&(variable)); \
250 switch(sizeof(variable)) { \ 261 switch(sizeof(variable)) { \
251 case 1: stem##1(variable, __VA_ARGS__);break; \ 262 case 1: stem##1(variable, __VA_ARGS__);break; \
252 case 2: stem##2(variable, __VA_ARGS__);break; \ 263 case 2: stem##2(variable, __VA_ARGS__);break; \
@@ -259,8 +270,7 @@ do { \
259 270
260/* 271/*
261 * Optimized manipulation for memory allocated through the per cpu 272 * Optimized manipulation for memory allocated through the per cpu
262 * allocator or for addresses of per cpu variables (can be determined 273 * allocator or for addresses of per cpu variables.
263 * using per_cpu_var(xx).
264 * 274 *
265 * These operation guarantee exclusivity of access for other operations 275 * These operation guarantee exclusivity of access for other operations
266 * on the *same* processor. The assumption is that per cpu data is only 276 * on the *same* processor. The assumption is that per cpu data is only
@@ -311,7 +321,7 @@ do { \
311#define _this_cpu_generic_to_op(pcp, val, op) \ 321#define _this_cpu_generic_to_op(pcp, val, op) \
312do { \ 322do { \
313 preempt_disable(); \ 323 preempt_disable(); \
314 *__this_cpu_ptr(&pcp) op val; \ 324 *__this_cpu_ptr(&(pcp)) op val; \
315 preempt_enable(); \ 325 preempt_enable(); \
316} while (0) 326} while (0)
317 327
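
Taken together, the __percpu annotations above let sparse track dynamic percpu memory end to end. A minimal sketch of the allocation API; struct hit_stat and the function names are illustrative:

#include <linux/cpumask.h>
#include <linux/percpu.h>

struct hit_stat {
	unsigned long hits;
};

static struct hit_stat __percpu *stats;

static int stats_init(void)
{
	stats = alloc_percpu(struct hit_stat);	/* returns a __percpu pointer */
	if (!stats)
		return -ENOMEM;
	return 0;
}

static void stats_hit(void)
{
	/* increments this CPU's copy without taking a lock */
	this_cpu_inc(stats->hits);
}

static unsigned long stats_total(void)
{
	unsigned long sum = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		sum += per_cpu_ptr(stats, cpu)->hits;
	return sum;
}

static void stats_exit(void)
{
	free_percpu(stats);
}
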
diff --git a/include/linux/percpu_counter.h b/include/linux/percpu_counter.h
index 794662b2be5d..c88d67b59394 100644
--- a/include/linux/percpu_counter.h
+++ b/include/linux/percpu_counter.h
@@ -21,7 +21,7 @@ struct percpu_counter {
21#ifdef CONFIG_HOTPLUG_CPU 21#ifdef CONFIG_HOTPLUG_CPU
22 struct list_head list; /* All percpu_counters are on a list */ 22 struct list_head list; /* All percpu_counters are on a list */
23#endif 23#endif
24 s32 *counters; 24 s32 __percpu *counters;
25}; 25};
26 26
27extern int percpu_counter_batch; 27extern int percpu_counter_batch;
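
The s32 __percpu *counters annotation covers the usual percpu_counter lifecycle; a hedged usage sketch:

#include <linux/percpu_counter.h>

static struct percpu_counter nr_things;

static int things_init(void)
{
	return percpu_counter_init(&nr_things, 0);	/* allocates the __percpu array */
}

static void thing_created(void)
{
	percpu_counter_inc(&nr_things);		/* cheap, per-CPU fast path */
}

static s64 things_count(void)
{
	return percpu_counter_sum(&nr_things);	/* exact: folds all CPUs */
}

static void things_exit(void)
{
	percpu_counter_destroy(&nr_things);
}
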
diff --git a/include/linux/range.h b/include/linux/range.h
new file mode 100644
index 000000000000..bd184a5db791
--- /dev/null
+++ b/include/linux/range.h
@@ -0,0 +1,30 @@
1#ifndef _LINUX_RANGE_H
2#define _LINUX_RANGE_H
3
4struct range {
5 u64 start;
6 u64 end;
7};
8
9int add_range(struct range *range, int az, int nr_range,
10 u64 start, u64 end);
11
12
13int add_range_with_merge(struct range *range, int az, int nr_range,
14 u64 start, u64 end);
15
16void subtract_range(struct range *range, int az, u64 start, u64 end);
17
18int clean_sort_range(struct range *range, int az);
19
20void sort_range(struct range *range, int nr_range);
21
22#define MAX_RESOURCE ((resource_size_t)~0)
23static inline resource_size_t cap_resource(u64 val)
24{
25 if (val > MAX_RESOURCE)
26 return MAX_RESOURCE;
27
28 return val;
29}
30#endif
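
A hedged sketch of the new range API: accumulate ranges with merging, compact the array, and clamp a 64-bit value for resource bookkeeping. The demo values are arbitrary and the merge behavior is summarized from kernel/range.c:

#include <linux/kernel.h>
#include <linux/range.h>

static void demo_ranges(void)
{
	struct range r[8];
	int nr = 0;

	/* overlapping or adjacent spans can be coalesced into one entry */
	nr = add_range_with_merge(r, ARRAY_SIZE(r), nr, 0x1000, 0x3000);
	nr = add_range_with_merge(r, ARRAY_SIZE(r), nr, 0x2000, 0x4000);

	nr = clean_sort_range(r, ARRAY_SIZE(r));	/* drop empties, sort */

	/* values above resource_size_t saturate instead of truncating */
	(void)cap_resource(0x1ffffffffULL);
}
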
diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h
index 030d92255c7a..28c9fd020d39 100644
--- a/include/linux/regulator/consumer.h
+++ b/include/linux/regulator/consumer.h
@@ -89,8 +89,9 @@
89 * REGULATION_OUT Regulator output is out of regulation. 89 * REGULATION_OUT Regulator output is out of regulation.
90 * FAIL Regulator output has failed. 90 * FAIL Regulator output has failed.
91 * OVER_TEMP Regulator over temp. 91 * OVER_TEMP Regulator over temp.
92 * FORCE_DISABLE Regulator shut down by software. 92 * FORCE_DISABLE Regulator forcibly shut down by software.
93 * VOLTAGE_CHANGE Regulator voltage changed. 93 * VOLTAGE_CHANGE Regulator voltage changed.
94 * DISABLE Regulator was disabled.
94 * 95 *
95 * NOTE: These events can be OR'ed together when passed into handler. 96 * NOTE: These events can be OR'ed together when passed into handler.
96 */ 97 */
@@ -102,6 +103,7 @@
102#define REGULATOR_EVENT_OVER_TEMP 0x10 103#define REGULATOR_EVENT_OVER_TEMP 0x10
103#define REGULATOR_EVENT_FORCE_DISABLE 0x20 104#define REGULATOR_EVENT_FORCE_DISABLE 0x20
104#define REGULATOR_EVENT_VOLTAGE_CHANGE 0x40 105#define REGULATOR_EVENT_VOLTAGE_CHANGE 0x40
106#define REGULATOR_EVENT_DISABLE 0x80
105 107
106struct regulator; 108struct regulator;
107 109
diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h
index 31f2055eae28..592cd7c642c2 100644
--- a/include/linux/regulator/driver.h
+++ b/include/linux/regulator/driver.h
@@ -58,6 +58,9 @@ enum regulator_status {
58 * @get_optimum_mode: Get the most efficient operating mode for the regulator 58 * @get_optimum_mode: Get the most efficient operating mode for the regulator
59 * when running with the specified parameters. 59 * when running with the specified parameters.
60 * 60 *
61 * @enable_time: Time taken for the regulator output voltage to
62 * stabilise after being enabled, in microseconds.
63 *
61 * @set_suspend_voltage: Set the voltage for the regulator when the system 64 * @set_suspend_voltage: Set the voltage for the regulator when the system
62 * is suspended. 65 * is suspended.
63 * @set_suspend_enable: Mark the regulator as enabled when the system is 66 * @set_suspend_enable: Mark the regulator as enabled when the system is
@@ -93,6 +96,9 @@ struct regulator_ops {
93 int (*set_mode) (struct regulator_dev *, unsigned int mode); 96 int (*set_mode) (struct regulator_dev *, unsigned int mode);
94 unsigned int (*get_mode) (struct regulator_dev *); 97 unsigned int (*get_mode) (struct regulator_dev *);
95 98
99 /* Time taken to enable the regulator */
100 int (*enable_time) (struct regulator_dev *);
101
96 /* report regulator status ... most other accessors report 102 /* report regulator status ... most other accessors report
97 * control inputs, this reports results of combining inputs 103 * control inputs, this reports results of combining inputs
98 * from Linux (and other sources) with the actual load. 104 * from Linux (and other sources) with the actual load.
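
A hedged sketch of a driver filling in the new enable_time op; the ops name and the 500 microsecond figure are illustrative:

static int my_enable_time(struct regulator_dev *rdev)
{
	return 500;	/* microseconds until the output is stable */
}

static struct regulator_ops my_regulator_ops = {
	.enable_time	= my_enable_time,
	/* .enable, .disable, .get_voltage, ... elided */
};
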
diff --git a/include/linux/regulator/fixed.h b/include/linux/regulator/fixed.h
index e94a4a1c7c8a..ffd7d508e726 100644
--- a/include/linux/regulator/fixed.h
+++ b/include/linux/regulator/fixed.h
@@ -25,6 +25,7 @@ struct regulator_init_data;
25 * @microvolts: Output voltage of regulator 25 * @microvolts: Output voltage of regulator
26 * @gpio: GPIO to use for enable control 26 * @gpio: GPIO to use for enable control
27 * set to -EINVAL if not used 27 * set to -EINVAL if not used
28 * @startup_delay: Start-up time in microseconds
28 * @enable_high: Polarity of enable GPIO 29 * @enable_high: Polarity of enable GPIO
29 * 1 = Active high, 0 = Active low 30 * 1 = Active high, 0 = Active low
30 * @enabled_at_boot: Whether regulator has been enabled at 31 * @enabled_at_boot: Whether regulator has been enabled at
@@ -41,6 +42,7 @@ struct fixed_voltage_config {
41 const char *supply_name; 42 const char *supply_name;
42 int microvolts; 43 int microvolts;
43 int gpio; 44 int gpio;
45 unsigned startup_delay;
44 unsigned enable_high:1; 46 unsigned enable_high:1;
45 unsigned enabled_at_boot:1; 47 unsigned enabled_at_boot:1;
46 struct regulator_init_data *init_data; 48 struct regulator_init_data *init_data;
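
Board code would feed the new startup_delay field like this; every name and number below is hypothetical:

#include <linux/regulator/fixed.h>

static struct fixed_voltage_config board_vcc33 = {
	.supply_name	 = "vcc_3v3",
	.microvolts	 = 3300000,
	.gpio		 = 42,
	.startup_delay	 = 2000,	/* 2 ms ramp, in microseconds */
	.enable_high	 = 1,
	.enabled_at_boot = 0,
	.init_data	 = &board_vcc33_init_data,	/* hypothetical */
};
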
diff --git a/include/linux/regulator/max8649.h b/include/linux/regulator/max8649.h
new file mode 100644
index 000000000000..417d14ecd5cb
--- /dev/null
+++ b/include/linux/regulator/max8649.h
@@ -0,0 +1,44 @@
1/*
2 * Interface of Maxim max8649
3 *
4 * Copyright (C) 2009-2010 Marvell International Ltd.
5 * Haojian Zhuang <haojian.zhuang@marvell.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#ifndef __LINUX_REGULATOR_MAX8649_H
13#define __LINUX_REGULATOR_MAX8649_H
14
15#include <linux/regulator/machine.h>
16
17enum {
18 MAX8649_EXTCLK_26MHZ = 0,
19 MAX8649_EXTCLK_13MHZ,
20 MAX8649_EXTCLK_19MHZ, /* 19.2MHz */
21};
22
23enum {
24 MAX8649_RAMP_32MV = 0,
25 MAX8649_RAMP_16MV,
26 MAX8649_RAMP_8MV,
27 MAX8649_RAMP_4MV,
28 MAX8649_RAMP_2MV,
29 MAX8649_RAMP_1MV,
30 MAX8649_RAMP_0_5MV,
31 MAX8649_RAMP_0_25MV,
32};
33
34struct max8649_platform_data {
35 struct regulator_init_data *regulator;
36
37 unsigned mode:2; /* bit[1:0] = VID1,VID0 */
38 unsigned extclk_freq:2;
39 unsigned extclk:1;
40 unsigned ramp_timing:3;
41 unsigned ramp_down:1;
42};
43
44#endif /* __LINUX_REGULATOR_MAX8649_H */
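
A hedged board-code sketch of the new platform data; the init data pointer and field values are invented:

#include <linux/regulator/max8649.h>

static struct max8649_platform_data board_max8649 = {
	.regulator	= &board_buck_init_data,	/* hypothetical */
	.mode		= 2,				/* VID1 = 1, VID0 = 0 */
	.extclk		= 1,
	.extclk_freq	= MAX8649_EXTCLK_26MHZ,
	.ramp_timing	= MAX8649_RAMP_32MV,
	.ramp_down	= 0,
};
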
diff --git a/include/linux/reiserfs_fs.h b/include/linux/reiserfs_fs.h
index 1ba3cf6edfbb..3b603f474186 100644
--- a/include/linux/reiserfs_fs.h
+++ b/include/linux/reiserfs_fs.h
@@ -2034,7 +2034,7 @@ void reiserfs_read_locked_inode(struct inode *inode,
2034int reiserfs_find_actor(struct inode *inode, void *p); 2034int reiserfs_find_actor(struct inode *inode, void *p);
2035int reiserfs_init_locked_inode(struct inode *inode, void *p); 2035int reiserfs_init_locked_inode(struct inode *inode, void *p);
2036void reiserfs_delete_inode(struct inode *inode); 2036void reiserfs_delete_inode(struct inode *inode);
2037int reiserfs_write_inode(struct inode *inode, int); 2037int reiserfs_write_inode(struct inode *inode, struct writeback_control *wbc);
2038int reiserfs_get_block(struct inode *inode, sector_t block, 2038int reiserfs_get_block(struct inode *inode, sector_t block,
2039 struct buffer_head *bh_result, int create); 2039 struct buffer_head *bh_result, int create);
2040struct dentry *reiserfs_fh_to_dentry(struct super_block *sb, struct fid *fid, 2040struct dentry *reiserfs_fh_to_dentry(struct super_block *sb, struct fid *fid,
diff --git a/include/linux/security.h b/include/linux/security.h
index 2c627d361c02..233d20b52c1b 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -76,7 +76,7 @@ extern int cap_task_prctl(int option, unsigned long arg2, unsigned long arg3,
76extern int cap_task_setscheduler(struct task_struct *p, int policy, struct sched_param *lp); 76extern int cap_task_setscheduler(struct task_struct *p, int policy, struct sched_param *lp);
77extern int cap_task_setioprio(struct task_struct *p, int ioprio); 77extern int cap_task_setioprio(struct task_struct *p, int ioprio);
78extern int cap_task_setnice(struct task_struct *p, int nice); 78extern int cap_task_setnice(struct task_struct *p, int nice);
79extern int cap_syslog(int type); 79extern int cap_syslog(int type, bool from_file);
80extern int cap_vm_enough_memory(struct mm_struct *mm, long pages); 80extern int cap_vm_enough_memory(struct mm_struct *mm, long pages);
81 81
82struct msghdr; 82struct msghdr;
@@ -95,6 +95,8 @@ struct seq_file;
95extern int cap_netlink_send(struct sock *sk, struct sk_buff *skb); 95extern int cap_netlink_send(struct sock *sk, struct sk_buff *skb);
96extern int cap_netlink_recv(struct sk_buff *skb, int cap); 96extern int cap_netlink_recv(struct sk_buff *skb, int cap);
97 97
98void reset_security_ops(void);
99
98#ifdef CONFIG_MMU 100#ifdef CONFIG_MMU
99extern unsigned long mmap_min_addr; 101extern unsigned long mmap_min_addr;
100extern unsigned long dac_mmap_min_addr; 102extern unsigned long dac_mmap_min_addr;
@@ -985,6 +987,7 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
985 * Check permissions on incoming network packets. This hook is distinct 987 * Check permissions on incoming network packets. This hook is distinct
986 * from Netfilter's IP input hooks since it is the first time that the 988 * from Netfilter's IP input hooks since it is the first time that the
987 * incoming sk_buff @skb has been associated with a particular socket, @sk. 989 * incoming sk_buff @skb has been associated with a particular socket, @sk.
990 * Must not sleep inside this hook because some callers hold spinlocks.
988 * @sk contains the sock (not socket) associated with the incoming sk_buff. 991 * @sk contains the sock (not socket) associated with the incoming sk_buff.
989 * @skb contains the incoming network data. 992 * @skb contains the incoming network data.
990 * @socket_getpeersec_stream: 993 * @socket_getpeersec_stream:
@@ -1348,6 +1351,7 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
1348 * logging to the console. 1351 * logging to the console.
1349 * See the syslog(2) manual page for an explanation of the @type values. 1352 * See the syslog(2) manual page for an explanation of the @type values.
1350 * @type contains the type of action. 1353 * @type contains the type of action.
1354 * @from_file indicates the context of action (if it came from /proc).
1351 * Return 0 if permission is granted. 1355 * Return 0 if permission is granted.
1352 * @settime: 1356 * @settime:
1353 * Check permission to change the system time. 1357 * Check permission to change the system time.
@@ -1462,7 +1466,7 @@ struct security_operations {
1462 int (*sysctl) (struct ctl_table *table, int op); 1466 int (*sysctl) (struct ctl_table *table, int op);
1463 int (*quotactl) (int cmds, int type, int id, struct super_block *sb); 1467 int (*quotactl) (int cmds, int type, int id, struct super_block *sb);
1464 int (*quota_on) (struct dentry *dentry); 1468 int (*quota_on) (struct dentry *dentry);
1465 int (*syslog) (int type); 1469 int (*syslog) (int type, bool from_file);
1466 int (*settime) (struct timespec *ts, struct timezone *tz); 1470 int (*settime) (struct timespec *ts, struct timezone *tz);
1467 int (*vm_enough_memory) (struct mm_struct *mm, long pages); 1471 int (*vm_enough_memory) (struct mm_struct *mm, long pages);
1468 1472
@@ -1761,7 +1765,7 @@ int security_acct(struct file *file);
1761int security_sysctl(struct ctl_table *table, int op); 1765int security_sysctl(struct ctl_table *table, int op);
1762int security_quotactl(int cmds, int type, int id, struct super_block *sb); 1766int security_quotactl(int cmds, int type, int id, struct super_block *sb);
1763int security_quota_on(struct dentry *dentry); 1767int security_quota_on(struct dentry *dentry);
1764int security_syslog(int type); 1768int security_syslog(int type, bool from_file);
1765int security_settime(struct timespec *ts, struct timezone *tz); 1769int security_settime(struct timespec *ts, struct timezone *tz);
1766int security_vm_enough_memory(long pages); 1770int security_vm_enough_memory(long pages);
1767int security_vm_enough_memory_mm(struct mm_struct *mm, long pages); 1771int security_vm_enough_memory_mm(struct mm_struct *mm, long pages);
@@ -2007,9 +2011,9 @@ static inline int security_quota_on(struct dentry *dentry)
2007 return 0; 2011 return 0;
2008} 2012}
2009 2013
2010static inline int security_syslog(int type) 2014static inline int security_syslog(int type, bool from_file)
2011{ 2015{
2012 return cap_syslog(type); 2016 return cap_syslog(type, from_file);
2013} 2017}
2014 2018
2015static inline int security_settime(struct timespec *ts, struct timezone *tz) 2019static inline int security_settime(struct timespec *ts, struct timezone *tz)
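
A hedged sketch of an LSM honoring the new from_file argument and falling back to the capability default; the policy shown is invented:

#include <linux/syslog.h>

static int example_syslog(int type, bool from_file)
{
	/* e.g. always allow opening /proc/kmsg, defer everything else */
	if (from_file && type == SYSLOG_ACTION_OPEN)
		return 0;
	return cap_syslog(type, from_file);
}
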
diff --git a/include/linux/srcu.h b/include/linux/srcu.h
index 3084f80909cd..4d5ecb222af9 100644
--- a/include/linux/srcu.h
+++ b/include/linux/srcu.h
@@ -33,7 +33,7 @@ struct srcu_struct_array {
33 33
34struct srcu_struct { 34struct srcu_struct {
35 int completed; 35 int completed;
36 struct srcu_struct_array *per_cpu_ref; 36 struct srcu_struct_array __percpu *per_cpu_ref;
37 struct mutex mutex; 37 struct mutex mutex;
38#ifdef CONFIG_DEBUG_LOCK_ALLOC 38#ifdef CONFIG_DEBUG_LOCK_ALLOC
39 struct lockdep_map dep_map; 39 struct lockdep_map dep_map;
diff --git a/include/linux/syslog.h b/include/linux/syslog.h
new file mode 100644
index 000000000000..38911391a139
--- /dev/null
+++ b/include/linux/syslog.h
@@ -0,0 +1,52 @@
1/* Syslog internals
2 *
3 * Copyright 2010 Canonical, Ltd.
4 * Author: Kees Cook <kees.cook@canonical.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2, or (at your option)
9 * any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; see the file COPYING. If not, write to
18 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
19 */
20
21#ifndef _LINUX_SYSLOG_H
22#define _LINUX_SYSLOG_H
23
24/* Close the log. Currently a NOP. */
25#define SYSLOG_ACTION_CLOSE 0
26/* Open the log. Currently a NOP. */
27#define SYSLOG_ACTION_OPEN 1
28/* Read from the log. */
29#define SYSLOG_ACTION_READ 2
30/* Read all messages remaining in the ring buffer. */
31#define SYSLOG_ACTION_READ_ALL 3
32/* Read and clear all messages remaining in the ring buffer */
33#define SYSLOG_ACTION_READ_CLEAR 4
34/* Clear ring buffer. */
35#define SYSLOG_ACTION_CLEAR 5
36/* Disable printk's to console */
37#define SYSLOG_ACTION_CONSOLE_OFF 6
38/* Enable printk's to console */
39#define SYSLOG_ACTION_CONSOLE_ON 7
40/* Set level of messages printed to console */
41#define SYSLOG_ACTION_CONSOLE_LEVEL 8
42/* Return number of unread characters in the log buffer */
43#define SYSLOG_ACTION_SIZE_UNREAD 9
44/* Return size of the log buffer */
45#define SYSLOG_ACTION_SIZE_BUFFER 10
46
47#define SYSLOG_FROM_CALL 0
48#define SYSLOG_FROM_FILE 1
49
50int do_syslog(int type, char __user *buf, int count, bool from_file);
51
52#endif /* _LINUX_SYSLOG_H */
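
The action codes mirror what userspace reaches through syslog(2)/klogctl(3); a small userspace sketch draining the ring buffer:

#include <stdio.h>
#include <stdlib.h>
#include <sys/klog.h>

int main(void)
{
	int len = klogctl(10 /* SYSLOG_ACTION_SIZE_BUFFER */, NULL, 0);
	char *buf;

	if (len <= 0)
		return 1;
	buf = malloc(len);
	if (!buf)
		return 1;
	len = klogctl(3 /* SYSLOG_ACTION_READ_ALL */, buf, len);
	if (len > 0)
		fwrite(buf, 1, len, stdout);
	free(buf);
	return 0;
}
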
diff --git a/include/linux/tty.h b/include/linux/tty.h
index 6abfcf5b5887..d96e5882f129 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -68,6 +68,16 @@ struct tty_buffer {
68 unsigned long data[0]; 68 unsigned long data[0];
69}; 69};
70 70
71/*
72 * We default to dicing tty buffer allocations to this many characters
73 * in order to avoid multiple page allocations. We assume tty_buffer itself
74 * is under 256 bytes. See tty_buffer_find for the allocation logic this
75 * must match.
76 */
77
78#define TTY_BUFFER_PAGE ((PAGE_SIZE - 256) / 2)
79
80
71struct tty_bufhead { 81struct tty_bufhead {
72 struct delayed_work work; 82 struct delayed_work work;
73 spinlock_t lock; 83 spinlock_t lock;
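
The divisor of two reflects that tty_buffer's data[] holds a flag byte per character alongside the character itself, so on a 4 KiB-page system TTY_BUFFER_PAGE works out to (4096 - 256) / 2 = 1920: 256 bytes of header slack plus 1920 characters plus 1920 flags fill one page exactly.
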
diff --git a/include/linux/tty_flip.h b/include/linux/tty_flip.h
index eb677cf56106..9239d033a0a3 100644
--- a/include/linux/tty_flip.h
+++ b/include/linux/tty_flip.h
@@ -2,8 +2,8 @@
2#define _LINUX_TTY_FLIP_H 2#define _LINUX_TTY_FLIP_H
3 3
4extern int tty_buffer_request_room(struct tty_struct *tty, size_t size); 4extern int tty_buffer_request_room(struct tty_struct *tty, size_t size);
5extern int tty_insert_flip_string(struct tty_struct *tty, const unsigned char *chars, size_t size);
6extern int tty_insert_flip_string_flags(struct tty_struct *tty, const unsigned char *chars, const char *flags, size_t size); 5extern int tty_insert_flip_string_flags(struct tty_struct *tty, const unsigned char *chars, const char *flags, size_t size);
6extern int tty_insert_flip_string_fixed_flag(struct tty_struct *tty, const unsigned char *chars, char flag, size_t size);
7extern int tty_prepare_flip_string(struct tty_struct *tty, unsigned char **chars, size_t size); 7extern int tty_prepare_flip_string(struct tty_struct *tty, unsigned char **chars, size_t size);
8extern int tty_prepare_flip_string_flags(struct tty_struct *tty, unsigned char **chars, char **flags, size_t size); 8extern int tty_prepare_flip_string_flags(struct tty_struct *tty, unsigned char **chars, char **flags, size_t size);
9void tty_schedule_flip(struct tty_struct *tty); 9void tty_schedule_flip(struct tty_struct *tty);
@@ -20,4 +20,9 @@ static inline int tty_insert_flip_char(struct tty_struct *tty,
20 return tty_insert_flip_string_flags(tty, &ch, &flag, 1); 20 return tty_insert_flip_string_flags(tty, &ch, &flag, 1);
21} 21}
22 22
23static inline int tty_insert_flip_string(struct tty_struct *tty, const unsigned char *chars, size_t size)
24{
25 return tty_insert_flip_string_fixed_flag(tty, chars, TTY_NORMAL, size);
26}
27
23#endif /* _LINUX_TTY_FLIP_H */ 28#endif /* _LINUX_TTY_FLIP_H */
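
The new inline keeps the old tty_insert_flip_string() calling convention while routing through the fixed-flag variant with TTY_NORMAL. A hedged receive-path sketch, with the tty and buffer assumed to be driver state:

#include <linux/tty.h>
#include <linux/tty_flip.h>

static void demo_rx(struct tty_struct *tty,
		    const unsigned char *buf, size_t len)
{
	int copied = tty_insert_flip_string(tty, buf, len);

	if (copied)
		tty_flip_buffer_push(tty);	/* schedule delivery to the ldisc */
}
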
diff --git a/include/linux/usb.h b/include/linux/usb.h
index 332eaea61021..3492abf82e75 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -122,7 +122,6 @@ enum usb_interface_condition {
122 * number from the USB core by calling usb_register_dev(). 122 * number from the USB core by calling usb_register_dev().
123 * @condition: binding state of the interface: not bound, binding 123 * @condition: binding state of the interface: not bound, binding
124 * (in probe()), bound to a driver, or unbinding (in disconnect()) 124 * (in probe()), bound to a driver, or unbinding (in disconnect())
125 * @is_active: flag set when the interface is bound and not suspended.
126 * @sysfs_files_created: sysfs attributes exist 125 * @sysfs_files_created: sysfs attributes exist
127 * @ep_devs_created: endpoint child pseudo-devices exist 126 * @ep_devs_created: endpoint child pseudo-devices exist
128 * @unregistering: flag set when the interface is being unregistered 127 * @unregistering: flag set when the interface is being unregistered
@@ -135,8 +134,7 @@ enum usb_interface_condition {
135 * @dev: driver model's view of this device 134 * @dev: driver model's view of this device
136 * @usb_dev: if an interface is bound to the USB major, this will point 135 * @usb_dev: if an interface is bound to the USB major, this will point
137 * to the sysfs representation for that device. 136 * to the sysfs representation for that device.
138 * @pm_usage_cnt: PM usage counter for this interface; autosuspend is not 137 * @pm_usage_cnt: PM usage counter for this interface
139 * allowed unless the counter is 0.
140 * @reset_ws: Used for scheduling resets from atomic context. 138 * @reset_ws: Used for scheduling resets from atomic context.
141 * @reset_running: set to 1 if the interface is currently running a 139 * @reset_running: set to 1 if the interface is currently running a
142 * queued reset so that usb_cancel_queued_reset() doesn't try to 140 * queued reset so that usb_cancel_queued_reset() doesn't try to
@@ -184,7 +182,6 @@ struct usb_interface {
184 int minor; /* minor number this interface is 182 int minor; /* minor number this interface is
185 * bound to */ 183 * bound to */
186 enum usb_interface_condition condition; /* state of binding */ 184 enum usb_interface_condition condition; /* state of binding */
187 unsigned is_active:1; /* the interface is not suspended */
188 unsigned sysfs_files_created:1; /* the sysfs attributes exist */ 185 unsigned sysfs_files_created:1; /* the sysfs attributes exist */
189 unsigned ep_devs_created:1; /* endpoint "devices" exist */ 186 unsigned ep_devs_created:1; /* endpoint "devices" exist */
190 unsigned unregistering:1; /* unregistration is in progress */ 187 unsigned unregistering:1; /* unregistration is in progress */
@@ -401,7 +398,6 @@ struct usb_tt;
401 * @portnum: parent port number (origin 1) 398 * @portnum: parent port number (origin 1)
402 * @level: number of USB hub ancestors 399 * @level: number of USB hub ancestors
403 * @can_submit: URBs may be submitted 400 * @can_submit: URBs may be submitted
404 * @discon_suspended: disconnected while suspended
405 * @persist_enabled: USB_PERSIST enabled for this device 401 * @persist_enabled: USB_PERSIST enabled for this device
406 * @have_langid: whether string_langid is valid 402 * @have_langid: whether string_langid is valid
407 * @authorized: policy has said we can use it; 403 * @authorized: policy has said we can use it;
@@ -421,20 +417,15 @@ struct usb_tt;
421 * @usbfs_dentry: usbfs dentry entry for the device 417 * @usbfs_dentry: usbfs dentry entry for the device
422 * @maxchild: number of ports if hub 418 * @maxchild: number of ports if hub
423 * @children: child devices - USB devices that are attached to this hub 419 * @children: child devices - USB devices that are attached to this hub
424 * @pm_usage_cnt: usage counter for autosuspend
425 * @quirks: quirks of the whole device 420 * @quirks: quirks of the whole device
426 * @urbnum: number of URBs submitted for the whole device 421 * @urbnum: number of URBs submitted for the whole device
427 * @active_duration: total time device is not suspended 422 * @active_duration: total time device is not suspended
428 * @autosuspend: for delayed autosuspends
429 * @autoresume: for autoresumes requested while in_interrupt
430 * @pm_mutex: protects PM operations
431 * @last_busy: time of last use 423 * @last_busy: time of last use
432 * @autosuspend_delay: in jiffies 424 * @autosuspend_delay: in jiffies
433 * @connect_time: time device was first connected 425 * @connect_time: time device was first connected
434 * @do_remote_wakeup: remote wakeup should be enabled 426 * @do_remote_wakeup: remote wakeup should be enabled
435 * @reset_resume: needs reset instead of resume 427 * @reset_resume: needs reset instead of resume
436 * @autosuspend_disabled: autosuspend disabled by the user 428 * @autosuspend_disabled: autosuspend disabled by the user
437 * @skip_sys_resume: skip the next system resume
438 * @wusb_dev: if this is a Wireless USB device, link to the WUSB 429 * @wusb_dev: if this is a Wireless USB device, link to the WUSB
439 * specific data for the device. 430 * specific data for the device.
440 * @slot_id: Slot ID assigned by xHCI 431 * @slot_id: Slot ID assigned by xHCI
@@ -475,7 +466,6 @@ struct usb_device {
475 u8 level; 466 u8 level;
476 467
477 unsigned can_submit:1; 468 unsigned can_submit:1;
478 unsigned discon_suspended:1;
479 unsigned persist_enabled:1; 469 unsigned persist_enabled:1;
480 unsigned have_langid:1; 470 unsigned have_langid:1;
481 unsigned authorized:1; 471 unsigned authorized:1;
@@ -499,17 +489,12 @@ struct usb_device {
499 int maxchild; 489 int maxchild;
500 struct usb_device *children[USB_MAXCHILDREN]; 490 struct usb_device *children[USB_MAXCHILDREN];
501 491
502 int pm_usage_cnt;
503 u32 quirks; 492 u32 quirks;
504 atomic_t urbnum; 493 atomic_t urbnum;
505 494
506 unsigned long active_duration; 495 unsigned long active_duration;
507 496
508#ifdef CONFIG_PM 497#ifdef CONFIG_PM
509 struct delayed_work autosuspend;
510 struct work_struct autoresume;
511 struct mutex pm_mutex;
512
513 unsigned long last_busy; 498 unsigned long last_busy;
514 int autosuspend_delay; 499 int autosuspend_delay;
515 unsigned long connect_time; 500 unsigned long connect_time;
@@ -517,7 +502,6 @@ struct usb_device {
517 unsigned do_remote_wakeup:1; 502 unsigned do_remote_wakeup:1;
518 unsigned reset_resume:1; 503 unsigned reset_resume:1;
519 unsigned autosuspend_disabled:1; 504 unsigned autosuspend_disabled:1;
520 unsigned skip_sys_resume:1;
521#endif 505#endif
522 struct wusb_dev *wusb_dev; 506 struct wusb_dev *wusb_dev;
523 int slot_id; 507 int slot_id;
@@ -542,21 +526,15 @@ extern struct usb_device *usb_find_device(u16 vendor_id, u16 product_id);
542 526
543/* USB autosuspend and autoresume */ 527/* USB autosuspend and autoresume */
544#ifdef CONFIG_USB_SUSPEND 528#ifdef CONFIG_USB_SUSPEND
529extern int usb_enable_autosuspend(struct usb_device *udev);
530extern int usb_disable_autosuspend(struct usb_device *udev);
531
545extern int usb_autopm_get_interface(struct usb_interface *intf); 532extern int usb_autopm_get_interface(struct usb_interface *intf);
546extern void usb_autopm_put_interface(struct usb_interface *intf); 533extern void usb_autopm_put_interface(struct usb_interface *intf);
547extern int usb_autopm_get_interface_async(struct usb_interface *intf); 534extern int usb_autopm_get_interface_async(struct usb_interface *intf);
548extern void usb_autopm_put_interface_async(struct usb_interface *intf); 535extern void usb_autopm_put_interface_async(struct usb_interface *intf);
549 536extern void usb_autopm_get_interface_no_resume(struct usb_interface *intf);
550static inline void usb_autopm_get_interface_no_resume( 537extern void usb_autopm_put_interface_no_suspend(struct usb_interface *intf);
551 struct usb_interface *intf)
552{
553 atomic_inc(&intf->pm_usage_cnt);
554}
555static inline void usb_autopm_put_interface_no_suspend(
556 struct usb_interface *intf)
557{
558 atomic_dec(&intf->pm_usage_cnt);
559}
560 538
561static inline void usb_mark_last_busy(struct usb_device *udev) 539static inline void usb_mark_last_busy(struct usb_device *udev)
562{ 540{
@@ -565,6 +543,11 @@ static inline void usb_mark_last_busy(struct usb_device *udev)
565 543
566#else 544#else
567 545
546static inline int usb_enable_autosuspend(struct usb_device *udev)
547{ return 0; }
548static inline int usb_disable_autosuspend(struct usb_device *udev)
549{ return 0; }
550
568static inline int usb_autopm_get_interface(struct usb_interface *intf) 551static inline int usb_autopm_get_interface(struct usb_interface *intf)
569{ return 0; } 552{ return 0; }
570static inline int usb_autopm_get_interface_async(struct usb_interface *intf) 553static inline int usb_autopm_get_interface_async(struct usb_interface *intf)
@@ -1583,14 +1566,18 @@ extern void usb_register_notify(struct notifier_block *nb);
1583extern void usb_unregister_notify(struct notifier_block *nb); 1566extern void usb_unregister_notify(struct notifier_block *nb);
1584 1567
1585#ifdef DEBUG 1568#ifdef DEBUG
1586#define dbg(format, arg...) printk(KERN_DEBUG "%s: " format "\n" , \ 1569#define dbg(format, arg...) \
1587 __FILE__ , ## arg) 1570 printk(KERN_DEBUG "%s: " format "\n", __FILE__, ##arg)
1588#else 1571#else
1589#define dbg(format, arg...) do {} while (0) 1572#define dbg(format, arg...) \
1573do { \
1574 if (0) \
1575 printk(KERN_DEBUG "%s: " format "\n", __FILE__, ##arg); \
1576} while (0)
1590#endif 1577#endif
1591 1578
1592#define err(format, arg...) printk(KERN_ERR KBUILD_MODNAME ": " \ 1579#define err(format, arg...) \
1593 format "\n" , ## arg) 1580 printk(KERN_ERR KBUILD_MODNAME ": " format "\n", ##arg)
1594 1581
1595/* debugfs stuff */ 1582/* debugfs stuff */
1596extern struct dentry *usb_debug_root; 1583extern struct dentry *usb_debug_root;
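
A hedged driver-side sketch of the autosuspend API above, holding the interface awake around I/O; my_do_io() is hypothetical:

#include <linux/usb.h>

extern int my_do_io(struct usb_interface *intf);	/* hypothetical */

static int my_xfer(struct usb_interface *intf)
{
	int err = usb_autopm_get_interface(intf);	/* resumes if suspended */

	if (err)
		return err;
	err = my_do_io(intf);
	usb_autopm_put_interface(intf);		/* may re-arm autosuspend */
	return err;
}
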
diff --git a/include/linux/usb/Kbuild b/include/linux/usb/Kbuild
index 54c446309a2a..29fd73b0bffc 100644
--- a/include/linux/usb/Kbuild
+++ b/include/linux/usb/Kbuild
@@ -5,4 +5,3 @@ header-y += gadgetfs.h
5header-y += midi.h 5header-y += midi.h
6header-y += g_printer.h 6header-y += g_printer.h
7header-y += tmc.h 7header-y += tmc.h
8header-y += vstusb.h
diff --git a/include/linux/usb/atmel_usba_udc.h b/include/linux/usb/atmel_usba_udc.h
index 6311fa2d9f82..baf41c8616e9 100644
--- a/include/linux/usb/atmel_usba_udc.h
+++ b/include/linux/usb/atmel_usba_udc.h
@@ -15,6 +15,7 @@ struct usba_ep_data {
15 15
16struct usba_platform_data { 16struct usba_platform_data {
17 int vbus_pin; 17 int vbus_pin;
18 int vbus_pin_inverted;
18 int num_ep; 19 int num_ep;
19 struct usba_ep_data ep[0]; 20 struct usba_ep_data ep[0];
20}; 21};
diff --git a/include/linux/usb/ch9.h b/include/linux/usb/ch9.h
index 94012e649d86..e58369ff8168 100644
--- a/include/linux/usb/ch9.h
+++ b/include/linux/usb/ch9.h
@@ -775,7 +775,7 @@ enum usb_device_speed {
775 USB_SPEED_UNKNOWN = 0, /* enumerating */ 775 USB_SPEED_UNKNOWN = 0, /* enumerating */
776 USB_SPEED_LOW, USB_SPEED_FULL, /* usb 1.1 */ 776 USB_SPEED_LOW, USB_SPEED_FULL, /* usb 1.1 */
777 USB_SPEED_HIGH, /* usb 2.0 */ 777 USB_SPEED_HIGH, /* usb 2.0 */
778 USB_SPEED_VARIABLE, /* wireless (usb 2.5) */ 778 USB_SPEED_WIRELESS, /* wireless (usb 2.5) */
779 USB_SPEED_SUPER, /* usb 3.0 */ 779 USB_SPEED_SUPER, /* usb 3.0 */
780}; 780};
781 781
diff --git a/include/linux/usb/musb.h b/include/linux/usb/musb.h
index 5dc2f227a0f1..7acef0234c0e 100644
--- a/include/linux/usb/musb.h
+++ b/include/linux/usb/musb.h
@@ -30,26 +30,26 @@ struct musb_hdrc_eps_bits {
30struct musb_hdrc_config { 30struct musb_hdrc_config {
31 /* MUSB configuration-specific details */ 31 /* MUSB configuration-specific details */
32 unsigned multipoint:1; /* multipoint device */ 32 unsigned multipoint:1; /* multipoint device */
33 unsigned dyn_fifo:1; /* supports dynamic fifo sizing */ 33 unsigned dyn_fifo:1 __deprecated; /* supports dynamic fifo sizing */
34 unsigned soft_con:1; /* soft connect required */ 34 unsigned soft_con:1 __deprecated; /* soft connect required */
35 unsigned utm_16:1; /* utm data width is 16 bits */ 35 unsigned utm_16:1 __deprecated; /* utm data width is 16 bits */
36 unsigned big_endian:1; /* true if CPU uses big-endian */ 36 unsigned big_endian:1; /* true if CPU uses big-endian */
37 unsigned mult_bulk_tx:1; /* Tx ep required for multbulk pkts */ 37 unsigned mult_bulk_tx:1; /* Tx ep required for multbulk pkts */
38 unsigned mult_bulk_rx:1; /* Rx ep required for multbulk pkts */ 38 unsigned mult_bulk_rx:1; /* Rx ep required for multbulk pkts */
39 unsigned high_iso_tx:1; /* Tx ep required for HB iso */ 39 unsigned high_iso_tx:1; /* Tx ep required for HB iso */
40 unsigned high_iso_rx:1; /* Rx ep required for HD iso */ 40 unsigned high_iso_rx:1; /* Rx ep required for HD iso */
41 unsigned dma:1; /* supports DMA */ 41 unsigned dma:1 __deprecated; /* supports DMA */
42 unsigned vendor_req:1; /* vendor registers required */ 42 unsigned vendor_req:1 __deprecated; /* vendor registers required */
43 43
44 u8 num_eps; /* number of endpoints _with_ ep0 */ 44 u8 num_eps; /* number of endpoints _with_ ep0 */
45 u8 dma_channels; /* number of dma channels */ 45 u8 dma_channels __deprecated; /* number of dma channels */
46 u8 dyn_fifo_size; /* dynamic size in bytes */ 46 u8 dyn_fifo_size; /* dynamic size in bytes */
47 u8 vendor_ctrl; /* vendor control reg width */ 47 u8 vendor_ctrl __deprecated; /* vendor control reg width */
48 u8 vendor_stat; /* vendor status reg width */ 48 u8 vendor_stat __deprecated; /* vendor status reg width */
49 u8 dma_req_chan; /* bitmask for required dma channels */ 49 u8 dma_req_chan __deprecated; /* bitmask for required dma channels */
50 u8 ram_bits; /* ram address size */ 50 u8 ram_bits; /* ram address size */
51 51
52 struct musb_hdrc_eps_bits *eps_bits; 52 struct musb_hdrc_eps_bits *eps_bits __deprecated;
53#ifdef CONFIG_BLACKFIN 53#ifdef CONFIG_BLACKFIN
54 /* A GPIO controlling VRSEL in Blackfin */ 54 /* A GPIO controlling VRSEL in Blackfin */
55 unsigned int gpio_vrsel; 55 unsigned int gpio_vrsel;
@@ -76,6 +76,9 @@ struct musb_hdrc_platform_data {
76 /* (HOST or OTG) msec/2 after VBUS on till power good */ 76 /* (HOST or OTG) msec/2 after VBUS on till power good */
77 u8 potpgt; 77 u8 potpgt;
78 78
79 /* (HOST or OTG) program PHY for external Vbus */
80 unsigned extvbus:1;
81
79 /* Power the device on or off */ 82 /* Power the device on or off */
80 int (*set_power)(int state); 83 int (*set_power)(int state);
81 84
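The __deprecated annotations above are the usual compile-time migration aid: the struct layout stays binary-compatible while every remaining in-tree reader gets flagged for conversion. A minimal sketch of the effect (my_probe is a hypothetical function, not part of this patch):

	/* Any access to a __deprecated member trips -Wdeprecated-declarations. */
	static int my_probe(const struct musb_hdrc_config *cfg)
	{
		return cfg->ram_bits;		/* fine: not deprecated */
		/* return cfg->dma_channels;	 * would warn:
						 * 'dma_channels' is deprecated */
	}

Once the last user is converted, the marked fields can be deleted outright.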
diff --git a/include/linux/usb/otg.h b/include/linux/usb/otg.h
index fef0972c8146..f8302d036a76 100644
--- a/include/linux/usb/otg.h
+++ b/include/linux/usb/otg.h
@@ -9,6 +9,8 @@
9#ifndef __LINUX_USB_OTG_H 9#ifndef __LINUX_USB_OTG_H
10#define __LINUX_USB_OTG_H 10#define __LINUX_USB_OTG_H
11 11
12#include <linux/notifier.h>
13
12/* OTG defines lots of enumeration states before device reset */ 14/* OTG defines lots of enumeration states before device reset */
13enum usb_otg_state { 15enum usb_otg_state {
14 OTG_STATE_UNDEFINED = 0, 16 OTG_STATE_UNDEFINED = 0,
@@ -33,6 +35,14 @@ enum usb_otg_state {
33 OTG_STATE_A_VBUS_ERR, 35 OTG_STATE_A_VBUS_ERR,
34}; 36};
35 37
38enum usb_xceiv_events {
39 USB_EVENT_NONE, /* no events or cable disconnected */
40 USB_EVENT_VBUS, /* vbus valid event */
41 USB_EVENT_ID, /* id was grounded */
42 USB_EVENT_CHARGER, /* usb dedicated charger */
43 USB_EVENT_ENUMERATED, /* gadget driver enumerated */
44};
45
36#define USB_OTG_PULLUP_ID (1 << 0) 46#define USB_OTG_PULLUP_ID (1 << 0)
37#define USB_OTG_PULLDOWN_DP (1 << 1) 47#define USB_OTG_PULLDOWN_DP (1 << 1)
38#define USB_OTG_PULLDOWN_DM (1 << 2) 48#define USB_OTG_PULLDOWN_DM (1 << 2)
@@ -70,6 +80,9 @@ struct otg_transceiver {
70 struct otg_io_access_ops *io_ops; 80 struct otg_io_access_ops *io_ops;
71 void __iomem *io_priv; 81 void __iomem *io_priv;
72 82
83 /* for notification of usb_xceiv_events */
84 struct blocking_notifier_head notifier;
85
73 /* to pass extra port status to the root hub */ 86 /* to pass extra port status to the root hub */
74 u16 port_status; 87 u16 port_status;
75 u16 port_change; 88 u16 port_change;
@@ -213,6 +226,18 @@ otg_start_srp(struct otg_transceiver *otg)
213 return otg->start_srp(otg); 226 return otg->start_srp(otg);
214} 227}
215 228
229/* notifiers */
230static inline int
231otg_register_notifier(struct otg_transceiver *otg, struct notifier_block *nb)
232{
233 return blocking_notifier_chain_register(&otg->notifier, nb);
234}
235
236static inline void
237otg_unregister_notifier(struct otg_transceiver *otg, struct notifier_block *nb)
238{
239 blocking_notifier_chain_unregister(&otg->notifier, nb);
240}
216 241
217/* for OTG controller drivers (and maybe other stuff) */ 242/* for OTG controller drivers (and maybe other stuff) */
218extern int usb_bus_start_enum(struct usb_bus *bus, unsigned port_num); 243extern int usb_bus_start_enum(struct usb_bus *bus, unsigned port_num);
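With a notifier head embedded in struct otg_transceiver, interested drivers (battery chargers are the obvious consumer) can subscribe to VBUS/ID/charger events without a private hook into the transceiver driver. A hedged sketch of a consumer, with the my_charger_* names invented for illustration:

	static int my_charger_notify(struct notifier_block *nb,
				     unsigned long event, void *unused)
	{
		switch (event) {
		case USB_EVENT_VBUS:	/* enumerated: may draw up to 500 mA */
		case USB_EVENT_CHARGER:	/* dedicated charger: draw more */
			/* start charging */
			break;
		case USB_EVENT_NONE:	/* cable gone */
			/* stop charging */
			break;
		}
		return NOTIFY_OK;
	}

	static struct notifier_block my_charger_nb = {
		.notifier_call = my_charger_notify,
	};

	/* at probe time, assuming xceiv = otg_get_transceiver(): */
	otg_register_notifier(xceiv, &my_charger_nb);

The transceiver driver is expected to initialize the head with BLOCKING_INIT_NOTIFIER_HEAD() and fire the chain itself, e.g. blocking_notifier_call_chain(&otg->notifier, USB_EVENT_VBUS, NULL).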
diff --git a/include/linux/usb/quirks.h b/include/linux/usb/quirks.h
index 2526f3bbd273..0a555dd131fc 100644
--- a/include/linux/usb/quirks.h
+++ b/include/linux/usb/quirks.h
@@ -19,4 +19,7 @@
19/* device can't handle its Configuration or Interface strings */ 19/* device can't handle its Configuration or Interface strings */
20#define USB_QUIRK_CONFIG_INTF_STRINGS 0x00000008 20#define USB_QUIRK_CONFIG_INTF_STRINGS 0x00000008
21 21
 22/* device will morph if reset, don't use reset for handling errors */
23#define USB_QUIRK_RESET_MORPHS 0x00000010
24
22#endif /* __LINUX_USB_QUIRKS_H */ 25#endif /* __LINUX_USB_QUIRKS_H */
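USB_QUIRK_RESET_MORPHS tells usbcore that a port reset makes the device re-enumerate as a different device, so reset-based error recovery must be avoided for it. A sketch of the kind of guard a caller might add (illustrative, not lifted from usbcore):

	if (udev->quirks & USB_QUIRK_RESET_MORPHS)
		return -EINVAL;	/* a reset would hand us back a different device */
	ret = usb_reset_device(udev);

udev->quirks and usb_reset_device() both exist today; whether the check lives in each caller or inside usb_reset_device() itself is a design choice for the follow-up patches.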
diff --git a/include/linux/usb/serial.h b/include/linux/usb/serial.h
index 1819396ed501..0a458b861933 100644
--- a/include/linux/usb/serial.h
+++ b/include/linux/usb/serial.h
@@ -351,14 +351,11 @@ static inline void usb_serial_debug_data(int debug,
351 351
352/* Use our own dbg macro */ 352/* Use our own dbg macro */
353#undef dbg 353#undef dbg
354#define dbg(format, arg...) \ 354#define dbg(format, arg...) \
355 do { \ 355do { \
356 if (debug) \ 356 if (debug) \
357 printk(KERN_DEBUG "%s: " format "\n" , __FILE__ , \ 357 printk(KERN_DEBUG "%s: " format "\n", __FILE__, ##arg); \
358 ## arg); \ 358} while (0)
359 } while (0)
360
361
362 359
363#endif /* __LINUX_USB_SERIAL_H */ 360#endif /* __LINUX_USB_SERIAL_H */
364 361
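The rewritten dbg is the canonical do { } while (0) form, so it still behaves as a single statement in an unbraced if/else, and the tightened ## placement keeps an empty argument list legal:

	if (retval < 0)
		dbg("%s - write failed, retval %d", __func__, retval);
	else
		dbg("write complete");	/* no varargs: ##arg swallows the comma */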
diff --git a/include/linux/usb/vstusb.h b/include/linux/usb/vstusb.h
deleted file mode 100644
index 1cfac67191ff..000000000000
--- a/include/linux/usb/vstusb.h
+++ /dev/null
@@ -1,71 +0,0 @@
1/*****************************************************************************
2 * File: drivers/usb/misc/vstusb.h
3 *
4 * Purpose: Support for the bulk USB Vernier Spectrophotometers
5 *
6 * Author: EQware Engineering, Inc.
7 * Oregon City, OR, USA 97045
8 *
9 * Copyright: 2007, 2008
10 * Vernier Software & Technology
11 * Beaverton, OR, USA 97005
12 *
13 * Web: www.vernier.com
14 *
15 * This program is free software; you can redistribute it and/or modify
16 * it under the terms of the GNU General Public License version 2 as
17 * published by the Free Software Foundation.
18 *
19 *****************************************************************************/
20/*****************************************************************************
21 *
22 * The vstusb module is a standard usb 'client' driver running on top of the
23 * standard usb host controller stack.
24 *
25 * In general, vstusb supports standard bulk usb pipes. It supports multiple
26 * devices and multiple pipes per device.
27 *
28 * The vstusb driver supports two interfaces:
29 * 1 - ioctl SEND_PIPE/RECV_PIPE - a general bulk write/read msg
30 * interface to any pipe with timeout support;
31 * 2 - standard read/write with ioctl config - offers standard read/write
32 * interface with ioctl configured pipes and timeouts.
33 *
 34 * Both interfaces can be signalled from another process and will abort their
 35 * i/o operations.
36 *
37 * A timeout of 0 means NO timeout. The user can still terminate the read via
38 * signal.
39 *
40 * If using multiple threads with this driver, the user should ensure that
41 * any reads, writes, or ioctls are complete before closing the device.
42 * Changing read/write timeouts or pipes takes effect on next read/write.
43 *
44 *****************************************************************************/
45
46struct vstusb_args {
47 union {
48 /* this struct is used for IOCTL_VSTUSB_SEND_PIPE, *
49 * IOCTL_VSTUSB_RECV_PIPE, and read()/write() fops */
50 struct {
51 void __user *buffer;
52 size_t count;
53 unsigned int timeout_ms;
54 int pipe;
55 };
56
57 /* this one is used for IOCTL_VSTUSB_CONFIG_RW */
58 struct {
59 int rd_pipe;
60 int rd_timeout_ms;
61 int wr_pipe;
62 int wr_timeout_ms;
63 };
64 };
65};
66
67#define VST_IOC_MAGIC 'L'
68#define VST_IOC_FIRST 0x20
69#define IOCTL_VSTUSB_SEND_PIPE _IO(VST_IOC_MAGIC, VST_IOC_FIRST)
70#define IOCTL_VSTUSB_RECV_PIPE _IO(VST_IOC_MAGIC, VST_IOC_FIRST + 1)
71#define IOCTL_VSTUSB_CONFIG_RW _IO(VST_IOC_MAGIC, VST_IOC_FIRST + 2)
diff --git a/include/linux/vga_switcheroo.h b/include/linux/vga_switcheroo.h
new file mode 100644
index 000000000000..ae9ab13b963d
--- /dev/null
+++ b/include/linux/vga_switcheroo.h
@@ -0,0 +1,57 @@
1/*
2 * Copyright (c) 2010 Red Hat Inc.
3 * Author : Dave Airlie <airlied@redhat.com>
4 *
5 * Licensed under GPLv2
6 *
7 * vga_switcheroo.h - Support for laptop with dual GPU using one set of outputs
8 */
9
10#include <linux/fb.h>
11
12enum vga_switcheroo_state {
13 VGA_SWITCHEROO_OFF,
14 VGA_SWITCHEROO_ON,
15};
16
17enum vga_switcheroo_client_id {
18 VGA_SWITCHEROO_IGD,
19 VGA_SWITCHEROO_DIS,
20 VGA_SWITCHEROO_MAX_CLIENTS,
21};
22
23struct vga_switcheroo_handler {
24 int (*switchto)(enum vga_switcheroo_client_id id);
25 int (*power_state)(enum vga_switcheroo_client_id id,
26 enum vga_switcheroo_state state);
27 int (*init)(void);
28 int (*get_client_id)(struct pci_dev *pdev);
29};
30
31
32#if defined(CONFIG_VGA_SWITCHEROO)
33void vga_switcheroo_unregister_client(struct pci_dev *dev);
34int vga_switcheroo_register_client(struct pci_dev *dev,
35 void (*set_gpu_state)(struct pci_dev *dev, enum vga_switcheroo_state),
36 bool (*can_switch)(struct pci_dev *dev));
37
38void vga_switcheroo_client_fb_set(struct pci_dev *dev,
39 struct fb_info *info);
40
41int vga_switcheroo_register_handler(struct vga_switcheroo_handler *handler);
42void vga_switcheroo_unregister_handler(void);
43
44int vga_switcheroo_process_delayed_switch(void);
45
46#else
47
48static inline void vga_switcheroo_unregister_client(struct pci_dev *dev) {}
49static inline int vga_switcheroo_register_client(struct pci_dev *dev,
50 void (*set_gpu_state)(struct pci_dev *dev, enum vga_switcheroo_state),
51 bool (*can_switch)(struct pci_dev *dev)) { return 0; }
52static inline void vga_switcheroo_client_fb_set(struct pci_dev *dev, struct fb_info *info) {}
53static inline int vga_switcheroo_register_handler(struct vga_switcheroo_handler *handler) { return 0; }
54static inline void vga_switcheroo_unregister_handler(void) {}
55static inline int vga_switcheroo_process_delayed_switch(void) { return 0; }
56
57#endif
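A GPU driver opts in by registering itself as a client with a power callback and a can_switch predicate; the handler side (typically ACPI platform code) registers the actual mux operations. A hedged sketch of the client half, with the mydrv_* names invented:

	static void mydrv_set_gpu_state(struct pci_dev *pdev,
					enum vga_switcheroo_state state)
	{
		if (state == VGA_SWITCHEROO_ON)
			mydrv_resume_gpu(pdev);		/* hypothetical helpers */
		else
			mydrv_suspend_gpu(pdev);
	}

	static bool mydrv_can_switch(struct pci_dev *pdev)
	{
		/* e.g. refuse while the device node is open */
		return true;
	}

	/* during probe: */
	vga_switcheroo_register_client(pdev, mydrv_set_gpu_state,
				       mydrv_can_switch);
	/* and once the fbcon framebuffer exists: */
	vga_switcheroo_client_fb_set(pdev, info);

With CONFIG_VGA_SWITCHEROO disabled, the static-inline stubs make all of this compile away, so callers need no #ifdefs.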
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index ee03bba9c5df..117f0dd8ad03 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -78,22 +78,22 @@ DECLARE_PER_CPU(struct vm_event_state, vm_event_states);
78 78
79static inline void __count_vm_event(enum vm_event_item item) 79static inline void __count_vm_event(enum vm_event_item item)
80{ 80{
81 __this_cpu_inc(per_cpu_var(vm_event_states).event[item]); 81 __this_cpu_inc(vm_event_states.event[item]);
82} 82}
83 83
84static inline void count_vm_event(enum vm_event_item item) 84static inline void count_vm_event(enum vm_event_item item)
85{ 85{
86 this_cpu_inc(per_cpu_var(vm_event_states).event[item]); 86 this_cpu_inc(vm_event_states.event[item]);
87} 87}
88 88
89static inline void __count_vm_events(enum vm_event_item item, long delta) 89static inline void __count_vm_events(enum vm_event_item item, long delta)
90{ 90{
91 __this_cpu_add(per_cpu_var(vm_event_states).event[item], delta); 91 __this_cpu_add(vm_event_states.event[item], delta);
92} 92}
93 93
94static inline void count_vm_events(enum vm_event_item item, long delta) 94static inline void count_vm_events(enum vm_event_item item, long delta)
95{ 95{
96 this_cpu_add(per_cpu_var(vm_event_states).event[item], delta); 96 this_cpu_add(vm_event_states.event[item], delta);
97} 97}
98 98
99extern void all_vm_events(unsigned long *); 99extern void all_vm_events(unsigned long *);
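This is fallout from the percpu API cleanup: per_cpu_var() used to paste a per_cpu__ prefix onto the symbol name, and with that prefixing gone the wrapper is dropped at every use site. The generated code is unchanged; callers keep naming the counter directly:

	count_vm_event(PGFAULT);
	/* now expands to this_cpu_inc(vm_event_states.event[PGFAULT]) */

PGFAULT is just one enum vm_event_item value picked for illustration.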
diff --git a/include/linux/vt.h b/include/linux/vt.h
index d5dd0bc408fd..778b7b2a47d4 100644
--- a/include/linux/vt.h
+++ b/include/linux/vt.h
@@ -27,7 +27,7 @@ struct vt_mode {
27#define VT_SETMODE 0x5602 /* set mode of active vt */ 27#define VT_SETMODE 0x5602 /* set mode of active vt */
28#define VT_AUTO 0x00 /* auto vt switching */ 28#define VT_AUTO 0x00 /* auto vt switching */
29#define VT_PROCESS 0x01 /* process controls switching */ 29#define VT_PROCESS 0x01 /* process controls switching */
30#define VT_ACKACQ 0x02 /* acknowledge switch */ 30#define VT_PROCESS_AUTO 0x02 /* process is notified of switching */
31 31
32struct vt_stat { 32struct vt_stat {
33 unsigned short v_active; /* active vt */ 33 unsigned short v_active; /* active vt */
@@ -38,6 +38,7 @@ struct vt_stat {
38#define VT_SENDSIG 0x5604 /* signal to send to bitmask of vts */ 38#define VT_SENDSIG 0x5604 /* signal to send to bitmask of vts */
39 39
40#define VT_RELDISP 0x5605 /* release display */ 40#define VT_RELDISP 0x5605 /* release display */
41#define VT_ACKACQ 0x02 /* acknowledge switch */
41 42
42#define VT_ACTIVATE 0x5606 /* make vt active */ 43#define VT_ACTIVATE 0x5606 /* make vt active */
43#define VT_WAITACTIVE 0x5607 /* wait for vt active */ 44#define VT_WAITACTIVE 0x5607 /* wait for vt active */
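VT_ACKACQ keeps its value (0x02) but moves next to VT_RELDISP, the ioctl it is actually an argument to; the freed slot in the VT_SETMODE block gains the new VT_PROCESS_AUTO mode. A sketch of the two distinct uses from userspace (illustrative):

	struct vt_mode vtm = { .mode = VT_PROCESS_AUTO,
			       .relsig = SIGUSR1, .acqsig = SIGUSR2 };
	ioctl(vtfd, VT_SETMODE, &vtm);		/* process-notified switching */
	...
	ioctl(vtfd, VT_RELDISP, VT_ACKACQ);	/* acknowledge an acquire */

The two constants are numerically equal but live in different namespaces (a vt_mode.mode value versus a VT_RELDISP argument), which is presumably why the value could be recycled safely.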
diff --git a/include/rdma/ib_pack.h b/include/rdma/ib_pack.h
index d7fc45c4eba9..cbb50f4da3dd 100644
--- a/include/rdma/ib_pack.h
+++ b/include/rdma/ib_pack.h
@@ -232,6 +232,7 @@ void ib_unpack(const struct ib_field *desc,
232 232
233void ib_ud_header_init(int payload_bytes, 233void ib_ud_header_init(int payload_bytes,
234 int grh_present, 234 int grh_present,
235 int immediate_present,
235 struct ib_ud_header *header); 236 struct ib_ud_header *header);
236 237
237int ib_ud_header_pack(struct ib_ud_header *header, 238int ib_ud_header_pack(struct ib_ud_header *header,
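Every caller of ib_ud_header_init() must grow a third argument saying whether an immediate-data dword follows the UD header. A sketch of an updated call site, assuming a send-with-immediate work request (the qp->ud_header and wr names are illustrative):

	ib_ud_header_init(payload_bytes, grh_present,
			  wr->opcode == IB_WR_SEND_WITH_IMM,
			  &qp->ud_header);

IB_WR_SEND_WITH_IMM is the existing opcode; the exact predicate depends on the driver.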
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 09509edb1c5f..a585e0f92bc3 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -984,9 +984,9 @@ struct ib_device {
984 struct list_head event_handler_list; 984 struct list_head event_handler_list;
985 spinlock_t event_handler_lock; 985 spinlock_t event_handler_lock;
986 986
987 spinlock_t client_data_lock;
987 struct list_head core_list; 988 struct list_head core_list;
988 struct list_head client_data_list; 989 struct list_head client_data_list;
989 spinlock_t client_data_lock;
990 990
991 struct ib_cache cache; 991 struct ib_cache cache;
992 int *pkey_tbl_len; 992 int *pkey_tbl_len;
@@ -1144,8 +1144,8 @@ struct ib_device {
1144 IB_DEV_UNREGISTERED 1144 IB_DEV_UNREGISTERED
1145 } reg_state; 1145 } reg_state;
1146 1146
1147 u64 uverbs_cmd_mask;
1148 int uverbs_abi_ver; 1147 int uverbs_abi_ver;
1148 u64 uverbs_cmd_mask;
1149 1149
1150 char node_desc[64]; 1150 char node_desc[64];
1151 __be64 node_guid; 1151 __be64 node_guid;
diff --git a/include/rdma/rdma_cm.h b/include/rdma/rdma_cm.h
index c6b2962315b3..4fae90304648 100644
--- a/include/rdma/rdma_cm.h
+++ b/include/rdma/rdma_cm.h
@@ -67,7 +67,6 @@ enum rdma_port_space {
67 RDMA_PS_IPOIB = 0x0002, 67 RDMA_PS_IPOIB = 0x0002,
68 RDMA_PS_TCP = 0x0106, 68 RDMA_PS_TCP = 0x0106,
69 RDMA_PS_UDP = 0x0111, 69 RDMA_PS_UDP = 0x0111,
70 RDMA_PS_SCTP = 0x0183
71}; 70};
72 71
73struct rdma_addr { 72struct rdma_addr {
diff --git a/include/trace/events/ext4.h b/include/trace/events/ext4.h
index d0b6cd3afb2f..2aa6aa3e8f61 100644
--- a/include/trace/events/ext4.h
+++ b/include/trace/events/ext4.h
@@ -874,6 +874,107 @@ TRACE_EVENT(ext4_forget,
874 __entry->mode, __entry->is_metadata, __entry->block) 874 __entry->mode, __entry->is_metadata, __entry->block)
875); 875);
876 876
877TRACE_EVENT(ext4_da_update_reserve_space,
878 TP_PROTO(struct inode *inode, int used_blocks),
879
880 TP_ARGS(inode, used_blocks),
881
882 TP_STRUCT__entry(
883 __field( dev_t, dev )
884 __field( ino_t, ino )
885 __field( umode_t, mode )
886 __field( __u64, i_blocks )
887 __field( int, used_blocks )
888 __field( int, reserved_data_blocks )
889 __field( int, reserved_meta_blocks )
890 __field( int, allocated_meta_blocks )
891 ),
892
893 TP_fast_assign(
894 __entry->dev = inode->i_sb->s_dev;
895 __entry->ino = inode->i_ino;
896 __entry->mode = inode->i_mode;
897 __entry->i_blocks = inode->i_blocks;
898 __entry->used_blocks = used_blocks;
899 __entry->reserved_data_blocks = EXT4_I(inode)->i_reserved_data_blocks;
900 __entry->reserved_meta_blocks = EXT4_I(inode)->i_reserved_meta_blocks;
901 __entry->allocated_meta_blocks = EXT4_I(inode)->i_allocated_meta_blocks;
902 ),
903
904 TP_printk("dev %s ino %lu mode 0%o i_blocks %llu used_blocks %d reserved_data_blocks %d reserved_meta_blocks %d allocated_meta_blocks %d",
905 jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino,
906 __entry->mode, (unsigned long long) __entry->i_blocks,
907 __entry->used_blocks, __entry->reserved_data_blocks,
908 __entry->reserved_meta_blocks, __entry->allocated_meta_blocks)
909);
910
911TRACE_EVENT(ext4_da_reserve_space,
912 TP_PROTO(struct inode *inode, int md_needed),
913
914 TP_ARGS(inode, md_needed),
915
916 TP_STRUCT__entry(
917 __field( dev_t, dev )
918 __field( ino_t, ino )
919 __field( umode_t, mode )
920 __field( __u64, i_blocks )
921 __field( int, md_needed )
922 __field( int, reserved_data_blocks )
923 __field( int, reserved_meta_blocks )
924 ),
925
926 TP_fast_assign(
927 __entry->dev = inode->i_sb->s_dev;
928 __entry->ino = inode->i_ino;
929 __entry->mode = inode->i_mode;
930 __entry->i_blocks = inode->i_blocks;
931 __entry->md_needed = md_needed;
932 __entry->reserved_data_blocks = EXT4_I(inode)->i_reserved_data_blocks;
933 __entry->reserved_meta_blocks = EXT4_I(inode)->i_reserved_meta_blocks;
934 ),
935
936 TP_printk("dev %s ino %lu mode 0%o i_blocks %llu md_needed %d reserved_data_blocks %d reserved_meta_blocks %d",
937 jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino,
938 __entry->mode, (unsigned long long) __entry->i_blocks,
939 __entry->md_needed, __entry->reserved_data_blocks,
940 __entry->reserved_meta_blocks)
941);
942
943TRACE_EVENT(ext4_da_release_space,
944 TP_PROTO(struct inode *inode, int freed_blocks),
945
946 TP_ARGS(inode, freed_blocks),
947
948 TP_STRUCT__entry(
949 __field( dev_t, dev )
950 __field( ino_t, ino )
951 __field( umode_t, mode )
952 __field( __u64, i_blocks )
953 __field( int, freed_blocks )
954 __field( int, reserved_data_blocks )
955 __field( int, reserved_meta_blocks )
956 __field( int, allocated_meta_blocks )
957 ),
958
959 TP_fast_assign(
960 __entry->dev = inode->i_sb->s_dev;
961 __entry->ino = inode->i_ino;
962 __entry->mode = inode->i_mode;
963 __entry->i_blocks = inode->i_blocks;
964 __entry->freed_blocks = freed_blocks;
965 __entry->reserved_data_blocks = EXT4_I(inode)->i_reserved_data_blocks;
966 __entry->reserved_meta_blocks = EXT4_I(inode)->i_reserved_meta_blocks;
967 __entry->allocated_meta_blocks = EXT4_I(inode)->i_allocated_meta_blocks;
968 ),
969
970 TP_printk("dev %s ino %lu mode 0%o i_blocks %llu freed_blocks %d reserved_data_blocks %d reserved_meta_blocks %d allocated_meta_blocks %d",
971 jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino,
972 __entry->mode, (unsigned long long) __entry->i_blocks,
973 __entry->freed_blocks, __entry->reserved_data_blocks,
974 __entry->reserved_meta_blocks, __entry->allocated_meta_blocks)
975);
976
977
877#endif /* _TRACE_EXT4_H */ 978#endif /* _TRACE_EXT4_H */
878 979
879/* This part must be outside protection */ 980/* This part must be outside protection */
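The three new events bracket delalloc accounting: reserve when a write is staged, update when blocks are actually allocated, release when reservations are handed back. Wiring one up follows the usual tracepoint pattern; the call site in fs/ext4 would look roughly like:

	trace_ext4_da_reserve_space(inode, md_needed);

and the event then appears under /sys/kernel/debug/tracing/events/ext4/, e.g.

	/* from a shell, assuming debugfs is mounted at the usual place: */
	echo 1 > /sys/kernel/debug/tracing/events/ext4/ext4_da_reserve_space/enable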
diff --git a/include/trace/events/jbd2.h b/include/trace/events/jbd2.h
index 96b370a050de..bf16545cc977 100644
--- a/include/trace/events/jbd2.h
+++ b/include/trace/events/jbd2.h
@@ -199,6 +199,34 @@ TRACE_EVENT(jbd2_checkpoint_stats,
199 __entry->forced_to_close, __entry->written, __entry->dropped) 199 __entry->forced_to_close, __entry->written, __entry->dropped)
200); 200);
201 201
202TRACE_EVENT(jbd2_cleanup_journal_tail,
203
204 TP_PROTO(journal_t *journal, tid_t first_tid,
205 unsigned long block_nr, unsigned long freed),
206
207 TP_ARGS(journal, first_tid, block_nr, freed),
208
209 TP_STRUCT__entry(
210 __field( dev_t, dev )
211 __field( tid_t, tail_sequence )
212 __field( tid_t, first_tid )
213 __field(unsigned long, block_nr )
214 __field(unsigned long, freed )
215 ),
216
217 TP_fast_assign(
218 __entry->dev = journal->j_fs_dev->bd_dev;
219 __entry->tail_sequence = journal->j_tail_sequence;
220 __entry->first_tid = first_tid;
221 __entry->block_nr = block_nr;
222 __entry->freed = freed;
223 ),
224
225 TP_printk("dev %s from %u to %u offset %lu freed %lu",
226 jbd2_dev_to_name(__entry->dev), __entry->tail_sequence,
227 __entry->first_tid, __entry->block_nr, __entry->freed)
228);
229
202#endif /* _TRACE_JBD2_H */ 230#endif /* _TRACE_JBD2_H */
203 231
204/* This part must be outside protection */ 232/* This part must be outside protection */
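Same pattern for the journal side: the new event would be emitted from jbd2's tail-cleanup path, roughly

	trace_jbd2_cleanup_journal_tail(journal, first_tid, blocknr, freed);

with freed counting how many journal blocks the tail advance reclaimed, per the TP_printk above.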
diff --git a/include/trace/events/kvm.h b/include/trace/events/kvm.h
index dbe108455275..b17d49dfc3ef 100644
--- a/include/trace/events/kvm.h
+++ b/include/trace/events/kvm.h
@@ -145,6 +145,47 @@ TRACE_EVENT(kvm_mmio,
145 __entry->len, __entry->gpa, __entry->val) 145 __entry->len, __entry->gpa, __entry->val)
146); 146);
147 147
148#define kvm_fpu_load_symbol \
149 {0, "unload"}, \
150 {1, "load"}
151
152TRACE_EVENT(kvm_fpu,
153 TP_PROTO(int load),
154 TP_ARGS(load),
155
156 TP_STRUCT__entry(
157 __field( u32, load )
158 ),
159
160 TP_fast_assign(
161 __entry->load = load;
162 ),
163
164 TP_printk("%s", __print_symbolic(__entry->load, kvm_fpu_load_symbol))
165);
166
167TRACE_EVENT(kvm_age_page,
168 TP_PROTO(ulong hva, struct kvm_memory_slot *slot, int ref),
169 TP_ARGS(hva, slot, ref),
170
171 TP_STRUCT__entry(
172 __field( u64, hva )
173 __field( u64, gfn )
174 __field( u8, referenced )
175 ),
176
177 TP_fast_assign(
178 __entry->hva = hva;
179 __entry->gfn =
180 slot->base_gfn + ((hva - slot->userspace_addr) >> PAGE_SHIFT);
181 __entry->referenced = ref;
182 ),
183
184 TP_printk("hva %llx gfn %llx %s",
185 __entry->hva, __entry->gfn,
186 __entry->referenced ? "YOUNG" : "OLD")
187);
188
148#endif /* _TRACE_KVM_MAIN_H */ 189#endif /* _TRACE_KVM_MAIN_H */
149 190
150/* This part must be outside protection */ 191/* This part must be outside protection */
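kvm_fpu uses __print_symbolic() so the trace output reads "load"/"unload" instead of 1/0, and kvm_age_page recomputes the gfn from the hva so each event is self-describing. The emit sites are one-liners in the arch code, roughly:

	trace_kvm_fpu(1);	/* before handing the FPU to the guest */
	...
	trace_kvm_fpu(0);	/* after taking it back */

	/* and from the MMU-notifier age path: */
	trace_kvm_age_page(hva, memslot, young);

Exact placement is up to each architecture; the snippets only illustrate the argument order.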
diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
index 614241b5200c..2b108538d0d9 100644
--- a/init/do_mounts_initrd.c
+++ b/init/do_mounts_initrd.c
@@ -30,11 +30,7 @@ static int __init do_linuxrc(void * shell)
30 extern char * envp_init[]; 30 extern char * envp_init[];
31 31
32 sys_close(old_fd);sys_close(root_fd); 32 sys_close(old_fd);sys_close(root_fd);
33 sys_close(0);sys_close(1);sys_close(2);
34 sys_setsid(); 33 sys_setsid();
35 (void) sys_open("/dev/console",O_RDWR,0);
36 (void) sys_dup(0);
37 (void) sys_dup(0);
38 return kernel_execve(shell, argv, envp_init); 34 return kernel_execve(shell, argv, envp_init);
39} 35}
40 36
diff --git a/init/main.c b/init/main.c
index c75dcd6eef09..40aaa020cd68 100644
--- a/init/main.c
+++ b/init/main.c
@@ -149,6 +149,20 @@ static int __init nosmp(char *str)
149 149
150early_param("nosmp", nosmp); 150early_param("nosmp", nosmp);
151 151
 152/* this is the hard limit */
153static int __init nrcpus(char *str)
154{
155 int nr_cpus;
156
157 get_option(&str, &nr_cpus);
158 if (nr_cpus > 0 && nr_cpus < nr_cpu_ids)
159 nr_cpu_ids = nr_cpus;
160
161 return 0;
162}
163
164early_param("nr_cpus", nrcpus);
165
152static int __init maxcpus(char *str) 166static int __init maxcpus(char *str)
153{ 167{
154 get_option(&str, &setup_max_cpus); 168 get_option(&str, &setup_max_cpus);
@@ -586,6 +600,7 @@ asmlinkage void __init start_kernel(void)
586 local_irq_disable(); 600 local_irq_disable();
587 } 601 }
588 rcu_init(); 602 rcu_init();
603 radix_tree_init();
589 /* init some links before init_ISA_irqs() */ 604 /* init some links before init_ISA_irqs() */
590 early_irq_init(); 605 early_irq_init();
591 init_IRQ(); 606 init_IRQ();
@@ -659,7 +674,6 @@ asmlinkage void __init start_kernel(void)
659 proc_caches_init(); 674 proc_caches_init();
660 buffer_init(); 675 buffer_init();
661 key_init(); 676 key_init();
662 radix_tree_init();
663 security_init(); 677 security_init();
664 vfs_caches_init(totalram_pages); 678 vfs_caches_init(totalram_pages);
665 signals_init(); 679 signals_init();
@@ -808,11 +822,6 @@ static noinline int init_post(void)
808 system_state = SYSTEM_RUNNING; 822 system_state = SYSTEM_RUNNING;
809 numa_default_policy(); 823 numa_default_policy();
810 824
811 if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
812 printk(KERN_WARNING "Warning: unable to open an initial console.\n");
813
814 (void) sys_dup(0);
815 (void) sys_dup(0);
816 825
817 current->signal->flags |= SIGNAL_UNKILLABLE; 826 current->signal->flags |= SIGNAL_UNKILLABLE;
818 827
@@ -875,6 +884,12 @@ static int __init kernel_init(void * unused)
875 884
876 do_basic_setup(); 885 do_basic_setup();
877 886
 887 /* Open /dev/console on the rootfs; this should never fail */
888 if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
889 printk(KERN_WARNING "Warning: unable to open an initial console.\n");
890
891 (void) sys_dup(0);
892 (void) sys_dup(0);
878 /* 893 /*
879 * check if there is an early userspace init. If yes, let it do all 894 * check if there is an early userspace init. If yes, let it do all
880 * the work 895 * the work
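Three independent boot-order fixes land in main.c: radix_tree_init() moves before early_irq_init() (the sparse-IRQ descriptor tree needs it working), the initial console open moves out of init_post() into kernel_init() after do_basic_setup() (so the rootfs is populated and a console driver has had a chance to bind before /dev/console is opened), and the new nr_cpus= parameter caps nr_cpu_ids itself rather than just the number of CPUs brought online. The do_linuxrc hunk above relies on the same console change: linuxrc now simply inherits fds 0-2 instead of reopening the console itself. The nr_cpus/maxcpus distinction matters for memory sizing, e.g. on the kernel command line:

	nr_cpus=4	# possible-CPU mask (and per-CPU allocations) sized for 4
	maxcpus=2	# of those, bring up only 2 at boot; more can be onlined later

nr_cpus is a hard limit, maxcpus is not.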
diff --git a/ipc/mqueue.c b/ipc/mqueue.c
index c79bd57353e7..b6cb06451f4b 100644
--- a/ipc/mqueue.c
+++ b/ipc/mqueue.c
@@ -134,7 +134,6 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
134 init_waitqueue_head(&info->wait_q); 134 init_waitqueue_head(&info->wait_q);
135 INIT_LIST_HEAD(&info->e_wait_q[0].list); 135 INIT_LIST_HEAD(&info->e_wait_q[0].list);
136 INIT_LIST_HEAD(&info->e_wait_q[1].list); 136 INIT_LIST_HEAD(&info->e_wait_q[1].list);
137 info->messages = NULL;
138 info->notify_owner = NULL; 137 info->notify_owner = NULL;
139 info->qsize = 0; 138 info->qsize = 0;
140 info->user = NULL; /* set when all is ok */ 139 info->user = NULL; /* set when all is ok */
@@ -146,6 +145,10 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
146 info->attr.mq_msgsize = attr->mq_msgsize; 145 info->attr.mq_msgsize = attr->mq_msgsize;
147 } 146 }
148 mq_msg_tblsz = info->attr.mq_maxmsg * sizeof(struct msg_msg *); 147 mq_msg_tblsz = info->attr.mq_maxmsg * sizeof(struct msg_msg *);
148 info->messages = kmalloc(mq_msg_tblsz, GFP_KERNEL);
149 if (!info->messages)
150 goto out_inode;
151
149 mq_bytes = (mq_msg_tblsz + 152 mq_bytes = (mq_msg_tblsz +
150 (info->attr.mq_maxmsg * info->attr.mq_msgsize)); 153 (info->attr.mq_maxmsg * info->attr.mq_msgsize));
151 154
@@ -154,18 +157,12 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
154 u->mq_bytes + mq_bytes > 157 u->mq_bytes + mq_bytes >
155 p->signal->rlim[RLIMIT_MSGQUEUE].rlim_cur) { 158 p->signal->rlim[RLIMIT_MSGQUEUE].rlim_cur) {
156 spin_unlock(&mq_lock); 159 spin_unlock(&mq_lock);
160 kfree(info->messages);
157 goto out_inode; 161 goto out_inode;
158 } 162 }
159 u->mq_bytes += mq_bytes; 163 u->mq_bytes += mq_bytes;
160 spin_unlock(&mq_lock); 164 spin_unlock(&mq_lock);
161 165
162 info->messages = kmalloc(mq_msg_tblsz, GFP_KERNEL);
163 if (!info->messages) {
164 spin_lock(&mq_lock);
165 u->mq_bytes -= mq_bytes;
166 spin_unlock(&mq_lock);
167 goto out_inode;
168 }
169 /* all is ok */ 166 /* all is ok */
170 info->user = get_uid(u); 167 info->user = get_uid(u);
171 } else if (S_ISDIR(mode)) { 168 } else if (S_ISDIR(mode)) {
@@ -187,7 +184,7 @@ static int mqueue_fill_super(struct super_block *sb, void *data, int silent)
187{ 184{
188 struct inode *inode; 185 struct inode *inode;
189 struct ipc_namespace *ns = data; 186 struct ipc_namespace *ns = data;
190 int error = 0; 187 int error;
191 188
192 sb->s_blocksize = PAGE_CACHE_SIZE; 189 sb->s_blocksize = PAGE_CACHE_SIZE;
193 sb->s_blocksize_bits = PAGE_CACHE_SHIFT; 190 sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
@@ -205,7 +202,9 @@ static int mqueue_fill_super(struct super_block *sb, void *data, int silent)
205 if (!sb->s_root) { 202 if (!sb->s_root) {
206 iput(inode); 203 iput(inode);
207 error = -ENOMEM; 204 error = -ENOMEM;
205 goto out;
208 } 206 }
207 error = 0;
209 208
210out: 209out:
211 return error; 210 return error;
@@ -264,8 +263,9 @@ static void mqueue_delete_inode(struct inode *inode)
264 263
265 clear_inode(inode); 264 clear_inode(inode);
266 265
267 mq_bytes = (info->attr.mq_maxmsg * sizeof(struct msg_msg *) + 266 /* Total amount of bytes accounted for the mqueue */
268 (info->attr.mq_maxmsg * info->attr.mq_msgsize)); 267 mq_bytes = info->attr.mq_maxmsg * (sizeof(struct msg_msg *)
268 + info->attr.mq_msgsize);
269 user = info->user; 269 user = info->user;
270 if (user) { 270 if (user) {
271 spin_lock(&mq_lock); 271 spin_lock(&mq_lock);
@@ -604,8 +604,8 @@ static int mq_attr_ok(struct ipc_namespace *ipc_ns, struct mq_attr *attr)
604 /* check for overflow */ 604 /* check for overflow */
605 if (attr->mq_msgsize > ULONG_MAX/attr->mq_maxmsg) 605 if (attr->mq_msgsize > ULONG_MAX/attr->mq_maxmsg)
606 return 0; 606 return 0;
607 if ((unsigned long)(attr->mq_maxmsg * attr->mq_msgsize) + 607 if ((unsigned long)(attr->mq_maxmsg * (attr->mq_msgsize
608 (attr->mq_maxmsg * sizeof (struct msg_msg *)) < 608 + sizeof (struct msg_msg *))) <
609 (unsigned long)(attr->mq_maxmsg * attr->mq_msgsize)) 609 (unsigned long)(attr->mq_maxmsg * attr->mq_msgsize))
610 return 0; 610 return 0;
611 return 1; 611 return 1;
@@ -623,9 +623,10 @@ static struct file *do_create(struct ipc_namespace *ipc_ns, struct dentry *dir,
623 int ret; 623 int ret;
624 624
625 if (attr) { 625 if (attr) {
626 ret = -EINVAL; 626 if (!mq_attr_ok(ipc_ns, attr)) {
627 if (!mq_attr_ok(ipc_ns, attr)) 627 ret = -EINVAL;
628 goto out; 628 goto out;
629 }
629 /* store for use during create */ 630 /* store for use during create */
630 dentry->d_fsdata = attr; 631 dentry->d_fsdata = attr;
631 } 632 }
@@ -659,24 +660,28 @@ out:
659static struct file *do_open(struct ipc_namespace *ipc_ns, 660static struct file *do_open(struct ipc_namespace *ipc_ns,
660 struct dentry *dentry, int oflag) 661 struct dentry *dentry, int oflag)
661{ 662{
663 int ret;
662 const struct cred *cred = current_cred(); 664 const struct cred *cred = current_cred();
663 665
664 static const int oflag2acc[O_ACCMODE] = { MAY_READ, MAY_WRITE, 666 static const int oflag2acc[O_ACCMODE] = { MAY_READ, MAY_WRITE,
665 MAY_READ | MAY_WRITE }; 667 MAY_READ | MAY_WRITE };
666 668
667 if ((oflag & O_ACCMODE) == (O_RDWR | O_WRONLY)) { 669 if ((oflag & O_ACCMODE) == (O_RDWR | O_WRONLY)) {
668 dput(dentry); 670 ret = -EINVAL;
669 mntput(ipc_ns->mq_mnt); 671 goto err;
670 return ERR_PTR(-EINVAL);
671 } 672 }
672 673
673 if (inode_permission(dentry->d_inode, oflag2acc[oflag & O_ACCMODE])) { 674 if (inode_permission(dentry->d_inode, oflag2acc[oflag & O_ACCMODE])) {
674 dput(dentry); 675 ret = -EACCES;
675 mntput(ipc_ns->mq_mnt); 676 goto err;
676 return ERR_PTR(-EACCES);
677 } 677 }
678 678
679 return dentry_open(dentry, ipc_ns->mq_mnt, oflag, cred); 679 return dentry_open(dentry, ipc_ns->mq_mnt, oflag, cred);
680
681err:
682 dput(dentry);
683 mntput(ipc_ns->mq_mnt);
684 return ERR_PTR(ret);
680} 685}
681 686
682SYSCALL_DEFINE4(mq_open, const char __user *, u_name, int, oflag, mode_t, mode, 687SYSCALL_DEFINE4(mq_open, const char __user *, u_name, int, oflag, mode_t, mode,
@@ -705,16 +710,17 @@ SYSCALL_DEFINE4(mq_open, const char __user *, u_name, int, oflag, mode_t, mode,
705 dentry = lookup_one_len(name, ipc_ns->mq_mnt->mnt_root, strlen(name)); 710 dentry = lookup_one_len(name, ipc_ns->mq_mnt->mnt_root, strlen(name));
706 if (IS_ERR(dentry)) { 711 if (IS_ERR(dentry)) {
707 error = PTR_ERR(dentry); 712 error = PTR_ERR(dentry);
708 goto out_err; 713 goto out_putfd;
709 } 714 }
710 mntget(ipc_ns->mq_mnt); 715 mntget(ipc_ns->mq_mnt);
711 716
712 if (oflag & O_CREAT) { 717 if (oflag & O_CREAT) {
713 if (dentry->d_inode) { /* entry already exists */ 718 if (dentry->d_inode) { /* entry already exists */
714 audit_inode(name, dentry); 719 audit_inode(name, dentry);
715 error = -EEXIST; 720 if (oflag & O_EXCL) {
716 if (oflag & O_EXCL) 721 error = -EEXIST;
717 goto out; 722 goto out;
723 }
718 filp = do_open(ipc_ns, dentry, oflag); 724 filp = do_open(ipc_ns, dentry, oflag);
719 } else { 725 } else {
720 filp = do_create(ipc_ns, ipc_ns->mq_mnt->mnt_root, 726 filp = do_create(ipc_ns, ipc_ns->mq_mnt->mnt_root,
@@ -722,9 +728,10 @@ SYSCALL_DEFINE4(mq_open, const char __user *, u_name, int, oflag, mode_t, mode,
722 u_attr ? &attr : NULL); 728 u_attr ? &attr : NULL);
723 } 729 }
724 } else { 730 } else {
725 error = -ENOENT; 731 if (!dentry->d_inode) {
726 if (!dentry->d_inode) 732 error = -ENOENT;
727 goto out; 733 goto out;
734 }
728 audit_inode(name, dentry); 735 audit_inode(name, dentry);
729 filp = do_open(ipc_ns, dentry, oflag); 736 filp = do_open(ipc_ns, dentry, oflag);
730 } 737 }
@@ -742,7 +749,6 @@ out:
742 mntput(ipc_ns->mq_mnt); 749 mntput(ipc_ns->mq_mnt);
743out_putfd: 750out_putfd:
744 put_unused_fd(fd); 751 put_unused_fd(fd);
745out_err:
746 fd = error; 752 fd = error;
747out_upsem: 753out_upsem:
748 mutex_unlock(&ipc_ns->mq_mnt->mnt_root->d_inode->i_mutex); 754 mutex_unlock(&ipc_ns->mq_mnt->mnt_root->d_inode->i_mutex);
@@ -872,19 +878,24 @@ SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr,
872 audit_mq_sendrecv(mqdes, msg_len, msg_prio, p); 878 audit_mq_sendrecv(mqdes, msg_len, msg_prio, p);
873 timeout = prepare_timeout(p); 879 timeout = prepare_timeout(p);
874 880
875 ret = -EBADF;
876 filp = fget(mqdes); 881 filp = fget(mqdes);
877 if (unlikely(!filp)) 882 if (unlikely(!filp)) {
883 ret = -EBADF;
878 goto out; 884 goto out;
885 }
879 886
880 inode = filp->f_path.dentry->d_inode; 887 inode = filp->f_path.dentry->d_inode;
881 if (unlikely(filp->f_op != &mqueue_file_operations)) 888 if (unlikely(filp->f_op != &mqueue_file_operations)) {
889 ret = -EBADF;
882 goto out_fput; 890 goto out_fput;
891 }
883 info = MQUEUE_I(inode); 892 info = MQUEUE_I(inode);
884 audit_inode(NULL, filp->f_path.dentry); 893 audit_inode(NULL, filp->f_path.dentry);
885 894
886 if (unlikely(!(filp->f_mode & FMODE_WRITE))) 895 if (unlikely(!(filp->f_mode & FMODE_WRITE))) {
896 ret = -EBADF;
887 goto out_fput; 897 goto out_fput;
898 }
888 899
889 if (unlikely(msg_len > info->attr.mq_msgsize)) { 900 if (unlikely(msg_len > info->attr.mq_msgsize)) {
890 ret = -EMSGSIZE; 901 ret = -EMSGSIZE;
@@ -961,19 +972,24 @@ SYSCALL_DEFINE5(mq_timedreceive, mqd_t, mqdes, char __user *, u_msg_ptr,
961 audit_mq_sendrecv(mqdes, msg_len, 0, p); 972 audit_mq_sendrecv(mqdes, msg_len, 0, p);
962 timeout = prepare_timeout(p); 973 timeout = prepare_timeout(p);
963 974
964 ret = -EBADF;
965 filp = fget(mqdes); 975 filp = fget(mqdes);
966 if (unlikely(!filp)) 976 if (unlikely(!filp)) {
977 ret = -EBADF;
967 goto out; 978 goto out;
979 }
968 980
969 inode = filp->f_path.dentry->d_inode; 981 inode = filp->f_path.dentry->d_inode;
970 if (unlikely(filp->f_op != &mqueue_file_operations)) 982 if (unlikely(filp->f_op != &mqueue_file_operations)) {
983 ret = -EBADF;
971 goto out_fput; 984 goto out_fput;
985 }
972 info = MQUEUE_I(inode); 986 info = MQUEUE_I(inode);
973 audit_inode(NULL, filp->f_path.dentry); 987 audit_inode(NULL, filp->f_path.dentry);
974 988
975 if (unlikely(!(filp->f_mode & FMODE_READ))) 989 if (unlikely(!(filp->f_mode & FMODE_READ))) {
990 ret = -EBADF;
976 goto out_fput; 991 goto out_fput;
992 }
977 993
978 /* checks if buffer is big enough */ 994 /* checks if buffer is big enough */
979 if (unlikely(msg_len < info->attr.mq_msgsize)) { 995 if (unlikely(msg_len < info->attr.mq_msgsize)) {
@@ -1063,13 +1079,14 @@ SYSCALL_DEFINE2(mq_notify, mqd_t, mqdes,
1063 1079
1064 /* create the notify skb */ 1080 /* create the notify skb */
1065 nc = alloc_skb(NOTIFY_COOKIE_LEN, GFP_KERNEL); 1081 nc = alloc_skb(NOTIFY_COOKIE_LEN, GFP_KERNEL);
1066 ret = -ENOMEM; 1082 if (!nc) {
1067 if (!nc) 1083 ret = -ENOMEM;
1068 goto out; 1084 goto out;
1069 ret = -EFAULT; 1085 }
1070 if (copy_from_user(nc->data, 1086 if (copy_from_user(nc->data,
1071 notification.sigev_value.sival_ptr, 1087 notification.sigev_value.sival_ptr,
1072 NOTIFY_COOKIE_LEN)) { 1088 NOTIFY_COOKIE_LEN)) {
1089 ret = -EFAULT;
1073 goto out; 1090 goto out;
1074 } 1091 }
1075 1092
@@ -1078,9 +1095,10 @@ SYSCALL_DEFINE2(mq_notify, mqd_t, mqdes,
1078 /* and attach it to the socket */ 1095 /* and attach it to the socket */
1079retry: 1096retry:
1080 filp = fget(notification.sigev_signo); 1097 filp = fget(notification.sigev_signo);
1081 ret = -EBADF; 1098 if (!filp) {
1082 if (!filp) 1099 ret = -EBADF;
1083 goto out; 1100 goto out;
1101 }
1084 sock = netlink_getsockbyfilp(filp); 1102 sock = netlink_getsockbyfilp(filp);
1085 fput(filp); 1103 fput(filp);
1086 if (IS_ERR(sock)) { 1104 if (IS_ERR(sock)) {
@@ -1092,7 +1110,7 @@ retry:
1092 timeo = MAX_SCHEDULE_TIMEOUT; 1110 timeo = MAX_SCHEDULE_TIMEOUT;
1093 ret = netlink_attachskb(sock, nc, &timeo, NULL); 1111 ret = netlink_attachskb(sock, nc, &timeo, NULL);
1094 if (ret == 1) 1112 if (ret == 1)
1095 goto retry; 1113 goto retry;
1096 if (ret) { 1114 if (ret) {
1097 sock = NULL; 1115 sock = NULL;
1098 nc = NULL; 1116 nc = NULL;
@@ -1101,14 +1119,17 @@ retry:
1101 } 1119 }
1102 } 1120 }
1103 1121
1104 ret = -EBADF;
1105 filp = fget(mqdes); 1122 filp = fget(mqdes);
1106 if (!filp) 1123 if (!filp) {
1124 ret = -EBADF;
1107 goto out; 1125 goto out;
1126 }
1108 1127
1109 inode = filp->f_path.dentry->d_inode; 1128 inode = filp->f_path.dentry->d_inode;
1110 if (unlikely(filp->f_op != &mqueue_file_operations)) 1129 if (unlikely(filp->f_op != &mqueue_file_operations)) {
1130 ret = -EBADF;
1111 goto out_fput; 1131 goto out_fput;
1132 }
1112 info = MQUEUE_I(inode); 1133 info = MQUEUE_I(inode);
1113 1134
1114 ret = 0; 1135 ret = 0;
@@ -1171,14 +1192,17 @@ SYSCALL_DEFINE3(mq_getsetattr, mqd_t, mqdes,
1171 return -EINVAL; 1192 return -EINVAL;
1172 } 1193 }
1173 1194
1174 ret = -EBADF;
1175 filp = fget(mqdes); 1195 filp = fget(mqdes);
1176 if (!filp) 1196 if (!filp) {
1197 ret = -EBADF;
1177 goto out; 1198 goto out;
1199 }
1178 1200
1179 inode = filp->f_path.dentry->d_inode; 1201 inode = filp->f_path.dentry->d_inode;
1180 if (unlikely(filp->f_op != &mqueue_file_operations)) 1202 if (unlikely(filp->f_op != &mqueue_file_operations)) {
1203 ret = -EBADF;
1181 goto out_fput; 1204 goto out_fput;
1205 }
1182 info = MQUEUE_I(inode); 1206 info = MQUEUE_I(inode);
1183 1207
1184 spin_lock(&info->lock); 1208 spin_lock(&info->lock);
@@ -1272,7 +1296,7 @@ static int __init init_mqueue_fs(void)
1272 if (mqueue_inode_cachep == NULL) 1296 if (mqueue_inode_cachep == NULL)
1273 return -ENOMEM; 1297 return -ENOMEM;
1274 1298
1275 /* ignore failues - they are not fatal */ 1299 /* ignore failures - they are not fatal */
1276 mq_sysctl_table = mq_register_sysctl_table(); 1300 mq_sysctl_table = mq_register_sysctl_table();
1277 1301
1278 error = register_filesystem(&mqueue_fs_type); 1302 error = register_filesystem(&mqueue_fs_type);
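The mqueue accounting change is just factoring: a queue charges one struct msg_msg * of pointer-table space plus mq_msgsize per message, so

	mq_bytes = mq_maxmsg * (sizeof(struct msg_msg *) + mq_msgsize)

For example, mq_maxmsg = 10 and mq_msgsize = 8192 on a 64-bit kernel charges 10 * (8 + 8192) = 82000 bytes against RLIMIT_MSGQUEUE. The same factored form now appears in mq_attr_ok()'s overflow check, and moving the message-table kmalloc() ahead of the rlimit accounting means a failed allocation no longer has to unwind u->mq_bytes. The remaining hunks are a single mechanical cleanup: error codes are assigned inside the failure branch instead of speculatively before the test, so the success path never carries a stale ret.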
diff --git a/kernel/Makefile b/kernel/Makefile
index 6aebdeb2aa34..7b974699f8c2 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -10,7 +10,8 @@ obj-y = sched.o fork.o exec_domain.o panic.o printk.o \
10 kthread.o wait.o kfifo.o sys_ni.o posix-cpu-timers.o mutex.o \ 10 kthread.o wait.o kfifo.o sys_ni.o posix-cpu-timers.o mutex.o \
11 hrtimer.o rwsem.o nsproxy.o srcu.o semaphore.o \ 11 hrtimer.o rwsem.o nsproxy.o srcu.o semaphore.o \
12 notifier.o ksysfs.o pm_qos_params.o sched_clock.o cred.o \ 12 notifier.o ksysfs.o pm_qos_params.o sched_clock.o cred.o \
13 async.o 13 async.o range.o
14obj-$(CONFIG_HAVE_EARLY_RES) += early_res.o
14obj-y += groups.o 15obj-y += groups.o
15 16
16ifdef CONFIG_FUNCTION_TRACER 17ifdef CONFIG_FUNCTION_TRACER
diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c
index 4b05bd9479db..028e85663f27 100644
--- a/kernel/audit_tree.c
+++ b/kernel/audit_tree.c
@@ -548,6 +548,11 @@ int audit_remove_tree_rule(struct audit_krule *rule)
548 return 0; 548 return 0;
549} 549}
550 550
551static int compare_root(struct vfsmount *mnt, void *arg)
552{
553 return mnt->mnt_root->d_inode == arg;
554}
555
551void audit_trim_trees(void) 556void audit_trim_trees(void)
552{ 557{
553 struct list_head cursor; 558 struct list_head cursor;
@@ -559,7 +564,6 @@ void audit_trim_trees(void)
559 struct path path; 564 struct path path;
560 struct vfsmount *root_mnt; 565 struct vfsmount *root_mnt;
561 struct node *node; 566 struct node *node;
562 struct list_head list;
563 int err; 567 int err;
564 568
565 tree = container_of(cursor.next, struct audit_tree, list); 569 tree = container_of(cursor.next, struct audit_tree, list);
@@ -577,24 +581,16 @@ void audit_trim_trees(void)
577 if (!root_mnt) 581 if (!root_mnt)
578 goto skip_it; 582 goto skip_it;
579 583
580 list_add_tail(&list, &root_mnt->mnt_list);
581 spin_lock(&hash_lock); 584 spin_lock(&hash_lock);
582 list_for_each_entry(node, &tree->chunks, list) { 585 list_for_each_entry(node, &tree->chunks, list) {
583 struct audit_chunk *chunk = find_chunk(node); 586 struct inode *inode = find_chunk(node)->watch.inode;
584 struct inode *inode = chunk->watch.inode;
585 struct vfsmount *mnt;
586 node->index |= 1U<<31; 587 node->index |= 1U<<31;
587 list_for_each_entry(mnt, &list, mnt_list) { 588 if (iterate_mounts(compare_root, inode, root_mnt))
588 if (mnt->mnt_root->d_inode == inode) { 589 node->index &= ~(1U<<31);
589 node->index &= ~(1U<<31);
590 break;
591 }
592 }
593 } 590 }
594 spin_unlock(&hash_lock); 591 spin_unlock(&hash_lock);
595 trim_marked(tree); 592 trim_marked(tree);
596 put_tree(tree); 593 put_tree(tree);
597 list_del_init(&list);
598 drop_collected_mounts(root_mnt); 594 drop_collected_mounts(root_mnt);
599skip_it: 595skip_it:
600 mutex_lock(&audit_filter_mutex); 596 mutex_lock(&audit_filter_mutex);
@@ -603,22 +599,6 @@ skip_it:
603 mutex_unlock(&audit_filter_mutex); 599 mutex_unlock(&audit_filter_mutex);
604} 600}
605 601
606static int is_under(struct vfsmount *mnt, struct dentry *dentry,
607 struct path *path)
608{
609 if (mnt != path->mnt) {
610 for (;;) {
611 if (mnt->mnt_parent == mnt)
612 return 0;
613 if (mnt->mnt_parent == path->mnt)
614 break;
615 mnt = mnt->mnt_parent;
616 }
617 dentry = mnt->mnt_mountpoint;
618 }
619 return is_subdir(dentry, path->dentry);
620}
621
622int audit_make_tree(struct audit_krule *rule, char *pathname, u32 op) 602int audit_make_tree(struct audit_krule *rule, char *pathname, u32 op)
623{ 603{
624 604
@@ -638,13 +618,17 @@ void audit_put_tree(struct audit_tree *tree)
638 put_tree(tree); 618 put_tree(tree);
639} 619}
640 620
621static int tag_mount(struct vfsmount *mnt, void *arg)
622{
623 return tag_chunk(mnt->mnt_root->d_inode, arg);
624}
625
641/* called with audit_filter_mutex */ 626/* called with audit_filter_mutex */
642int audit_add_tree_rule(struct audit_krule *rule) 627int audit_add_tree_rule(struct audit_krule *rule)
643{ 628{
644 struct audit_tree *seed = rule->tree, *tree; 629 struct audit_tree *seed = rule->tree, *tree;
645 struct path path; 630 struct path path;
646 struct vfsmount *mnt, *p; 631 struct vfsmount *mnt;
647 struct list_head list;
648 int err; 632 int err;
649 633
650 list_for_each_entry(tree, &tree_list, list) { 634 list_for_each_entry(tree, &tree_list, list) {
@@ -670,16 +654,9 @@ int audit_add_tree_rule(struct audit_krule *rule)
670 err = -ENOMEM; 654 err = -ENOMEM;
671 goto Err; 655 goto Err;
672 } 656 }
673 list_add_tail(&list, &mnt->mnt_list);
674 657
675 get_tree(tree); 658 get_tree(tree);
676 list_for_each_entry(p, &list, mnt_list) { 659 err = iterate_mounts(tag_mount, tree, mnt);
677 err = tag_chunk(p->mnt_root->d_inode, tree);
678 if (err)
679 break;
680 }
681
682 list_del(&list);
683 drop_collected_mounts(mnt); 660 drop_collected_mounts(mnt);
684 661
685 if (!err) { 662 if (!err) {
@@ -714,31 +691,23 @@ int audit_tag_tree(char *old, char *new)
714{ 691{
715 struct list_head cursor, barrier; 692 struct list_head cursor, barrier;
716 int failed = 0; 693 int failed = 0;
717 struct path path; 694 struct path path1, path2;
718 struct vfsmount *tagged; 695 struct vfsmount *tagged;
719 struct list_head list;
720 struct vfsmount *mnt;
721 struct dentry *dentry;
722 int err; 696 int err;
723 697
724 err = kern_path(new, 0, &path); 698 err = kern_path(new, 0, &path2);
725 if (err) 699 if (err)
726 return err; 700 return err;
727 tagged = collect_mounts(&path); 701 tagged = collect_mounts(&path2);
728 path_put(&path); 702 path_put(&path2);
729 if (!tagged) 703 if (!tagged)
730 return -ENOMEM; 704 return -ENOMEM;
731 705
732 err = kern_path(old, 0, &path); 706 err = kern_path(old, 0, &path1);
733 if (err) { 707 if (err) {
734 drop_collected_mounts(tagged); 708 drop_collected_mounts(tagged);
735 return err; 709 return err;
736 } 710 }
737 mnt = mntget(path.mnt);
738 dentry = dget(path.dentry);
739 path_put(&path);
740
741 list_add_tail(&list, &tagged->mnt_list);
742 711
743 mutex_lock(&audit_filter_mutex); 712 mutex_lock(&audit_filter_mutex);
744 list_add(&barrier, &tree_list); 713 list_add(&barrier, &tree_list);
@@ -746,7 +715,7 @@ int audit_tag_tree(char *old, char *new)
746 715
747 while (cursor.next != &tree_list) { 716 while (cursor.next != &tree_list) {
748 struct audit_tree *tree; 717 struct audit_tree *tree;
749 struct vfsmount *p; 718 int good_one = 0;
750 719
751 tree = container_of(cursor.next, struct audit_tree, list); 720 tree = container_of(cursor.next, struct audit_tree, list);
752 get_tree(tree); 721 get_tree(tree);
@@ -754,30 +723,19 @@ int audit_tag_tree(char *old, char *new)
754 list_add(&cursor, &tree->list); 723 list_add(&cursor, &tree->list);
755 mutex_unlock(&audit_filter_mutex); 724 mutex_unlock(&audit_filter_mutex);
756 725
757 err = kern_path(tree->pathname, 0, &path); 726 err = kern_path(tree->pathname, 0, &path2);
758 if (err) { 727 if (!err) {
759 put_tree(tree); 728 good_one = path_is_under(&path1, &path2);
760 mutex_lock(&audit_filter_mutex); 729 path_put(&path2);
761 continue;
762 } 730 }
763 731
764 spin_lock(&vfsmount_lock); 732 if (!good_one) {
765 if (!is_under(mnt, dentry, &path)) {
766 spin_unlock(&vfsmount_lock);
767 path_put(&path);
768 put_tree(tree); 733 put_tree(tree);
769 mutex_lock(&audit_filter_mutex); 734 mutex_lock(&audit_filter_mutex);
770 continue; 735 continue;
771 } 736 }
772 spin_unlock(&vfsmount_lock);
773 path_put(&path);
774
775 list_for_each_entry(p, &list, mnt_list) {
776 failed = tag_chunk(p->mnt_root->d_inode, tree);
777 if (failed)
778 break;
779 }
780 737
738 failed = iterate_mounts(tag_mount, tree, tagged);
781 if (failed) { 739 if (failed) {
782 put_tree(tree); 740 put_tree(tree);
783 mutex_lock(&audit_filter_mutex); 741 mutex_lock(&audit_filter_mutex);
@@ -818,10 +776,8 @@ int audit_tag_tree(char *old, char *new)
818 } 776 }
819 list_del(&barrier); 777 list_del(&barrier);
820 list_del(&cursor); 778 list_del(&cursor);
821 list_del(&list);
822 mutex_unlock(&audit_filter_mutex); 779 mutex_unlock(&audit_filter_mutex);
823 dput(dentry); 780 path_put(&path1);
824 mntput(mnt);
825 drop_collected_mounts(tagged); 781 drop_collected_mounts(tagged);
826 return failed; 782 return failed;
827} 783}
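The open-coded walks over mnt_list (which needed a temporary list_head spliced in and vfsmount_lock games) are replaced by the new iterate_mounts() helper: it visits every mount collected under the tree and stops early when the callback returns non-zero, propagating that value. That contract keeps the callbacks deliberately tiny:

	static int compare_root(struct vfsmount *mnt, void *arg)
	{
		return mnt->mnt_root->d_inode == arg;	/* non-zero == found */
	}

	static int tag_mount(struct vfsmount *mnt, void *arg)
	{
		return tag_chunk(mnt->mnt_root->d_inode, arg);	/* non-zero == error */
	}

so iterate_mounts(compare_root, inode, root_mnt) answers "is this inode the root of any collected mount?" and iterate_mounts(tag_mount, tree, tagged) tags them all, bailing on the first error. Likewise, path_is_under(&path1, &path2) subsumes the hand-rolled is_under() that audit_tag_tree() used to carry.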
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index fc0f928167e7..f3a461c0970a 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -1988,7 +1988,6 @@ void __audit_inode(const char *name, const struct dentry *dentry)
1988 1988
1989/** 1989/**
1990 * audit_inode_child - collect inode info for created/removed objects 1990 * audit_inode_child - collect inode info for created/removed objects
1991 * @dname: inode's dentry name
1992 * @dentry: dentry being audited 1991 * @dentry: dentry being audited
1993 * @parent: inode of dentry parent 1992 * @parent: inode of dentry parent
1994 * 1993 *
@@ -2000,13 +1999,14 @@ void __audit_inode(const char *name, const struct dentry *dentry)
2000 * must be hooked prior, in order to capture the target inode during 1999 * must be hooked prior, in order to capture the target inode during
2001 * unsuccessful attempts. 2000 * unsuccessful attempts.
2002 */ 2001 */
2003void __audit_inode_child(const char *dname, const struct dentry *dentry, 2002void __audit_inode_child(const struct dentry *dentry,
2004 const struct inode *parent) 2003 const struct inode *parent)
2005{ 2004{
2006 int idx; 2005 int idx;
2007 struct audit_context *context = current->audit_context; 2006 struct audit_context *context = current->audit_context;
2008 const char *found_parent = NULL, *found_child = NULL; 2007 const char *found_parent = NULL, *found_child = NULL;
2009 const struct inode *inode = dentry->d_inode; 2008 const struct inode *inode = dentry->d_inode;
2009 const char *dname = dentry->d_name.name;
2010 int dirlen = 0; 2010 int dirlen = 0;
2011 2011
2012 if (!context->in_syscall) 2012 if (!context->in_syscall)
@@ -2014,9 +2014,6 @@ void __audit_inode_child(const char *dname, const struct dentry *dentry,
2014 2014
2015 if (inode) 2015 if (inode)
2016 handle_one(inode); 2016 handle_one(inode);
2017 /* determine matching parent */
2018 if (!dname)
2019 goto add_names;
2020 2017
2021 /* parent is more likely, look for it first */ 2018 /* parent is more likely, look for it first */
2022 for (idx = 0; idx < context->name_count; idx++) { 2019 for (idx = 0; idx < context->name_count; idx++) {
diff --git a/kernel/capability.c b/kernel/capability.c
index 7f876e60521f..9e4697e9b276 100644
--- a/kernel/capability.c
+++ b/kernel/capability.c
@@ -135,7 +135,7 @@ static inline int cap_get_target_pid(pid_t pid, kernel_cap_t *pEp,
135 if (pid && (pid != task_pid_vnr(current))) { 135 if (pid && (pid != task_pid_vnr(current))) {
136 struct task_struct *target; 136 struct task_struct *target;
137 137
138 read_lock(&tasklist_lock); 138 rcu_read_lock();
139 139
140 target = find_task_by_vpid(pid); 140 target = find_task_by_vpid(pid);
141 if (!target) 141 if (!target)
@@ -143,7 +143,7 @@ static inline int cap_get_target_pid(pid_t pid, kernel_cap_t *pEp,
143 else 143 else
144 ret = security_capget(target, pEp, pIp, pPp); 144 ret = security_capget(target, pEp, pIp, pPp);
145 145
146 read_unlock(&tasklist_lock); 146 rcu_read_unlock();
147 } else 147 } else
148 ret = security_capget(current, pEp, pIp, pPp); 148 ret = security_capget(current, pEp, pIp, pPp);
149 149
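tasklist_lock is overkill here: find_task_by_vpid() only needs the task to stay valid while it is inspected, and RCU provides that without blocking writers. The resulting pattern is the standard one:

	rcu_read_lock();
	target = find_task_by_vpid(pid);
	if (target)
		ret = security_capget(target, pEp, pIp, pPp);
	rcu_read_unlock();

If the task had to be used after the critical section, a get_task_struct(target) inside it would be required; security_capget() completes inside the section, so the plain lock/unlock pair suffices.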
diff --git a/kernel/early_res.c b/kernel/early_res.c
new file mode 100644
index 000000000000..3cb2c661bb78
--- /dev/null
+++ b/kernel/early_res.c
@@ -0,0 +1,578 @@
1/*
2 * early_res, could be used to replace bootmem
3 */
4#include <linux/kernel.h>
5#include <linux/types.h>
6#include <linux/init.h>
7#include <linux/bootmem.h>
8#include <linux/mm.h>
9#include <linux/early_res.h>
10
11/*
12 * Early reserved memory areas.
13 */
14/*
 15 * need to make sure this one is big enough before
 16 * find_fw_memmap_area can be used
17 */
18#define MAX_EARLY_RES_X 32
19
20struct early_res {
21 u64 start, end;
22 char name[15];
23 char overlap_ok;
24};
25static struct early_res early_res_x[MAX_EARLY_RES_X] __initdata;
26
27static int max_early_res __initdata = MAX_EARLY_RES_X;
28static struct early_res *early_res __initdata = &early_res_x[0];
29static int early_res_count __initdata;
30
31static int __init find_overlapped_early(u64 start, u64 end)
32{
33 int i;
34 struct early_res *r;
35
36 for (i = 0; i < max_early_res && early_res[i].end; i++) {
37 r = &early_res[i];
38 if (end > r->start && start < r->end)
39 break;
40 }
41
42 return i;
43}
44
45/*
46 * Drop the i-th range from the early reservation map,
47 * by copying any higher ranges down one over it, and
48 * clearing what had been the last slot.
49 */
50static void __init drop_range(int i)
51{
52 int j;
53
54 for (j = i + 1; j < max_early_res && early_res[j].end; j++)
55 ;
56
57 memmove(&early_res[i], &early_res[i + 1],
58 (j - 1 - i) * sizeof(struct early_res));
59
60 early_res[j - 1].end = 0;
61 early_res_count--;
62}
63
64static void __init drop_range_partial(int i, u64 start, u64 end)
65{
66 u64 common_start, common_end;
67 u64 old_start, old_end;
68
69 old_start = early_res[i].start;
70 old_end = early_res[i].end;
71 common_start = max(old_start, start);
72 common_end = min(old_end, end);
73
74 /* no overlap ? */
75 if (common_start >= common_end)
76 return;
77
78 if (old_start < common_start) {
79 /* make head segment */
80 early_res[i].end = common_start;
81 if (old_end > common_end) {
82 char name[15];
83
84 /*
85 * Save a local copy of the name, since the
86 * early_res array could get resized inside
87 * reserve_early_without_check() ->
88 * __check_and_double_early_res(), which would
89 * make the current name pointer invalid.
90 */
91 strncpy(name, early_res[i].name,
92 sizeof(early_res[i].name) - 1);
93 /* add another for left over on tail */
94 reserve_early_without_check(common_end, old_end, name);
95 }
96 return;
97 } else {
98 if (old_end > common_end) {
99 /* reuse the entry for tail left */
100 early_res[i].start = common_end;
101 return;
102 }
103 /* all covered */
104 drop_range(i);
105 }
106}
107
108/*
109 * Split any existing ranges that:
110 * 1) are marked 'overlap_ok', and
111 * 2) overlap with the stated range [start, end)
112 * into whatever portion (if any) of the existing range is entirely
113 * below or entirely above the stated range. Drop the portion
114 * of the existing range that overlaps with the stated range,
115 * which will allow the caller of this routine to then add that
116 * stated range without conflicting with any existing range.
117 */
118static void __init drop_overlaps_that_are_ok(u64 start, u64 end)
119{
120 int i;
121 struct early_res *r;
122 u64 lower_start, lower_end;
123 u64 upper_start, upper_end;
124 char name[15];
125
126 for (i = 0; i < max_early_res && early_res[i].end; i++) {
127 r = &early_res[i];
128
129 /* Continue past non-overlapping ranges */
130 if (end <= r->start || start >= r->end)
131 continue;
132
133 /*
134 * Leave non-ok overlaps as is; let caller
135 * panic "Overlapping early reservations"
136 * when it hits this overlap.
137 */
138 if (!r->overlap_ok)
139 return;
140
141 /*
142 * We have an ok overlap. We will drop it from the early
143 * reservation map, and add back in any non-overlapping
144 * portions (lower or upper) as separate, overlap_ok,
145 * non-overlapping ranges.
146 */
147
148 /* 1. Note any non-overlapping (lower or upper) ranges. */
149 strncpy(name, r->name, sizeof(name) - 1);
150
151 lower_start = lower_end = 0;
152 upper_start = upper_end = 0;
153 if (r->start < start) {
154 lower_start = r->start;
155 lower_end = start;
156 }
157 if (r->end > end) {
158 upper_start = end;
159 upper_end = r->end;
160 }
161
162 /* 2. Drop the original ok overlapping range */
163 drop_range(i);
164
165 i--; /* resume for-loop on copied down entry */
166
167 /* 3. Add back in any non-overlapping ranges. */
168 if (lower_end)
169 reserve_early_overlap_ok(lower_start, lower_end, name);
170 if (upper_end)
171 reserve_early_overlap_ok(upper_start, upper_end, name);
172 }
173}
174
175static void __init __reserve_early(u64 start, u64 end, char *name,
176 int overlap_ok)
177{
178 int i;
179 struct early_res *r;
180
181 i = find_overlapped_early(start, end);
182 if (i >= max_early_res)
183 panic("Too many early reservations");
184 r = &early_res[i];
185 if (r->end)
186 panic("Overlapping early reservations "
187 "%llx-%llx %s to %llx-%llx %s\n",
188 start, end - 1, name ? name : "", r->start,
189 r->end - 1, r->name);
190 r->start = start;
191 r->end = end;
192 r->overlap_ok = overlap_ok;
193 if (name)
194 strncpy(r->name, name, sizeof(r->name) - 1);
195 early_res_count++;
196}
197
198/*
 199 * A few early reservations come here.
200 *
201 * The 'overlap_ok' in the name of this routine does -not- mean it
202 * is ok for these reservations to overlap an earlier reservation.
203 * Rather it means that it is ok for subsequent reservations to
204 * overlap this one.
205 *
206 * Use this entry point to reserve early ranges when you are doing
207 * so out of "Paranoia", reserving perhaps more memory than you need,
208 * just in case, and don't mind a subsequent overlapping reservation
209 * that is known to be needed.
210 *
211 * The drop_overlaps_that_are_ok() call here isn't really needed.
212 * It would be needed if we had two colliding 'overlap_ok'
213 * reservations, so that the second such would not panic on the
214 * overlap with the first. We don't have any such as of this
215 * writing, but might as well tolerate such if it happens in
216 * the future.
217 */
218void __init reserve_early_overlap_ok(u64 start, u64 end, char *name)
219{
220 drop_overlaps_that_are_ok(start, end);
221 __reserve_early(start, end, name, 1);
222}
223
224static void __init __check_and_double_early_res(u64 ex_start, u64 ex_end)
225{
226 u64 start, end, size, mem;
227 struct early_res *new;
228
229 /* do we have enough slots left ? */
230 if ((max_early_res - early_res_count) > max(max_early_res/8, 2))
231 return;
232
233 /* double it */
234 mem = -1ULL;
235 size = sizeof(struct early_res) * max_early_res * 2;
236 if (early_res == early_res_x)
237 start = 0;
238 else
239 start = early_res[0].end;
240 end = ex_start;
241 if (start + size < end)
242 mem = find_fw_memmap_area(start, end, size,
243 sizeof(struct early_res));
244 if (mem == -1ULL) {
245 start = ex_end;
246 end = get_max_mapped();
247 if (start + size < end)
248 mem = find_fw_memmap_area(start, end, size,
249 sizeof(struct early_res));
250 }
251 if (mem == -1ULL)
252 panic("can not find more space for early_res array");
253
254 new = __va(mem);
255 /* save the first one for own */
256 new[0].start = mem;
257 new[0].end = mem + size;
258 new[0].overlap_ok = 0;
259 /* copy old to new */
260 if (early_res == early_res_x) {
261 memcpy(&new[1], &early_res[0],
262 sizeof(struct early_res) * max_early_res);
263 memset(&new[max_early_res+1], 0,
264 sizeof(struct early_res) * (max_early_res - 1));
265 early_res_count++;
266 } else {
267 memcpy(&new[1], &early_res[1],
268 sizeof(struct early_res) * (max_early_res - 1));
269 memset(&new[max_early_res], 0,
270 sizeof(struct early_res) * max_early_res);
271 }
272 memset(&early_res[0], 0, sizeof(struct early_res) * max_early_res);
273 early_res = new;
274 max_early_res *= 2;
275 printk(KERN_DEBUG "early_res array is doubled to %d at [%llx - %llx]\n",
276 max_early_res, mem, mem + size - 1);
277}
278
279/*
280 * Most early reservations come here.
281 *
282 * We first have drop_overlaps_that_are_ok() drop any pre-existing
283 * 'overlap_ok' ranges, so that we can then reserve this memory
284 * range without risk of panic'ing on an overlapping overlap_ok
285 * early reservation.
286 */
287void __init reserve_early(u64 start, u64 end, char *name)
288{
289 if (start >= end)
290 return;
291
292 __check_and_double_early_res(start, end);
293
294 drop_overlaps_that_are_ok(start, end);
295 __reserve_early(start, end, name, 0);
296}
297
298void __init reserve_early_without_check(u64 start, u64 end, char *name)
299{
300 struct early_res *r;
301
302 if (start >= end)
303 return;
304
305 __check_and_double_early_res(start, end);
306
307 r = &early_res[early_res_count];
308
309 r->start = start;
310 r->end = end;
311 r->overlap_ok = 0;
312 if (name)
313 strncpy(r->name, name, sizeof(r->name) - 1);
314 early_res_count++;
315}
316
317void __init free_early(u64 start, u64 end)
318{
319 struct early_res *r;
320 int i;
321
322 i = find_overlapped_early(start, end);
323 r = &early_res[i];
324 if (i >= max_early_res || r->end != end || r->start != start)
325 panic("free_early on not reserved area: %llx-%llx!",
326 start, end - 1);
327
328 drop_range(i);
329}
330
331void __init free_early_partial(u64 start, u64 end)
332{
333 struct early_res *r;
334 int i;
335
336try_next:
337 i = find_overlapped_early(start, end);
338 if (i >= max_early_res)
339 return;
340
341 r = &early_res[i];
 342	/* does this reservation fully cover [start, end)? */
343 if (r->end >= end && r->start <= start) {
344 drop_range_partial(i, start, end);
345 return;
346 }
347
348 drop_range_partial(i, start, end);
349 goto try_next;
350}
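
/*
 * Illustrative example, not part of the kernel source: freeing the
 * middle of a reservation splits it (assuming drop_range_partial()
 * splits as its name suggests), and the retry loop above handles a
 * range that straddles several reservations. With invented numbers:
 *
 *	reserve_early(0x1000, 0x5000, "EX");
 *	free_early_partial(0x2000, 0x3000);
 *
 * leaves two reservations, [0x1000, 0x2000) and [0x3000, 0x5000).
 */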
351
352#ifdef CONFIG_NO_BOOTMEM
353static void __init subtract_early_res(struct range *range, int az)
354{
355 int i, count;
356 u64 final_start, final_end;
357 int idx = 0;
358
359 count = 0;
360 for (i = 0; i < max_early_res && early_res[i].end; i++)
361 count++;
362
 363	/* need to skip the first one? */
364 if (early_res != early_res_x)
365 idx = 1;
366
367#define DEBUG_PRINT_EARLY_RES 1
368
369#if DEBUG_PRINT_EARLY_RES
370 printk(KERN_INFO "Subtract (%d early reservations)\n", count);
371#endif
372 for (i = idx; i < count; i++) {
373 struct early_res *r = &early_res[i];
374#if DEBUG_PRINT_EARLY_RES
375 printk(KERN_INFO " #%d [%010llx - %010llx] %15s\n", i,
376 r->start, r->end, r->name);
377#endif
378 final_start = PFN_DOWN(r->start);
379 final_end = PFN_UP(r->end);
380 if (final_start >= final_end)
381 continue;
382 subtract_range(range, az, final_start, final_end);
383 }
384
385}
386
387int __init get_free_all_memory_range(struct range **rangep, int nodeid)
388{
389 int i, count;
390 u64 start = 0, end;
391 u64 size;
392 u64 mem;
393 struct range *range;
394 int nr_range;
395
396 count = 0;
397 for (i = 0; i < max_early_res && early_res[i].end; i++)
398 count++;
399
400 count *= 2;
401
402 size = sizeof(struct range) * count;
403 end = get_max_mapped();
404#ifdef MAX_DMA32_PFN
405 if (end > (MAX_DMA32_PFN << PAGE_SHIFT))
406 start = MAX_DMA32_PFN << PAGE_SHIFT;
407#endif
408 mem = find_fw_memmap_area(start, end, size, sizeof(struct range));
409 if (mem == -1ULL)
410 panic("can not find more space for range free");
411
412 range = __va(mem);
 413	/* first, build the range array from early_node_map[] and early_res */
414 memset(range, 0, size);
415 nr_range = 0;
416
 417	/* walk early_node_map to find the good ranges for this node */
418 nr_range = add_from_early_node_map(range, count, nr_range, nodeid);
419#ifdef CONFIG_X86_32
420 subtract_range(range, count, max_low_pfn, -1ULL);
421#endif
422 subtract_early_res(range, count);
423 nr_range = clean_sort_range(range, count);
424
 425	/* final pass (all nodes done)? then release early_res */
426 if (nodeid == MAX_NUMNODES) {
427 memset(&early_res[0], 0,
428 sizeof(struct early_res) * max_early_res);
429 early_res = NULL;
430 max_early_res = 0;
431 }
432
433 *rangep = range;
434 return nr_range;
435}
436#else
437void __init early_res_to_bootmem(u64 start, u64 end)
438{
439 int i, count;
440 u64 final_start, final_end;
441 int idx = 0;
442
443 count = 0;
444 for (i = 0; i < max_early_res && early_res[i].end; i++)
445 count++;
446
 447	/* need to skip the first one? */
448 if (early_res != early_res_x)
449 idx = 1;
450
451 printk(KERN_INFO "(%d/%d early reservations) ==> bootmem [%010llx - %010llx]\n",
452 count - idx, max_early_res, start, end);
453 for (i = idx; i < count; i++) {
454 struct early_res *r = &early_res[i];
455 printk(KERN_INFO " #%d [%010llx - %010llx] %16s", i,
456 r->start, r->end, r->name);
457 final_start = max(start, r->start);
458 final_end = min(end, r->end);
459 if (final_start >= final_end) {
460 printk(KERN_CONT "\n");
461 continue;
462 }
463 printk(KERN_CONT " ==> [%010llx - %010llx]\n",
464 final_start, final_end);
465 reserve_bootmem_generic(final_start, final_end - final_start,
466 BOOTMEM_DEFAULT);
467 }
468 /* clear them */
469 memset(&early_res[0], 0, sizeof(struct early_res) * max_early_res);
470 early_res = NULL;
471 max_early_res = 0;
472 early_res_count = 0;
473}
474#endif
475
476/* Check for already reserved areas */
477static inline int __init bad_addr(u64 *addrp, u64 size, u64 align)
478{
479 int i;
480 u64 addr = *addrp;
481 int changed = 0;
482 struct early_res *r;
483again:
484 i = find_overlapped_early(addr, addr + size);
485 r = &early_res[i];
486 if (i < max_early_res && r->end) {
487 *addrp = addr = round_up(r->end, align);
488 changed = 1;
489 goto again;
490 }
491 return changed;
492}
493
494/* Check for already reserved areas */
495static inline int __init bad_addr_size(u64 *addrp, u64 *sizep, u64 align)
496{
497 int i;
498 u64 addr = *addrp, last;
499 u64 size = *sizep;
500 int changed = 0;
501again:
502 last = addr + size;
503 for (i = 0; i < max_early_res && early_res[i].end; i++) {
504 struct early_res *r = &early_res[i];
505 if (last > r->start && addr < r->start) {
506 size = r->start - addr;
507 changed = 1;
508 goto again;
509 }
510 if (last > r->end && addr < r->end) {
511 addr = round_up(r->end, align);
512 size = last - addr;
513 changed = 1;
514 goto again;
515 }
516 if (last <= r->end && addr >= r->start) {
517 (*sizep)++;
518 return 0;
519 }
520 }
521 if (changed) {
522 *addrp = addr;
523 *sizep = size;
524 }
525 return changed;
526}
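
/*
 * Illustrative sketch, not part of the kernel source: a caller uses
 * bad_addr() to step a candidate address past every overlapping
 * reservation before committing to it.
 */
static u64 __init example_next_free(u64 candidate, u64 size, u64 align)
{
	/* bad_addr() returns nonzero if 'candidate' had to move;
	 * afterwards [candidate, candidate + size) overlaps no
	 * reservation */
	bad_addr(&candidate, size, align);
	return candidate;
}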
527
528/*
529 * Find a free area with specified alignment in a specific range.
 530 * Only the part of [start, end) that lies in an active range from
 531 * early_node_map is considered, so the result is guaranteed to be RAM.
532 */
533u64 __init find_early_area(u64 ei_start, u64 ei_last, u64 start, u64 end,
534 u64 size, u64 align)
535{
536 u64 addr, last;
537
538 addr = round_up(ei_start, align);
539 if (addr < start)
540 addr = round_up(start, align);
541 if (addr >= ei_last)
542 goto out;
543 while (bad_addr(&addr, size, align) && addr+size <= ei_last)
544 ;
545 last = addr + size;
546 if (last > ei_last)
547 goto out;
548 if (last > end)
549 goto out;
550
551 return addr;
552
553out:
554 return -1ULL;
555}
556
557u64 __init find_early_area_size(u64 ei_start, u64 ei_last, u64 start,
558 u64 *sizep, u64 align)
559{
560 u64 addr, last;
561
562 addr = round_up(ei_start, align);
563 if (addr < start)
564 addr = round_up(start, align);
565 if (addr >= ei_last)
566 goto out;
567 *sizep = ei_last - addr;
568 while (bad_addr_size(&addr, sizep, align) && addr + *sizep <= ei_last)
569 ;
570 last = addr + *sizep;
571 if (last > ei_last)
572 goto out;
573
574 return addr;
575
576out:
577 return -1ULL;
578}
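
/*
 * Illustrative sketch, not part of the kernel source: typical use of
 * find_early_area() -- search one usable region for 'size' bytes and
 * reserve whatever is found. The name "EXAMPLE" is invented.
 */
static u64 __init example_early_alloc(u64 ei_start, u64 ei_last, u64 size)
{
	u64 addr = find_early_area(ei_start, ei_last, ei_start, ei_last,
				   size, PAGE_SIZE);

	if (addr != -1ULL)
		reserve_early(addr, addr + size, "EXAMPLE");
	return addr;
}
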
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index ecc3fa28f666..d70394f12ee9 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -18,11 +18,7 @@
18 18
19#include "internals.h" 19#include "internals.h"
20 20
21/** 21static void dynamic_irq_init_x(unsigned int irq, bool keep_chip_data)
22 * dynamic_irq_init - initialize a dynamically allocated irq
23 * @irq: irq number to initialize
24 */
25void dynamic_irq_init(unsigned int irq)
26{ 22{
27 struct irq_desc *desc; 23 struct irq_desc *desc;
28 unsigned long flags; 24 unsigned long flags;
@@ -41,7 +37,8 @@ void dynamic_irq_init(unsigned int irq)
41 desc->depth = 1; 37 desc->depth = 1;
42 desc->msi_desc = NULL; 38 desc->msi_desc = NULL;
43 desc->handler_data = NULL; 39 desc->handler_data = NULL;
44 desc->chip_data = NULL; 40 if (!keep_chip_data)
41 desc->chip_data = NULL;
45 desc->action = NULL; 42 desc->action = NULL;
46 desc->irq_count = 0; 43 desc->irq_count = 0;
47 desc->irqs_unhandled = 0; 44 desc->irqs_unhandled = 0;
@@ -55,10 +52,26 @@ void dynamic_irq_init(unsigned int irq)
55} 52}
56 53
57/** 54/**
58 * dynamic_irq_cleanup - cleanup a dynamically allocated irq 55 * dynamic_irq_init - initialize a dynamically allocated irq
59 * @irq: irq number to initialize 56 * @irq: irq number to initialize
60 */ 57 */
61void dynamic_irq_cleanup(unsigned int irq) 58void dynamic_irq_init(unsigned int irq)
59{
60 dynamic_irq_init_x(irq, false);
61}
62
63/**
64 * dynamic_irq_init_keep_chip_data - initialize a dynamically allocated irq
65 * @irq: irq number to initialize
66 *
67 * does not set irq_to_desc(irq)->chip_data to NULL
68 */
69void dynamic_irq_init_keep_chip_data(unsigned int irq)
70{
71 dynamic_irq_init_x(irq, true);
72}
73
74static void dynamic_irq_cleanup_x(unsigned int irq, bool keep_chip_data)
62{ 75{
63 struct irq_desc *desc = irq_to_desc(irq); 76 struct irq_desc *desc = irq_to_desc(irq);
64 unsigned long flags; 77 unsigned long flags;
@@ -77,7 +90,8 @@ void dynamic_irq_cleanup(unsigned int irq)
77 } 90 }
78 desc->msi_desc = NULL; 91 desc->msi_desc = NULL;
79 desc->handler_data = NULL; 92 desc->handler_data = NULL;
80 desc->chip_data = NULL; 93 if (!keep_chip_data)
94 desc->chip_data = NULL;
81 desc->handle_irq = handle_bad_irq; 95 desc->handle_irq = handle_bad_irq;
82 desc->chip = &no_irq_chip; 96 desc->chip = &no_irq_chip;
83 desc->name = NULL; 97 desc->name = NULL;
@@ -85,6 +99,26 @@ void dynamic_irq_cleanup(unsigned int irq)
85 raw_spin_unlock_irqrestore(&desc->lock, flags); 99 raw_spin_unlock_irqrestore(&desc->lock, flags);
86} 100}
87 101
102/**
103 * dynamic_irq_cleanup - cleanup a dynamically allocated irq
104 * @irq: irq number to initialize
105 */
106void dynamic_irq_cleanup(unsigned int irq)
107{
108 dynamic_irq_cleanup_x(irq, false);
109}
110
111/**
112 * dynamic_irq_cleanup_keep_chip_data - cleanup a dynamically allocated irq
113 * @irq: irq number to initialize
114 *
115 * does not set irq_to_desc(irq)->chip_data to NULL
116 */
117void dynamic_irq_cleanup_keep_chip_data(unsigned int irq)
118{
119 dynamic_irq_cleanup_x(irq, true);
120}
121
88 122
89/** 123/**
90 * set_irq_chip - set the irq chip for an irq 124 * set_irq_chip - set the irq chip for an irq
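
/*
 * Illustrative sketch, not part of this patch: the new
 * _keep_chip_data variants are for drivers whose chip_data is
 * allocated by the irq chip itself and must survive
 * re-initialization. A hypothetical caller:
 *
 *	dynamic_irq_cleanup_keep_chip_data(irq);
 *	dynamic_irq_init_keep_chip_data(irq);
 *
 * chip_data set before the cleanup is still valid afterwards.
 */
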
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index 814940e7f485..76d5a671bfe1 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -19,7 +19,7 @@
19#include <linux/kernel_stat.h> 19#include <linux/kernel_stat.h>
20#include <linux/rculist.h> 20#include <linux/rculist.h>
21#include <linux/hash.h> 21#include <linux/hash.h>
22#include <linux/bootmem.h> 22#include <linux/radix-tree.h>
23#include <trace/events/irq.h> 23#include <trace/events/irq.h>
24 24
25#include "internals.h" 25#include "internals.h"
@@ -87,12 +87,8 @@ void __ref init_kstat_irqs(struct irq_desc *desc, int node, int nr)
87{ 87{
88 void *ptr; 88 void *ptr;
89 89
90 if (slab_is_available()) 90 ptr = kzalloc_node(nr * sizeof(*desc->kstat_irqs),
91 ptr = kzalloc_node(nr * sizeof(*desc->kstat_irqs), 91 GFP_ATOMIC, node);
92 GFP_ATOMIC, node);
93 else
94 ptr = alloc_bootmem_node(NODE_DATA(node),
95 nr * sizeof(*desc->kstat_irqs));
96 92
97 /* 93 /*
98 * don't overwite if can not get new one 94 * don't overwite if can not get new one
@@ -132,7 +128,26 @@ static void init_one_irq_desc(int irq, struct irq_desc *desc, int node)
132 */ 128 */
133DEFINE_RAW_SPINLOCK(sparse_irq_lock); 129DEFINE_RAW_SPINLOCK(sparse_irq_lock);
134 130
135struct irq_desc **irq_desc_ptrs __read_mostly; 131static RADIX_TREE(irq_desc_tree, GFP_ATOMIC);
132
133static void set_irq_desc(unsigned int irq, struct irq_desc *desc)
134{
135 radix_tree_insert(&irq_desc_tree, irq, desc);
136}
137
138struct irq_desc *irq_to_desc(unsigned int irq)
139{
140 return radix_tree_lookup(&irq_desc_tree, irq);
141}
142
143void replace_irq_desc(unsigned int irq, struct irq_desc *desc)
144{
145 void **ptr;
146
147 ptr = radix_tree_lookup_slot(&irq_desc_tree, irq);
148 if (ptr)
149 radix_tree_replace_slot(ptr, desc);
150}
136 151
137static struct irq_desc irq_desc_legacy[NR_IRQS_LEGACY] __cacheline_aligned_in_smp = { 152static struct irq_desc irq_desc_legacy[NR_IRQS_LEGACY] __cacheline_aligned_in_smp = {
138 [0 ... NR_IRQS_LEGACY-1] = { 153 [0 ... NR_IRQS_LEGACY-1] = {
@@ -164,9 +179,6 @@ int __init early_irq_init(void)
164 legacy_count = ARRAY_SIZE(irq_desc_legacy); 179 legacy_count = ARRAY_SIZE(irq_desc_legacy);
165 node = first_online_node; 180 node = first_online_node;
166 181
167 /* allocate irq_desc_ptrs array based on nr_irqs */
168 irq_desc_ptrs = kcalloc(nr_irqs, sizeof(void *), GFP_NOWAIT);
169
170 /* allocate based on nr_cpu_ids */ 182 /* allocate based on nr_cpu_ids */
171 kstat_irqs_legacy = kzalloc_node(NR_IRQS_LEGACY * nr_cpu_ids * 183 kstat_irqs_legacy = kzalloc_node(NR_IRQS_LEGACY * nr_cpu_ids *
172 sizeof(int), GFP_NOWAIT, node); 184 sizeof(int), GFP_NOWAIT, node);
@@ -180,23 +192,12 @@ int __init early_irq_init(void)
180 lockdep_set_class(&desc[i].lock, &irq_desc_lock_class); 192 lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
181 alloc_desc_masks(&desc[i], node, true); 193 alloc_desc_masks(&desc[i], node, true);
182 init_desc_masks(&desc[i]); 194 init_desc_masks(&desc[i]);
183 irq_desc_ptrs[i] = desc + i; 195 set_irq_desc(i, &desc[i]);
184 } 196 }
185 197
186 for (i = legacy_count; i < nr_irqs; i++)
187 irq_desc_ptrs[i] = NULL;
188
189 return arch_early_irq_init(); 198 return arch_early_irq_init();
190} 199}
191 200
192struct irq_desc *irq_to_desc(unsigned int irq)
193{
194 if (irq_desc_ptrs && irq < nr_irqs)
195 return irq_desc_ptrs[irq];
196
197 return NULL;
198}
199
200struct irq_desc * __ref irq_to_desc_alloc_node(unsigned int irq, int node) 201struct irq_desc * __ref irq_to_desc_alloc_node(unsigned int irq, int node)
201{ 202{
202 struct irq_desc *desc; 203 struct irq_desc *desc;
@@ -208,21 +209,18 @@ struct irq_desc * __ref irq_to_desc_alloc_node(unsigned int irq, int node)
208 return NULL; 209 return NULL;
209 } 210 }
210 211
211 desc = irq_desc_ptrs[irq]; 212 desc = irq_to_desc(irq);
212 if (desc) 213 if (desc)
213 return desc; 214 return desc;
214 215
215 raw_spin_lock_irqsave(&sparse_irq_lock, flags); 216 raw_spin_lock_irqsave(&sparse_irq_lock, flags);
216 217
217 /* We have to check it to avoid races with another CPU */ 218 /* We have to check it to avoid races with another CPU */
218 desc = irq_desc_ptrs[irq]; 219 desc = irq_to_desc(irq);
219 if (desc) 220 if (desc)
220 goto out_unlock; 221 goto out_unlock;
221 222
222 if (slab_is_available()) 223 desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node);
223 desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node);
224 else
225 desc = alloc_bootmem_node(NODE_DATA(node), sizeof(*desc));
226 224
227 printk(KERN_DEBUG " alloc irq_desc for %d on node %d\n", irq, node); 225 printk(KERN_DEBUG " alloc irq_desc for %d on node %d\n", irq, node);
228 if (!desc) { 226 if (!desc) {
@@ -231,7 +229,7 @@ struct irq_desc * __ref irq_to_desc_alloc_node(unsigned int irq, int node)
231 } 229 }
232 init_one_irq_desc(irq, desc, node); 230 init_one_irq_desc(irq, desc, node);
233 231
234 irq_desc_ptrs[irq] = desc; 232 set_irq_desc(irq, desc);
235 233
236out_unlock: 234out_unlock:
237 raw_spin_unlock_irqrestore(&sparse_irq_lock, flags); 235 raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);
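
/*
 * Illustrative sketch, not part of this patch: with the radix tree in
 * place, irq_to_desc() is a plain radix_tree_lookup(), so a caller can
 * cheaply test whether a descriptor was ever allocated:
 *
 *	struct irq_desc *desc = irq_to_desc(irq);
 *	if (!desc)
 *		return -EINVAL;	/* never allocated for this irq */
 *
 * Unlike the old irq_desc_ptrs[] array, no nr_irqs bound check is
 * needed; an absent entry simply returns NULL.
 */
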
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index b2821f070a3d..c63f3bc88f0b 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -21,11 +21,7 @@ extern void clear_kstat_irqs(struct irq_desc *desc);
21extern raw_spinlock_t sparse_irq_lock; 21extern raw_spinlock_t sparse_irq_lock;
22 22
23#ifdef CONFIG_SPARSE_IRQ 23#ifdef CONFIG_SPARSE_IRQ
24/* irq_desc_ptrs allocated at boot time */ 24void replace_irq_desc(unsigned int irq, struct irq_desc *desc);
25extern struct irq_desc **irq_desc_ptrs;
26#else
27/* irq_desc_ptrs is a fixed size array */
28extern struct irq_desc *irq_desc_ptrs[NR_IRQS];
29#endif 25#endif
30 26
31#ifdef CONFIG_PROC_FS 27#ifdef CONFIG_PROC_FS
diff --git a/kernel/irq/numa_migrate.c b/kernel/irq/numa_migrate.c
index 26bac9d8f860..963559dbd858 100644
--- a/kernel/irq/numa_migrate.c
+++ b/kernel/irq/numa_migrate.c
@@ -70,7 +70,7 @@ static struct irq_desc *__real_move_irq_desc(struct irq_desc *old_desc,
70 raw_spin_lock_irqsave(&sparse_irq_lock, flags); 70 raw_spin_lock_irqsave(&sparse_irq_lock, flags);
71 71
72 /* We have to check it to avoid races with another CPU */ 72 /* We have to check it to avoid races with another CPU */
73 desc = irq_desc_ptrs[irq]; 73 desc = irq_to_desc(irq);
74 74
75 if (desc && old_desc != desc) 75 if (desc && old_desc != desc)
76 goto out_unlock; 76 goto out_unlock;
@@ -90,7 +90,7 @@ static struct irq_desc *__real_move_irq_desc(struct irq_desc *old_desc,
90 goto out_unlock; 90 goto out_unlock;
91 } 91 }
92 92
93 irq_desc_ptrs[irq] = desc; 93 replace_irq_desc(irq, desc);
94 raw_spin_unlock_irqrestore(&sparse_irq_lock, flags); 94 raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);
95 95
96 /* free the old one */ 96 /* free the old one */
diff --git a/kernel/kexec.c b/kernel/kexec.c
index ef077fb73155..87ebe8adc474 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -41,7 +41,7 @@
41#include <asm/sections.h> 41#include <asm/sections.h>
42 42
43/* Per cpu memory for storing cpu states in case of system crash. */ 43/* Per cpu memory for storing cpu states in case of system crash. */
44note_buf_t* crash_notes; 44note_buf_t __percpu *crash_notes;
45 45
46/* vmcoreinfo stuff */ 46/* vmcoreinfo stuff */
47static unsigned char vmcoreinfo_data[VMCOREINFO_BYTES]; 47static unsigned char vmcoreinfo_data[VMCOREINFO_BYTES];
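
/*
 * Illustrative note, not part of this patch: the __percpu annotation
 * lets sparse verify that crash_notes is only dereferenced through the
 * per-cpu accessors, e.g.:
 *
 *	note_buf_t *buf = per_cpu_ptr(crash_notes, cpu);
 *
 * A direct dereference of crash_notes would now be flagged.
 */
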
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index ccec774c716d..fa034d29cf73 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -42,9 +42,11 @@
42#include <linux/freezer.h> 42#include <linux/freezer.h>
43#include <linux/seq_file.h> 43#include <linux/seq_file.h>
44#include <linux/debugfs.h> 44#include <linux/debugfs.h>
45#include <linux/sysctl.h>
45#include <linux/kdebug.h> 46#include <linux/kdebug.h>
46#include <linux/memory.h> 47#include <linux/memory.h>
47#include <linux/ftrace.h> 48#include <linux/ftrace.h>
49#include <linux/cpu.h>
48 50
49#include <asm-generic/sections.h> 51#include <asm-generic/sections.h>
50#include <asm/cacheflush.h> 52#include <asm/cacheflush.h>
@@ -105,57 +107,74 @@ static struct kprobe_blackpoint kprobe_blacklist[] = {
105 * stepping on the instruction on a vmalloced/kmalloced/data page 107 * stepping on the instruction on a vmalloced/kmalloced/data page
106 * is a recipe for disaster 108 * is a recipe for disaster
107 */ 109 */
108#define INSNS_PER_PAGE (PAGE_SIZE/(MAX_INSN_SIZE * sizeof(kprobe_opcode_t)))
109
110struct kprobe_insn_page { 110struct kprobe_insn_page {
111 struct list_head list; 111 struct list_head list;
112 kprobe_opcode_t *insns; /* Page of instruction slots */ 112 kprobe_opcode_t *insns; /* Page of instruction slots */
113 char slot_used[INSNS_PER_PAGE];
114 int nused; 113 int nused;
115 int ngarbage; 114 int ngarbage;
115 char slot_used[];
116};
117
118#define KPROBE_INSN_PAGE_SIZE(slots) \
119 (offsetof(struct kprobe_insn_page, slot_used) + \
120 (sizeof(char) * (slots)))
121
122struct kprobe_insn_cache {
123 struct list_head pages; /* list of kprobe_insn_page */
124 size_t insn_size; /* size of instruction slot */
125 int nr_garbage;
116}; 126};
117 127
128static int slots_per_page(struct kprobe_insn_cache *c)
129{
130 return PAGE_SIZE/(c->insn_size * sizeof(kprobe_opcode_t));
131}
132
118enum kprobe_slot_state { 133enum kprobe_slot_state {
119 SLOT_CLEAN = 0, 134 SLOT_CLEAN = 0,
120 SLOT_DIRTY = 1, 135 SLOT_DIRTY = 1,
121 SLOT_USED = 2, 136 SLOT_USED = 2,
122}; 137};
123 138
124static DEFINE_MUTEX(kprobe_insn_mutex); /* Protects kprobe_insn_pages */ 139static DEFINE_MUTEX(kprobe_insn_mutex); /* Protects kprobe_insn_slots */
125static LIST_HEAD(kprobe_insn_pages); 140static struct kprobe_insn_cache kprobe_insn_slots = {
126static int kprobe_garbage_slots; 141 .pages = LIST_HEAD_INIT(kprobe_insn_slots.pages),
127static int collect_garbage_slots(void); 142 .insn_size = MAX_INSN_SIZE,
143 .nr_garbage = 0,
144};
145static int __kprobes collect_garbage_slots(struct kprobe_insn_cache *c);
128 146
129/** 147/**
130 * __get_insn_slot() - Find a slot on an executable page for an instruction. 148 * __get_insn_slot() - Find a slot on an executable page for an instruction.
131 * We allocate an executable page if there's no room on existing ones. 149 * We allocate an executable page if there's no room on existing ones.
132 */ 150 */
133static kprobe_opcode_t __kprobes *__get_insn_slot(void) 151static kprobe_opcode_t __kprobes *__get_insn_slot(struct kprobe_insn_cache *c)
134{ 152{
135 struct kprobe_insn_page *kip; 153 struct kprobe_insn_page *kip;
136 154
137 retry: 155 retry:
138 list_for_each_entry(kip, &kprobe_insn_pages, list) { 156 list_for_each_entry(kip, &c->pages, list) {
139 if (kip->nused < INSNS_PER_PAGE) { 157 if (kip->nused < slots_per_page(c)) {
140 int i; 158 int i;
141 for (i = 0; i < INSNS_PER_PAGE; i++) { 159 for (i = 0; i < slots_per_page(c); i++) {
142 if (kip->slot_used[i] == SLOT_CLEAN) { 160 if (kip->slot_used[i] == SLOT_CLEAN) {
143 kip->slot_used[i] = SLOT_USED; 161 kip->slot_used[i] = SLOT_USED;
144 kip->nused++; 162 kip->nused++;
145 return kip->insns + (i * MAX_INSN_SIZE); 163 return kip->insns + (i * c->insn_size);
146 } 164 }
147 } 165 }
148 /* Surprise! No unused slots. Fix kip->nused. */ 166 /* kip->nused is broken. Fix it. */
149 kip->nused = INSNS_PER_PAGE; 167 kip->nused = slots_per_page(c);
168 WARN_ON(1);
150 } 169 }
151 } 170 }
152 171
153 /* If there are any garbage slots, collect it and try again. */ 172 /* If there are any garbage slots, collect it and try again. */
154 if (kprobe_garbage_slots && collect_garbage_slots() == 0) { 173 if (c->nr_garbage && collect_garbage_slots(c) == 0)
155 goto retry; 174 goto retry;
156 } 175
157 /* All out of space. Need to allocate a new page. Use slot 0. */ 176 /* All out of space. Need to allocate a new page. */
158 kip = kmalloc(sizeof(struct kprobe_insn_page), GFP_KERNEL); 177 kip = kmalloc(KPROBE_INSN_PAGE_SIZE(slots_per_page(c)), GFP_KERNEL);
159 if (!kip) 178 if (!kip)
160 return NULL; 179 return NULL;
161 180
@@ -170,20 +189,23 @@ static kprobe_opcode_t __kprobes *__get_insn_slot(void)
170 return NULL; 189 return NULL;
171 } 190 }
172 INIT_LIST_HEAD(&kip->list); 191 INIT_LIST_HEAD(&kip->list);
173 list_add(&kip->list, &kprobe_insn_pages); 192 memset(kip->slot_used, SLOT_CLEAN, slots_per_page(c));
174 memset(kip->slot_used, SLOT_CLEAN, INSNS_PER_PAGE);
175 kip->slot_used[0] = SLOT_USED; 193 kip->slot_used[0] = SLOT_USED;
176 kip->nused = 1; 194 kip->nused = 1;
177 kip->ngarbage = 0; 195 kip->ngarbage = 0;
196 list_add(&kip->list, &c->pages);
178 return kip->insns; 197 return kip->insns;
179} 198}
180 199
200
181kprobe_opcode_t __kprobes *get_insn_slot(void) 201kprobe_opcode_t __kprobes *get_insn_slot(void)
182{ 202{
183 kprobe_opcode_t *ret; 203 kprobe_opcode_t *ret = NULL;
204
184 mutex_lock(&kprobe_insn_mutex); 205 mutex_lock(&kprobe_insn_mutex);
185 ret = __get_insn_slot(); 206 ret = __get_insn_slot(&kprobe_insn_slots);
186 mutex_unlock(&kprobe_insn_mutex); 207 mutex_unlock(&kprobe_insn_mutex);
208
187 return ret; 209 return ret;
188} 210}
189 211
@@ -199,7 +221,7 @@ static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
199 * so as not to have to set it up again the 221 * so as not to have to set it up again the
200 * next time somebody inserts a probe. 222 * next time somebody inserts a probe.
201 */ 223 */
202 if (!list_is_singular(&kprobe_insn_pages)) { 224 if (!list_is_singular(&kip->list)) {
203 list_del(&kip->list); 225 list_del(&kip->list);
204 module_free(NULL, kip->insns); 226 module_free(NULL, kip->insns);
205 kfree(kip); 227 kfree(kip);
@@ -209,51 +231,84 @@ static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
209 return 0; 231 return 0;
210} 232}
211 233
212static int __kprobes collect_garbage_slots(void) 234static int __kprobes collect_garbage_slots(struct kprobe_insn_cache *c)
213{ 235{
214 struct kprobe_insn_page *kip, *next; 236 struct kprobe_insn_page *kip, *next;
215 237
216 /* Ensure no-one is interrupted on the garbages */ 238 /* Ensure no-one is interrupted on the garbages */
217 synchronize_sched(); 239 synchronize_sched();
218 240
219 list_for_each_entry_safe(kip, next, &kprobe_insn_pages, list) { 241 list_for_each_entry_safe(kip, next, &c->pages, list) {
220 int i; 242 int i;
221 if (kip->ngarbage == 0) 243 if (kip->ngarbage == 0)
222 continue; 244 continue;
223 kip->ngarbage = 0; /* we will collect all garbages */ 245 kip->ngarbage = 0; /* we will collect all garbages */
224 for (i = 0; i < INSNS_PER_PAGE; i++) { 246 for (i = 0; i < slots_per_page(c); i++) {
225 if (kip->slot_used[i] == SLOT_DIRTY && 247 if (kip->slot_used[i] == SLOT_DIRTY &&
226 collect_one_slot(kip, i)) 248 collect_one_slot(kip, i))
227 break; 249 break;
228 } 250 }
229 } 251 }
230 kprobe_garbage_slots = 0; 252 c->nr_garbage = 0;
231 return 0; 253 return 0;
232} 254}
233 255
234void __kprobes free_insn_slot(kprobe_opcode_t * slot, int dirty) 256static void __kprobes __free_insn_slot(struct kprobe_insn_cache *c,
257 kprobe_opcode_t *slot, int dirty)
235{ 258{
236 struct kprobe_insn_page *kip; 259 struct kprobe_insn_page *kip;
237 260
238 mutex_lock(&kprobe_insn_mutex); 261 list_for_each_entry(kip, &c->pages, list) {
239 list_for_each_entry(kip, &kprobe_insn_pages, list) { 262 long idx = ((long)slot - (long)kip->insns) / c->insn_size;
240 if (kip->insns <= slot && 263 if (idx >= 0 && idx < slots_per_page(c)) {
241 slot < kip->insns + (INSNS_PER_PAGE * MAX_INSN_SIZE)) { 264 WARN_ON(kip->slot_used[idx] != SLOT_USED);
242 int i = (slot - kip->insns) / MAX_INSN_SIZE;
243 if (dirty) { 265 if (dirty) {
244 kip->slot_used[i] = SLOT_DIRTY; 266 kip->slot_used[idx] = SLOT_DIRTY;
245 kip->ngarbage++; 267 kip->ngarbage++;
268 if (++c->nr_garbage > slots_per_page(c))
269 collect_garbage_slots(c);
246 } else 270 } else
247 collect_one_slot(kip, i); 271 collect_one_slot(kip, idx);
248 break; 272 return;
249 } 273 }
250 } 274 }
275 /* Could not free this slot. */
276 WARN_ON(1);
277}
251 278
252 if (dirty && ++kprobe_garbage_slots > INSNS_PER_PAGE) 279void __kprobes free_insn_slot(kprobe_opcode_t * slot, int dirty)
253 collect_garbage_slots(); 280{
254 281 mutex_lock(&kprobe_insn_mutex);
282 __free_insn_slot(&kprobe_insn_slots, slot, dirty);
255 mutex_unlock(&kprobe_insn_mutex); 283 mutex_unlock(&kprobe_insn_mutex);
256} 284}
285#ifdef CONFIG_OPTPROBES
286/* For optimized_kprobe buffer */
287static DEFINE_MUTEX(kprobe_optinsn_mutex); /* Protects kprobe_optinsn_slots */
288static struct kprobe_insn_cache kprobe_optinsn_slots = {
289 .pages = LIST_HEAD_INIT(kprobe_optinsn_slots.pages),
290 /* .insn_size is initialized later */
291 .nr_garbage = 0,
292};
293/* Get a slot for optimized_kprobe buffer */
294kprobe_opcode_t __kprobes *get_optinsn_slot(void)
295{
296 kprobe_opcode_t *ret = NULL;
297
298 mutex_lock(&kprobe_optinsn_mutex);
299 ret = __get_insn_slot(&kprobe_optinsn_slots);
300 mutex_unlock(&kprobe_optinsn_mutex);
301
302 return ret;
303}
304
305void __kprobes free_optinsn_slot(kprobe_opcode_t * slot, int dirty)
306{
307 mutex_lock(&kprobe_optinsn_mutex);
308 __free_insn_slot(&kprobe_optinsn_slots, slot, dirty);
309 mutex_unlock(&kprobe_optinsn_mutex);
310}
311#endif
257#endif 312#endif
258 313
259/* We have preemption disabled.. so it is safe to use __ versions */ 314/* We have preemption disabled.. so it is safe to use __ versions */
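
/*
 * Illustrative arithmetic, not from the patch: with the cache
 * generalized, the slot count per page follows the cache's insn_size.
 * On a 4096-byte page with insn_size == 16 and
 * sizeof(kprobe_opcode_t) == 1, slots_per_page() yields 256; the
 * optinsn cache simply plugs in MAX_OPTINSN_SIZE instead.
 */
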
@@ -284,23 +339,401 @@ struct kprobe __kprobes *get_kprobe(void *addr)
284 if (p->addr == addr) 339 if (p->addr == addr)
285 return p; 340 return p;
286 } 341 }
342
343 return NULL;
344}
345
346static int __kprobes aggr_pre_handler(struct kprobe *p, struct pt_regs *regs);
347
348/* Return true if the kprobe is an aggregator */
349static inline int kprobe_aggrprobe(struct kprobe *p)
350{
351 return p->pre_handler == aggr_pre_handler;
352}
353
354/*
355 * Keep all fields in the kprobe consistent
356 */
357static inline void copy_kprobe(struct kprobe *old_p, struct kprobe *p)
358{
359 memcpy(&p->opcode, &old_p->opcode, sizeof(kprobe_opcode_t));
360 memcpy(&p->ainsn, &old_p->ainsn, sizeof(struct arch_specific_insn));
361}
362
363#ifdef CONFIG_OPTPROBES
364/* NOTE: change this value only with kprobe_mutex held */
365static bool kprobes_allow_optimization;
366
367/*
 368 * Call all pre_handlers on the list, ignoring their return values.
 369 * This must be called from the arch-dependent optimized caller.
370 */
371void __kprobes opt_pre_handler(struct kprobe *p, struct pt_regs *regs)
372{
373 struct kprobe *kp;
374
375 list_for_each_entry_rcu(kp, &p->list, list) {
376 if (kp->pre_handler && likely(!kprobe_disabled(kp))) {
377 set_kprobe_instance(kp);
378 kp->pre_handler(kp, regs);
379 }
380 reset_kprobe_instance();
381 }
382}
383
384/* Return true(!0) if the kprobe is ready for optimization. */
385static inline int kprobe_optready(struct kprobe *p)
386{
387 struct optimized_kprobe *op;
388
389 if (kprobe_aggrprobe(p)) {
390 op = container_of(p, struct optimized_kprobe, kp);
391 return arch_prepared_optinsn(&op->optinsn);
392 }
393
394 return 0;
395}
396
397/*
 398 * Return an optimized kprobe whose optimizing code replaces
 399 * instructions including addr (excluding the breakpoint itself).
400 */
401struct kprobe *__kprobes get_optimized_kprobe(unsigned long addr)
402{
403 int i;
404 struct kprobe *p = NULL;
405 struct optimized_kprobe *op;
406
407 /* Don't check i == 0, since that is a breakpoint case. */
408 for (i = 1; !p && i < MAX_OPTIMIZED_LENGTH; i++)
409 p = get_kprobe((void *)(addr - i));
410
411 if (p && kprobe_optready(p)) {
412 op = container_of(p, struct optimized_kprobe, kp);
413 if (arch_within_optimized_kprobe(op, addr))
414 return p;
415 }
416
287 return NULL; 417 return NULL;
288} 418}
289 419
420/* Optimization staging list, protected by kprobe_mutex */
421static LIST_HEAD(optimizing_list);
422
423static void kprobe_optimizer(struct work_struct *work);
424static DECLARE_DELAYED_WORK(optimizing_work, kprobe_optimizer);
425#define OPTIMIZE_DELAY 5
426
427/* Kprobe jump optimizer */
428static __kprobes void kprobe_optimizer(struct work_struct *work)
429{
430 struct optimized_kprobe *op, *tmp;
431
432 /* Lock modules while optimizing kprobes */
433 mutex_lock(&module_mutex);
434 mutex_lock(&kprobe_mutex);
435 if (kprobes_all_disarmed || !kprobes_allow_optimization)
436 goto end;
437
438 /*
 439	 * Wait for a quiescent period to ensure all running interrupts
 440	 * are done. Because an optprobe may modify multiple instructions,
 441	 * there is a chance that the Nth instruction is interrupted. In that
 442	 * case, a running interrupt can return into the 2nd-Nth byte of the
 443	 * jump instruction. This wait avoids that.
444 */
445 synchronize_sched();
446
447 /*
 448	 * Optimization and unoptimization refer to online_cpus via
 449	 * stop_machine(), while cpu-hotplug modifies online_cpus. At the
 450	 * same time, text_mutex is held both here and during cpu-hotplug.
 451	 * This combination can deadlock: cpu-hotplug tries to take
 452	 * text_mutex while stop_machine() cannot finish because
 453	 * online_cpus has changed.
 454	 * To avoid this deadlock, we call get_online_cpus() so that
 455	 * cpu-hotplug is excluded for the whole text_mutex critical section.
456 */
457 get_online_cpus();
458 mutex_lock(&text_mutex);
459 list_for_each_entry_safe(op, tmp, &optimizing_list, list) {
460 WARN_ON(kprobe_disabled(&op->kp));
461 if (arch_optimize_kprobe(op) < 0)
462 op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
463 list_del_init(&op->list);
464 }
465 mutex_unlock(&text_mutex);
466 put_online_cpus();
467end:
468 mutex_unlock(&kprobe_mutex);
469 mutex_unlock(&module_mutex);
470}
471
472/* Optimize kprobe if p is ready to be optimized */
473static __kprobes void optimize_kprobe(struct kprobe *p)
474{
475 struct optimized_kprobe *op;
476
477 /* Check if the kprobe is disabled or not ready for optimization. */
478 if (!kprobe_optready(p) || !kprobes_allow_optimization ||
479 (kprobe_disabled(p) || kprobes_all_disarmed))
480 return;
481
482 /* Both of break_handler and post_handler are not supported. */
483 if (p->break_handler || p->post_handler)
484 return;
485
486 op = container_of(p, struct optimized_kprobe, kp);
487
 488	/* Check that no other kprobe is at the instructions to be optimized */
489 if (arch_check_optimized_kprobe(op) < 0)
490 return;
491
492 /* Check if it is already optimized. */
493 if (op->kp.flags & KPROBE_FLAG_OPTIMIZED)
494 return;
495
496 op->kp.flags |= KPROBE_FLAG_OPTIMIZED;
497 list_add(&op->list, &optimizing_list);
498 if (!delayed_work_pending(&optimizing_work))
499 schedule_delayed_work(&optimizing_work, OPTIMIZE_DELAY);
500}
501
502/* Unoptimize a kprobe if p is optimized */
503static __kprobes void unoptimize_kprobe(struct kprobe *p)
504{
505 struct optimized_kprobe *op;
506
507 if ((p->flags & KPROBE_FLAG_OPTIMIZED) && kprobe_aggrprobe(p)) {
508 op = container_of(p, struct optimized_kprobe, kp);
509 if (!list_empty(&op->list))
510 /* Dequeue from the optimization queue */
511 list_del_init(&op->list);
512 else
513 /* Replace jump with break */
514 arch_unoptimize_kprobe(op);
515 op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
516 }
517}
518
519/* Remove optimized instructions */
520static void __kprobes kill_optimized_kprobe(struct kprobe *p)
521{
522 struct optimized_kprobe *op;
523
524 op = container_of(p, struct optimized_kprobe, kp);
525 if (!list_empty(&op->list)) {
526 /* Dequeue from the optimization queue */
527 list_del_init(&op->list);
528 op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
529 }
530 /* Don't unoptimize, because the target code will be freed. */
531 arch_remove_optimized_kprobe(op);
532}
533
534/* Try to prepare optimized instructions */
535static __kprobes void prepare_optimized_kprobe(struct kprobe *p)
536{
537 struct optimized_kprobe *op;
538
539 op = container_of(p, struct optimized_kprobe, kp);
540 arch_prepare_optimized_kprobe(op);
541}
542
543/* Free optimized instructions and optimized_kprobe */
544static __kprobes void free_aggr_kprobe(struct kprobe *p)
545{
546 struct optimized_kprobe *op;
547
548 op = container_of(p, struct optimized_kprobe, kp);
549 arch_remove_optimized_kprobe(op);
550 kfree(op);
551}
552
553/* Allocate new optimized_kprobe and try to prepare optimized instructions */
554static __kprobes struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
555{
556 struct optimized_kprobe *op;
557
558 op = kzalloc(sizeof(struct optimized_kprobe), GFP_KERNEL);
559 if (!op)
560 return NULL;
561
562 INIT_LIST_HEAD(&op->list);
563 op->kp.addr = p->addr;
564 arch_prepare_optimized_kprobe(op);
565
566 return &op->kp;
567}
568
569static void __kprobes init_aggr_kprobe(struct kprobe *ap, struct kprobe *p);
570
571/*
572 * Prepare an optimized_kprobe and optimize it
573 * NOTE: p must be a normal registered kprobe
574 */
575static __kprobes void try_to_optimize_kprobe(struct kprobe *p)
576{
577 struct kprobe *ap;
578 struct optimized_kprobe *op;
579
580 ap = alloc_aggr_kprobe(p);
581 if (!ap)
582 return;
583
584 op = container_of(ap, struct optimized_kprobe, kp);
585 if (!arch_prepared_optinsn(&op->optinsn)) {
 586		/* If preparing the optimized insn failed, fall back to a plain kprobe */
587 free_aggr_kprobe(ap);
588 return;
589 }
590
591 init_aggr_kprobe(ap, p);
592 optimize_kprobe(ap);
593}
594
595#ifdef CONFIG_SYSCTL
596static void __kprobes optimize_all_kprobes(void)
597{
598 struct hlist_head *head;
599 struct hlist_node *node;
600 struct kprobe *p;
601 unsigned int i;
602
603 /* If optimization is already allowed, just return */
604 if (kprobes_allow_optimization)
605 return;
606
607 kprobes_allow_optimization = true;
608 mutex_lock(&text_mutex);
609 for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
610 head = &kprobe_table[i];
611 hlist_for_each_entry_rcu(p, node, head, hlist)
612 if (!kprobe_disabled(p))
613 optimize_kprobe(p);
614 }
615 mutex_unlock(&text_mutex);
616 printk(KERN_INFO "Kprobes globally optimized\n");
617}
618
619static void __kprobes unoptimize_all_kprobes(void)
620{
621 struct hlist_head *head;
622 struct hlist_node *node;
623 struct kprobe *p;
624 unsigned int i;
625
626 /* If optimization is already prohibited, just return */
627 if (!kprobes_allow_optimization)
628 return;
629
630 kprobes_allow_optimization = false;
631 printk(KERN_INFO "Kprobes globally unoptimized\n");
632 get_online_cpus(); /* For avoiding text_mutex deadlock */
633 mutex_lock(&text_mutex);
634 for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
635 head = &kprobe_table[i];
636 hlist_for_each_entry_rcu(p, node, head, hlist) {
637 if (!kprobe_disabled(p))
638 unoptimize_kprobe(p);
639 }
640 }
641
642 mutex_unlock(&text_mutex);
643 put_online_cpus();
644 /* Allow all currently running kprobes to complete */
645 synchronize_sched();
646}
647
648int sysctl_kprobes_optimization;
649int proc_kprobes_optimization_handler(struct ctl_table *table, int write,
650 void __user *buffer, size_t *length,
651 loff_t *ppos)
652{
653 int ret;
654
655 mutex_lock(&kprobe_mutex);
656 sysctl_kprobes_optimization = kprobes_allow_optimization ? 1 : 0;
657 ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
658
659 if (sysctl_kprobes_optimization)
660 optimize_all_kprobes();
661 else
662 unoptimize_all_kprobes();
663 mutex_unlock(&kprobe_mutex);
664
665 return ret;
666}
667#endif /* CONFIG_SYSCTL */
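
/*
 * Illustrative usage, not part of this patch: the handler above is
 * intended to back a sysctl knob (the path below is assumed from the
 * feature's documentation) so optimization can be toggled at run time:
 *
 *	# echo 0 > /proc/sys/debug/kprobes-optimization	  (unoptimize all)
 *	# echo 1 > /proc/sys/debug/kprobes-optimization	  (optimize all)
 */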
668
669static void __kprobes __arm_kprobe(struct kprobe *p)
670{
671 struct kprobe *old_p;
672
673 /* Check collision with other optimized kprobes */
674 old_p = get_optimized_kprobe((unsigned long)p->addr);
675 if (unlikely(old_p))
676 unoptimize_kprobe(old_p); /* Fallback to unoptimized kprobe */
677
678 arch_arm_kprobe(p);
679 optimize_kprobe(p); /* Try to optimize (add kprobe to a list) */
680}
681
682static void __kprobes __disarm_kprobe(struct kprobe *p)
683{
684 struct kprobe *old_p;
685
686 unoptimize_kprobe(p); /* Try to unoptimize */
687 arch_disarm_kprobe(p);
688
689 /* If another kprobe was blocked, optimize it. */
690 old_p = get_optimized_kprobe((unsigned long)p->addr);
691 if (unlikely(old_p))
692 optimize_kprobe(old_p);
693}
694
695#else /* !CONFIG_OPTPROBES */
696
697#define optimize_kprobe(p) do {} while (0)
698#define unoptimize_kprobe(p) do {} while (0)
699#define kill_optimized_kprobe(p) do {} while (0)
700#define prepare_optimized_kprobe(p) do {} while (0)
701#define try_to_optimize_kprobe(p) do {} while (0)
702#define __arm_kprobe(p) arch_arm_kprobe(p)
703#define __disarm_kprobe(p) arch_disarm_kprobe(p)
704
705static __kprobes void free_aggr_kprobe(struct kprobe *p)
706{
707 kfree(p);
708}
709
710static __kprobes struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
711{
712 return kzalloc(sizeof(struct kprobe), GFP_KERNEL);
713}
714#endif /* CONFIG_OPTPROBES */
715
290/* Arm a kprobe with text_mutex */ 716/* Arm a kprobe with text_mutex */
291static void __kprobes arm_kprobe(struct kprobe *kp) 717static void __kprobes arm_kprobe(struct kprobe *kp)
292{ 718{
719 /*
720 * Here, since __arm_kprobe() doesn't use stop_machine(),
721 * this doesn't cause deadlock on text_mutex. So, we don't
722 * need get_online_cpus().
723 */
293 mutex_lock(&text_mutex); 724 mutex_lock(&text_mutex);
294 arch_arm_kprobe(kp); 725 __arm_kprobe(kp);
295 mutex_unlock(&text_mutex); 726 mutex_unlock(&text_mutex);
296} 727}
297 728
298/* Disarm a kprobe with text_mutex */ 729/* Disarm a kprobe with text_mutex */
299static void __kprobes disarm_kprobe(struct kprobe *kp) 730static void __kprobes disarm_kprobe(struct kprobe *kp)
300{ 731{
732 get_online_cpus(); /* For avoiding text_mutex deadlock */
301 mutex_lock(&text_mutex); 733 mutex_lock(&text_mutex);
302 arch_disarm_kprobe(kp); 734 __disarm_kprobe(kp);
303 mutex_unlock(&text_mutex); 735 mutex_unlock(&text_mutex);
736 put_online_cpus();
304} 737}
305 738
306/* 739/*
@@ -369,7 +802,7 @@ static int __kprobes aggr_break_handler(struct kprobe *p, struct pt_regs *regs)
369void __kprobes kprobes_inc_nmissed_count(struct kprobe *p) 802void __kprobes kprobes_inc_nmissed_count(struct kprobe *p)
370{ 803{
371 struct kprobe *kp; 804 struct kprobe *kp;
372 if (p->pre_handler != aggr_pre_handler) { 805 if (!kprobe_aggrprobe(p)) {
373 p->nmissed++; 806 p->nmissed++;
374 } else { 807 } else {
375 list_for_each_entry_rcu(kp, &p->list, list) 808 list_for_each_entry_rcu(kp, &p->list, list)
@@ -493,21 +926,16 @@ static void __kprobes cleanup_rp_inst(struct kretprobe *rp)
493} 926}
494 927
495/* 928/*
496 * Keep all fields in the kprobe consistent
497 */
498static inline void copy_kprobe(struct kprobe *old_p, struct kprobe *p)
499{
500 memcpy(&p->opcode, &old_p->opcode, sizeof(kprobe_opcode_t));
501 memcpy(&p->ainsn, &old_p->ainsn, sizeof(struct arch_specific_insn));
502}
503
504/*
505* Add the new probe to ap->list. Fail if this is the 929* Add the new probe to ap->list. Fail if this is the
506* second jprobe at the address - two jprobes can't coexist 930* second jprobe at the address - two jprobes can't coexist
507*/ 931*/
508static int __kprobes add_new_kprobe(struct kprobe *ap, struct kprobe *p) 932static int __kprobes add_new_kprobe(struct kprobe *ap, struct kprobe *p)
509{ 933{
510 BUG_ON(kprobe_gone(ap) || kprobe_gone(p)); 934 BUG_ON(kprobe_gone(ap) || kprobe_gone(p));
935
936 if (p->break_handler || p->post_handler)
937 unoptimize_kprobe(ap); /* Fall back to normal kprobe */
938
511 if (p->break_handler) { 939 if (p->break_handler) {
512 if (ap->break_handler) 940 if (ap->break_handler)
513 return -EEXIST; 941 return -EEXIST;
@@ -522,7 +950,7 @@ static int __kprobes add_new_kprobe(struct kprobe *ap, struct kprobe *p)
522 ap->flags &= ~KPROBE_FLAG_DISABLED; 950 ap->flags &= ~KPROBE_FLAG_DISABLED;
523 if (!kprobes_all_disarmed) 951 if (!kprobes_all_disarmed)
524 /* Arm the breakpoint again. */ 952 /* Arm the breakpoint again. */
525 arm_kprobe(ap); 953 __arm_kprobe(ap);
526 } 954 }
527 return 0; 955 return 0;
528} 956}
@@ -531,12 +959,13 @@ static int __kprobes add_new_kprobe(struct kprobe *ap, struct kprobe *p)
531 * Fill in the required fields of the "manager kprobe". Replace the 959 * Fill in the required fields of the "manager kprobe". Replace the
532 * earlier kprobe in the hlist with the manager kprobe 960 * earlier kprobe in the hlist with the manager kprobe
533 */ 961 */
534static inline void add_aggr_kprobe(struct kprobe *ap, struct kprobe *p) 962static void __kprobes init_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
535{ 963{
964 /* Copy p's insn slot to ap */
536 copy_kprobe(p, ap); 965 copy_kprobe(p, ap);
537 flush_insn_slot(ap); 966 flush_insn_slot(ap);
538 ap->addr = p->addr; 967 ap->addr = p->addr;
539 ap->flags = p->flags; 968 ap->flags = p->flags & ~KPROBE_FLAG_OPTIMIZED;
540 ap->pre_handler = aggr_pre_handler; 969 ap->pre_handler = aggr_pre_handler;
541 ap->fault_handler = aggr_fault_handler; 970 ap->fault_handler = aggr_fault_handler;
542 /* We don't care the kprobe which has gone. */ 971 /* We don't care the kprobe which has gone. */
@@ -546,8 +975,9 @@ static inline void add_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
546 ap->break_handler = aggr_break_handler; 975 ap->break_handler = aggr_break_handler;
547 976
548 INIT_LIST_HEAD(&ap->list); 977 INIT_LIST_HEAD(&ap->list);
549 list_add_rcu(&p->list, &ap->list); 978 INIT_HLIST_NODE(&ap->hlist);
550 979
980 list_add_rcu(&p->list, &ap->list);
551 hlist_replace_rcu(&p->hlist, &ap->hlist); 981 hlist_replace_rcu(&p->hlist, &ap->hlist);
552} 982}
553 983
@@ -561,12 +991,12 @@ static int __kprobes register_aggr_kprobe(struct kprobe *old_p,
561 int ret = 0; 991 int ret = 0;
562 struct kprobe *ap = old_p; 992 struct kprobe *ap = old_p;
563 993
564 if (old_p->pre_handler != aggr_pre_handler) { 994 if (!kprobe_aggrprobe(old_p)) {
565 /* If old_p is not an aggr_probe, create new aggr_kprobe. */ 995 /* If old_p is not an aggr_kprobe, create new aggr_kprobe. */
566 ap = kzalloc(sizeof(struct kprobe), GFP_KERNEL); 996 ap = alloc_aggr_kprobe(old_p);
567 if (!ap) 997 if (!ap)
568 return -ENOMEM; 998 return -ENOMEM;
569 add_aggr_kprobe(ap, old_p); 999 init_aggr_kprobe(ap, old_p);
570 } 1000 }
571 1001
572 if (kprobe_gone(ap)) { 1002 if (kprobe_gone(ap)) {
@@ -585,6 +1015,9 @@ static int __kprobes register_aggr_kprobe(struct kprobe *old_p,
585 */ 1015 */
586 return ret; 1016 return ret;
587 1017
1018 /* Prepare optimized instructions if possible. */
1019 prepare_optimized_kprobe(ap);
1020
588 /* 1021 /*
589 * Clear gone flag to prevent allocating new slot again, and 1022 * Clear gone flag to prevent allocating new slot again, and
590 * set disabled flag because it is not armed yet. 1023 * set disabled flag because it is not armed yet.
@@ -593,6 +1026,7 @@ static int __kprobes register_aggr_kprobe(struct kprobe *old_p,
593 | KPROBE_FLAG_DISABLED; 1026 | KPROBE_FLAG_DISABLED;
594 } 1027 }
595 1028
1029 /* Copy ap's insn slot to p */
596 copy_kprobe(ap, p); 1030 copy_kprobe(ap, p);
597 return add_new_kprobe(ap, p); 1031 return add_new_kprobe(ap, p);
598} 1032}
@@ -743,27 +1177,34 @@ int __kprobes register_kprobe(struct kprobe *p)
743 p->nmissed = 0; 1177 p->nmissed = 0;
744 INIT_LIST_HEAD(&p->list); 1178 INIT_LIST_HEAD(&p->list);
745 mutex_lock(&kprobe_mutex); 1179 mutex_lock(&kprobe_mutex);
1180
1181 get_online_cpus(); /* For avoiding text_mutex deadlock. */
1182 mutex_lock(&text_mutex);
1183
746 old_p = get_kprobe(p->addr); 1184 old_p = get_kprobe(p->addr);
747 if (old_p) { 1185 if (old_p) {
1186 /* Since this may unoptimize old_p, locking text_mutex. */
748 ret = register_aggr_kprobe(old_p, p); 1187 ret = register_aggr_kprobe(old_p, p);
749 goto out; 1188 goto out;
750 } 1189 }
751 1190
752 mutex_lock(&text_mutex);
753 ret = arch_prepare_kprobe(p); 1191 ret = arch_prepare_kprobe(p);
754 if (ret) 1192 if (ret)
755 goto out_unlock_text; 1193 goto out;
756 1194
757 INIT_HLIST_NODE(&p->hlist); 1195 INIT_HLIST_NODE(&p->hlist);
758 hlist_add_head_rcu(&p->hlist, 1196 hlist_add_head_rcu(&p->hlist,
759 &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]); 1197 &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);
760 1198
761 if (!kprobes_all_disarmed && !kprobe_disabled(p)) 1199 if (!kprobes_all_disarmed && !kprobe_disabled(p))
762 arch_arm_kprobe(p); 1200 __arm_kprobe(p);
1201
1202 /* Try to optimize kprobe */
1203 try_to_optimize_kprobe(p);
763 1204
764out_unlock_text:
765 mutex_unlock(&text_mutex);
766out: 1205out:
1206 mutex_unlock(&text_mutex);
1207 put_online_cpus();
767 mutex_unlock(&kprobe_mutex); 1208 mutex_unlock(&kprobe_mutex);
768 1209
769 if (probed_mod) 1210 if (probed_mod)
@@ -785,7 +1226,7 @@ static int __kprobes __unregister_kprobe_top(struct kprobe *p)
785 return -EINVAL; 1226 return -EINVAL;
786 1227
787 if (old_p == p || 1228 if (old_p == p ||
788 (old_p->pre_handler == aggr_pre_handler && 1229 (kprobe_aggrprobe(old_p) &&
789 list_is_singular(&old_p->list))) { 1230 list_is_singular(&old_p->list))) {
790 /* 1231 /*
791 * Only probe on the hash list. Disarm only if kprobes are 1232 * Only probe on the hash list. Disarm only if kprobes are
@@ -793,7 +1234,7 @@ static int __kprobes __unregister_kprobe_top(struct kprobe *p)
793 * already have been removed. We save on flushing icache. 1234 * already have been removed. We save on flushing icache.
794 */ 1235 */
795 if (!kprobes_all_disarmed && !kprobe_disabled(old_p)) 1236 if (!kprobes_all_disarmed && !kprobe_disabled(old_p))
796 disarm_kprobe(p); 1237 disarm_kprobe(old_p);
797 hlist_del_rcu(&old_p->hlist); 1238 hlist_del_rcu(&old_p->hlist);
798 } else { 1239 } else {
799 if (p->break_handler && !kprobe_gone(p)) 1240 if (p->break_handler && !kprobe_gone(p))
@@ -809,8 +1250,13 @@ noclean:
809 list_del_rcu(&p->list); 1250 list_del_rcu(&p->list);
810 if (!kprobe_disabled(old_p)) { 1251 if (!kprobe_disabled(old_p)) {
811 try_to_disable_aggr_kprobe(old_p); 1252 try_to_disable_aggr_kprobe(old_p);
812 if (!kprobes_all_disarmed && kprobe_disabled(old_p)) 1253 if (!kprobes_all_disarmed) {
813 disarm_kprobe(old_p); 1254 if (kprobe_disabled(old_p))
1255 disarm_kprobe(old_p);
1256 else
1257 /* Try to optimize this probe again */
1258 optimize_kprobe(old_p);
1259 }
814 } 1260 }
815 } 1261 }
816 return 0; 1262 return 0;
@@ -827,7 +1273,7 @@ static void __kprobes __unregister_kprobe_bottom(struct kprobe *p)
827 old_p = list_entry(p->list.next, struct kprobe, list); 1273 old_p = list_entry(p->list.next, struct kprobe, list);
828 list_del(&p->list); 1274 list_del(&p->list);
829 arch_remove_kprobe(old_p); 1275 arch_remove_kprobe(old_p);
830 kfree(old_p); 1276 free_aggr_kprobe(old_p);
831 } 1277 }
832} 1278}
833 1279
@@ -1123,7 +1569,7 @@ static void __kprobes kill_kprobe(struct kprobe *p)
1123 struct kprobe *kp; 1569 struct kprobe *kp;
1124 1570
1125 p->flags |= KPROBE_FLAG_GONE; 1571 p->flags |= KPROBE_FLAG_GONE;
1126 if (p->pre_handler == aggr_pre_handler) { 1572 if (kprobe_aggrprobe(p)) {
1127 /* 1573 /*
1128 * If this is an aggr_kprobe, we have to list all the 1574 * If this is an aggr_kprobe, we have to list all the
1129 * chained probes and mark them GONE. 1575 * chained probes and mark them GONE.
@@ -1132,6 +1578,7 @@ static void __kprobes kill_kprobe(struct kprobe *p)
1132 kp->flags |= KPROBE_FLAG_GONE; 1578 kp->flags |= KPROBE_FLAG_GONE;
1133 p->post_handler = NULL; 1579 p->post_handler = NULL;
1134 p->break_handler = NULL; 1580 p->break_handler = NULL;
1581 kill_optimized_kprobe(p);
1135 } 1582 }
1136 /* 1583 /*
1137 * Here, we can remove insn_slot safely, because no thread calls 1584 * Here, we can remove insn_slot safely, because no thread calls
@@ -1241,6 +1688,15 @@ static int __init init_kprobes(void)
1241 } 1688 }
1242 } 1689 }
1243 1690
1691#if defined(CONFIG_OPTPROBES)
1692#if defined(__ARCH_WANT_KPROBES_INSN_SLOT)
1693 /* Init kprobe_optinsn_slots */
1694 kprobe_optinsn_slots.insn_size = MAX_OPTINSN_SIZE;
1695#endif
1696 /* By default, kprobes can be optimized */
1697 kprobes_allow_optimization = true;
1698#endif
1699
1244 /* By default, kprobes are armed */ 1700 /* By default, kprobes are armed */
1245 kprobes_all_disarmed = false; 1701 kprobes_all_disarmed = false;
1246 1702
@@ -1259,7 +1715,7 @@ static int __init init_kprobes(void)
1259 1715
1260#ifdef CONFIG_DEBUG_FS 1716#ifdef CONFIG_DEBUG_FS
1261static void __kprobes report_probe(struct seq_file *pi, struct kprobe *p, 1717static void __kprobes report_probe(struct seq_file *pi, struct kprobe *p,
1262 const char *sym, int offset,char *modname) 1718 const char *sym, int offset, char *modname, struct kprobe *pp)
1263{ 1719{
1264 char *kprobe_type; 1720 char *kprobe_type;
1265 1721
@@ -1269,19 +1725,21 @@ static void __kprobes report_probe(struct seq_file *pi, struct kprobe *p,
1269 kprobe_type = "j"; 1725 kprobe_type = "j";
1270 else 1726 else
1271 kprobe_type = "k"; 1727 kprobe_type = "k";
1728
1272 if (sym) 1729 if (sym)
1273 seq_printf(pi, "%p %s %s+0x%x %s %s%s\n", 1730 seq_printf(pi, "%p %s %s+0x%x %s ",
1274 p->addr, kprobe_type, sym, offset, 1731 p->addr, kprobe_type, sym, offset,
1275 (modname ? modname : " "), 1732 (modname ? modname : " "));
1276 (kprobe_gone(p) ? "[GONE]" : ""),
1277 ((kprobe_disabled(p) && !kprobe_gone(p)) ?
1278 "[DISABLED]" : ""));
1279 else 1733 else
1280 seq_printf(pi, "%p %s %p %s%s\n", 1734 seq_printf(pi, "%p %s %p ",
1281 p->addr, kprobe_type, p->addr, 1735 p->addr, kprobe_type, p->addr);
1282 (kprobe_gone(p) ? "[GONE]" : ""), 1736
1283 ((kprobe_disabled(p) && !kprobe_gone(p)) ? 1737 if (!pp)
1284 "[DISABLED]" : "")); 1738 pp = p;
1739 seq_printf(pi, "%s%s%s\n",
1740 (kprobe_gone(p) ? "[GONE]" : ""),
1741 ((kprobe_disabled(p) && !kprobe_gone(p)) ? "[DISABLED]" : ""),
1742 (kprobe_optimized(pp) ? "[OPTIMIZED]" : ""));
1285} 1743}
1286 1744
1287static void __kprobes *kprobe_seq_start(struct seq_file *f, loff_t *pos) 1745static void __kprobes *kprobe_seq_start(struct seq_file *f, loff_t *pos)
@@ -1317,11 +1775,11 @@ static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
1317 hlist_for_each_entry_rcu(p, node, head, hlist) { 1775 hlist_for_each_entry_rcu(p, node, head, hlist) {
1318 sym = kallsyms_lookup((unsigned long)p->addr, NULL, 1776 sym = kallsyms_lookup((unsigned long)p->addr, NULL,
1319 &offset, &modname, namebuf); 1777 &offset, &modname, namebuf);
1320 if (p->pre_handler == aggr_pre_handler) { 1778 if (kprobe_aggrprobe(p)) {
1321 list_for_each_entry_rcu(kp, &p->list, list) 1779 list_for_each_entry_rcu(kp, &p->list, list)
1322 report_probe(pi, kp, sym, offset, modname); 1780 report_probe(pi, kp, sym, offset, modname, p);
1323 } else 1781 } else
1324 report_probe(pi, p, sym, offset, modname); 1782 report_probe(pi, p, sym, offset, modname, NULL);
1325 } 1783 }
1326 preempt_enable(); 1784 preempt_enable();
1327 return 0; 1785 return 0;
@@ -1399,12 +1857,13 @@ int __kprobes enable_kprobe(struct kprobe *kp)
1399 goto out; 1857 goto out;
1400 } 1858 }
1401 1859
1402 if (!kprobes_all_disarmed && kprobe_disabled(p))
1403 arm_kprobe(p);
1404
1405 p->flags &= ~KPROBE_FLAG_DISABLED;
1406 if (p != kp) 1860 if (p != kp)
1407 kp->flags &= ~KPROBE_FLAG_DISABLED; 1861 kp->flags &= ~KPROBE_FLAG_DISABLED;
1862
1863 if (!kprobes_all_disarmed && kprobe_disabled(p)) {
1864 p->flags &= ~KPROBE_FLAG_DISABLED;
1865 arm_kprobe(p);
1866 }
1408out: 1867out:
1409 mutex_unlock(&kprobe_mutex); 1868 mutex_unlock(&kprobe_mutex);
1410 return ret; 1869 return ret;
@@ -1424,12 +1883,13 @@ static void __kprobes arm_all_kprobes(void)
1424 if (!kprobes_all_disarmed) 1883 if (!kprobes_all_disarmed)
1425 goto already_enabled; 1884 goto already_enabled;
1426 1885
1886 /* Arming kprobes doesn't optimize kprobe itself */
1427 mutex_lock(&text_mutex); 1887 mutex_lock(&text_mutex);
1428 for (i = 0; i < KPROBE_TABLE_SIZE; i++) { 1888 for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
1429 head = &kprobe_table[i]; 1889 head = &kprobe_table[i];
1430 hlist_for_each_entry_rcu(p, node, head, hlist) 1890 hlist_for_each_entry_rcu(p, node, head, hlist)
1431 if (!kprobe_disabled(p)) 1891 if (!kprobe_disabled(p))
1432 arch_arm_kprobe(p); 1892 __arm_kprobe(p);
1433 } 1893 }
1434 mutex_unlock(&text_mutex); 1894 mutex_unlock(&text_mutex);
1435 1895
@@ -1456,16 +1916,23 @@ static void __kprobes disarm_all_kprobes(void)
1456 1916
1457 kprobes_all_disarmed = true; 1917 kprobes_all_disarmed = true;
1458 printk(KERN_INFO "Kprobes globally disabled\n"); 1918 printk(KERN_INFO "Kprobes globally disabled\n");
1919
1920 /*
1921 * Here we call get_online_cpus() for avoiding text_mutex deadlock,
1922 * because disarming may also unoptimize kprobes.
1923 */
1924 get_online_cpus();
1459 mutex_lock(&text_mutex); 1925 mutex_lock(&text_mutex);
1460 for (i = 0; i < KPROBE_TABLE_SIZE; i++) { 1926 for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
1461 head = &kprobe_table[i]; 1927 head = &kprobe_table[i];
1462 hlist_for_each_entry_rcu(p, node, head, hlist) { 1928 hlist_for_each_entry_rcu(p, node, head, hlist) {
1463 if (!arch_trampoline_kprobe(p) && !kprobe_disabled(p)) 1929 if (!arch_trampoline_kprobe(p) && !kprobe_disabled(p))
1464 arch_disarm_kprobe(p); 1930 __disarm_kprobe(p);
1465 } 1931 }
1466 } 1932 }
1467 1933
1468 mutex_unlock(&text_mutex); 1934 mutex_unlock(&text_mutex);
1935 put_online_cpus();
1469 mutex_unlock(&kprobe_mutex); 1936 mutex_unlock(&kprobe_mutex);
1470 /* Allow all currently running kprobes to complete */ 1937 /* Allow all currently running kprobes to complete */
1471 synchronize_sched(); 1938 synchronize_sched();
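
/*
 * Illustrative sketch, not part of this patch: a plain kprobe
 * registration. If the arch implements OPTPROBES and the probe has no
 * post_handler or break_handler, register_kprobe() now also queues it
 * for jump optimization, and it later shows as [OPTIMIZED] in
 * /sys/kernel/debug/kprobes/list. Handler and symbol are invented.
 */
static int example_pre(struct kprobe *p, struct pt_regs *regs)
{
	return 0;	/* continue normal execution */
}

static struct kprobe example_kp = {
	.symbol_name	= "do_fork",	/* assumed example target */
	.pre_handler	= example_pre,
};

/* ... register_kprobe(&example_kp); ... unregister_kprobe(&example_kp); */
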
diff --git a/kernel/module.c b/kernel/module.c
index f82386bd9ee9..e5538d5f00ad 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -474,9 +474,10 @@ static void module_unload_init(struct module *mod)
474 474
475 INIT_LIST_HEAD(&mod->modules_which_use_me); 475 INIT_LIST_HEAD(&mod->modules_which_use_me);
476 for_each_possible_cpu(cpu) 476 for_each_possible_cpu(cpu)
477 local_set(__module_ref_addr(mod, cpu), 0); 477 per_cpu_ptr(mod->refptr, cpu)->count = 0;
478
478 /* Hold reference count during initialization. */ 479 /* Hold reference count during initialization. */
479 local_set(__module_ref_addr(mod, raw_smp_processor_id()), 1); 480 __this_cpu_write(mod->refptr->count, 1);
480 /* Backwards compatibility macros put refcount during init. */ 481 /* Backwards compatibility macros put refcount during init. */
481 mod->waiter = current; 482 mod->waiter = current;
482} 483}
@@ -619,7 +620,7 @@ unsigned int module_refcount(struct module *mod)
619 int cpu; 620 int cpu;
620 621
621 for_each_possible_cpu(cpu) 622 for_each_possible_cpu(cpu)
622 total += local_read(__module_ref_addr(mod, cpu)); 623 total += per_cpu_ptr(mod->refptr, cpu)->count;
623 return total; 624 return total;
624} 625}
625EXPORT_SYMBOL(module_refcount); 626EXPORT_SYMBOL(module_refcount);
@@ -796,14 +797,15 @@ static struct module_attribute refcnt = {
796void module_put(struct module *module) 797void module_put(struct module *module)
797{ 798{
798 if (module) { 799 if (module) {
799 unsigned int cpu = get_cpu(); 800 preempt_disable();
800 local_dec(__module_ref_addr(module, cpu)); 801 __this_cpu_dec(module->refptr->count);
802
801 trace_module_put(module, _RET_IP_, 803 trace_module_put(module, _RET_IP_,
802 local_read(__module_ref_addr(module, cpu))); 804 __this_cpu_read(module->refptr->count));
803 /* Maybe they're waiting for us to drop reference? */ 805 /* Maybe they're waiting for us to drop reference? */
804 if (unlikely(!module_is_live(module))) 806 if (unlikely(!module_is_live(module)))
805 wake_up_process(module->waiter); 807 wake_up_process(module->waiter);
806 put_cpu(); 808 preempt_enable();
807 } 809 }
808} 810}
809EXPORT_SYMBOL(module_put); 811EXPORT_SYMBOL(module_put);
@@ -1397,9 +1399,9 @@ static void free_module(struct module *mod)
1397 kfree(mod->args); 1399 kfree(mod->args);
1398 if (mod->percpu) 1400 if (mod->percpu)
1399 percpu_modfree(mod->percpu); 1401 percpu_modfree(mod->percpu);
1400#if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP) 1402#if defined(CONFIG_MODULE_UNLOAD)
1401 if (mod->refptr) 1403 if (mod->refptr)
1402 percpu_modfree(mod->refptr); 1404 free_percpu(mod->refptr);
1403#endif 1405#endif
1404 /* Free lock-classes: */ 1406 /* Free lock-classes: */
1405 lockdep_free_key_range(mod->module_core, mod->core_size); 1407 lockdep_free_key_range(mod->module_core, mod->core_size);
@@ -2162,9 +2164,8 @@ static noinline struct module *load_module(void __user *umod,
2162 mod = (void *)sechdrs[modindex].sh_addr; 2164 mod = (void *)sechdrs[modindex].sh_addr;
2163 kmemleak_load_module(mod, hdr, sechdrs, secstrings); 2165 kmemleak_load_module(mod, hdr, sechdrs, secstrings);
2164 2166
2165#if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP) 2167#if defined(CONFIG_MODULE_UNLOAD)
2166 mod->refptr = percpu_modalloc(sizeof(local_t), __alignof__(local_t), 2168 mod->refptr = alloc_percpu(struct module_ref);
2167 mod->name);
2168 if (!mod->refptr) { 2169 if (!mod->refptr) {
2169 err = -ENOMEM; 2170 err = -ENOMEM;
2170 goto free_init; 2171 goto free_init;
@@ -2396,8 +2397,8 @@ static noinline struct module *load_module(void __user *umod,
2396 kobject_put(&mod->mkobj.kobj); 2397 kobject_put(&mod->mkobj.kobj);
2397 free_unload: 2398 free_unload:
2398 module_unload_free(mod); 2399 module_unload_free(mod);
2399#if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP) 2400#if defined(CONFIG_MODULE_UNLOAD)
2400 percpu_modfree(mod->refptr); 2401 free_percpu(mod->refptr);
2401 free_init: 2402 free_init:
2402#endif 2403#endif
2403 module_free(mod, mod->module_init); 2404 module_free(mod, mod->module_init);
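
The module.c changes replace the open-coded local_t array behind __module_ref_addr() with a plain alloc_percpu() allocation driven by this_cpu operations, which also lets the CONFIG_SMP special case disappear. A rough sketch of the pattern the file moves to, using a hypothetical struct ref rather than the real struct module_ref:

	struct ref {
		int count;
	};

	static struct ref __percpu *refptr;

	static int ref_init(void)
	{
		int cpu;

		refptr = alloc_percpu(struct ref);
		if (!refptr)
			return -ENOMEM;
		/* alloc_percpu() zeroes the memory; the explicit loop just
		 * mirrors module_unload_init() above. */
		for_each_possible_cpu(cpu)
			per_cpu_ptr(refptr, cpu)->count = 0;
		return 0;
	}

	static void ref_put(void)
	{
		preempt_disable();		/* pin the CPU for the local op */
		__this_cpu_dec(refptr->count);
		preempt_enable();
	}

Reading the aggregate value still means summing per_cpu_ptr(refptr, cpu)->count over all possible CPUs, exactly as module_refcount() does above.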
diff --git a/kernel/padata.c b/kernel/padata.c
index 6f9bcb8313d6..93caf65ff57c 100644
--- a/kernel/padata.c
+++ b/kernel/padata.c
@@ -642,6 +642,9 @@ struct padata_instance *padata_alloc(const struct cpumask *cpumask,
642 if (!pd) 642 if (!pd)
643 goto err_free_inst; 643 goto err_free_inst;
644 644
645 if (!alloc_cpumask_var(&pinst->cpumask, GFP_KERNEL))
646 goto err_free_pd;
647
645 rcu_assign_pointer(pinst->pd, pd); 648 rcu_assign_pointer(pinst->pd, pd);
646 649
647 pinst->wq = wq; 650 pinst->wq = wq;
@@ -654,12 +657,14 @@ struct padata_instance *padata_alloc(const struct cpumask *cpumask,
654 pinst->cpu_notifier.priority = 0; 657 pinst->cpu_notifier.priority = 0;
655 err = register_hotcpu_notifier(&pinst->cpu_notifier); 658 err = register_hotcpu_notifier(&pinst->cpu_notifier);
656 if (err) 659 if (err)
657 goto err_free_pd; 660 goto err_free_cpumask;
658 661
659 mutex_init(&pinst->lock); 662 mutex_init(&pinst->lock);
660 663
661 return pinst; 664 return pinst;
662 665
666err_free_cpumask:
667 free_cpumask_var(pinst->cpumask);
663err_free_pd: 668err_free_pd:
664 padata_free_pd(pd); 669 padata_free_pd(pd);
665err_free_inst: 670err_free_inst:
@@ -685,6 +690,7 @@ void padata_free(struct padata_instance *pinst)
685 690
686 unregister_hotcpu_notifier(&pinst->cpu_notifier); 691 unregister_hotcpu_notifier(&pinst->cpu_notifier);
687 padata_free_pd(pinst->pd); 692 padata_free_pd(pinst->pd);
693 free_cpumask_var(pinst->cpumask);
688 kfree(pinst); 694 kfree(pinst);
689} 695}
690EXPORT_SYMBOL(padata_free); 696EXPORT_SYMBOL(padata_free);
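
The padata fix is a textbook case of goto-based unwinding: each resource gets a label, and the labels run in reverse order of acquisition so a failure at step N releases steps N-1 down to 1. A compressed sketch with hypothetical alloc_a()/alloc_b()/register_thing() helpers, not padata's real code:

	static int setup(void)
	{
		void *a, *b;

		a = alloc_a();
		if (!a)
			goto err;
		b = alloc_b();
		if (!b)
			goto err_free_a;
		if (register_thing(a, b))
			goto err_free_b;
		return 0;

	err_free_b:
		free_b(b);
	err_free_a:
		free_a(a);
	err:
		return -ENOMEM;
	}

The bug being fixed is the usual one with this pattern: the new pinst->cpumask allocation needed its own err_free_cpumask label ahead of err_free_pd, a retargeted jump at the later failure site, and a matching free_cpumask_var() in padata_free().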
diff --git a/kernel/printk.c b/kernel/printk.c
index 1751c456b71f..40674122ecf2 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -35,6 +35,7 @@
35#include <linux/kexec.h> 35#include <linux/kexec.h>
36#include <linux/ratelimit.h> 36#include <linux/ratelimit.h>
37#include <linux/kmsg_dump.h> 37#include <linux/kmsg_dump.h>
38#include <linux/syslog.h>
38 39
39#include <asm/uaccess.h> 40#include <asm/uaccess.h>
40 41
@@ -258,38 +259,23 @@ static inline void boot_delay_msec(void)
258} 259}
259#endif 260#endif
260 261
261/* 262int do_syslog(int type, char __user *buf, int len, bool from_file)
262 * Commands to do_syslog:
263 *
264 * 0 -- Close the log. Currently a NOP.
265 * 1 -- Open the log. Currently a NOP.
266 * 2 -- Read from the log.
267 * 3 -- Read all messages remaining in the ring buffer.
268 * 4 -- Read and clear all messages remaining in the ring buffer
269 * 5 -- Clear ring buffer.
270 * 6 -- Disable printk's to console
271 * 7 -- Enable printk's to console
272 * 8 -- Set level of messages printed to console
273 * 9 -- Return number of unread characters in the log buffer
274 * 10 -- Return size of the log buffer
275 */
276int do_syslog(int type, char __user *buf, int len)
277{ 263{
278 unsigned i, j, limit, count; 264 unsigned i, j, limit, count;
279 int do_clear = 0; 265 int do_clear = 0;
280 char c; 266 char c;
281 int error = 0; 267 int error = 0;
282 268
283 error = security_syslog(type); 269 error = security_syslog(type, from_file);
284 if (error) 270 if (error)
285 return error; 271 return error;
286 272
287 switch (type) { 273 switch (type) {
288 case 0: /* Close log */ 274 case SYSLOG_ACTION_CLOSE: /* Close log */
289 break; 275 break;
290 case 1: /* Open log */ 276 case SYSLOG_ACTION_OPEN: /* Open log */
291 break; 277 break;
292 case 2: /* Read from log */ 278 case SYSLOG_ACTION_READ: /* Read from log */
293 error = -EINVAL; 279 error = -EINVAL;
294 if (!buf || len < 0) 280 if (!buf || len < 0)
295 goto out; 281 goto out;
@@ -320,10 +306,12 @@ int do_syslog(int type, char __user *buf, int len)
320 if (!error) 306 if (!error)
321 error = i; 307 error = i;
322 break; 308 break;
323 case 4: /* Read/clear last kernel messages */ 309 /* Read/clear last kernel messages */
310 case SYSLOG_ACTION_READ_CLEAR:
324 do_clear = 1; 311 do_clear = 1;
325 /* FALL THRU */ 312 /* FALL THRU */
326 case 3: /* Read last kernel messages */ 313 /* Read last kernel messages */
314 case SYSLOG_ACTION_READ_ALL:
327 error = -EINVAL; 315 error = -EINVAL;
328 if (!buf || len < 0) 316 if (!buf || len < 0)
329 goto out; 317 goto out;
@@ -376,21 +364,25 @@ int do_syslog(int type, char __user *buf, int len)
376 } 364 }
377 } 365 }
378 break; 366 break;
379 case 5: /* Clear ring buffer */ 367 /* Clear ring buffer */
368 case SYSLOG_ACTION_CLEAR:
380 logged_chars = 0; 369 logged_chars = 0;
381 break; 370 break;
382 case 6: /* Disable logging to console */ 371 /* Disable logging to console */
372 case SYSLOG_ACTION_CONSOLE_OFF:
383 if (saved_console_loglevel == -1) 373 if (saved_console_loglevel == -1)
384 saved_console_loglevel = console_loglevel; 374 saved_console_loglevel = console_loglevel;
385 console_loglevel = minimum_console_loglevel; 375 console_loglevel = minimum_console_loglevel;
386 break; 376 break;
387 case 7: /* Enable logging to console */ 377 /* Enable logging to console */
378 case SYSLOG_ACTION_CONSOLE_ON:
388 if (saved_console_loglevel != -1) { 379 if (saved_console_loglevel != -1) {
389 console_loglevel = saved_console_loglevel; 380 console_loglevel = saved_console_loglevel;
390 saved_console_loglevel = -1; 381 saved_console_loglevel = -1;
391 } 382 }
392 break; 383 break;
393 case 8: /* Set level of messages printed to console */ 384 /* Set level of messages printed to console */
385 case SYSLOG_ACTION_CONSOLE_LEVEL:
394 error = -EINVAL; 386 error = -EINVAL;
395 if (len < 1 || len > 8) 387 if (len < 1 || len > 8)
396 goto out; 388 goto out;
@@ -401,10 +393,12 @@ int do_syslog(int type, char __user *buf, int len)
401 saved_console_loglevel = -1; 393 saved_console_loglevel = -1;
402 error = 0; 394 error = 0;
403 break; 395 break;
404 case 9: /* Number of chars in the log buffer */ 396 /* Number of chars in the log buffer */
397 case SYSLOG_ACTION_SIZE_UNREAD:
405 error = log_end - log_start; 398 error = log_end - log_start;
406 break; 399 break;
407 case 10: /* Size of the log buffer */ 400 /* Size of the log buffer */
401 case SYSLOG_ACTION_SIZE_BUFFER:
408 error = log_buf_len; 402 error = log_buf_len;
409 break; 403 break;
410 default: 404 default:
@@ -417,7 +411,7 @@ out:
417 411
418SYSCALL_DEFINE3(syslog, int, type, char __user *, buf, int, len) 412SYSCALL_DEFINE3(syslog, int, type, char __user *, buf, int, len)
419{ 413{
420 return do_syslog(type, buf, len); 414 return do_syslog(type, buf, len, SYSLOG_FROM_CALL);
421} 415}
422 416
423/* 417/*
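
With do_syslog() now using symbolic SYSLOG_ACTION_* values from the new <linux/syslog.h>, the old magic numbers survive only in the userspace ABI. For reference, the same actions are reachable from userspace through glibc's klogctl() wrapper; a small sketch (reading the log may require privilege, depending on configuration):

	#include <stdio.h>
	#include <stdlib.h>
	#include <sys/klog.h>

	int main(void)
	{
		/* 10 == SYSLOG_ACTION_SIZE_BUFFER: total log buffer size */
		int len = klogctl(10, NULL, 0);
		char *buf;

		if (len <= 0)
			return 1;
		buf = malloc(len);
		if (!buf)
			return 1;
		/* 3 == SYSLOG_ACTION_READ_ALL: non-destructive read */
		len = klogctl(3, buf, len);
		if (len > 0)
			fwrite(buf, 1, len, stdout);
		free(buf);
		return 0;
	}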
diff --git a/kernel/range.c b/kernel/range.c
new file mode 100644
index 000000000000..74e2e6114927
--- /dev/null
+++ b/kernel/range.c
@@ -0,0 +1,163 @@
1/*
2 * Range add and subtract
3 */
4#include <linux/module.h>
5#include <linux/init.h>
6#include <linux/sort.h>
7
8#include <linux/range.h>
9
10#ifndef ARRAY_SIZE
11#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
12#endif
13
14int add_range(struct range *range, int az, int nr_range, u64 start, u64 end)
15{
16 if (start >= end)
17 return nr_range;
18
19 /* Out of slots: */
20 if (nr_range >= az)
21 return nr_range;
22
23 range[nr_range].start = start;
24 range[nr_range].end = end;
25
26 nr_range++;
27
28 return nr_range;
29}
30
31int add_range_with_merge(struct range *range, int az, int nr_range,
32 u64 start, u64 end)
33{
34 int i;
35
36 if (start >= end)
37 return nr_range;
38
39 /* Try to merge it with old one: */
40 for (i = 0; i < nr_range; i++) {
41 u64 final_start, final_end;
42 u64 common_start, common_end;
43
44 if (!range[i].end)
45 continue;
46
47 common_start = max(range[i].start, start);
48 common_end = min(range[i].end, end);
49 if (common_start > common_end)
50 continue;
51
52 final_start = min(range[i].start, start);
53 final_end = max(range[i].end, end);
54
55 range[i].start = final_start;
56 range[i].end = final_end;
57 return nr_range;
58 }
59
60 /* Need to add it: */
61 return add_range(range, az, nr_range, start, end);
62}
63
64void subtract_range(struct range *range, int az, u64 start, u64 end)
65{
66 int i, j;
67
68 if (start >= end)
69 return;
70
71 for (j = 0; j < az; j++) {
72 if (!range[j].end)
73 continue;
74
75 if (start <= range[j].start && end >= range[j].end) {
76 range[j].start = 0;
77 range[j].end = 0;
78 continue;
79 }
80
81 if (start <= range[j].start && end < range[j].end &&
82 range[j].start < end) {
83 range[j].start = end;
84 continue;
85 }
86
87
88 if (start > range[j].start && end >= range[j].end &&
89 range[j].end > start) {
90 range[j].end = start;
91 continue;
92 }
93
94 if (start > range[j].start && end < range[j].end) {
95 /* Find the new spare: */
96 for (i = 0; i < az; i++) {
97 if (range[i].end == 0)
98 break;
99 }
100 if (i < az) {
101 range[i].end = range[j].end;
102 range[i].start = end;
103 } else {
 104				printk(KERN_ERR "run out of slots in ranges\n");
105 }
106 range[j].end = start;
107 continue;
108 }
109 }
110}
111
112static int cmp_range(const void *x1, const void *x2)
113{
114 const struct range *r1 = x1;
115 const struct range *r2 = x2;
116 s64 start1, start2;
117
118 start1 = r1->start;
119 start2 = r2->start;
120
 121	return (start1 < start2) ? -1 : (start1 > start2);
122}
123
124int clean_sort_range(struct range *range, int az)
125{
126 int i, j, k = az - 1, nr_range = 0;
127
128 for (i = 0; i < k; i++) {
129 if (range[i].end)
130 continue;
131 for (j = k; j > i; j--) {
132 if (range[j].end) {
133 k = j;
134 break;
135 }
136 }
137 if (j == i)
138 break;
139 range[i].start = range[k].start;
140 range[i].end = range[k].end;
141 range[k].start = 0;
142 range[k].end = 0;
143 k--;
144 }
145 /* count it */
146 for (i = 0; i < az; i++) {
147 if (!range[i].end) {
148 nr_range = i;
149 break;
150 }
151 }
152
153 /* sort them */
154 sort(range, nr_range, sizeof(struct range), cmp_range, NULL);
155
156 return nr_range;
157}
158
159void sort_range(struct range *range, int nr_range)
160{
161 /* sort them */
162 sort(range, nr_range, sizeof(struct range), cmp_range, NULL);
163}
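
The new kernel/range.c is self-contained enough that its intended use can be shown directly. A hypothetical caller, exercising merge, subtraction, and compaction in turn:

	static struct range ranges[16];

	static int build_ranges(void)
	{
		int nr = 0;

		nr = add_range_with_merge(ranges, ARRAY_SIZE(ranges), nr,
					  0x1000, 0x8000);
		nr = add_range_with_merge(ranges, ARRAY_SIZE(ranges), nr,
					  0x6000, 0xa000);
		/* the overlap merged: one slot now covers 0x1000-0xa000 */
		subtract_range(ranges, ARRAY_SIZE(ranges), 0x4000, 0x5000);
		/* punching that hole splits the slot in two */
		return clean_sort_range(ranges, ARRAY_SIZE(ranges));
		/* returns 2: 0x1000-0x4000 and 0x5000-0xa000, sorted */
	}

Note that zeroed slots (end == 0) are how subtract_range() marks holes, so clean_sort_range() must be run before the array is consumed as a dense, ordered list.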
diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
index 258cdf0a91eb..58df55bf83ed 100644
--- a/kernel/rcutorture.c
+++ b/kernel/rcutorture.c
@@ -818,13 +818,13 @@ static void rcu_torture_timer(unsigned long unused)
818 /* Should not happen, but... */ 818 /* Should not happen, but... */
819 pipe_count = RCU_TORTURE_PIPE_LEN; 819 pipe_count = RCU_TORTURE_PIPE_LEN;
820 } 820 }
821 __this_cpu_inc(per_cpu_var(rcu_torture_count)[pipe_count]); 821 __this_cpu_inc(rcu_torture_count[pipe_count]);
822 completed = cur_ops->completed() - completed; 822 completed = cur_ops->completed() - completed;
823 if (completed > RCU_TORTURE_PIPE_LEN) { 823 if (completed > RCU_TORTURE_PIPE_LEN) {
824 /* Should not happen, but... */ 824 /* Should not happen, but... */
825 completed = RCU_TORTURE_PIPE_LEN; 825 completed = RCU_TORTURE_PIPE_LEN;
826 } 826 }
827 __this_cpu_inc(per_cpu_var(rcu_torture_batch)[completed]); 827 __this_cpu_inc(rcu_torture_batch[completed]);
828 preempt_enable(); 828 preempt_enable();
829 cur_ops->readunlock(idx); 829 cur_ops->readunlock(idx);
830} 830}
@@ -877,13 +877,13 @@ rcu_torture_reader(void *arg)
877 /* Should not happen, but... */ 877 /* Should not happen, but... */
878 pipe_count = RCU_TORTURE_PIPE_LEN; 878 pipe_count = RCU_TORTURE_PIPE_LEN;
879 } 879 }
880 __this_cpu_inc(per_cpu_var(rcu_torture_count)[pipe_count]); 880 __this_cpu_inc(rcu_torture_count[pipe_count]);
881 completed = cur_ops->completed() - completed; 881 completed = cur_ops->completed() - completed;
882 if (completed > RCU_TORTURE_PIPE_LEN) { 882 if (completed > RCU_TORTURE_PIPE_LEN) {
883 /* Should not happen, but... */ 883 /* Should not happen, but... */
884 completed = RCU_TORTURE_PIPE_LEN; 884 completed = RCU_TORTURE_PIPE_LEN;
885 } 885 }
886 __this_cpu_inc(per_cpu_var(rcu_torture_batch)[completed]); 886 __this_cpu_inc(rcu_torture_batch[completed]);
887 preempt_enable(); 887 preempt_enable();
888 cur_ops->readunlock(idx); 888 cur_ops->readunlock(idx);
889 schedule(); 889 schedule();
diff --git a/kernel/resource.c b/kernel/resource.c
index 4e9d87fd7bc5..2d5be5d9bf5f 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -304,7 +304,7 @@ int walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
304 void *arg, int (*func)(unsigned long, unsigned long, void *)) 304 void *arg, int (*func)(unsigned long, unsigned long, void *))
305{ 305{
306 struct resource res; 306 struct resource res;
307 unsigned long pfn, len; 307 unsigned long pfn, end_pfn;
308 u64 orig_end; 308 u64 orig_end;
309 int ret = -1; 309 int ret = -1;
310 310
@@ -314,9 +314,10 @@ int walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
314 orig_end = res.end; 314 orig_end = res.end;
315 while ((res.start < res.end) && 315 while ((res.start < res.end) &&
316 (find_next_system_ram(&res, "System RAM") >= 0)) { 316 (find_next_system_ram(&res, "System RAM") >= 0)) {
317 pfn = (unsigned long)(res.start >> PAGE_SHIFT); 317 pfn = (res.start + PAGE_SIZE - 1) >> PAGE_SHIFT;
318 len = (unsigned long)((res.end + 1 - res.start) >> PAGE_SHIFT); 318 end_pfn = (res.end + 1) >> PAGE_SHIFT;
319 ret = (*func)(pfn, len, arg); 319 if (end_pfn > pfn)
320 ret = (*func)(pfn, end_pfn - pfn, arg);
320 if (ret) 321 if (ret)
321 break; 322 break;
322 res.start = res.end + 1; 323 res.start = res.end + 1;
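
The walk_system_ram_range() fix replaces a truncating shift with inward rounding, so a resource that covers only part of a page no longer reports that page as RAM. A worked example under the assumption of 4 KiB pages (PAGE_SHIFT == 12):

	/* Resource spans [0x1800, 0x4fff]; res.end is inclusive. */
	unsigned long start = 0x1800, end = 0x4fff;
	unsigned long pfn     = (start + 4096 - 1) >> 12; /* 2: start rounded up   */
	unsigned long end_pfn = (end + 1) >> 12;          /* 5: end+1 rounded down */
	/* Pages 2..4 are fully covered; the old code started at pfn 1,
	 * a page the resource only half-covers. */

The added end_pfn > pfn guard handles resources too small to contain any whole page, where end_pfn - pfn would otherwise underflow.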
diff --git a/kernel/sched.c b/kernel/sched.c
index 6a212c97f523..abb36b16b93b 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1521,7 +1521,7 @@ static unsigned long cpu_avg_load_per_task(int cpu)
1521 1521
1522#ifdef CONFIG_FAIR_GROUP_SCHED 1522#ifdef CONFIG_FAIR_GROUP_SCHED
1523 1523
1524static __read_mostly unsigned long *update_shares_data; 1524static __read_mostly unsigned long __percpu *update_shares_data;
1525 1525
1526static void __set_se_shares(struct sched_entity *se, unsigned long shares); 1526static void __set_se_shares(struct sched_entity *se, unsigned long shares);
1527 1527
@@ -8813,7 +8813,7 @@ struct cgroup_subsys cpu_cgroup_subsys = {
8813struct cpuacct { 8813struct cpuacct {
8814 struct cgroup_subsys_state css; 8814 struct cgroup_subsys_state css;
8815 /* cpuusage holds pointer to a u64-type object on every cpu */ 8815 /* cpuusage holds pointer to a u64-type object on every cpu */
8816 u64 *cpuusage; 8816 u64 __percpu *cpuusage;
8817 struct percpu_counter cpustat[CPUACCT_STAT_NSTATS]; 8817 struct percpu_counter cpustat[CPUACCT_STAT_NSTATS];
8818 struct cpuacct *parent; 8818 struct cpuacct *parent;
8819}; 8819};
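
The sched.c (and stop_machine.c) hunks only add __percpu markers, but the marker matters: it is a sparse address-space annotation, and a pointer carrying it must be translated through the per-cpu accessors rather than dereferenced directly. A minimal sketch of the discipline, with hypothetical names:

	static u64 __percpu *cpuusage;	/* annotated per-cpu pointer */

	static int stat_init(void)
	{
		cpuusage = alloc_percpu(u64);
		return cpuusage ? 0 : -ENOMEM;
	}

	static void stat_add(u64 delta)
	{
		/* correct: resolve this CPU's instance first */
		preempt_disable();
		*this_cpu_ptr(cpuusage) += delta;
		preempt_enable();
		/* "*cpuusage += delta;" would draw a sparse warning and
		 * compute a bogus address at runtime */
	}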
diff --git a/kernel/signal.c b/kernel/signal.c
index 934ae5e687b9..5bb9baffa4f1 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -159,6 +159,10 @@ void recalc_sigpending(void)
159 159
160/* Given the mask, find the first available signal that should be serviced. */ 160/* Given the mask, find the first available signal that should be serviced. */
161 161
162#define SYNCHRONOUS_MASK \
163 (sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
164 sigmask(SIGTRAP) | sigmask(SIGFPE))
165
162int next_signal(struct sigpending *pending, sigset_t *mask) 166int next_signal(struct sigpending *pending, sigset_t *mask)
163{ 167{
164 unsigned long i, *s, *m, x; 168 unsigned long i, *s, *m, x;
@@ -166,26 +170,39 @@ int next_signal(struct sigpending *pending, sigset_t *mask)
166 170
167 s = pending->signal.sig; 171 s = pending->signal.sig;
168 m = mask->sig; 172 m = mask->sig;
173
174 /*
175 * Handle the first word specially: it contains the
176 * synchronous signals that need to be dequeued first.
177 */
178 x = *s &~ *m;
179 if (x) {
180 if (x & SYNCHRONOUS_MASK)
181 x &= SYNCHRONOUS_MASK;
182 sig = ffz(~x) + 1;
183 return sig;
184 }
185
169 switch (_NSIG_WORDS) { 186 switch (_NSIG_WORDS) {
170 default: 187 default:
171 for (i = 0; i < _NSIG_WORDS; ++i, ++s, ++m) 188 for (i = 1; i < _NSIG_WORDS; ++i) {
172 if ((x = *s &~ *m) != 0) { 189 x = *++s &~ *++m;
173 sig = ffz(~x) + i*_NSIG_BPW + 1; 190 if (!x)
174 break; 191 continue;
175 } 192 sig = ffz(~x) + i*_NSIG_BPW + 1;
193 break;
194 }
176 break; 195 break;
177 196
178 case 2: if ((x = s[0] &~ m[0]) != 0) 197 case 2:
179 sig = 1; 198 x = s[1] &~ m[1];
180 else if ((x = s[1] &~ m[1]) != 0) 199 if (!x)
181 sig = _NSIG_BPW + 1;
182 else
183 break; 200 break;
184 sig += ffz(~x); 201 sig = ffz(~x) + _NSIG_BPW + 1;
185 break; 202 break;
186 203
187 case 1: if ((x = *s &~ *m) != 0) 204 case 1:
188 sig = ffz(~x) + 1; 205 /* Nothing to do */
189 break; 206 break;
190 } 207 }
191 208
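
The rewritten next_signal() gives the first signal word special treatment: if any synchronous, fault-generated signal is pending and unblocked, it wins over everything else in that word. Since ffz(~x) is just the index of the lowest set bit, the selection can be sketched in plain C with __builtin_ctzl(); sigmask() below mirrors the kernel's 1UL << (sig - 1):

	#include <signal.h>

	#define sigmask(sig)	(1UL << ((sig) - 1))
	#define SYNCHRONOUS_MASK \
		(sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
		 sigmask(SIGTRAP) | sigmask(SIGFPE))

	static int first_signal(unsigned long pending, unsigned long blocked)
	{
		unsigned long x = pending & ~blocked;

		if (!x)
			return 0;
		if (x & SYNCHRONOUS_MASK)	/* fault signals jump the queue */
			x &= SYNCHRONOUS_MASK;
		return __builtin_ctzl(x) + 1;	/* ffz(~x) + 1 in kernel terms */
	}

With SIGINT and SIGSEGV both pending, this returns SIGSEGV (11) even though SIGINT (2) has the lower signal number.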
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index 912823e2a11b..9bb9fb1bd79c 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -45,7 +45,7 @@ static int refcount;
45static struct workqueue_struct *stop_machine_wq; 45static struct workqueue_struct *stop_machine_wq;
46static struct stop_machine_data active, idle; 46static struct stop_machine_data active, idle;
47static const struct cpumask *active_cpus; 47static const struct cpumask *active_cpus;
48static void *stop_machine_work; 48static void __percpu *stop_machine_work;
49 49
50static void set_state(enum stopmachine_state newstate) 50static void set_state(enum stopmachine_state newstate)
51{ 51{
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 33e7a38b6eb9..0ef19c614f6d 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -50,6 +50,7 @@
50#include <linux/ftrace.h> 50#include <linux/ftrace.h>
51#include <linux/slow-work.h> 51#include <linux/slow-work.h>
52#include <linux/perf_event.h> 52#include <linux/perf_event.h>
53#include <linux/kprobes.h>
53 54
54#include <asm/uaccess.h> 55#include <asm/uaccess.h>
55#include <asm/processor.h> 56#include <asm/processor.h>
@@ -1450,6 +1451,17 @@ static struct ctl_table debug_table[] = {
1450 .proc_handler = proc_dointvec 1451 .proc_handler = proc_dointvec
1451 }, 1452 },
1452#endif 1453#endif
1454#if defined(CONFIG_OPTPROBES)
1455 {
1456 .procname = "kprobes-optimization",
1457 .data = &sysctl_kprobes_optimization,
1458 .maxlen = sizeof(int),
1459 .mode = 0644,
1460 .proc_handler = proc_kprobes_optimization_handler,
1461 .extra1 = &zero,
1462 .extra2 = &one,
1463 },
1464#endif
1453 { } 1465 { }
1454}; 1466};
1455 1467
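
The new debug_table entry surfaces the kprobes optimizer as /proc/sys/debug/kprobes-optimization, clamped to 0 or 1 by extra1/extra2. A hypothetical userspace sketch of flipping it, with the path derived from the table above and assuming a CONFIG_OPTPROBES kernel:

	#include <fcntl.h>
	#include <unistd.h>

	int main(void)
	{
		int fd = open("/proc/sys/debug/kprobes-optimization", O_WRONLY);

		if (fd < 0)
			return 1;
		write(fd, "0", 1);	/* 0 disables optimization, 1 re-enables */
		close(fd);
		return 0;
	}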
diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c
index 8f5d16e0707a..8cd50d8f9bde 100644
--- a/kernel/sysctl_binary.c
+++ b/kernel/sysctl_binary.c
@@ -1331,7 +1331,7 @@ static ssize_t binary_sysctl(const int *name, int nlen,
1331 ssize_t result; 1331 ssize_t result;
1332 char *pathname; 1332 char *pathname;
1333 int flags; 1333 int flags;
1334 int acc_mode, fmode; 1334 int acc_mode;
1335 1335
1336 pathname = sysctl_getname(name, nlen, &table); 1336 pathname = sysctl_getname(name, nlen, &table);
1337 result = PTR_ERR(pathname); 1337 result = PTR_ERR(pathname);
@@ -1342,15 +1342,12 @@ static ssize_t binary_sysctl(const int *name, int nlen,
1342 if (oldval && oldlen && newval && newlen) { 1342 if (oldval && oldlen && newval && newlen) {
1343 flags = O_RDWR; 1343 flags = O_RDWR;
1344 acc_mode = MAY_READ | MAY_WRITE; 1344 acc_mode = MAY_READ | MAY_WRITE;
1345 fmode = FMODE_READ | FMODE_WRITE;
1346 } else if (newval && newlen) { 1345 } else if (newval && newlen) {
1347 flags = O_WRONLY; 1346 flags = O_WRONLY;
1348 acc_mode = MAY_WRITE; 1347 acc_mode = MAY_WRITE;
1349 fmode = FMODE_WRITE;
1350 } else if (oldval && oldlen) { 1348 } else if (oldval && oldlen) {
1351 flags = O_RDONLY; 1349 flags = O_RDONLY;
1352 acc_mode = MAY_READ; 1350 acc_mode = MAY_READ;
1353 fmode = FMODE_READ;
1354 } else { 1351 } else {
1355 result = 0; 1352 result = 0;
1356 goto out_putname; 1353 goto out_putname;
@@ -1361,7 +1358,7 @@ static ssize_t binary_sysctl(const int *name, int nlen,
1361 if (result) 1358 if (result)
1362 goto out_putname; 1359 goto out_putname;
1363 1360
1364 result = may_open(&nd.path, acc_mode, fmode); 1361 result = may_open(&nd.path, acc_mode, flags);
1365 if (result) 1362 if (result)
1366 goto out_putpath; 1363 goto out_putpath;
1367 1364
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 8c1b2d290718..0287f9f52f5a 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -20,6 +20,7 @@
20#include <linux/cpu.h> 20#include <linux/cpu.h>
21#include <linux/fs.h> 21#include <linux/fs.h>
22 22
23#include <asm/local.h>
23#include "trace.h" 24#include "trace.h"
24 25
25/* 26/*
diff --git a/kernel/trace/ring_buffer_benchmark.c b/kernel/trace/ring_buffer_benchmark.c
index b2477caf09c2..df74c7982255 100644
--- a/kernel/trace/ring_buffer_benchmark.c
+++ b/kernel/trace/ring_buffer_benchmark.c
@@ -8,6 +8,7 @@
8#include <linux/kthread.h> 8#include <linux/kthread.h>
9#include <linux/module.h> 9#include <linux/module.h>
10#include <linux/time.h> 10#include <linux/time.h>
11#include <asm/local.h>
11 12
12struct rb_page { 13struct rb_page {
13 u64 ts; 14 u64 ts;
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 032c57ca6502..ed01fdba4a55 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -92,12 +92,12 @@ DEFINE_PER_CPU(int, ftrace_cpu_disabled);
92static inline void ftrace_disable_cpu(void) 92static inline void ftrace_disable_cpu(void)
93{ 93{
94 preempt_disable(); 94 preempt_disable();
95 __this_cpu_inc(per_cpu_var(ftrace_cpu_disabled)); 95 __this_cpu_inc(ftrace_cpu_disabled);
96} 96}
97 97
98static inline void ftrace_enable_cpu(void) 98static inline void ftrace_enable_cpu(void)
99{ 99{
100 __this_cpu_dec(per_cpu_var(ftrace_cpu_disabled)); 100 __this_cpu_dec(ftrace_cpu_disabled);
101 preempt_enable(); 101 preempt_enable();
102} 102}
103 103
@@ -1166,7 +1166,7 @@ trace_function(struct trace_array *tr,
1166 struct ftrace_entry *entry; 1166 struct ftrace_entry *entry;
1167 1167
1168 /* If we are reading the ring buffer, don't trace */ 1168 /* If we are reading the ring buffer, don't trace */
1169 if (unlikely(__this_cpu_read(per_cpu_var(ftrace_cpu_disabled)))) 1169 if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
1170 return; 1170 return;
1171 1171
1172 event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry), 1172 event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index e998a824e9db..3fc2a575664f 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -188,7 +188,7 @@ static int __trace_graph_entry(struct trace_array *tr,
188 struct ring_buffer *buffer = tr->buffer; 188 struct ring_buffer *buffer = tr->buffer;
189 struct ftrace_graph_ent_entry *entry; 189 struct ftrace_graph_ent_entry *entry;
190 190
191 if (unlikely(__this_cpu_read(per_cpu_var(ftrace_cpu_disabled)))) 191 if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
192 return 0; 192 return 0;
193 193
194 event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT, 194 event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
@@ -247,7 +247,7 @@ static void __trace_graph_return(struct trace_array *tr,
247 struct ring_buffer *buffer = tr->buffer; 247 struct ring_buffer *buffer = tr->buffer;
248 struct ftrace_graph_ret_entry *entry; 248 struct ftrace_graph_ret_entry *entry;
249 249
250 if (unlikely(__this_cpu_read(per_cpu_var(ftrace_cpu_disabled)))) 250 if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
251 return; 251 return;
252 252
253 event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET, 253 event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
diff --git a/mm/Kconfig b/mm/Kconfig
index d34c2b971032..9c61158308dc 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -115,6 +115,10 @@ config SPARSEMEM_EXTREME
115config SPARSEMEM_VMEMMAP_ENABLE 115config SPARSEMEM_VMEMMAP_ENABLE
116 bool 116 bool
117 117
118config SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
119 def_bool y
120 depends on SPARSEMEM && X86_64
121
118config SPARSEMEM_VMEMMAP 122config SPARSEMEM_VMEMMAP
119 bool "Sparse Memory virtual memmap" 123 bool "Sparse Memory virtual memmap"
120 depends on SPARSEMEM && SPARSEMEM_VMEMMAP_ENABLE 124 depends on SPARSEMEM && SPARSEMEM_VMEMMAP_ENABLE
diff --git a/mm/bootmem.c b/mm/bootmem.c
index 7d1486875e1c..d7c791ef0036 100644
--- a/mm/bootmem.c
+++ b/mm/bootmem.c
@@ -13,6 +13,7 @@
13#include <linux/bootmem.h> 13#include <linux/bootmem.h>
14#include <linux/module.h> 14#include <linux/module.h>
15#include <linux/kmemleak.h> 15#include <linux/kmemleak.h>
16#include <linux/range.h>
16 17
17#include <asm/bug.h> 18#include <asm/bug.h>
18#include <asm/io.h> 19#include <asm/io.h>
@@ -32,6 +33,7 @@ unsigned long max_pfn;
32unsigned long saved_max_pfn; 33unsigned long saved_max_pfn;
33#endif 34#endif
34 35
36#ifndef CONFIG_NO_BOOTMEM
35bootmem_data_t bootmem_node_data[MAX_NUMNODES] __initdata; 37bootmem_data_t bootmem_node_data[MAX_NUMNODES] __initdata;
36 38
37static struct list_head bdata_list __initdata = LIST_HEAD_INIT(bdata_list); 39static struct list_head bdata_list __initdata = LIST_HEAD_INIT(bdata_list);
@@ -142,7 +144,7 @@ unsigned long __init init_bootmem(unsigned long start, unsigned long pages)
142 min_low_pfn = start; 144 min_low_pfn = start;
143 return init_bootmem_core(NODE_DATA(0)->bdata, start, 0, pages); 145 return init_bootmem_core(NODE_DATA(0)->bdata, start, 0, pages);
144} 146}
145 147#endif
146/* 148/*
147 * free_bootmem_late - free bootmem pages directly to page allocator 149 * free_bootmem_late - free bootmem pages directly to page allocator
148 * @addr: starting address of the range 150 * @addr: starting address of the range
@@ -167,6 +169,60 @@ void __init free_bootmem_late(unsigned long addr, unsigned long size)
167 } 169 }
168} 170}
169 171
172#ifdef CONFIG_NO_BOOTMEM
173static void __init __free_pages_memory(unsigned long start, unsigned long end)
174{
175 int i;
176 unsigned long start_aligned, end_aligned;
177 int order = ilog2(BITS_PER_LONG);
178
179 start_aligned = (start + (BITS_PER_LONG - 1)) & ~(BITS_PER_LONG - 1);
180 end_aligned = end & ~(BITS_PER_LONG - 1);
181
182 if (end_aligned <= start_aligned) {
183#if 1
184 printk(KERN_DEBUG " %lx - %lx\n", start, end);
185#endif
186 for (i = start; i < end; i++)
187 __free_pages_bootmem(pfn_to_page(i), 0);
188
189 return;
190 }
191
192#if 1
193 printk(KERN_DEBUG " %lx %lx - %lx %lx\n",
194 start, start_aligned, end_aligned, end);
195#endif
196 for (i = start; i < start_aligned; i++)
197 __free_pages_bootmem(pfn_to_page(i), 0);
198
199 for (i = start_aligned; i < end_aligned; i += BITS_PER_LONG)
200 __free_pages_bootmem(pfn_to_page(i), order);
201
202 for (i = end_aligned; i < end; i++)
203 __free_pages_bootmem(pfn_to_page(i), 0);
204}
205
206unsigned long __init free_all_memory_core_early(int nodeid)
207{
208 int i;
209 u64 start, end;
210 unsigned long count = 0;
211 struct range *range = NULL;
212 int nr_range;
213
214 nr_range = get_free_all_memory_range(&range, nodeid);
215
216 for (i = 0; i < nr_range; i++) {
217 start = range[i].start;
218 end = range[i].end;
219 count += end - start;
220 __free_pages_memory(start, end);
221 }
222
223 return count;
224}
225#else
170static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata) 226static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
171{ 227{
172 int aligned; 228 int aligned;
@@ -227,6 +283,7 @@ static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
227 283
228 return count; 284 return count;
229} 285}
286#endif
230 287
231/** 288/**
232 * free_all_bootmem_node - release a node's free pages to the buddy allocator 289 * free_all_bootmem_node - release a node's free pages to the buddy allocator
@@ -237,7 +294,12 @@ static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
237unsigned long __init free_all_bootmem_node(pg_data_t *pgdat) 294unsigned long __init free_all_bootmem_node(pg_data_t *pgdat)
238{ 295{
239 register_page_bootmem_info_node(pgdat); 296 register_page_bootmem_info_node(pgdat);
297#ifdef CONFIG_NO_BOOTMEM
298 /* free_all_memory_core_early(MAX_NUMNODES) will be called later */
299 return 0;
300#else
240 return free_all_bootmem_core(pgdat->bdata); 301 return free_all_bootmem_core(pgdat->bdata);
302#endif
241} 303}
242 304
243/** 305/**
@@ -247,9 +309,14 @@ unsigned long __init free_all_bootmem_node(pg_data_t *pgdat)
247 */ 309 */
248unsigned long __init free_all_bootmem(void) 310unsigned long __init free_all_bootmem(void)
249{ 311{
312#ifdef CONFIG_NO_BOOTMEM
313 return free_all_memory_core_early(NODE_DATA(0)->node_id);
314#else
250 return free_all_bootmem_core(NODE_DATA(0)->bdata); 315 return free_all_bootmem_core(NODE_DATA(0)->bdata);
316#endif
251} 317}
252 318
319#ifndef CONFIG_NO_BOOTMEM
253static void __init __free(bootmem_data_t *bdata, 320static void __init __free(bootmem_data_t *bdata,
254 unsigned long sidx, unsigned long eidx) 321 unsigned long sidx, unsigned long eidx)
255{ 322{
@@ -344,6 +411,7 @@ static int __init mark_bootmem(unsigned long start, unsigned long end,
344 } 411 }
345 BUG(); 412 BUG();
346} 413}
414#endif
347 415
348/** 416/**
349 * free_bootmem_node - mark a page range as usable 417 * free_bootmem_node - mark a page range as usable
@@ -358,6 +426,12 @@ static int __init mark_bootmem(unsigned long start, unsigned long end,
358void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr, 426void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
359 unsigned long size) 427 unsigned long size)
360{ 428{
429#ifdef CONFIG_NO_BOOTMEM
430 free_early(physaddr, physaddr + size);
431#if 0
432 printk(KERN_DEBUG "free %lx %lx\n", physaddr, size);
433#endif
434#else
361 unsigned long start, end; 435 unsigned long start, end;
362 436
363 kmemleak_free_part(__va(physaddr), size); 437 kmemleak_free_part(__va(physaddr), size);
@@ -366,6 +440,7 @@ void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
366 end = PFN_DOWN(physaddr + size); 440 end = PFN_DOWN(physaddr + size);
367 441
368 mark_bootmem_node(pgdat->bdata, start, end, 0, 0); 442 mark_bootmem_node(pgdat->bdata, start, end, 0, 0);
443#endif
369} 444}
370 445
371/** 446/**
@@ -379,6 +454,12 @@ void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
379 */ 454 */
380void __init free_bootmem(unsigned long addr, unsigned long size) 455void __init free_bootmem(unsigned long addr, unsigned long size)
381{ 456{
457#ifdef CONFIG_NO_BOOTMEM
458 free_early(addr, addr + size);
459#if 0
460 printk(KERN_DEBUG "free %lx %lx\n", addr, size);
461#endif
462#else
382 unsigned long start, end; 463 unsigned long start, end;
383 464
384 kmemleak_free_part(__va(addr), size); 465 kmemleak_free_part(__va(addr), size);
@@ -387,6 +468,7 @@ void __init free_bootmem(unsigned long addr, unsigned long size)
387 end = PFN_DOWN(addr + size); 468 end = PFN_DOWN(addr + size);
388 469
389 mark_bootmem(start, end, 0, 0); 470 mark_bootmem(start, end, 0, 0);
471#endif
390} 472}
391 473
392/** 474/**
@@ -403,12 +485,17 @@ void __init free_bootmem(unsigned long addr, unsigned long size)
403int __init reserve_bootmem_node(pg_data_t *pgdat, unsigned long physaddr, 485int __init reserve_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
404 unsigned long size, int flags) 486 unsigned long size, int flags)
405{ 487{
488#ifdef CONFIG_NO_BOOTMEM
489 panic("no bootmem");
490 return 0;
491#else
406 unsigned long start, end; 492 unsigned long start, end;
407 493
408 start = PFN_DOWN(physaddr); 494 start = PFN_DOWN(physaddr);
409 end = PFN_UP(physaddr + size); 495 end = PFN_UP(physaddr + size);
410 496
411 return mark_bootmem_node(pgdat->bdata, start, end, 1, flags); 497 return mark_bootmem_node(pgdat->bdata, start, end, 1, flags);
498#endif
412} 499}
413 500
414/** 501/**
@@ -424,14 +511,20 @@ int __init reserve_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
424int __init reserve_bootmem(unsigned long addr, unsigned long size, 511int __init reserve_bootmem(unsigned long addr, unsigned long size,
425 int flags) 512 int flags)
426{ 513{
514#ifdef CONFIG_NO_BOOTMEM
515 panic("no bootmem");
516 return 0;
517#else
427 unsigned long start, end; 518 unsigned long start, end;
428 519
429 start = PFN_DOWN(addr); 520 start = PFN_DOWN(addr);
430 end = PFN_UP(addr + size); 521 end = PFN_UP(addr + size);
431 522
432 return mark_bootmem(start, end, 1, flags); 523 return mark_bootmem(start, end, 1, flags);
524#endif
433} 525}
434 526
527#ifndef CONFIG_NO_BOOTMEM
435static unsigned long __init align_idx(struct bootmem_data *bdata, 528static unsigned long __init align_idx(struct bootmem_data *bdata,
436 unsigned long idx, unsigned long step) 529 unsigned long idx, unsigned long step)
437{ 530{
@@ -582,12 +675,33 @@ static void * __init alloc_arch_preferred_bootmem(bootmem_data_t *bdata,
582#endif 675#endif
583 return NULL; 676 return NULL;
584} 677}
678#endif
585 679
586static void * __init ___alloc_bootmem_nopanic(unsigned long size, 680static void * __init ___alloc_bootmem_nopanic(unsigned long size,
587 unsigned long align, 681 unsigned long align,
588 unsigned long goal, 682 unsigned long goal,
589 unsigned long limit) 683 unsigned long limit)
590{ 684{
685#ifdef CONFIG_NO_BOOTMEM
686 void *ptr;
687
688 if (WARN_ON_ONCE(slab_is_available()))
689 return kzalloc(size, GFP_NOWAIT);
690
691restart:
692
693 ptr = __alloc_memory_core_early(MAX_NUMNODES, size, align, goal, limit);
694
695 if (ptr)
696 return ptr;
697
698 if (goal != 0) {
699 goal = 0;
700 goto restart;
701 }
702
703 return NULL;
704#else
591 bootmem_data_t *bdata; 705 bootmem_data_t *bdata;
592 void *region; 706 void *region;
593 707
@@ -613,6 +727,7 @@ restart:
613 } 727 }
614 728
615 return NULL; 729 return NULL;
730#endif
616} 731}
617 732
618/** 733/**
@@ -631,7 +746,13 @@ restart:
631void * __init __alloc_bootmem_nopanic(unsigned long size, unsigned long align, 746void * __init __alloc_bootmem_nopanic(unsigned long size, unsigned long align,
632 unsigned long goal) 747 unsigned long goal)
633{ 748{
634 return ___alloc_bootmem_nopanic(size, align, goal, 0); 749 unsigned long limit = 0;
750
751#ifdef CONFIG_NO_BOOTMEM
752 limit = -1UL;
753#endif
754
755 return ___alloc_bootmem_nopanic(size, align, goal, limit);
635} 756}
636 757
637static void * __init ___alloc_bootmem(unsigned long size, unsigned long align, 758static void * __init ___alloc_bootmem(unsigned long size, unsigned long align,
@@ -665,9 +786,16 @@ static void * __init ___alloc_bootmem(unsigned long size, unsigned long align,
665void * __init __alloc_bootmem(unsigned long size, unsigned long align, 786void * __init __alloc_bootmem(unsigned long size, unsigned long align,
666 unsigned long goal) 787 unsigned long goal)
667{ 788{
668 return ___alloc_bootmem(size, align, goal, 0); 789 unsigned long limit = 0;
790
791#ifdef CONFIG_NO_BOOTMEM
792 limit = -1UL;
793#endif
794
795 return ___alloc_bootmem(size, align, goal, limit);
669} 796}
670 797
798#ifndef CONFIG_NO_BOOTMEM
671static void * __init ___alloc_bootmem_node(bootmem_data_t *bdata, 799static void * __init ___alloc_bootmem_node(bootmem_data_t *bdata,
672 unsigned long size, unsigned long align, 800 unsigned long size, unsigned long align,
673 unsigned long goal, unsigned long limit) 801 unsigned long goal, unsigned long limit)
@@ -684,6 +812,7 @@ static void * __init ___alloc_bootmem_node(bootmem_data_t *bdata,
684 812
685 return ___alloc_bootmem(size, align, goal, limit); 813 return ___alloc_bootmem(size, align, goal, limit);
686} 814}
815#endif
687 816
688/** 817/**
689 * __alloc_bootmem_node - allocate boot memory from a specific node 818 * __alloc_bootmem_node - allocate boot memory from a specific node
@@ -706,7 +835,46 @@ void * __init __alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
706 if (WARN_ON_ONCE(slab_is_available())) 835 if (WARN_ON_ONCE(slab_is_available()))
707 return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id); 836 return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
708 837
838#ifdef CONFIG_NO_BOOTMEM
839 return __alloc_memory_core_early(pgdat->node_id, size, align,
840 goal, -1ULL);
841#else
709 return ___alloc_bootmem_node(pgdat->bdata, size, align, goal, 0); 842 return ___alloc_bootmem_node(pgdat->bdata, size, align, goal, 0);
843#endif
844}
845
846void * __init __alloc_bootmem_node_high(pg_data_t *pgdat, unsigned long size,
847 unsigned long align, unsigned long goal)
848{
849#ifdef MAX_DMA32_PFN
850 unsigned long end_pfn;
851
852 if (WARN_ON_ONCE(slab_is_available()))
853 return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
854
 855	/* update the goal according to MAX_DMA32_PFN */
856 end_pfn = pgdat->node_start_pfn + pgdat->node_spanned_pages;
857
858 if (end_pfn > MAX_DMA32_PFN + (128 >> (20 - PAGE_SHIFT)) &&
859 (goal >> PAGE_SHIFT) < MAX_DMA32_PFN) {
860 void *ptr;
861 unsigned long new_goal;
862
863 new_goal = MAX_DMA32_PFN << PAGE_SHIFT;
864#ifdef CONFIG_NO_BOOTMEM
865 ptr = __alloc_memory_core_early(pgdat->node_id, size, align,
866 new_goal, -1ULL);
867#else
868 ptr = alloc_bootmem_core(pgdat->bdata, size, align,
869 new_goal, 0);
870#endif
871 if (ptr)
872 return ptr;
873 }
874#endif
875
876 return __alloc_bootmem_node(pgdat, size, align, goal);
877
710} 878}
711 879
712#ifdef CONFIG_SPARSEMEM 880#ifdef CONFIG_SPARSEMEM
@@ -720,6 +888,16 @@ void * __init __alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
720void * __init alloc_bootmem_section(unsigned long size, 888void * __init alloc_bootmem_section(unsigned long size,
721 unsigned long section_nr) 889 unsigned long section_nr)
722{ 890{
891#ifdef CONFIG_NO_BOOTMEM
892 unsigned long pfn, goal, limit;
893
894 pfn = section_nr_to_pfn(section_nr);
895 goal = pfn << PAGE_SHIFT;
896 limit = section_nr_to_pfn(section_nr + 1) << PAGE_SHIFT;
897
898 return __alloc_memory_core_early(early_pfn_to_nid(pfn), size,
899 SMP_CACHE_BYTES, goal, limit);
900#else
723 bootmem_data_t *bdata; 901 bootmem_data_t *bdata;
724 unsigned long pfn, goal, limit; 902 unsigned long pfn, goal, limit;
725 903
@@ -729,6 +907,7 @@ void * __init alloc_bootmem_section(unsigned long size,
729 bdata = &bootmem_node_data[early_pfn_to_nid(pfn)]; 907 bdata = &bootmem_node_data[early_pfn_to_nid(pfn)];
730 908
731 return alloc_bootmem_core(bdata, size, SMP_CACHE_BYTES, goal, limit); 909 return alloc_bootmem_core(bdata, size, SMP_CACHE_BYTES, goal, limit);
910#endif
732} 911}
733#endif 912#endif
734 913
@@ -740,11 +919,16 @@ void * __init __alloc_bootmem_node_nopanic(pg_data_t *pgdat, unsigned long size,
740 if (WARN_ON_ONCE(slab_is_available())) 919 if (WARN_ON_ONCE(slab_is_available()))
741 return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id); 920 return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
742 921
922#ifdef CONFIG_NO_BOOTMEM
923 ptr = __alloc_memory_core_early(pgdat->node_id, size, align,
924 goal, -1ULL);
925#else
743 ptr = alloc_arch_preferred_bootmem(pgdat->bdata, size, align, goal, 0); 926 ptr = alloc_arch_preferred_bootmem(pgdat->bdata, size, align, goal, 0);
744 if (ptr) 927 if (ptr)
745 return ptr; 928 return ptr;
746 929
747 ptr = alloc_bootmem_core(pgdat->bdata, size, align, goal, 0); 930 ptr = alloc_bootmem_core(pgdat->bdata, size, align, goal, 0);
931#endif
748 if (ptr) 932 if (ptr)
749 return ptr; 933 return ptr;
750 934
@@ -795,6 +979,11 @@ void * __init __alloc_bootmem_low_node(pg_data_t *pgdat, unsigned long size,
795 if (WARN_ON_ONCE(slab_is_available())) 979 if (WARN_ON_ONCE(slab_is_available()))
796 return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id); 980 return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
797 981
982#ifdef CONFIG_NO_BOOTMEM
983 return __alloc_memory_core_early(pgdat->node_id, size, align,
984 goal, ARCH_LOW_ADDRESS_LIMIT);
985#else
798 return ___alloc_bootmem_node(pgdat->bdata, size, align, 986 return ___alloc_bootmem_node(pgdat->bdata, size, align,
799 goal, ARCH_LOW_ADDRESS_LIMIT); 987 goal, ARCH_LOW_ADDRESS_LIMIT);
988#endif
800} 989}
diff --git a/mm/filemap.c b/mm/filemap.c
index 698ea80f2102..148b52a5bb7e 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1117,7 +1117,7 @@ readpage:
1117 if (!PageUptodate(page)) { 1117 if (!PageUptodate(page)) {
1118 if (page->mapping == NULL) { 1118 if (page->mapping == NULL) {
1119 /* 1119 /*
1120 * invalidate_inode_pages got it 1120 * invalidate_mapping_pages got it
1121 */ 1121 */
1122 unlock_page(page); 1122 unlock_page(page);
1123 page_cache_release(page); 1123 page_cache_release(page);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 8deb9d0fd5b1..a6b17aa4740b 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1009,10 +1009,10 @@ static void drain_pages(unsigned int cpu)
1009 struct per_cpu_pageset *pset; 1009 struct per_cpu_pageset *pset;
1010 struct per_cpu_pages *pcp; 1010 struct per_cpu_pages *pcp;
1011 1011
1012 pset = zone_pcp(zone, cpu); 1012 local_irq_save(flags);
1013 pset = per_cpu_ptr(zone->pageset, cpu);
1013 1014
1014 pcp = &pset->pcp; 1015 pcp = &pset->pcp;
1015 local_irq_save(flags);
1016 free_pcppages_bulk(zone, pcp->count, pcp); 1016 free_pcppages_bulk(zone, pcp->count, pcp);
1017 pcp->count = 0; 1017 pcp->count = 0;
1018 local_irq_restore(flags); 1018 local_irq_restore(flags);
@@ -1096,7 +1096,6 @@ static void free_hot_cold_page(struct page *page, int cold)
1096 arch_free_page(page, 0); 1096 arch_free_page(page, 0);
1097 kernel_map_pages(page, 1, 0); 1097 kernel_map_pages(page, 1, 0);
1098 1098
1099 pcp = &zone_pcp(zone, get_cpu())->pcp;
1100 migratetype = get_pageblock_migratetype(page); 1099 migratetype = get_pageblock_migratetype(page);
1101 set_page_private(page, migratetype); 1100 set_page_private(page, migratetype);
1102 local_irq_save(flags); 1101 local_irq_save(flags);
@@ -1119,6 +1118,7 @@ static void free_hot_cold_page(struct page *page, int cold)
1119 migratetype = MIGRATE_MOVABLE; 1118 migratetype = MIGRATE_MOVABLE;
1120 } 1119 }
1121 1120
1121 pcp = &this_cpu_ptr(zone->pageset)->pcp;
1122 if (cold) 1122 if (cold)
1123 list_add_tail(&page->lru, &pcp->lists[migratetype]); 1123 list_add_tail(&page->lru, &pcp->lists[migratetype]);
1124 else 1124 else
@@ -1131,7 +1131,6 @@ static void free_hot_cold_page(struct page *page, int cold)
1131 1131
1132out: 1132out:
1133 local_irq_restore(flags); 1133 local_irq_restore(flags);
1134 put_cpu();
1135} 1134}
1136 1135
1137void free_hot_page(struct page *page) 1136void free_hot_page(struct page *page)
@@ -1181,17 +1180,15 @@ struct page *buffered_rmqueue(struct zone *preferred_zone,
1181 unsigned long flags; 1180 unsigned long flags;
1182 struct page *page; 1181 struct page *page;
1183 int cold = !!(gfp_flags & __GFP_COLD); 1182 int cold = !!(gfp_flags & __GFP_COLD);
1184 int cpu;
1185 1183
1186again: 1184again:
1187 cpu = get_cpu();
1188 if (likely(order == 0)) { 1185 if (likely(order == 0)) {
1189 struct per_cpu_pages *pcp; 1186 struct per_cpu_pages *pcp;
1190 struct list_head *list; 1187 struct list_head *list;
1191 1188
1192 pcp = &zone_pcp(zone, cpu)->pcp;
1193 list = &pcp->lists[migratetype];
1194 local_irq_save(flags); 1189 local_irq_save(flags);
1190 pcp = &this_cpu_ptr(zone->pageset)->pcp;
1191 list = &pcp->lists[migratetype];
1195 if (list_empty(list)) { 1192 if (list_empty(list)) {
1196 pcp->count += rmqueue_bulk(zone, 0, 1193 pcp->count += rmqueue_bulk(zone, 0,
1197 pcp->batch, list, 1194 pcp->batch, list,
@@ -1232,7 +1229,6 @@ again:
1232 __count_zone_vm_events(PGALLOC, zone, 1 << order); 1229 __count_zone_vm_events(PGALLOC, zone, 1 << order);
1233 zone_statistics(preferred_zone, zone); 1230 zone_statistics(preferred_zone, zone);
1234 local_irq_restore(flags); 1231 local_irq_restore(flags);
1235 put_cpu();
1236 1232
1237 VM_BUG_ON(bad_range(zone, page)); 1233 VM_BUG_ON(bad_range(zone, page));
1238 if (prep_new_page(page, order, gfp_flags)) 1234 if (prep_new_page(page, order, gfp_flags))
@@ -1241,7 +1237,6 @@ again:
1241 1237
1242failed: 1238failed:
1243 local_irq_restore(flags); 1239 local_irq_restore(flags);
1244 put_cpu();
1245 return NULL; 1240 return NULL;
1246} 1241}
1247 1242
@@ -2180,7 +2175,7 @@ void show_free_areas(void)
2180 for_each_online_cpu(cpu) { 2175 for_each_online_cpu(cpu) {
2181 struct per_cpu_pageset *pageset; 2176 struct per_cpu_pageset *pageset;
2182 2177
2183 pageset = zone_pcp(zone, cpu); 2178 pageset = per_cpu_ptr(zone->pageset, cpu);
2184 2179
2185 printk("CPU %4d: hi:%5d, btch:%4d usd:%4d\n", 2180 printk("CPU %4d: hi:%5d, btch:%4d usd:%4d\n",
2186 cpu, pageset->pcp.high, 2181 cpu, pageset->pcp.high,
@@ -2745,10 +2740,29 @@ static void build_zonelist_cache(pg_data_t *pgdat)
2745 2740
2746#endif /* CONFIG_NUMA */ 2741#endif /* CONFIG_NUMA */
2747 2742
2743/*
2744 * Boot pageset table. One per cpu which is going to be used for all
2745 * zones and all nodes. The parameters will be set in such a way
2746 * that an item put on a list will immediately be handed over to
2747 * the buddy list. This is safe since pageset manipulation is done
2748 * with interrupts disabled.
2749 *
2750 * The boot_pagesets must be kept even after bootup is complete for
2751 * unused processors and/or zones. They do play a role for bootstrapping
2752 * hotplugged processors.
2753 *
2754 * zoneinfo_show() and maybe other functions do
2755 * not check if the processor is online before following the pageset pointer.
2756 * Other parts of the kernel may not check if the zone is available.
2757 */
2758static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch);
2759static DEFINE_PER_CPU(struct per_cpu_pageset, boot_pageset);
2760
2748/* return values int ....just for stop_machine() */ 2761/* return values int ....just for stop_machine() */
2749static int __build_all_zonelists(void *dummy) 2762static int __build_all_zonelists(void *dummy)
2750{ 2763{
2751 int nid; 2764 int nid;
2765 int cpu;
2752 2766
2753#ifdef CONFIG_NUMA 2767#ifdef CONFIG_NUMA
2754 memset(node_load, 0, sizeof(node_load)); 2768 memset(node_load, 0, sizeof(node_load));
@@ -2759,6 +2773,23 @@ static int __build_all_zonelists(void *dummy)
2759 build_zonelists(pgdat); 2773 build_zonelists(pgdat);
2760 build_zonelist_cache(pgdat); 2774 build_zonelist_cache(pgdat);
2761 } 2775 }
2776
2777 /*
2778 * Initialize the boot_pagesets that are going to be used
2779 * for bootstrapping processors. The real pagesets for
2780 * each zone will be allocated later when the per cpu
2781 * allocator is available.
2782 *
2783 * boot_pagesets are used also for bootstrapping offline
2784 * cpus if the system is already booted because the pagesets
2785 * are needed to initialize allocators on a specific cpu too.
2786 * F.e. the percpu allocator needs the page allocator which
2787 * needs the percpu allocator in order to allocate its pagesets
2788 * (a chicken-egg dilemma).
2789 */
2790 for_each_possible_cpu(cpu)
2791 setup_pageset(&per_cpu(boot_pageset, cpu), 0);
2792
2762 return 0; 2793 return 0;
2763} 2794}
2764 2795
@@ -3096,121 +3127,33 @@ static void setup_pagelist_highmark(struct per_cpu_pageset *p,
3096 pcp->batch = PAGE_SHIFT * 8; 3127 pcp->batch = PAGE_SHIFT * 8;
3097} 3128}
3098 3129
3099
3100#ifdef CONFIG_NUMA
3101/*
3102 * Boot pageset table. One per cpu which is going to be used for all
3103 * zones and all nodes. The parameters will be set in such a way
3104 * that an item put on a list will immediately be handed over to
3105 * the buddy list. This is safe since pageset manipulation is done
3106 * with interrupts disabled.
3107 *
3108 * Some NUMA counter updates may also be caught by the boot pagesets.
3109 *
3110 * The boot_pagesets must be kept even after bootup is complete for
3111 * unused processors and/or zones. They do play a role for bootstrapping
3112 * hotplugged processors.
3113 *
3114 * zoneinfo_show() and maybe other functions do
3115 * not check if the processor is online before following the pageset pointer.
3116 * Other parts of the kernel may not check if the zone is available.
3117 */
3118static struct per_cpu_pageset boot_pageset[NR_CPUS];
3119
3120/* 3130/*
3121 * Dynamically allocate memory for the 3131 * Allocate per cpu pagesets and initialize them.
3122 * per cpu pageset array in struct zone. 3132 * Before this call only boot pagesets were available.
 3133 * Boot pagesets will no longer be used by this processor
3134 * after setup_per_cpu_pageset().
3123 */ 3135 */
3124static int __cpuinit process_zones(int cpu) 3136void __init setup_per_cpu_pageset(void)
3125{ 3137{
3126 struct zone *zone, *dzone; 3138 struct zone *zone;
3127 int node = cpu_to_node(cpu); 3139 int cpu;
3128
3129 node_set_state(node, N_CPU); /* this node has a cpu */
3130 3140
3131 for_each_populated_zone(zone) { 3141 for_each_populated_zone(zone) {
3132 zone_pcp(zone, cpu) = kmalloc_node(sizeof(struct per_cpu_pageset), 3142 zone->pageset = alloc_percpu(struct per_cpu_pageset);
3133 GFP_KERNEL, node);
3134 if (!zone_pcp(zone, cpu))
3135 goto bad;
3136 3143
3137 setup_pageset(zone_pcp(zone, cpu), zone_batchsize(zone)); 3144 for_each_possible_cpu(cpu) {
3145 struct per_cpu_pageset *pcp = per_cpu_ptr(zone->pageset, cpu);
3138 3146
3139 if (percpu_pagelist_fraction) 3147 setup_pageset(pcp, zone_batchsize(zone));
3140 setup_pagelist_highmark(zone_pcp(zone, cpu),
3141 (zone->present_pages / percpu_pagelist_fraction));
3142 }
3143 3148
3144 return 0; 3149 if (percpu_pagelist_fraction)
3145bad: 3150 setup_pagelist_highmark(pcp,
3146 for_each_zone(dzone) { 3151 (zone->present_pages /
3147 if (!populated_zone(dzone)) 3152 percpu_pagelist_fraction));
3148 continue; 3153 }
3149 if (dzone == zone)
3150 break;
3151 kfree(zone_pcp(dzone, cpu));
3152 zone_pcp(dzone, cpu) = &boot_pageset[cpu];
3153 }
3154 return -ENOMEM;
3155}
3156
3157static inline void free_zone_pagesets(int cpu)
3158{
3159 struct zone *zone;
3160
3161 for_each_zone(zone) {
3162 struct per_cpu_pageset *pset = zone_pcp(zone, cpu);
3163
3164 /* Free per_cpu_pageset if it is slab allocated */
3165 if (pset != &boot_pageset[cpu])
3166 kfree(pset);
3167 zone_pcp(zone, cpu) = &boot_pageset[cpu];
3168 }
3169}
3170
3171static int __cpuinit pageset_cpuup_callback(struct notifier_block *nfb,
3172 unsigned long action,
3173 void *hcpu)
3174{
3175 int cpu = (long)hcpu;
3176 int ret = NOTIFY_OK;
3177
3178 switch (action) {
3179 case CPU_UP_PREPARE:
3180 case CPU_UP_PREPARE_FROZEN:
3181 if (process_zones(cpu))
3182 ret = NOTIFY_BAD;
3183 break;
3184 case CPU_UP_CANCELED:
3185 case CPU_UP_CANCELED_FROZEN:
3186 case CPU_DEAD:
3187 case CPU_DEAD_FROZEN:
3188 free_zone_pagesets(cpu);
3189 break;
3190 default:
3191 break;
3192 } 3154 }
3193 return ret;
3194}
3195
3196static struct notifier_block __cpuinitdata pageset_notifier =
3197 { &pageset_cpuup_callback, NULL, 0 };
3198
3199void __init setup_per_cpu_pageset(void)
3200{
3201 int err;
3202
3203 /* Initialize per_cpu_pageset for cpu 0.
3204 * A cpuup callback will do this for every cpu
3205 * as it comes online
3206 */
3207 err = process_zones(smp_processor_id());
3208 BUG_ON(err);
3209 register_cpu_notifier(&pageset_notifier);
3210} 3155}
3211 3156
3212#endif
3213
3214static noinline __init_refok 3157static noinline __init_refok
3215int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages) 3158int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
3216{ 3159{
@@ -3264,7 +3207,7 @@ static int __zone_pcp_update(void *data)
3264 struct per_cpu_pageset *pset; 3207 struct per_cpu_pageset *pset;
3265 struct per_cpu_pages *pcp; 3208 struct per_cpu_pages *pcp;
3266 3209
3267 pset = zone_pcp(zone, cpu); 3210 pset = per_cpu_ptr(zone->pageset, cpu);
3268 pcp = &pset->pcp; 3211 pcp = &pset->pcp;
3269 3212
3270 local_irq_save(flags); 3213 local_irq_save(flags);
@@ -3282,21 +3225,17 @@ void zone_pcp_update(struct zone *zone)
3282 3225
3283static __meminit void zone_pcp_init(struct zone *zone) 3226static __meminit void zone_pcp_init(struct zone *zone)
3284{ 3227{
3285 int cpu; 3228 /*
3286 unsigned long batch = zone_batchsize(zone); 3229 * per cpu subsystem is not up at this point. The following code
3230 * relies on the ability of the linker to provide the
3231 * offset of a (static) per cpu variable into the per cpu area.
3232 */
3233 zone->pageset = &boot_pageset;
3287 3234
3288 for (cpu = 0; cpu < NR_CPUS; cpu++) {
3289#ifdef CONFIG_NUMA
3290 /* Early boot. Slab allocator not functional yet */
3291 zone_pcp(zone, cpu) = &boot_pageset[cpu];
3292 setup_pageset(&boot_pageset[cpu],0);
3293#else
3294 setup_pageset(zone_pcp(zone,cpu), batch);
3295#endif
3296 }
3297 if (zone->present_pages) 3235 if (zone->present_pages)
3298 printk(KERN_DEBUG " %s zone: %lu pages, LIFO batch:%lu\n", 3236 printk(KERN_DEBUG " %s zone: %lu pages, LIFO batch:%u\n",
3299 zone->name, zone->present_pages, batch); 3237 zone->name, zone->present_pages,
3238 zone_batchsize(zone));
3300} 3239}
3301 3240
3302__meminit int init_currently_empty_zone(struct zone *zone, 3241__meminit int init_currently_empty_zone(struct zone *zone,
@@ -3435,6 +3374,61 @@ void __init free_bootmem_with_active_regions(int nid,
3435 } 3374 }
3436} 3375}
3437 3376
3377int __init add_from_early_node_map(struct range *range, int az,
3378 int nr_range, int nid)
3379{
3380 int i;
3381 u64 start, end;
3382
3383 /* need to go over early_node_map to find out good range for node */
3384 for_each_active_range_index_in_nid(i, nid) {
3385 start = early_node_map[i].start_pfn;
3386 end = early_node_map[i].end_pfn;
3387 nr_range = add_range(range, az, nr_range, start, end);
3388 }
3389 return nr_range;
3390}
3391
3392#ifdef CONFIG_NO_BOOTMEM
3393void * __init __alloc_memory_core_early(int nid, u64 size, u64 align,
3394 u64 goal, u64 limit)
3395{
3396 int i;
3397 void *ptr;
3398
3399 /* need to go over early_node_map to find out good range for node */
3400 for_each_active_range_index_in_nid(i, nid) {
3401 u64 addr;
3402 u64 ei_start, ei_last;
3403
3404 ei_last = early_node_map[i].end_pfn;
3405 ei_last <<= PAGE_SHIFT;
3406 ei_start = early_node_map[i].start_pfn;
3407 ei_start <<= PAGE_SHIFT;
3408 addr = find_early_area(ei_start, ei_last,
3409 goal, limit, size, align);
3410
3411 if (addr == -1ULL)
3412 continue;
3413
3414#if 0
3415 printk(KERN_DEBUG "alloc (nid=%d %llx - %llx) (%llx - %llx) %llx %llx => %llx\n",
3416 nid,
3417 ei_start, ei_last, goal, limit, size,
3418 align, addr);
3419#endif
3420
3421 ptr = phys_to_virt(addr);
3422 memset(ptr, 0, size);
3423 reserve_early_without_check(addr, addr + size, "BOOTMEM");
3424 return ptr;
3425 }
3426
3427 return NULL;
3428}
3429#endif
3430
3431
3438void __init work_with_active_regions(int nid, work_fn_t work_fn, void *data) 3432void __init work_with_active_regions(int nid, work_fn_t work_fn, void *data)
3439{ 3433{
3440 int i; 3434 int i;
@@ -4467,7 +4461,11 @@ void __init set_dma_reserve(unsigned long new_dma_reserve)
4467} 4461}
4468 4462
4469#ifndef CONFIG_NEED_MULTIPLE_NODES 4463#ifndef CONFIG_NEED_MULTIPLE_NODES
4470struct pglist_data __refdata contig_page_data = { .bdata = &bootmem_node_data[0] }; 4464struct pglist_data __refdata contig_page_data = {
4465#ifndef CONFIG_NO_BOOTMEM
4466 .bdata = &bootmem_node_data[0]
4467#endif
4468 };
4471EXPORT_SYMBOL(contig_page_data); 4469EXPORT_SYMBOL(contig_page_data);
4472#endif 4470#endif
4473 4471
@@ -4810,10 +4808,11 @@ int percpu_pagelist_fraction_sysctl_handler(ctl_table *table, int write,
4810 if (!write || (ret == -EINVAL)) 4808 if (!write || (ret == -EINVAL))
4811 return ret; 4809 return ret;
4812 for_each_populated_zone(zone) { 4810 for_each_populated_zone(zone) {
4813 for_each_online_cpu(cpu) { 4811 for_each_possible_cpu(cpu) {
4814 unsigned long high; 4812 unsigned long high;
4815 high = zone->present_pages / percpu_pagelist_fraction; 4813 high = zone->present_pages / percpu_pagelist_fraction;
4816 setup_pagelist_highmark(zone_pcp(zone, cpu), high); 4814 setup_pagelist_highmark(
4815 per_cpu_ptr(zone->pageset, cpu), high);
4817 } 4816 }
4818 } 4817 }
4819 return 0; 4818 return 0;
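The move from for_each_online_cpu() to for_each_possible_cpu() follows from zone->pageset now being a percpu allocation that covers every possible CPU; a sketch of the combined effect, with illustrative numbers in the comment:

/* e.g. 262144 present pages / fraction 8 -> high = 32768 on every
 * possible CPU, so a CPU hotplugged in later inherits the same mark */
static void example_set_high(struct zone *zone, unsigned long fraction)
{
        unsigned long high = zone->present_pages / fraction;
        int cpu;

        for_each_possible_cpu(cpu)
                setup_pagelist_highmark(per_cpu_ptr(zone->pageset, cpu),
                                        high);
}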
diff --git a/mm/percpu.c b/mm/percpu.c
index 083e7c91e5f6..768419d44ad7 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -80,13 +80,15 @@
80/* default addr <-> pcpu_ptr mapping, override in asm/percpu.h if necessary */ 80/* default addr <-> pcpu_ptr mapping, override in asm/percpu.h if necessary */
81#ifndef __addr_to_pcpu_ptr 81#ifndef __addr_to_pcpu_ptr
82#define __addr_to_pcpu_ptr(addr) \ 82#define __addr_to_pcpu_ptr(addr) \
83 (void *)((unsigned long)(addr) - (unsigned long)pcpu_base_addr \ 83 (void __percpu *)((unsigned long)(addr) - \
84 + (unsigned long)__per_cpu_start) 84 (unsigned long)pcpu_base_addr + \
85 (unsigned long)__per_cpu_start)
85#endif 86#endif
86#ifndef __pcpu_ptr_to_addr 87#ifndef __pcpu_ptr_to_addr
87#define __pcpu_ptr_to_addr(ptr) \ 88#define __pcpu_ptr_to_addr(ptr) \
88 (void *)((unsigned long)(ptr) + (unsigned long)pcpu_base_addr \ 89 (void __force *)((unsigned long)(ptr) + \
89 - (unsigned long)__per_cpu_start) 90 (unsigned long)pcpu_base_addr - \
91 (unsigned long)__per_cpu_start)
90#endif 92#endif
91 93
92struct pcpu_chunk { 94struct pcpu_chunk {
@@ -913,11 +915,10 @@ static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int off, int size)
913 int rs, re; 915 int rs, re;
914 916
915 /* quick path, check whether it's empty already */ 917 /* quick path, check whether it's empty already */
916 pcpu_for_each_unpop_region(chunk, rs, re, page_start, page_end) { 918 rs = page_start;
917 if (rs == page_start && re == page_end) 919 pcpu_next_unpop(chunk, &rs, &re, page_end);
918 return; 920 if (rs == page_start && re == page_end)
919 break; 921 return;
920 }
921 922
922 /* immutable chunks can't be depopulated */ 923 /* immutable chunks can't be depopulated */
923 WARN_ON(chunk->immutable); 924 WARN_ON(chunk->immutable);
@@ -968,11 +969,10 @@ static int pcpu_populate_chunk(struct pcpu_chunk *chunk, int off, int size)
968 int rs, re, rc; 969 int rs, re, rc;
969 970
970 /* quick path, check whether all pages are already there */ 971 /* quick path, check whether all pages are already there */
971 pcpu_for_each_pop_region(chunk, rs, re, page_start, page_end) { 972 rs = page_start;
972 if (rs == page_start && re == page_end) 973 pcpu_next_pop(chunk, &rs, &re, page_end);
973 goto clear; 974 if (rs == page_start && re == page_end)
974 break; 975 goto clear;
975 }
976 976
977 /* need to allocate and map pages, this chunk can't be immutable */ 977 /* need to allocate and map pages, this chunk can't be immutable */
978 WARN_ON(chunk->immutable); 978 WARN_ON(chunk->immutable);
@@ -1067,7 +1067,7 @@ static struct pcpu_chunk *alloc_pcpu_chunk(void)
1067 * RETURNS: 1067 * RETURNS:
1068 * Percpu pointer to the allocated area on success, NULL on failure. 1068 * Percpu pointer to the allocated area on success, NULL on failure.
1069 */ 1069 */
1070static void *pcpu_alloc(size_t size, size_t align, bool reserved) 1070static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved)
1071{ 1071{
1072 static int warn_limit = 10; 1072 static int warn_limit = 10;
1073 struct pcpu_chunk *chunk; 1073 struct pcpu_chunk *chunk;
@@ -1196,7 +1196,7 @@ fail_unlock_mutex:
1196 * RETURNS: 1196 * RETURNS:
1197 * Percpu pointer to the allocated area on success, NULL on failure. 1197 * Percpu pointer to the allocated area on success, NULL on failure.
1198 */ 1198 */
1199void *__alloc_percpu(size_t size, size_t align) 1199void __percpu *__alloc_percpu(size_t size, size_t align)
1200{ 1200{
1201 return pcpu_alloc(size, align, false); 1201 return pcpu_alloc(size, align, false);
1202} 1202}
@@ -1217,7 +1217,7 @@ EXPORT_SYMBOL_GPL(__alloc_percpu);
1217 * RETURNS: 1217 * RETURNS:
1218 * Percpu pointer to the allocated area on success, NULL on failure. 1218 * Percpu pointer to the allocated area on success, NULL on failure.
1219 */ 1219 */
1220void *__alloc_reserved_percpu(size_t size, size_t align) 1220void __percpu *__alloc_reserved_percpu(size_t size, size_t align)
1221{ 1221{
1222 return pcpu_alloc(size, align, true); 1222 return pcpu_alloc(size, align, true);
1223} 1223}
@@ -1269,7 +1269,7 @@ static void pcpu_reclaim(struct work_struct *work)
1269 * CONTEXT: 1269 * CONTEXT:
1270 * Can be called from atomic context. 1270 * Can be called from atomic context.
1271 */ 1271 */
1272void free_percpu(void *ptr) 1272void free_percpu(void __percpu *ptr)
1273{ 1273{
1274 void *addr; 1274 void *addr;
1275 struct pcpu_chunk *chunk; 1275 struct pcpu_chunk *chunk;
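The __percpu markings threaded through mm/percpu.c above are sparse annotations, not runtime changes. A minimal sketch of the contract they enforce; struct example_counter and example() are invented for illustration:

struct example_counter { unsigned long hits; };

static int example(void)
{
        struct example_counter __percpu *c;
        struct example_counter *cc;

        c = alloc_percpu(struct example_counter);
        if (!c)
                return -ENOMEM;
        cc = get_cpu_ptr(c);    /* legitimately strips the __percpu tag */
        cc->hits++;
        put_cpu_ptr(c);
        /* cc = (void *)c; would now draw a sparse address-space warning */
        free_percpu(c);
        return 0;
}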
diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
index d9714bdcb4a3..392b9bb5bc01 100644
--- a/mm/sparse-vmemmap.c
+++ b/mm/sparse-vmemmap.c
@@ -40,9 +40,11 @@ static void * __init_refok __earlyonly_bootmem_alloc(int node,
40 unsigned long align, 40 unsigned long align,
41 unsigned long goal) 41 unsigned long goal)
42{ 42{
43 return __alloc_bootmem_node(NODE_DATA(node), size, align, goal); 43 return __alloc_bootmem_node_high(NODE_DATA(node), size, align, goal);
44} 44}
45 45
46static void *vmemmap_buf;
47static void *vmemmap_buf_end;
46 48
47void * __meminit vmemmap_alloc_block(unsigned long size, int node) 49void * __meminit vmemmap_alloc_block(unsigned long size, int node)
48{ 50{
@@ -64,6 +66,24 @@ void * __meminit vmemmap_alloc_block(unsigned long size, int node)
64 __pa(MAX_DMA_ADDRESS)); 66 __pa(MAX_DMA_ADDRESS));
65} 67}
66 68
 69/* all early-stage callers must request the same size */
70void * __meminit vmemmap_alloc_block_buf(unsigned long size, int node)
71{
72 void *ptr;
73
74 if (!vmemmap_buf)
75 return vmemmap_alloc_block(size, node);
76
 77 /* take it from the buf */
78 ptr = (void *)ALIGN((unsigned long)vmemmap_buf, size);
79 if (ptr + size > vmemmap_buf_end)
80 return vmemmap_alloc_block(size, node);
81
82 vmemmap_buf = ptr + size;
83
84 return ptr;
85}
86
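The same-size rule in the comment above is what lets ALIGN() on the cursor double as per-block alignment. A distilled sketch of the bump allocation; example_bump() is not in the patch:

/* void-pointer arithmetic, as in the kernel's gcc dialect */
static void *example_bump(void **cur, void *end, unsigned long size)
{
        void *p = (void *)ALIGN((unsigned long)*cur, size);

        if (p + size > end)
                return NULL;   /* caller falls back to vmemmap_alloc_block() */
        *cur = p + size;
        return p;
}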
67void __meminit vmemmap_verify(pte_t *pte, int node, 87void __meminit vmemmap_verify(pte_t *pte, int node,
68 unsigned long start, unsigned long end) 88 unsigned long start, unsigned long end)
69{ 89{
@@ -80,7 +100,7 @@ pte_t * __meminit vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node)
80 pte_t *pte = pte_offset_kernel(pmd, addr); 100 pte_t *pte = pte_offset_kernel(pmd, addr);
81 if (pte_none(*pte)) { 101 if (pte_none(*pte)) {
82 pte_t entry; 102 pte_t entry;
83 void *p = vmemmap_alloc_block(PAGE_SIZE, node); 103 void *p = vmemmap_alloc_block_buf(PAGE_SIZE, node);
84 if (!p) 104 if (!p)
85 return NULL; 105 return NULL;
86 entry = pfn_pte(__pa(p) >> PAGE_SHIFT, PAGE_KERNEL); 106 entry = pfn_pte(__pa(p) >> PAGE_SHIFT, PAGE_KERNEL);
@@ -163,3 +183,55 @@ struct page * __meminit sparse_mem_map_populate(unsigned long pnum, int nid)
163 183
164 return map; 184 return map;
165} 185}
186
187void __init sparse_mem_maps_populate_node(struct page **map_map,
188 unsigned long pnum_begin,
189 unsigned long pnum_end,
190 unsigned long map_count, int nodeid)
191{
192 unsigned long pnum;
193 unsigned long size = sizeof(struct page) * PAGES_PER_SECTION;
194 void *vmemmap_buf_start;
195
196 size = ALIGN(size, PMD_SIZE);
197 vmemmap_buf_start = __earlyonly_bootmem_alloc(nodeid, size * map_count,
198 PMD_SIZE, __pa(MAX_DMA_ADDRESS));
199
200 if (vmemmap_buf_start) {
201 vmemmap_buf = vmemmap_buf_start;
202 vmemmap_buf_end = vmemmap_buf_start + size * map_count;
203 }
204
205 for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
206 struct mem_section *ms;
207
208 if (!present_section_nr(pnum))
209 continue;
210
211 map_map[pnum] = sparse_mem_map_populate(pnum, nodeid);
212 if (map_map[pnum])
213 continue;
214 ms = __nr_to_section(pnum);
 215 printk(KERN_ERR "%s: sparsemem memory map backing failed, "
216 "some memory will not be available.\n", __func__);
217 ms->section_mem_map = 0;
218 }
219
220 if (vmemmap_buf_start) {
 221 /* free the leftover buf */
222#ifdef CONFIG_NO_BOOTMEM
223 free_early(__pa(vmemmap_buf_start), __pa(vmemmap_buf_end));
224 if (vmemmap_buf_start < vmemmap_buf) {
225 char name[15];
226
227 snprintf(name, sizeof(name), "MEMMAP %d", nodeid);
228 reserve_early_without_check(__pa(vmemmap_buf_start),
229 __pa(vmemmap_buf), name);
230 }
231#else
232 free_bootmem(__pa(vmemmap_buf), vmemmap_buf_end - vmemmap_buf);
233#endif
234 vmemmap_buf = NULL;
235 vmemmap_buf_end = NULL;
236 }
237}
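For scale, a hedged walk through the sizing in sparse_mem_maps_populate_node() above, assuming typical x86_64 values (sizeof(struct page) == 56, PAGES_PER_SECTION == 32768, PMD_SIZE == 2 MiB):

size = sizeof(struct page) * PAGES_PER_SECTION; /* 56 * 32768 = 1835008 */
size = ALIGN(size, PMD_SIZE);                   /* rounds up to 2097152  */
/* a node with 64 present sections reserves 64 * 2 MiB = 128 MiB up
 * front, hands out slices through vmemmap_alloc_block_buf(), then
 * frees or re-reserves the tail as shown above */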
diff --git a/mm/sparse.c b/mm/sparse.c
index 6ce4aab69e99..22896d589133 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -271,7 +271,8 @@ static unsigned long *__kmalloc_section_usemap(void)
271 271
272#ifdef CONFIG_MEMORY_HOTREMOVE 272#ifdef CONFIG_MEMORY_HOTREMOVE
273static unsigned long * __init 273static unsigned long * __init
274sparse_early_usemap_alloc_pgdat_section(struct pglist_data *pgdat) 274sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
275 unsigned long count)
275{ 276{
276 unsigned long section_nr; 277 unsigned long section_nr;
277 278
@@ -286,7 +287,7 @@ sparse_early_usemap_alloc_pgdat_section(struct pglist_data *pgdat)
286 * this problem. 287 * this problem.
287 */ 288 */
288 section_nr = pfn_to_section_nr(__pa(pgdat) >> PAGE_SHIFT); 289 section_nr = pfn_to_section_nr(__pa(pgdat) >> PAGE_SHIFT);
289 return alloc_bootmem_section(usemap_size(), section_nr); 290 return alloc_bootmem_section(usemap_size() * count, section_nr);
290} 291}
291 292
292static void __init check_usemap_section_nr(int nid, unsigned long *usemap) 293static void __init check_usemap_section_nr(int nid, unsigned long *usemap)
@@ -329,7 +330,8 @@ static void __init check_usemap_section_nr(int nid, unsigned long *usemap)
329} 330}
330#else 331#else
331static unsigned long * __init 332static unsigned long * __init
332sparse_early_usemap_alloc_pgdat_section(struct pglist_data *pgdat) 333sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
334 unsigned long count)
333{ 335{
334 return NULL; 336 return NULL;
335} 337}
@@ -339,27 +341,40 @@ static void __init check_usemap_section_nr(int nid, unsigned long *usemap)
339} 341}
340#endif /* CONFIG_MEMORY_HOTREMOVE */ 342#endif /* CONFIG_MEMORY_HOTREMOVE */
341 343
342static unsigned long *__init sparse_early_usemap_alloc(unsigned long pnum) 344static void __init sparse_early_usemaps_alloc_node(unsigned long**usemap_map,
345 unsigned long pnum_begin,
346 unsigned long pnum_end,
347 unsigned long usemap_count, int nodeid)
343{ 348{
344 unsigned long *usemap; 349 void *usemap;
345 struct mem_section *ms = __nr_to_section(pnum); 350 unsigned long pnum;
346 int nid = sparse_early_nid(ms); 351 int size = usemap_size();
347
348 usemap = sparse_early_usemap_alloc_pgdat_section(NODE_DATA(nid));
349 if (usemap)
350 return usemap;
351 352
352 usemap = alloc_bootmem_node(NODE_DATA(nid), usemap_size()); 353 usemap = sparse_early_usemaps_alloc_pgdat_section(NODE_DATA(nodeid),
354 usemap_count);
353 if (usemap) { 355 if (usemap) {
354 check_usemap_section_nr(nid, usemap); 356 for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
355 return usemap; 357 if (!present_section_nr(pnum))
358 continue;
359 usemap_map[pnum] = usemap;
360 usemap += size;
361 }
362 return;
356 } 363 }
357 364
358 /* Stupid: suppress gcc warning for SPARSEMEM && !NUMA */ 365 usemap = alloc_bootmem_node(NODE_DATA(nodeid), size * usemap_count);
359 nid = 0; 366 if (usemap) {
367 for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
368 if (!present_section_nr(pnum))
369 continue;
370 usemap_map[pnum] = usemap;
371 usemap += size;
372 check_usemap_section_nr(nodeid, usemap_map[pnum]);
373 }
374 return;
375 }
360 376
361 printk(KERN_WARNING "%s: allocation failed\n", __func__); 377 printk(KERN_WARNING "%s: allocation failed\n", __func__);
362 return NULL;
363} 378}
364 379
365#ifndef CONFIG_SPARSEMEM_VMEMMAP 380#ifndef CONFIG_SPARSEMEM_VMEMMAP
@@ -375,8 +390,65 @@ struct page __init *sparse_mem_map_populate(unsigned long pnum, int nid)
375 PAGE_ALIGN(sizeof(struct page) * PAGES_PER_SECTION)); 390 PAGE_ALIGN(sizeof(struct page) * PAGES_PER_SECTION));
376 return map; 391 return map;
377} 392}
393void __init sparse_mem_maps_populate_node(struct page **map_map,
394 unsigned long pnum_begin,
395 unsigned long pnum_end,
396 unsigned long map_count, int nodeid)
397{
398 void *map;
399 unsigned long pnum;
400 unsigned long size = sizeof(struct page) * PAGES_PER_SECTION;
401
402 map = alloc_remap(nodeid, size * map_count);
403 if (map) {
404 for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
405 if (!present_section_nr(pnum))
406 continue;
407 map_map[pnum] = map;
408 map += size;
409 }
410 return;
411 }
412
413 size = PAGE_ALIGN(size);
414 map = alloc_bootmem_pages_node(NODE_DATA(nodeid), size * map_count);
415 if (map) {
416 for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
417 if (!present_section_nr(pnum))
418 continue;
419 map_map[pnum] = map;
420 map += size;
421 }
422 return;
423 }
424
425 /* fallback */
426 for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
427 struct mem_section *ms;
428
429 if (!present_section_nr(pnum))
430 continue;
431 map_map[pnum] = sparse_mem_map_populate(pnum, nodeid);
432 if (map_map[pnum])
433 continue;
434 ms = __nr_to_section(pnum);
 435 printk(KERN_ERR "%s: sparsemem memory map backing failed, "
436 "some memory will not be available.\n", __func__);
437 ms->section_mem_map = 0;
438 }
439}
378#endif /* !CONFIG_SPARSEMEM_VMEMMAP */ 440#endif /* !CONFIG_SPARSEMEM_VMEMMAP */
379 441
442#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
443static void __init sparse_early_mem_maps_alloc_node(struct page **map_map,
444 unsigned long pnum_begin,
445 unsigned long pnum_end,
446 unsigned long map_count, int nodeid)
447{
448 sparse_mem_maps_populate_node(map_map, pnum_begin, pnum_end,
449 map_count, nodeid);
450}
451#else
380static struct page __init *sparse_early_mem_map_alloc(unsigned long pnum) 452static struct page __init *sparse_early_mem_map_alloc(unsigned long pnum)
381{ 453{
382 struct page *map; 454 struct page *map;
@@ -392,10 +464,12 @@ static struct page __init *sparse_early_mem_map_alloc(unsigned long pnum)
392 ms->section_mem_map = 0; 464 ms->section_mem_map = 0;
393 return NULL; 465 return NULL;
394} 466}
467#endif
395 468
396void __attribute__((weak)) __meminit vmemmap_populate_print_last(void) 469void __attribute__((weak)) __meminit vmemmap_populate_print_last(void)
397{ 470{
398} 471}
472
399/* 473/*
400 * Allocate the accumulated non-linear sections, allocate a mem_map 474 * Allocate the accumulated non-linear sections, allocate a mem_map
401 * for each and record the physical to section mapping. 475 * for each and record the physical to section mapping.
@@ -407,6 +481,14 @@ void __init sparse_init(void)
407 unsigned long *usemap; 481 unsigned long *usemap;
408 unsigned long **usemap_map; 482 unsigned long **usemap_map;
409 int size; 483 int size;
484 int nodeid_begin = 0;
485 unsigned long pnum_begin = 0;
486 unsigned long usemap_count;
487#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
488 unsigned long map_count;
489 int size2;
490 struct page **map_map;
491#endif
410 492
411 /* 493 /*
412 * map is using big page (aka 2M in x86 64 bit) 494 * map is using big page (aka 2M in x86 64 bit)
@@ -425,10 +507,81 @@ void __init sparse_init(void)
425 panic("can not allocate usemap_map\n"); 507 panic("can not allocate usemap_map\n");
426 508
427 for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) { 509 for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
510 struct mem_section *ms;
511
428 if (!present_section_nr(pnum)) 512 if (!present_section_nr(pnum))
429 continue; 513 continue;
430 usemap_map[pnum] = sparse_early_usemap_alloc(pnum); 514 ms = __nr_to_section(pnum);
515 nodeid_begin = sparse_early_nid(ms);
516 pnum_begin = pnum;
517 break;
431 } 518 }
519 usemap_count = 1;
520 for (pnum = pnum_begin + 1; pnum < NR_MEM_SECTIONS; pnum++) {
521 struct mem_section *ms;
522 int nodeid;
523
524 if (!present_section_nr(pnum))
525 continue;
526 ms = __nr_to_section(pnum);
527 nodeid = sparse_early_nid(ms);
528 if (nodeid == nodeid_begin) {
529 usemap_count++;
530 continue;
531 }
 532 /* ok, we need to take care of sections from pnum_begin to pnum - 1 */
533 sparse_early_usemaps_alloc_node(usemap_map, pnum_begin, pnum,
534 usemap_count, nodeid_begin);
 535 /* new start, update count etc. */
536 nodeid_begin = nodeid;
537 pnum_begin = pnum;
538 usemap_count = 1;
539 }
540 /* ok, last chunk */
541 sparse_early_usemaps_alloc_node(usemap_map, pnum_begin, NR_MEM_SECTIONS,
542 usemap_count, nodeid_begin);
543
544#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
545 size2 = sizeof(struct page *) * NR_MEM_SECTIONS;
546 map_map = alloc_bootmem(size2);
547 if (!map_map)
548 panic("can not allocate map_map\n");
549
550 for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
551 struct mem_section *ms;
552
553 if (!present_section_nr(pnum))
554 continue;
555 ms = __nr_to_section(pnum);
556 nodeid_begin = sparse_early_nid(ms);
557 pnum_begin = pnum;
558 break;
559 }
560 map_count = 1;
561 for (pnum = pnum_begin + 1; pnum < NR_MEM_SECTIONS; pnum++) {
562 struct mem_section *ms;
563 int nodeid;
564
565 if (!present_section_nr(pnum))
566 continue;
567 ms = __nr_to_section(pnum);
568 nodeid = sparse_early_nid(ms);
569 if (nodeid == nodeid_begin) {
570 map_count++;
571 continue;
572 }
 573 /* ok, we need to take care of sections from pnum_begin to pnum - 1 */
574 sparse_early_mem_maps_alloc_node(map_map, pnum_begin, pnum,
575 map_count, nodeid_begin);
 576 /* new start, update count etc. */
577 nodeid_begin = nodeid;
578 pnum_begin = pnum;
579 map_count = 1;
580 }
581 /* ok, last chunk */
582 sparse_early_mem_maps_alloc_node(map_map, pnum_begin, NR_MEM_SECTIONS,
583 map_count, nodeid_begin);
584#endif
432 585
433 for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) { 586 for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
434 if (!present_section_nr(pnum)) 587 if (!present_section_nr(pnum))
@@ -438,7 +591,11 @@ void __init sparse_init(void)
438 if (!usemap) 591 if (!usemap)
439 continue; 592 continue;
440 593
594#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
595 map = map_map[pnum];
596#else
441 map = sparse_early_mem_map_alloc(pnum); 597 map = sparse_early_mem_map_alloc(pnum);
598#endif
442 if (!map) 599 if (!map)
443 continue; 600 continue;
444 601
@@ -448,6 +605,9 @@ void __init sparse_init(void)
448 605
449 vmemmap_populate_print_last(); 606 vmemmap_populate_print_last();
450 607
608#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
609 free_bootmem(__pa(map_map), size2);
610#endif
451 free_bootmem(__pa(usemap_map), size); 611 free_bootmem(__pa(usemap_map), size);
452} 612}
453 613
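The two nearly identical loops added to sparse_init() follow one pattern: find the first present section, count the run of sections sharing its node, flush the run, repeat. A hedged distillation; example_batch() and its alloc_for_node callback are illustrative stand-ins for the *_alloc_node() helpers:

static void __init example_batch(void (*alloc_for_node)(unsigned long,
                                 unsigned long, unsigned long, int))
{
        unsigned long pnum, pnum_begin = 0, count = 0;
        int nid_begin = -1;

        for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
                int nid;

                if (!present_section_nr(pnum))
                        continue;
                nid = sparse_early_nid(__nr_to_section(pnum));
                if (nid_begin < 0) {
                        nid_begin = nid;
                        pnum_begin = pnum;
                } else if (nid != nid_begin) {
                        alloc_for_node(pnum_begin, pnum, count, nid_begin);
                        nid_begin = nid;
                        pnum_begin = pnum;
                        count = 0;
                }
                count++;
        }
        if (count)      /* flush the final run */
                alloc_for_node(pnum_begin, NR_MEM_SECTIONS, count,
                               nid_begin);
}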
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 6051fbab67ba..fc5aa183bc45 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -139,7 +139,8 @@ static void refresh_zone_stat_thresholds(void)
139 threshold = calculate_threshold(zone); 139 threshold = calculate_threshold(zone);
140 140
141 for_each_online_cpu(cpu) 141 for_each_online_cpu(cpu)
142 zone_pcp(zone, cpu)->stat_threshold = threshold; 142 per_cpu_ptr(zone->pageset, cpu)->stat_threshold
143 = threshold;
143 } 144 }
144} 145}
145 146
@@ -149,7 +150,8 @@ static void refresh_zone_stat_thresholds(void)
149void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item, 150void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
150 int delta) 151 int delta)
151{ 152{
152 struct per_cpu_pageset *pcp = zone_pcp(zone, smp_processor_id()); 153 struct per_cpu_pageset *pcp = this_cpu_ptr(zone->pageset);
154
153 s8 *p = pcp->vm_stat_diff + item; 155 s8 *p = pcp->vm_stat_diff + item;
154 long x; 156 long x;
155 157
@@ -202,7 +204,7 @@ EXPORT_SYMBOL(mod_zone_page_state);
202 */ 204 */
203void __inc_zone_state(struct zone *zone, enum zone_stat_item item) 205void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
204{ 206{
205 struct per_cpu_pageset *pcp = zone_pcp(zone, smp_processor_id()); 207 struct per_cpu_pageset *pcp = this_cpu_ptr(zone->pageset);
206 s8 *p = pcp->vm_stat_diff + item; 208 s8 *p = pcp->vm_stat_diff + item;
207 209
208 (*p)++; 210 (*p)++;
@@ -223,7 +225,7 @@ EXPORT_SYMBOL(__inc_zone_page_state);
223 225
224void __dec_zone_state(struct zone *zone, enum zone_stat_item item) 226void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
225{ 227{
226 struct per_cpu_pageset *pcp = zone_pcp(zone, smp_processor_id()); 228 struct per_cpu_pageset *pcp = this_cpu_ptr(zone->pageset);
227 s8 *p = pcp->vm_stat_diff + item; 229 s8 *p = pcp->vm_stat_diff + item;
228 230
229 (*p)--; 231 (*p)--;
@@ -300,7 +302,7 @@ void refresh_cpu_vm_stats(int cpu)
300 for_each_populated_zone(zone) { 302 for_each_populated_zone(zone) {
301 struct per_cpu_pageset *p; 303 struct per_cpu_pageset *p;
302 304
303 p = zone_pcp(zone, cpu); 305 p = per_cpu_ptr(zone->pageset, cpu);
304 306
305 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) 307 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
306 if (p->vm_stat_diff[i]) { 308 if (p->vm_stat_diff[i]) {
@@ -741,7 +743,7 @@ static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
741 for_each_online_cpu(i) { 743 for_each_online_cpu(i) {
742 struct per_cpu_pageset *pageset; 744 struct per_cpu_pageset *pageset;
743 745
744 pageset = zone_pcp(zone, i); 746 pageset = per_cpu_ptr(zone->pageset, i);
745 seq_printf(m, 747 seq_printf(m,
746 "\n cpu: %i" 748 "\n cpu: %i"
747 "\n count: %i" 749 "\n count: %i"
@@ -906,6 +908,7 @@ static int __cpuinit vmstat_cpuup_callback(struct notifier_block *nfb,
906 case CPU_ONLINE: 908 case CPU_ONLINE:
907 case CPU_ONLINE_FROZEN: 909 case CPU_ONLINE_FROZEN:
908 start_cpu_timer(cpu); 910 start_cpu_timer(cpu);
911 node_set_state(cpu_to_node(cpu), N_CPU);
909 break; 912 break;
910 case CPU_DOWN_PREPARE: 913 case CPU_DOWN_PREPARE:
911 case CPU_DOWN_PREPARE_FROZEN: 914 case CPU_DOWN_PREPARE_FROZEN:
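Every vmstat conversion above follows the same shape: the per-zone pageset moves behind zone->pageset and is reached with this_cpu_ptr() or per_cpu_ptr() rather than the old zone_pcp() array. A minimal sketch; example_bump_stat() is illustrative:

static void example_bump_stat(struct zone *zone, enum zone_stat_item item)
{
        struct per_cpu_pageset *pcp = this_cpu_ptr(zone->pageset);

        pcp->vm_stat_diff[item]++;      /* caller holds preemption off */
}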
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index 9ea45383480e..8d63f8fd29b7 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -999,19 +999,14 @@ rpc_fill_super(struct super_block *sb, void *data, int silent)
999 inode = rpc_get_inode(sb, S_IFDIR | 0755); 999 inode = rpc_get_inode(sb, S_IFDIR | 0755);
1000 if (!inode) 1000 if (!inode)
1001 return -ENOMEM; 1001 return -ENOMEM;
1002 root = d_alloc_root(inode); 1002 sb->s_root = root = d_alloc_root(inode);
1003 if (!root) { 1003 if (!root) {
1004 iput(inode); 1004 iput(inode);
1005 return -ENOMEM; 1005 return -ENOMEM;
1006 } 1006 }
1007 if (rpc_populate(root, files, RPCAUTH_lockd, RPCAUTH_RootEOF, NULL)) 1007 if (rpc_populate(root, files, RPCAUTH_lockd, RPCAUTH_RootEOF, NULL))
1008 goto out; 1008 return -ENOMEM;
1009 sb->s_root = root;
1010 return 0; 1009 return 0;
1011out:
1012 d_genocide(root);
1013 dput(root);
1014 return -ENOMEM;
1015} 1010}
1016 1011
1017static int 1012static int
diff --git a/security/capability.c b/security/capability.c
index 5c700e1a4fd3..4875142b858d 100644
--- a/security/capability.c
+++ b/security/capability.c
@@ -906,10 +906,6 @@ static void cap_audit_rule_free(void *lsmrule)
906} 906}
907#endif /* CONFIG_AUDIT */ 907#endif /* CONFIG_AUDIT */
908 908
909struct security_operations default_security_ops = {
910 .name = "default",
911};
912
913#define set_to_cap_if_null(ops, function) \ 909#define set_to_cap_if_null(ops, function) \
914 do { \ 910 do { \
915 if (!ops->function) { \ 911 if (!ops->function) { \
diff --git a/security/commoncap.c b/security/commoncap.c
index f800fdb3de94..61669730da98 100644
--- a/security/commoncap.c
+++ b/security/commoncap.c
@@ -27,6 +27,7 @@
27#include <linux/sched.h> 27#include <linux/sched.h>
28#include <linux/prctl.h> 28#include <linux/prctl.h>
29#include <linux/securebits.h> 29#include <linux/securebits.h>
30#include <linux/syslog.h>
30 31
31/* 32/*
32 * If a non-root user executes a setuid-root binary in 33 * If a non-root user executes a setuid-root binary in
@@ -888,13 +889,17 @@ error:
888/** 889/**
889 * cap_syslog - Determine whether syslog function is permitted 890 * cap_syslog - Determine whether syslog function is permitted
890 * @type: Function requested 891 * @type: Function requested
892 * @from_file: Whether this request came from an open file (i.e. /proc)
891 * 893 *
892 * Determine whether the current process is permitted to use a particular 894 * Determine whether the current process is permitted to use a particular
893 * syslog function, returning 0 if permission is granted, -ve if not. 895 * syslog function, returning 0 if permission is granted, -ve if not.
894 */ 896 */
895int cap_syslog(int type) 897int cap_syslog(int type, bool from_file)
896{ 898{
897 if ((type != 3 && type != 10) && !capable(CAP_SYS_ADMIN)) 899 if (type != SYSLOG_ACTION_OPEN && from_file)
900 return 0;
901 if ((type != SYSLOG_ACTION_READ_ALL &&
902 type != SYSLOG_ACTION_SIZE_BUFFER) && !capable(CAP_SYS_ADMIN))
898 return -EPERM; 903 return -EPERM;
899 return 0; 904 return 0;
900} 905}
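How the reworked check above behaves at its two kinds of call sites, sketched with hypothetical helpers; the constants come from the new <linux/syslog.h>:

static int example_dmesg(void)
{
        /* syscall path (from_file == false): READ_ALL and SIZE_BUFFER
         * stay open to unprivileged readers */
        return cap_syslog(SYSLOG_ACTION_READ_ALL, false);       /* 0 */
}

static int example_console_off(void)
{
        /* anything else via the syscall still wants CAP_SYS_ADMIN;
         * the same request through /proc/kmsg would short-circuit */
        return cap_syslog(SYSLOG_ACTION_CONSOLE_OFF, false);
}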
diff --git a/security/security.c b/security/security.c
index 122b748d0f4c..687c6fd14bb6 100644
--- a/security/security.c
+++ b/security/security.c
@@ -23,10 +23,12 @@ static __initdata char chosen_lsm[SECURITY_NAME_MAX + 1] =
23 CONFIG_DEFAULT_SECURITY; 23 CONFIG_DEFAULT_SECURITY;
24 24
25/* things that live in capability.c */ 25/* things that live in capability.c */
26extern struct security_operations default_security_ops;
27extern void security_fixup_ops(struct security_operations *ops); 26extern void security_fixup_ops(struct security_operations *ops);
28 27
29struct security_operations *security_ops; /* Initialized to NULL */ 28static struct security_operations *security_ops;
29static struct security_operations default_security_ops = {
30 .name = "default",
31};
30 32
31static inline int verify(struct security_operations *ops) 33static inline int verify(struct security_operations *ops)
32{ 34{
@@ -63,6 +65,11 @@ int __init security_init(void)
63 return 0; 65 return 0;
64} 66}
65 67
68void reset_security_ops(void)
69{
70 security_ops = &default_security_ops;
71}
72
66/* Save user chosen LSM */ 73/* Save user chosen LSM */
67static int __init choose_lsm(char *str) 74static int __init choose_lsm(char *str)
68{ 75{
@@ -203,9 +210,9 @@ int security_quota_on(struct dentry *dentry)
203 return security_ops->quota_on(dentry); 210 return security_ops->quota_on(dentry);
204} 211}
205 212
206int security_syslog(int type) 213int security_syslog(int type, bool from_file)
207{ 214{
208 return security_ops->syslog(type); 215 return security_ops->syslog(type, from_file);
209} 216}
210 217
211int security_settime(struct timespec *ts, struct timezone *tz) 218int security_settime(struct timespec *ts, struct timezone *tz)
@@ -389,42 +396,42 @@ int security_inode_init_security(struct inode *inode, struct inode *dir,
389EXPORT_SYMBOL(security_inode_init_security); 396EXPORT_SYMBOL(security_inode_init_security);
390 397
391#ifdef CONFIG_SECURITY_PATH 398#ifdef CONFIG_SECURITY_PATH
392int security_path_mknod(struct path *path, struct dentry *dentry, int mode, 399int security_path_mknod(struct path *dir, struct dentry *dentry, int mode,
393 unsigned int dev) 400 unsigned int dev)
394{ 401{
395 if (unlikely(IS_PRIVATE(path->dentry->d_inode))) 402 if (unlikely(IS_PRIVATE(dir->dentry->d_inode)))
396 return 0; 403 return 0;
397 return security_ops->path_mknod(path, dentry, mode, dev); 404 return security_ops->path_mknod(dir, dentry, mode, dev);
398} 405}
399EXPORT_SYMBOL(security_path_mknod); 406EXPORT_SYMBOL(security_path_mknod);
400 407
401int security_path_mkdir(struct path *path, struct dentry *dentry, int mode) 408int security_path_mkdir(struct path *dir, struct dentry *dentry, int mode)
402{ 409{
403 if (unlikely(IS_PRIVATE(path->dentry->d_inode))) 410 if (unlikely(IS_PRIVATE(dir->dentry->d_inode)))
404 return 0; 411 return 0;
405 return security_ops->path_mkdir(path, dentry, mode); 412 return security_ops->path_mkdir(dir, dentry, mode);
406} 413}
407 414
408int security_path_rmdir(struct path *path, struct dentry *dentry) 415int security_path_rmdir(struct path *dir, struct dentry *dentry)
409{ 416{
410 if (unlikely(IS_PRIVATE(path->dentry->d_inode))) 417 if (unlikely(IS_PRIVATE(dir->dentry->d_inode)))
411 return 0; 418 return 0;
412 return security_ops->path_rmdir(path, dentry); 419 return security_ops->path_rmdir(dir, dentry);
413} 420}
414 421
415int security_path_unlink(struct path *path, struct dentry *dentry) 422int security_path_unlink(struct path *dir, struct dentry *dentry)
416{ 423{
417 if (unlikely(IS_PRIVATE(path->dentry->d_inode))) 424 if (unlikely(IS_PRIVATE(dir->dentry->d_inode)))
418 return 0; 425 return 0;
419 return security_ops->path_unlink(path, dentry); 426 return security_ops->path_unlink(dir, dentry);
420} 427}
421 428
422int security_path_symlink(struct path *path, struct dentry *dentry, 429int security_path_symlink(struct path *dir, struct dentry *dentry,
423 const char *old_name) 430 const char *old_name)
424{ 431{
425 if (unlikely(IS_PRIVATE(path->dentry->d_inode))) 432 if (unlikely(IS_PRIVATE(dir->dentry->d_inode)))
426 return 0; 433 return 0;
427 return security_ops->path_symlink(path, dentry, old_name); 434 return security_ops->path_symlink(dir, dentry, old_name);
428} 435}
429 436
430int security_path_link(struct dentry *old_dentry, struct path *new_dir, 437int security_path_link(struct dentry *old_dentry, struct path *new_dir,
@@ -630,14 +637,14 @@ int security_inode_killpriv(struct dentry *dentry)
630int security_inode_getsecurity(const struct inode *inode, const char *name, void **buffer, bool alloc) 637int security_inode_getsecurity(const struct inode *inode, const char *name, void **buffer, bool alloc)
631{ 638{
632 if (unlikely(IS_PRIVATE(inode))) 639 if (unlikely(IS_PRIVATE(inode)))
633 return 0; 640 return -EOPNOTSUPP;
634 return security_ops->inode_getsecurity(inode, name, buffer, alloc); 641 return security_ops->inode_getsecurity(inode, name, buffer, alloc);
635} 642}
636 643
637int security_inode_setsecurity(struct inode *inode, const char *name, const void *value, size_t size, int flags) 644int security_inode_setsecurity(struct inode *inode, const char *name, const void *value, size_t size, int flags)
638{ 645{
639 if (unlikely(IS_PRIVATE(inode))) 646 if (unlikely(IS_PRIVATE(inode)))
640 return 0; 647 return -EOPNOTSUPP;
641 return security_ops->inode_setsecurity(inode, name, value, size, flags); 648 return security_ops->inode_setsecurity(inode, name, value, size, flags);
642} 649}
643 650
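The 0 to -EOPNOTSUPP change above matters to xattr callers, which read 0 as "attribute present, zero length". A hedged caller sketch; example_fs_fallback() and example_get() are hypothetical:

static int example_fs_fallback(struct inode *inode, const char *suffix,
                               void **value);   /* hypothetical */

static int example_get(struct inode *inode, const char *suffix,
                       void **value)
{
        int len = security_inode_getsecurity(inode, suffix, value, true);

        if (len == -EOPNOTSUPP)         /* private inode: no LSM xattr */
                return example_fs_fallback(inode, suffix, value);
        return len;                     /* >= 0: attribute length */
}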
diff --git a/security/selinux/avc.c b/security/selinux/avc.c
index f2dde268165a..db0fd9f33499 100644
--- a/security/selinux/avc.c
+++ b/security/selinux/avc.c
@@ -489,17 +489,14 @@ void avc_audit(u32 ssid, u32 tsid,
489 struct common_audit_data stack_data; 489 struct common_audit_data stack_data;
490 u32 denied, audited; 490 u32 denied, audited;
491 denied = requested & ~avd->allowed; 491 denied = requested & ~avd->allowed;
492 if (denied) { 492 if (denied)
493 audited = denied; 493 audited = denied & avd->auditdeny;
494 if (!(audited & avd->auditdeny)) 494 else if (result)
495 return;
496 } else if (result) {
497 audited = denied = requested; 495 audited = denied = requested;
498 } else { 496 else
499 audited = requested; 497 audited = requested & avd->auditallow;
500 if (!(audited & avd->auditallow)) 498 if (!audited)
501 return; 499 return;
502 }
503 if (!a) { 500 if (!a) {
504 a = &stack_data; 501 a = &stack_data;
505 memset(a, 0, sizeof(*a)); 502 memset(a, 0, sizeof(*a));
@@ -746,9 +743,7 @@ int avc_has_perm_noaudit(u32 ssid, u32 tsid,
746 else 743 else
747 avd = &avd_entry; 744 avd = &avd_entry;
748 745
749 rc = security_compute_av(ssid, tsid, tclass, requested, avd); 746 security_compute_av(ssid, tsid, tclass, avd);
750 if (rc)
751 goto out;
752 rcu_read_lock(); 747 rcu_read_lock();
753 node = avc_insert(ssid, tsid, tclass, avd); 748 node = avc_insert(ssid, tsid, tclass, avd);
754 } else { 749 } else {
@@ -770,7 +765,6 @@ int avc_has_perm_noaudit(u32 ssid, u32 tsid,
770 } 765 }
771 766
772 rcu_read_unlock(); 767 rcu_read_unlock();
773out:
774 return rc; 768 return rc;
775} 769}
776 770
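The collapsed branches in avc_audit() above are equivalent to this decision table, pulled into a helper purely for readability; example_audited() is not in the patch:

static u32 example_audited(u32 requested, u32 denied, int result,
                           struct av_decision *avd)
{
        if (denied)             /* denial: keep bits auditdeny selects */
                return denied & avd->auditdeny;
        if (result)             /* unexpected failure: audit the lot */
                return requested;
        return requested & avd->auditallow;     /* grant-side filter */
}

A zero result then maps onto the single early "if (!audited) return;" that replaces the two scattered exits.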
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 9a2ee845e9d4..5feecb41009d 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -76,6 +76,7 @@
76#include <linux/selinux.h> 76#include <linux/selinux.h>
77#include <linux/mutex.h> 77#include <linux/mutex.h>
78#include <linux/posix-timers.h> 78#include <linux/posix-timers.h>
79#include <linux/syslog.h>
79 80
80#include "avc.h" 81#include "avc.h"
81#include "objsec.h" 82#include "objsec.h"
@@ -125,13 +126,6 @@ __setup("selinux=", selinux_enabled_setup);
125int selinux_enabled = 1; 126int selinux_enabled = 1;
126#endif 127#endif
127 128
128
129/*
130 * Minimal support for a secondary security module,
131 * just to allow the use of the capability module.
132 */
133static struct security_operations *secondary_ops;
134
135/* Lists of inode and superblock security structures initialized 129/* Lists of inode and superblock security structures initialized
136 before the policy was loaded. */ 130 before the policy was loaded. */
137static LIST_HEAD(superblock_security_head); 131static LIST_HEAD(superblock_security_head);
@@ -2049,29 +2043,30 @@ static int selinux_quota_on(struct dentry *dentry)
2049 return dentry_has_perm(cred, NULL, dentry, FILE__QUOTAON); 2043 return dentry_has_perm(cred, NULL, dentry, FILE__QUOTAON);
2050} 2044}
2051 2045
2052static int selinux_syslog(int type) 2046static int selinux_syslog(int type, bool from_file)
2053{ 2047{
2054 int rc; 2048 int rc;
2055 2049
2056 rc = cap_syslog(type); 2050 rc = cap_syslog(type, from_file);
2057 if (rc) 2051 if (rc)
2058 return rc; 2052 return rc;
2059 2053
2060 switch (type) { 2054 switch (type) {
2061 case 3: /* Read last kernel messages */ 2055 case SYSLOG_ACTION_READ_ALL: /* Read last kernel messages */
2062 case 10: /* Return size of the log buffer */ 2056 case SYSLOG_ACTION_SIZE_BUFFER: /* Return size of the log buffer */
2063 rc = task_has_system(current, SYSTEM__SYSLOG_READ); 2057 rc = task_has_system(current, SYSTEM__SYSLOG_READ);
2064 break; 2058 break;
2065 case 6: /* Disable logging to console */ 2059 case SYSLOG_ACTION_CONSOLE_OFF: /* Disable logging to console */
2066 case 7: /* Enable logging to console */ 2060 case SYSLOG_ACTION_CONSOLE_ON: /* Enable logging to console */
2067 case 8: /* Set level of messages printed to console */ 2061 /* Set level of messages printed to console */
2062 case SYSLOG_ACTION_CONSOLE_LEVEL:
2068 rc = task_has_system(current, SYSTEM__SYSLOG_CONSOLE); 2063 rc = task_has_system(current, SYSTEM__SYSLOG_CONSOLE);
2069 break; 2064 break;
2070 case 0: /* Close log */ 2065 case SYSLOG_ACTION_CLOSE: /* Close log */
2071 case 1: /* Open log */ 2066 case SYSLOG_ACTION_OPEN: /* Open log */
2072 case 2: /* Read from log */ 2067 case SYSLOG_ACTION_READ: /* Read from log */
2073 case 4: /* Read/clear last kernel messages */ 2068 case SYSLOG_ACTION_READ_CLEAR: /* Read/clear last kernel messages */
2074 case 5: /* Clear ring buffer */ 2069 case SYSLOG_ACTION_CLEAR: /* Clear ring buffer */
2075 default: 2070 default:
2076 rc = task_has_system(current, SYSTEM__SYSLOG_MOD); 2071 rc = task_has_system(current, SYSTEM__SYSLOG_MOD);
2077 break; 2072 break;
@@ -3334,7 +3329,7 @@ static int selinux_kernel_create_files_as(struct cred *new, struct inode *inode)
3334 3329
3335 if (ret == 0) 3330 if (ret == 0)
3336 tsec->create_sid = isec->sid; 3331 tsec->create_sid = isec->sid;
3337 return 0; 3332 return ret;
3338} 3333}
3339 3334
3340static int selinux_kernel_module_request(char *kmod_name) 3335static int selinux_kernel_module_request(char *kmod_name)
@@ -5672,9 +5667,6 @@ static __init int selinux_init(void)
5672 0, SLAB_PANIC, NULL); 5667 0, SLAB_PANIC, NULL);
5673 avc_init(); 5668 avc_init();
5674 5669
5675 secondary_ops = security_ops;
5676 if (!secondary_ops)
5677 panic("SELinux: No initial security operations\n");
5678 if (register_security(&selinux_ops)) 5670 if (register_security(&selinux_ops))
5679 panic("SELinux: Unable to register with kernel.\n"); 5671 panic("SELinux: Unable to register with kernel.\n");
5680 5672
@@ -5835,8 +5827,7 @@ int selinux_disable(void)
5835 selinux_disabled = 1; 5827 selinux_disabled = 1;
5836 selinux_enabled = 0; 5828 selinux_enabled = 0;
5837 5829
5838 /* Reset security_ops to the secondary module, dummy or capability. */ 5830 reset_security_ops();
5839 security_ops = secondary_ops;
5840 5831
5841 /* Try to destroy the avc node cache */ 5832 /* Try to destroy the avc node cache */
5842 avc_disable(); 5833 avc_disable();
diff --git a/security/selinux/include/security.h b/security/selinux/include/security.h
index 2553266ad793..1f7c2491d3dc 100644
--- a/security/selinux/include/security.h
+++ b/security/selinux/include/security.h
@@ -57,7 +57,6 @@
57struct netlbl_lsm_secattr; 57struct netlbl_lsm_secattr;
58 58
59extern int selinux_enabled; 59extern int selinux_enabled;
60extern int selinux_mls_enabled;
61 60
62/* Policy capabilities */ 61/* Policy capabilities */
63enum { 62enum {
@@ -80,6 +79,8 @@ extern int selinux_policycap_openperm;
80/* limitation of boundary depth */ 79/* limitation of boundary depth */
81#define POLICYDB_BOUNDS_MAXDEPTH 4 80#define POLICYDB_BOUNDS_MAXDEPTH 4
82 81
82int security_mls_enabled(void);
83
83int security_load_policy(void *data, size_t len); 84int security_load_policy(void *data, size_t len);
84 85
85int security_policycap_supported(unsigned int req_cap); 86int security_policycap_supported(unsigned int req_cap);
@@ -96,13 +97,11 @@ struct av_decision {
96/* definitions of av_decision.flags */ 97/* definitions of av_decision.flags */
97#define AVD_FLAGS_PERMISSIVE 0x0001 98#define AVD_FLAGS_PERMISSIVE 0x0001
98 99
99int security_compute_av(u32 ssid, u32 tsid, 100void security_compute_av(u32 ssid, u32 tsid,
100 u16 tclass, u32 requested, 101 u16 tclass, struct av_decision *avd);
101 struct av_decision *avd);
102 102
103int security_compute_av_user(u32 ssid, u32 tsid, 103void security_compute_av_user(u32 ssid, u32 tsid,
104 u16 tclass, u32 requested, 104 u16 tclass, struct av_decision *avd);
105 struct av_decision *avd);
106 105
107int security_transition_sid(u32 ssid, u32 tsid, 106int security_transition_sid(u32 ssid, u32 tsid,
108 u16 tclass, u32 *out_sid); 107 u16 tclass, u32 *out_sid);
diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c
index fab36fdf2769..cd191bbec03c 100644
--- a/security/selinux/selinuxfs.c
+++ b/security/selinux/selinuxfs.c
@@ -282,7 +282,8 @@ static ssize_t sel_read_mls(struct file *filp, char __user *buf,
282 char tmpbuf[TMPBUFLEN]; 282 char tmpbuf[TMPBUFLEN];
283 ssize_t length; 283 ssize_t length;
284 284
285 length = scnprintf(tmpbuf, TMPBUFLEN, "%d", selinux_mls_enabled); 285 length = scnprintf(tmpbuf, TMPBUFLEN, "%d",
286 security_mls_enabled());
286 return simple_read_from_buffer(buf, count, ppos, tmpbuf, length); 287 return simple_read_from_buffer(buf, count, ppos, tmpbuf, length);
287} 288}
288 289
@@ -494,7 +495,6 @@ static ssize_t sel_write_access(struct file *file, char *buf, size_t size)
494 char *scon, *tcon; 495 char *scon, *tcon;
495 u32 ssid, tsid; 496 u32 ssid, tsid;
496 u16 tclass; 497 u16 tclass;
497 u32 req;
498 struct av_decision avd; 498 struct av_decision avd;
499 ssize_t length; 499 ssize_t length;
500 500
@@ -512,7 +512,7 @@ static ssize_t sel_write_access(struct file *file, char *buf, size_t size)
512 goto out; 512 goto out;
513 513
514 length = -EINVAL; 514 length = -EINVAL;
515 if (sscanf(buf, "%s %s %hu %x", scon, tcon, &tclass, &req) != 4) 515 if (sscanf(buf, "%s %s %hu", scon, tcon, &tclass) != 3)
516 goto out2; 516 goto out2;
517 517
518 length = security_context_to_sid(scon, strlen(scon)+1, &ssid); 518 length = security_context_to_sid(scon, strlen(scon)+1, &ssid);
@@ -522,9 +522,7 @@ static ssize_t sel_write_access(struct file *file, char *buf, size_t size)
522 if (length < 0) 522 if (length < 0)
523 goto out2; 523 goto out2;
524 524
525 length = security_compute_av_user(ssid, tsid, tclass, req, &avd); 525 security_compute_av_user(ssid, tsid, tclass, &avd);
526 if (length < 0)
527 goto out2;
528 526
529 length = scnprintf(buf, SIMPLE_TRANSACTION_LIMIT, 527 length = scnprintf(buf, SIMPLE_TRANSACTION_LIMIT,
530 "%x %x %x %x %u %x", 528 "%x %x %x %x %u %x",
@@ -979,6 +977,8 @@ static int sel_make_bools(void)
979 u32 sid; 977 u32 sid;
980 978
981 /* remove any existing files */ 979 /* remove any existing files */
980 for (i = 0; i < bool_num; i++)
981 kfree(bool_pending_names[i]);
982 kfree(bool_pending_names); 982 kfree(bool_pending_names);
983 kfree(bool_pending_values); 983 kfree(bool_pending_values);
984 bool_pending_names = NULL; 984 bool_pending_names = NULL;
diff --git a/security/selinux/ss/context.h b/security/selinux/ss/context.h
index d9dd7a2f6a8a..45e8fb0515f8 100644
--- a/security/selinux/ss/context.h
+++ b/security/selinux/ss/context.h
@@ -41,9 +41,6 @@ static inline int mls_context_cpy(struct context *dst, struct context *src)
41{ 41{
42 int rc; 42 int rc;
43 43
44 if (!selinux_mls_enabled)
45 return 0;
46
47 dst->range.level[0].sens = src->range.level[0].sens; 44 dst->range.level[0].sens = src->range.level[0].sens;
48 rc = ebitmap_cpy(&dst->range.level[0].cat, &src->range.level[0].cat); 45 rc = ebitmap_cpy(&dst->range.level[0].cat, &src->range.level[0].cat);
49 if (rc) 46 if (rc)
@@ -64,9 +61,6 @@ static inline int mls_context_cpy_low(struct context *dst, struct context *src)
64{ 61{
65 int rc; 62 int rc;
66 63
67 if (!selinux_mls_enabled)
68 return 0;
69
70 dst->range.level[0].sens = src->range.level[0].sens; 64 dst->range.level[0].sens = src->range.level[0].sens;
71 rc = ebitmap_cpy(&dst->range.level[0].cat, &src->range.level[0].cat); 65 rc = ebitmap_cpy(&dst->range.level[0].cat, &src->range.level[0].cat);
72 if (rc) 66 if (rc)
@@ -82,9 +76,6 @@ out:
82 76
83static inline int mls_context_cmp(struct context *c1, struct context *c2) 77static inline int mls_context_cmp(struct context *c1, struct context *c2)
84{ 78{
85 if (!selinux_mls_enabled)
86 return 1;
87
88 return ((c1->range.level[0].sens == c2->range.level[0].sens) && 79 return ((c1->range.level[0].sens == c2->range.level[0].sens) &&
89 ebitmap_cmp(&c1->range.level[0].cat, &c2->range.level[0].cat) && 80 ebitmap_cmp(&c1->range.level[0].cat, &c2->range.level[0].cat) &&
90 (c1->range.level[1].sens == c2->range.level[1].sens) && 81 (c1->range.level[1].sens == c2->range.level[1].sens) &&
@@ -93,9 +84,6 @@ static inline int mls_context_cmp(struct context *c1, struct context *c2)
93 84
94static inline void mls_context_destroy(struct context *c) 85static inline void mls_context_destroy(struct context *c)
95{ 86{
96 if (!selinux_mls_enabled)
97 return;
98
99 ebitmap_destroy(&c->range.level[0].cat); 87 ebitmap_destroy(&c->range.level[0].cat);
100 ebitmap_destroy(&c->range.level[1].cat); 88 ebitmap_destroy(&c->range.level[1].cat);
101 mls_context_init(c); 89 mls_context_init(c);
diff --git a/security/selinux/ss/mls.c b/security/selinux/ss/mls.c
index 3f2b2706b5bb..372b773f8210 100644
--- a/security/selinux/ss/mls.c
+++ b/security/selinux/ss/mls.c
@@ -39,7 +39,7 @@ int mls_compute_context_len(struct context *context)
39 struct ebitmap *e; 39 struct ebitmap *e;
40 struct ebitmap_node *node; 40 struct ebitmap_node *node;
41 41
42 if (!selinux_mls_enabled) 42 if (!policydb.mls_enabled)
43 return 0; 43 return 0;
44 44
45 len = 1; /* for the beginning ":" */ 45 len = 1; /* for the beginning ":" */
@@ -93,7 +93,7 @@ void mls_sid_to_context(struct context *context,
93 struct ebitmap *e; 93 struct ebitmap *e;
94 struct ebitmap_node *node; 94 struct ebitmap_node *node;
95 95
96 if (!selinux_mls_enabled) 96 if (!policydb.mls_enabled)
97 return; 97 return;
98 98
99 scontextp = *scontext; 99 scontextp = *scontext;
@@ -200,7 +200,7 @@ int mls_context_isvalid(struct policydb *p, struct context *c)
200{ 200{
201 struct user_datum *usrdatum; 201 struct user_datum *usrdatum;
202 202
203 if (!selinux_mls_enabled) 203 if (!p->mls_enabled)
204 return 1; 204 return 1;
205 205
206 if (!mls_range_isvalid(p, &c->range)) 206 if (!mls_range_isvalid(p, &c->range))
@@ -253,7 +253,7 @@ int mls_context_to_sid(struct policydb *pol,
253 struct cat_datum *catdatum, *rngdatum; 253 struct cat_datum *catdatum, *rngdatum;
254 int l, rc = -EINVAL; 254 int l, rc = -EINVAL;
255 255
256 if (!selinux_mls_enabled) { 256 if (!pol->mls_enabled) {
257 if (def_sid != SECSID_NULL && oldc) 257 if (def_sid != SECSID_NULL && oldc)
258 *scontext += strlen(*scontext)+1; 258 *scontext += strlen(*scontext)+1;
259 return 0; 259 return 0;
@@ -387,7 +387,7 @@ int mls_from_string(char *str, struct context *context, gfp_t gfp_mask)
387 char *tmpstr, *freestr; 387 char *tmpstr, *freestr;
388 int rc; 388 int rc;
389 389
390 if (!selinux_mls_enabled) 390 if (!policydb.mls_enabled)
391 return -EINVAL; 391 return -EINVAL;
392 392
393 /* we need freestr because mls_context_to_sid will change 393 /* we need freestr because mls_context_to_sid will change
@@ -407,7 +407,7 @@ int mls_from_string(char *str, struct context *context, gfp_t gfp_mask)
407/* 407/*
408 * Copies the MLS range `range' into `context'. 408 * Copies the MLS range `range' into `context'.
409 */ 409 */
410static inline int mls_range_set(struct context *context, 410int mls_range_set(struct context *context,
411 struct mls_range *range) 411 struct mls_range *range)
412{ 412{
413 int l, rc = 0; 413 int l, rc = 0;
@@ -427,7 +427,7 @@ static inline int mls_range_set(struct context *context,
427int mls_setup_user_range(struct context *fromcon, struct user_datum *user, 427int mls_setup_user_range(struct context *fromcon, struct user_datum *user,
428 struct context *usercon) 428 struct context *usercon)
429{ 429{
430 if (selinux_mls_enabled) { 430 if (policydb.mls_enabled) {
431 struct mls_level *fromcon_sen = &(fromcon->range.level[0]); 431 struct mls_level *fromcon_sen = &(fromcon->range.level[0]);
432 struct mls_level *fromcon_clr = &(fromcon->range.level[1]); 432 struct mls_level *fromcon_clr = &(fromcon->range.level[1]);
433 struct mls_level *user_low = &(user->range.level[0]); 433 struct mls_level *user_low = &(user->range.level[0]);
@@ -477,7 +477,7 @@ int mls_convert_context(struct policydb *oldp,
477 struct ebitmap_node *node; 477 struct ebitmap_node *node;
478 int l, i; 478 int l, i;
479 479
480 if (!selinux_mls_enabled) 480 if (!policydb.mls_enabled)
481 return 0; 481 return 0;
482 482
483 for (l = 0; l < 2; l++) { 483 for (l = 0; l < 2; l++) {
@@ -513,23 +513,21 @@ int mls_compute_sid(struct context *scontext,
513 u32 specified, 513 u32 specified,
514 struct context *newcontext) 514 struct context *newcontext)
515{ 515{
516 struct range_trans *rtr; 516 struct range_trans rtr;
517 struct mls_range *r;
517 518
518 if (!selinux_mls_enabled) 519 if (!policydb.mls_enabled)
519 return 0; 520 return 0;
520 521
521 switch (specified) { 522 switch (specified) {
522 case AVTAB_TRANSITION: 523 case AVTAB_TRANSITION:
523 /* Look for a range transition rule. */ 524 /* Look for a range transition rule. */
524 for (rtr = policydb.range_tr; rtr; rtr = rtr->next) { 525 rtr.source_type = scontext->type;
525 if (rtr->source_type == scontext->type && 526 rtr.target_type = tcontext->type;
526 rtr->target_type == tcontext->type && 527 rtr.target_class = tclass;
527 rtr->target_class == tclass) { 528 r = hashtab_search(policydb.range_tr, &rtr);
528 /* Set the range from the rule */ 529 if (r)
529 return mls_range_set(newcontext, 530 return mls_range_set(newcontext, r);
530 &rtr->target_range);
531 }
532 }
533 /* Fallthrough */ 531 /* Fallthrough */
534 case AVTAB_CHANGE: 532 case AVTAB_CHANGE:
535 if (tclass == policydb.process_class) 533 if (tclass == policydb.process_class)
@@ -541,8 +539,8 @@ int mls_compute_sid(struct context *scontext,
541 case AVTAB_MEMBER: 539 case AVTAB_MEMBER:
542 /* Use the process effective MLS attributes. */ 540 /* Use the process effective MLS attributes. */
543 return mls_context_cpy_low(newcontext, scontext); 541 return mls_context_cpy_low(newcontext, scontext);
544 default: 542
545 return -EINVAL; 543 /* fall through */
546 } 544 }
547 return -EINVAL; 545 return -EINVAL;
548} 546}
@@ -561,7 +559,7 @@ int mls_compute_sid(struct context *scontext,
561void mls_export_netlbl_lvl(struct context *context, 559void mls_export_netlbl_lvl(struct context *context,
562 struct netlbl_lsm_secattr *secattr) 560 struct netlbl_lsm_secattr *secattr)
563{ 561{
564 if (!selinux_mls_enabled) 562 if (!policydb.mls_enabled)
565 return; 563 return;
566 564
567 secattr->attr.mls.lvl = context->range.level[0].sens - 1; 565 secattr->attr.mls.lvl = context->range.level[0].sens - 1;
@@ -581,7 +579,7 @@ void mls_export_netlbl_lvl(struct context *context,
581void mls_import_netlbl_lvl(struct context *context, 579void mls_import_netlbl_lvl(struct context *context,
582 struct netlbl_lsm_secattr *secattr) 580 struct netlbl_lsm_secattr *secattr)
583{ 581{
584 if (!selinux_mls_enabled) 582 if (!policydb.mls_enabled)
585 return; 583 return;
586 584
587 context->range.level[0].sens = secattr->attr.mls.lvl + 1; 585 context->range.level[0].sens = secattr->attr.mls.lvl + 1;
@@ -603,7 +601,7 @@ int mls_export_netlbl_cat(struct context *context,
603{ 601{
604 int rc; 602 int rc;
605 603
606 if (!selinux_mls_enabled) 604 if (!policydb.mls_enabled)
607 return 0; 605 return 0;
608 606
609 rc = ebitmap_netlbl_export(&context->range.level[0].cat, 607 rc = ebitmap_netlbl_export(&context->range.level[0].cat,
@@ -631,7 +629,7 @@ int mls_import_netlbl_cat(struct context *context,
631{ 629{
632 int rc; 630 int rc;
633 631
634 if (!selinux_mls_enabled) 632 if (!policydb.mls_enabled)
635 return 0; 633 return 0;
636 634
637 rc = ebitmap_netlbl_import(&context->range.level[0].cat, 635 rc = ebitmap_netlbl_import(&context->range.level[0].cat,
diff --git a/security/selinux/ss/mls.h b/security/selinux/ss/mls.h
index 1276715aaa8b..cd9152632e54 100644
--- a/security/selinux/ss/mls.h
+++ b/security/selinux/ss/mls.h
@@ -39,6 +39,8 @@ int mls_context_to_sid(struct policydb *p,
39 39
40int mls_from_string(char *str, struct context *context, gfp_t gfp_mask); 40int mls_from_string(char *str, struct context *context, gfp_t gfp_mask);
41 41
42int mls_range_set(struct context *context, struct mls_range *range);
43
42int mls_convert_context(struct policydb *oldp, 44int mls_convert_context(struct policydb *oldp,
43 struct policydb *newp, 45 struct policydb *newp,
44 struct context *context); 46 struct context *context);
diff --git a/security/selinux/ss/mls_types.h b/security/selinux/ss/mls_types.h
index b6e943a21061..03bed52a8052 100644
--- a/security/selinux/ss/mls_types.h
+++ b/security/selinux/ss/mls_types.h
@@ -15,6 +15,7 @@
15#define _SS_MLS_TYPES_H_ 15#define _SS_MLS_TYPES_H_
16 16
17#include "security.h" 17#include "security.h"
18#include "ebitmap.h"
18 19
19struct mls_level { 20struct mls_level {
20 u32 sens; /* sensitivity */ 21 u32 sens; /* sensitivity */
@@ -27,18 +28,12 @@ struct mls_range {
27 28
28static inline int mls_level_eq(struct mls_level *l1, struct mls_level *l2) 29static inline int mls_level_eq(struct mls_level *l1, struct mls_level *l2)
29{ 30{
30 if (!selinux_mls_enabled)
31 return 1;
32
33 return ((l1->sens == l2->sens) && 31 return ((l1->sens == l2->sens) &&
34 ebitmap_cmp(&l1->cat, &l2->cat)); 32 ebitmap_cmp(&l1->cat, &l2->cat));
35} 33}
36 34
37static inline int mls_level_dom(struct mls_level *l1, struct mls_level *l2) 35static inline int mls_level_dom(struct mls_level *l1, struct mls_level *l2)
38{ 36{
39 if (!selinux_mls_enabled)
40 return 1;
41
42 return ((l1->sens >= l2->sens) && 37 return ((l1->sens >= l2->sens) &&
43 ebitmap_contains(&l1->cat, &l2->cat)); 38 ebitmap_contains(&l1->cat, &l2->cat));
44} 39}
diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c
index f03667213ea8..23c6e53c102c 100644
--- a/security/selinux/ss/policydb.c
+++ b/security/selinux/ss/policydb.c
@@ -52,8 +52,6 @@ static char *symtab_name[SYM_NUM] = {
52}; 52};
53#endif 53#endif
54 54
55int selinux_mls_enabled;
56
57static unsigned int symtab_sizes[SYM_NUM] = { 55static unsigned int symtab_sizes[SYM_NUM] = {
58 2, 56 2,
59 32, 57 32,
@@ -177,6 +175,21 @@ out_free_role:
177 goto out; 175 goto out;
178} 176}
179 177
178static u32 rangetr_hash(struct hashtab *h, const void *k)
179{
180 const struct range_trans *key = k;
181 return (key->source_type + (key->target_type << 3) +
182 (key->target_class << 5)) & (h->size - 1);
183}
184
185static int rangetr_cmp(struct hashtab *h, const void *k1, const void *k2)
186{
187 const struct range_trans *key1 = k1, *key2 = k2;
188 return (key1->source_type != key2->source_type ||
189 key1->target_type != key2->target_type ||
190 key1->target_class != key2->target_class);
191}
192
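A sketch of how the new table is exercised, mirroring mls_compute_sid() and policydb_read() elsewhere in this patch; example_lookup() is illustrative. Note that rangetr_hash() masks with (h->size - 1), so the bucket count passed to hashtab_create() (256 below) must remain a power of two:

static struct mls_range *example_lookup(struct policydb *p, u32 stype,
                                        u32 ttype, u16 tclass)
{
        struct range_trans key = {
                .source_type  = stype,
                .target_type  = ttype,
                .target_class = tclass,
        };

        return hashtab_search(p->range_tr, &key);       /* NULL if absent */
}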
180/* 193/*
181 * Initialize a policy database structure. 194 * Initialize a policy database structure.
182 */ 195 */
@@ -204,6 +217,10 @@ static int policydb_init(struct policydb *p)
204 if (rc) 217 if (rc)
205 goto out_free_symtab; 218 goto out_free_symtab;
206 219
220 p->range_tr = hashtab_create(rangetr_hash, rangetr_cmp, 256);
221 if (!p->range_tr)
222 goto out_free_symtab;
223
207 ebitmap_init(&p->policycaps); 224 ebitmap_init(&p->policycaps);
208 ebitmap_init(&p->permissive_map); 225 ebitmap_init(&p->permissive_map);
209 226
@@ -408,6 +425,20 @@ static void symtab_hash_eval(struct symtab *s)
408 info.slots_used, h->size, info.max_chain_len); 425 info.slots_used, h->size, info.max_chain_len);
409 } 426 }
410} 427}
428
429static void rangetr_hash_eval(struct hashtab *h)
430{
431 struct hashtab_info info;
432
433 hashtab_stat(h, &info);
434 printk(KERN_DEBUG "SELinux: rangetr: %d entries and %d/%d buckets used, "
435 "longest chain length %d\n", h->nel,
436 info.slots_used, h->size, info.max_chain_len);
437}
438#else
439static inline void rangetr_hash_eval(struct hashtab *h)
440{
441}
411#endif 442#endif
412 443
413/* 444/*
@@ -422,7 +453,7 @@ static int policydb_index_others(struct policydb *p)
422 453
423 printk(KERN_DEBUG "SELinux: %d users, %d roles, %d types, %d bools", 454 printk(KERN_DEBUG "SELinux: %d users, %d roles, %d types, %d bools",
424 p->p_users.nprim, p->p_roles.nprim, p->p_types.nprim, p->p_bools.nprim); 455 p->p_users.nprim, p->p_roles.nprim, p->p_types.nprim, p->p_bools.nprim);
425 if (selinux_mls_enabled) 456 if (p->mls_enabled)
426 printk(", %d sens, %d cats", p->p_levels.nprim, 457 printk(", %d sens, %d cats", p->p_levels.nprim,
427 p->p_cats.nprim); 458 p->p_cats.nprim);
428 printk("\n"); 459 printk("\n");
@@ -612,6 +643,17 @@ static int (*destroy_f[SYM_NUM]) (void *key, void *datum, void *datap) =
612 cat_destroy, 643 cat_destroy,
613}; 644};
614 645
646static int range_tr_destroy(void *key, void *datum, void *p)
647{
648 struct mls_range *rt = datum;
649 kfree(key);
650 ebitmap_destroy(&rt->level[0].cat);
651 ebitmap_destroy(&rt->level[1].cat);
652 kfree(datum);
653 cond_resched();
654 return 0;
655}
656
615static void ocontext_destroy(struct ocontext *c, int i) 657static void ocontext_destroy(struct ocontext *c, int i)
616{ 658{
617 context_destroy(&c->context[0]); 659 context_destroy(&c->context[0]);
@@ -632,7 +674,6 @@ void policydb_destroy(struct policydb *p)
632 int i; 674 int i;
633 struct role_allow *ra, *lra = NULL; 675 struct role_allow *ra, *lra = NULL;
634 struct role_trans *tr, *ltr = NULL; 676 struct role_trans *tr, *ltr = NULL;
635 struct range_trans *rt, *lrt = NULL;
636 677
637 for (i = 0; i < SYM_NUM; i++) { 678 for (i = 0; i < SYM_NUM; i++) {
638 cond_resched(); 679 cond_resched();
@@ -693,20 +734,8 @@ void policydb_destroy(struct policydb *p)
693 } 734 }
694 kfree(lra); 735 kfree(lra);
695 736
696 for (rt = p->range_tr; rt; rt = rt->next) { 737 hashtab_map(p->range_tr, range_tr_destroy, NULL);
697 cond_resched(); 738 hashtab_destroy(p->range_tr);
698 if (lrt) {
699 ebitmap_destroy(&lrt->target_range.level[0].cat);
700 ebitmap_destroy(&lrt->target_range.level[1].cat);
701 kfree(lrt);
702 }
703 lrt = rt;
704 }
705 if (lrt) {
706 ebitmap_destroy(&lrt->target_range.level[0].cat);
707 ebitmap_destroy(&lrt->target_range.level[1].cat);
708 kfree(lrt);
709 }
710 739
711 if (p->type_attr_map) { 740 if (p->type_attr_map) {
712 for (i = 0; i < p->p_types.nprim; i++) 741 for (i = 0; i < p->p_types.nprim; i++)
@@ -1686,12 +1715,11 @@ int policydb_read(struct policydb *p, void *fp)
1686 int i, j, rc; 1715 int i, j, rc;
1687 __le32 buf[4]; 1716 __le32 buf[4];
1688 u32 nodebuf[8]; 1717 u32 nodebuf[8];
1689 u32 len, len2, config, nprim, nel, nel2; 1718 u32 len, len2, nprim, nel, nel2;
1690 char *policydb_str; 1719 char *policydb_str;
1691 struct policydb_compat_info *info; 1720 struct policydb_compat_info *info;
1692 struct range_trans *rt, *lrt; 1721 struct range_trans *rt;
1693 1722 struct mls_range *r;
1694 config = 0;
1695 1723
1696 rc = policydb_init(p); 1724 rc = policydb_init(p);
1697 if (rc) 1725 if (rc)
@@ -1740,7 +1768,7 @@ int policydb_read(struct policydb *p, void *fp)
1740 kfree(policydb_str); 1768 kfree(policydb_str);
1741 policydb_str = NULL; 1769 policydb_str = NULL;
1742 1770
1743 /* Read the version, config, and table sizes. */ 1771 /* Read the version and table sizes. */
1744 rc = next_entry(buf, fp, sizeof(u32)*4); 1772 rc = next_entry(buf, fp, sizeof(u32)*4);
1745 if (rc < 0) 1773 if (rc < 0)
1746 goto bad; 1774 goto bad;
@@ -1755,13 +1783,7 @@ int policydb_read(struct policydb *p, void *fp)
1755 } 1783 }
1756 1784
1757 if ((le32_to_cpu(buf[1]) & POLICYDB_CONFIG_MLS)) { 1785 if ((le32_to_cpu(buf[1]) & POLICYDB_CONFIG_MLS)) {
1758 if (ss_initialized && !selinux_mls_enabled) { 1786 p->mls_enabled = 1;
1759 printk(KERN_ERR "SELinux: Cannot switch between non-MLS"
1760 " and MLS policies\n");
1761 goto bad;
1762 }
1763 selinux_mls_enabled = 1;
1764 config |= POLICYDB_CONFIG_MLS;
1765 1787
1766 if (p->policyvers < POLICYDB_VERSION_MLS) { 1788 if (p->policyvers < POLICYDB_VERSION_MLS) {
1767 printk(KERN_ERR "SELinux: security policydb version %d " 1789 printk(KERN_ERR "SELinux: security policydb version %d "
@@ -1769,12 +1791,6 @@ int policydb_read(struct policydb *p, void *fp)
1769 p->policyvers); 1791 p->policyvers);
1770 goto bad; 1792 goto bad;
1771 } 1793 }
1772 } else {
1773 if (ss_initialized && selinux_mls_enabled) {
1774 printk(KERN_ERR "SELinux: Cannot switch between MLS and"
1775 " non-MLS policies\n");
1776 goto bad;
1777 }
1778 } 1794 }
1779 p->reject_unknown = !!(le32_to_cpu(buf[1]) & REJECT_UNKNOWN); 1795 p->reject_unknown = !!(le32_to_cpu(buf[1]) & REJECT_UNKNOWN);
1780 p->allow_unknown = !!(le32_to_cpu(buf[1]) & ALLOW_UNKNOWN); 1796 p->allow_unknown = !!(le32_to_cpu(buf[1]) & ALLOW_UNKNOWN);
@@ -2122,44 +2138,61 @@ int policydb_read(struct policydb *p, void *fp)
2122 if (rc < 0) 2138 if (rc < 0)
2123 goto bad; 2139 goto bad;
2124 nel = le32_to_cpu(buf[0]); 2140 nel = le32_to_cpu(buf[0]);
2125 lrt = NULL;
2126 for (i = 0; i < nel; i++) { 2141 for (i = 0; i < nel; i++) {
2127 rt = kzalloc(sizeof(*rt), GFP_KERNEL); 2142 rt = kzalloc(sizeof(*rt), GFP_KERNEL);
2128 if (!rt) { 2143 if (!rt) {
2129 rc = -ENOMEM; 2144 rc = -ENOMEM;
2130 goto bad; 2145 goto bad;
2131 } 2146 }
2132 if (lrt)
2133 lrt->next = rt;
2134 else
2135 p->range_tr = rt;
2136 rc = next_entry(buf, fp, (sizeof(u32) * 2)); 2147 rc = next_entry(buf, fp, (sizeof(u32) * 2));
2137 if (rc < 0) 2148 if (rc < 0) {
2149 kfree(rt);
2138 goto bad; 2150 goto bad;
2151 }
2139 rt->source_type = le32_to_cpu(buf[0]); 2152 rt->source_type = le32_to_cpu(buf[0]);
2140 rt->target_type = le32_to_cpu(buf[1]); 2153 rt->target_type = le32_to_cpu(buf[1]);
2141 if (new_rangetr) { 2154 if (new_rangetr) {
2142 rc = next_entry(buf, fp, sizeof(u32)); 2155 rc = next_entry(buf, fp, sizeof(u32));
2143 if (rc < 0) 2156 if (rc < 0) {
2157 kfree(rt);
2144 goto bad; 2158 goto bad;
2159 }
2145 rt->target_class = le32_to_cpu(buf[0]); 2160 rt->target_class = le32_to_cpu(buf[0]);
2146 } else 2161 } else
2147 rt->target_class = p->process_class; 2162 rt->target_class = p->process_class;
2148 if (!policydb_type_isvalid(p, rt->source_type) || 2163 if (!policydb_type_isvalid(p, rt->source_type) ||
2149 !policydb_type_isvalid(p, rt->target_type) || 2164 !policydb_type_isvalid(p, rt->target_type) ||
2150 !policydb_class_isvalid(p, rt->target_class)) { 2165 !policydb_class_isvalid(p, rt->target_class)) {
2166 kfree(rt);
2151 rc = -EINVAL; 2167 rc = -EINVAL;
2152 goto bad; 2168 goto bad;
2153 } 2169 }
2154 rc = mls_read_range_helper(&rt->target_range, fp); 2170 r = kzalloc(sizeof(*r), GFP_KERNEL);
2155 if (rc) 2171 if (!r) {
2172 kfree(rt);
2173 rc = -ENOMEM;
2156 goto bad; 2174 goto bad;
2157 if (!mls_range_isvalid(p, &rt->target_range)) { 2175 }
2176 rc = mls_read_range_helper(r, fp);
2177 if (rc) {
2178 kfree(rt);
2179 kfree(r);
2180 goto bad;
2181 }
2182 if (!mls_range_isvalid(p, r)) {
2158 printk(KERN_WARNING "SELinux: rangetrans: invalid range\n"); 2183 printk(KERN_WARNING "SELinux: rangetrans: invalid range\n");
2184 kfree(rt);
2185 kfree(r);
2186 goto bad;
2187 }
2188 rc = hashtab_insert(p->range_tr, rt, r);
2189 if (rc) {
2190 kfree(rt);
2191 kfree(r);
2159 goto bad; 2192 goto bad;
2160 } 2193 }
2161 lrt = rt;
2162 } 2194 }
2195 rangetr_hash_eval(p->range_tr);
2163 } 2196 }
2164 2197
2165 p->type_attr_map = kmalloc(p->p_types.nprim*sizeof(struct ebitmap), GFP_KERNEL); 2198 p->type_attr_map = kmalloc(p->p_types.nprim*sizeof(struct ebitmap), GFP_KERNEL);
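
On the load side, both the key (rt) and the datum (r) are now separate heap objects, so every failure branch before a successful hashtab_insert() must free whatever the loop still owns — hence the kfree() calls sprinkled through the hunk above. Condensed into one sketch (the actual code frees at each error site rather than sharing a label):

        for (i = 0; i < nel; i++) {
                rt = kzalloc(sizeof(*rt), GFP_KERNEL);
                r = kzalloc(sizeof(*r), GFP_KERNEL);
                if (!rt || !r) {
                        rc = -ENOMEM;
                        goto out_free;
                }
                /* ... read and validate rt (key) and r (datum) ... */
                rc = hashtab_insert(p->range_tr, rt, r);
                if (rc)
                        goto out_free;
                continue;               /* table now owns rt and r */
out_free:
                kfree(rt);              /* kfree(NULL) is a no-op, so a */
                kfree(r);               /* failed first alloc needs no care */
                goto bad;
        }
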
diff --git a/security/selinux/ss/policydb.h b/security/selinux/ss/policydb.h
index cdcc5700946f..26d9adf8542b 100644
--- a/security/selinux/ss/policydb.h
+++ b/security/selinux/ss/policydb.h
@@ -27,6 +27,8 @@
27#include "symtab.h" 27#include "symtab.h"
28#include "avtab.h" 28#include "avtab.h"
29#include "sidtab.h" 29#include "sidtab.h"
30#include "ebitmap.h"
31#include "mls_types.h"
30#include "context.h" 32#include "context.h"
31#include "constraint.h" 33#include "constraint.h"
32 34
@@ -113,8 +115,6 @@ struct range_trans {
113 u32 source_type; 115 u32 source_type;
114 u32 target_type; 116 u32 target_type;
115 u32 target_class; 117 u32 target_class;
116 struct mls_range target_range;
117 struct range_trans *next;
118}; 118};
119 119
120/* Boolean data type */ 120/* Boolean data type */
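
struct range_trans is reduced to a pure three-field lookup key; the target range it used to embed becomes the hash datum, and the next pointer disappears along with the list. A table keyed this way needs hash and three-way compare callbacks over the triple — a hypothetical pair, assuming a power-of-two table whose struct hashtab exposes its size (as the h->size use in the debug printk earlier suggests):

static u32 rangetr_hash(struct hashtab *h, const void *k)
{
        const struct range_trans *key = k;

        /* Mix the three ids, then mask to the (power-of-two) size. */
        return (key->source_type + (key->target_type << 3) +
                (key->target_class << 5)) & (h->size - 1);
}

static int rangetr_cmp(struct hashtab *h, const void *k1, const void *k2)
{
        const struct range_trans *key1 = k1, *key2 = k2;
        int v;

        v = key1->source_type - key2->source_type;
        if (v)
                return v;
        v = key1->target_type - key2->target_type;
        if (v)
                return v;
        return key1->target_class - key2->target_class;
}
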
@@ -187,6 +187,8 @@ struct genfs {
187 187
188/* The policy database */ 188/* The policy database */
189struct policydb { 189struct policydb {
190 int mls_enabled;
191
190 /* symbol tables */ 192 /* symbol tables */
191 struct symtab symtab[SYM_NUM]; 193 struct symtab symtab[SYM_NUM];
192#define p_commons symtab[SYM_COMMONS] 194#define p_commons symtab[SYM_COMMONS]
@@ -240,8 +242,8 @@ struct policydb {
240 fixed labeling behavior. */ 242 fixed labeling behavior. */
241 struct genfs *genfs; 243 struct genfs *genfs;
242 244
243 /* range transitions */ 245 /* range transitions table (range_trans_key -> mls_range) */
244 struct range_trans *range_tr; 246 struct hashtab *range_tr;
245 247
246 /* type -> attribute reverse mapping */ 248 /* type -> attribute reverse mapping */
247 struct ebitmap *type_attr_map; 249 struct ebitmap *type_attr_map;
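
The updated comment also implies the read side: resolving a range transition becomes a probe with a stack-allocated key instead of a list scan. A sketch, where stype, ttype and tclass are placeholders for the pair being resolved:

        struct range_trans rtr = {
                .source_type  = stype,
                .target_type  = ttype,
                .target_class = tclass,
        };
        struct mls_range *r = hashtab_search(p->range_tr, &rtr);

        if (r) {
                /* use the precomputed target range */
        }
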
diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
index b3efae204ac7..cf27b3ee1a95 100644
--- a/security/selinux/ss/services.c
+++ b/security/selinux/ss/services.c
@@ -26,6 +26,10 @@
26 * 26 *
27 * Added support for bounds domain and audit messaged on masked permissions 27 * Added support for bounds domain and audit messaged on masked permissions
28 * 28 *
29 * Updated: Guido Trentalancia <guido@trentalancia.com>
30 *
31 * Added support for runtime switching of the policy type
32 *
29 * Copyright (C) 2008, 2009 NEC Corporation 33 * Copyright (C) 2008, 2009 NEC Corporation
30 * Copyright (C) 2006, 2007 Hewlett-Packard Development Company, L.P. 34 * Copyright (C) 2006, 2007 Hewlett-Packard Development Company, L.P.
31 * Copyright (C) 2004-2006 Trusted Computer Solutions, Inc. 35 * Copyright (C) 2004-2006 Trusted Computer Solutions, Inc.
@@ -87,11 +91,10 @@ static u32 latest_granting;
87static int context_struct_to_string(struct context *context, char **scontext, 91static int context_struct_to_string(struct context *context, char **scontext,
88 u32 *scontext_len); 92 u32 *scontext_len);
89 93
90static int context_struct_compute_av(struct context *scontext, 94static void context_struct_compute_av(struct context *scontext,
91 struct context *tcontext, 95 struct context *tcontext,
92 u16 tclass, 96 u16 tclass,
93 u32 requested, 97 struct av_decision *avd);
94 struct av_decision *avd);
95 98
96struct selinux_mapping { 99struct selinux_mapping {
97 u16 value; /* policy value */ 100 u16 value; /* policy value */
@@ -196,23 +199,6 @@ static u16 unmap_class(u16 tclass)
196 return tclass; 199 return tclass;
197} 200}
198 201
199static u32 unmap_perm(u16 tclass, u32 tperm)
200{
201 if (tclass < current_mapping_size) {
202 unsigned i;
203 u32 kperm = 0;
204
205 for (i = 0; i < current_mapping[tclass].num_perms; i++)
206 if (tperm & (1<<i)) {
207 kperm |= current_mapping[tclass].perms[i];
208 tperm &= ~(1<<i);
209 }
210 return kperm;
211 }
212
213 return tperm;
214}
215
216static void map_decision(u16 tclass, struct av_decision *avd, 202static void map_decision(u16 tclass, struct av_decision *avd,
217 int allow_unknown) 203 int allow_unknown)
218{ 204{
@@ -250,6 +236,10 @@ static void map_decision(u16 tclass, struct av_decision *avd,
250 } 236 }
251} 237}
252 238
239int security_mls_enabled(void)
240{
241 return policydb.mls_enabled;
242}
253 243
254/* 244/*
255 * Return the boolean value of a constraint expression 245 * Return the boolean value of a constraint expression
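
security_mls_enabled() gives code outside the security server a policy-scoped replacement for the old selinux_mls_enabled global, which had to stay fixed for the lifetime of the system. A caller sketch, mirroring the MLS-only shortcuts converted later in this patch:

        /* Skip MLS-only processing when the loaded policy is non-MLS. */
        if (!security_mls_enabled()) {
                *peer_sid = SECSID_NULL;
                return 0;
        }
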
@@ -465,7 +455,8 @@ static void security_dump_masked_av(struct context *scontext,
465 char *scontext_name = NULL; 455 char *scontext_name = NULL;
466 char *tcontext_name = NULL; 456 char *tcontext_name = NULL;
467 char *permission_names[32]; 457 char *permission_names[32];
468 int index, length; 458 int index;
459 u32 length;
469 bool need_comma = false; 460 bool need_comma = false;
470 461
471 if (!permissions) 462 if (!permissions)
@@ -532,7 +523,6 @@ out:
532static void type_attribute_bounds_av(struct context *scontext, 523static void type_attribute_bounds_av(struct context *scontext,
533 struct context *tcontext, 524 struct context *tcontext,
534 u16 tclass, 525 u16 tclass,
535 u32 requested,
536 struct av_decision *avd) 526 struct av_decision *avd)
537{ 527{
538 struct context lo_scontext; 528 struct context lo_scontext;
@@ -553,7 +543,6 @@ static void type_attribute_bounds_av(struct context *scontext,
553 context_struct_compute_av(&lo_scontext, 543 context_struct_compute_av(&lo_scontext,
554 tcontext, 544 tcontext,
555 tclass, 545 tclass,
556 requested,
557 &lo_avd); 546 &lo_avd);
558 if ((lo_avd.allowed & avd->allowed) == avd->allowed) 547 if ((lo_avd.allowed & avd->allowed) == avd->allowed)
559 return; /* no masked permission */ 548 return; /* no masked permission */
@@ -569,7 +558,6 @@ static void type_attribute_bounds_av(struct context *scontext,
569 context_struct_compute_av(scontext, 558 context_struct_compute_av(scontext,
570 &lo_tcontext, 559 &lo_tcontext,
571 tclass, 560 tclass,
572 requested,
573 &lo_avd); 561 &lo_avd);
574 if ((lo_avd.allowed & avd->allowed) == avd->allowed) 562 if ((lo_avd.allowed & avd->allowed) == avd->allowed)
575 return; /* no masked permission */ 563 return; /* no masked permission */
@@ -586,7 +574,6 @@ static void type_attribute_bounds_av(struct context *scontext,
586 context_struct_compute_av(&lo_scontext, 574 context_struct_compute_av(&lo_scontext,
587 &lo_tcontext, 575 &lo_tcontext,
588 tclass, 576 tclass,
589 requested,
590 &lo_avd); 577 &lo_avd);
591 if ((lo_avd.allowed & avd->allowed) == avd->allowed) 578 if ((lo_avd.allowed & avd->allowed) == avd->allowed)
592 return; /* no masked permission */ 579 return; /* no masked permission */
@@ -607,11 +594,10 @@ static void type_attribute_bounds_av(struct context *scontext,
607 * Compute access vectors based on a context structure pair for 594 * Compute access vectors based on a context structure pair for
608 * the permissions in a particular class. 595 * the permissions in a particular class.
609 */ 596 */
610static int context_struct_compute_av(struct context *scontext, 597static void context_struct_compute_av(struct context *scontext,
611 struct context *tcontext, 598 struct context *tcontext,
612 u16 tclass, 599 u16 tclass,
613 u32 requested, 600 struct av_decision *avd)
614 struct av_decision *avd)
615{ 601{
616 struct constraint_node *constraint; 602 struct constraint_node *constraint;
617 struct role_allow *ra; 603 struct role_allow *ra;
@@ -622,19 +608,14 @@ static int context_struct_compute_av(struct context *scontext,
622 struct ebitmap_node *snode, *tnode; 608 struct ebitmap_node *snode, *tnode;
623 unsigned int i, j; 609 unsigned int i, j;
624 610
625 /*
626 * Initialize the access vectors to the default values.
627 */
628 avd->allowed = 0; 611 avd->allowed = 0;
629 avd->auditallow = 0; 612 avd->auditallow = 0;
630 avd->auditdeny = 0xffffffff; 613 avd->auditdeny = 0xffffffff;
631 avd->seqno = latest_granting;
632 avd->flags = 0;
633 614
634 if (unlikely(!tclass || tclass > policydb.p_classes.nprim)) { 615 if (unlikely(!tclass || tclass > policydb.p_classes.nprim)) {
635 if (printk_ratelimit()) 616 if (printk_ratelimit())
636 printk(KERN_WARNING "SELinux: Invalid class %hu\n", tclass); 617 printk(KERN_WARNING "SELinux: Invalid class %hu\n", tclass);
637 return -EINVAL; 618 return;
638 } 619 }
639 620
640 tclass_datum = policydb.class_val_to_struct[tclass - 1]; 621 tclass_datum = policydb.class_val_to_struct[tclass - 1];
@@ -705,9 +686,7 @@ static int context_struct_compute_av(struct context *scontext,
705 * permission and notice it to userspace via audit. 686 * permission and notice it to userspace via audit.
706 */ 687 */
707 type_attribute_bounds_av(scontext, tcontext, 688 type_attribute_bounds_av(scontext, tcontext,
708 tclass, requested, avd); 689 tclass, avd);
709
710 return 0;
711} 690}
712 691
713static int security_validtrans_handle_fail(struct context *ocontext, 692static int security_validtrans_handle_fail(struct context *ocontext,
@@ -864,7 +843,7 @@ int security_bounded_transition(u32 old_sid, u32 new_sid)
864 if (rc) { 843 if (rc) {
865 char *old_name = NULL; 844 char *old_name = NULL;
866 char *new_name = NULL; 845 char *new_name = NULL;
867 int length; 846 u32 length;
868 847
869 if (!context_struct_to_string(old_context, 848 if (!context_struct_to_string(old_context,
870 &old_name, &length) && 849 &old_name, &length) &&
@@ -886,110 +865,116 @@ out:
886 return rc; 865 return rc;
887} 866}
888 867
889 868static void avd_init(struct av_decision *avd)
890static int security_compute_av_core(u32 ssid,
891 u32 tsid,
892 u16 tclass,
893 u32 requested,
894 struct av_decision *avd)
895{ 869{
896 struct context *scontext = NULL, *tcontext = NULL; 870 avd->allowed = 0;
897 int rc = 0; 871 avd->auditallow = 0;
898 872 avd->auditdeny = 0xffffffff;
899 scontext = sidtab_search(&sidtab, ssid); 873 avd->seqno = latest_granting;
900 if (!scontext) { 874 avd->flags = 0;
901 printk(KERN_ERR "SELinux: %s: unrecognized SID %d\n",
902 __func__, ssid);
903 return -EINVAL;
904 }
905 tcontext = sidtab_search(&sidtab, tsid);
906 if (!tcontext) {
907 printk(KERN_ERR "SELinux: %s: unrecognized SID %d\n",
908 __func__, tsid);
909 return -EINVAL;
910 }
911
912 rc = context_struct_compute_av(scontext, tcontext, tclass,
913 requested, avd);
914
915 /* permissive domain? */
916 if (ebitmap_get_bit(&policydb.permissive_map, scontext->type))
917 avd->flags |= AVD_FLAGS_PERMISSIVE;
918
919 return rc;
920} 875}
921 876
877
922/** 878/**
923 * security_compute_av - Compute access vector decisions. 879 * security_compute_av - Compute access vector decisions.
924 * @ssid: source security identifier 880 * @ssid: source security identifier
925 * @tsid: target security identifier 881 * @tsid: target security identifier
926 * @tclass: target security class 882 * @tclass: target security class
927 * @requested: requested permissions
928 * @avd: access vector decisions 883 * @avd: access vector decisions
929 * 884 *
930 * Compute a set of access vector decisions based on the 885 * Compute a set of access vector decisions based on the
931 * SID pair (@ssid, @tsid) for the permissions in @tclass. 886 * SID pair (@ssid, @tsid) for the permissions in @tclass.
932 * Return -%EINVAL if any of the parameters are invalid or %0
933 * if the access vector decisions were computed successfully.
934 */ 887 */
935int security_compute_av(u32 ssid, 888void security_compute_av(u32 ssid,
936 u32 tsid, 889 u32 tsid,
937 u16 orig_tclass, 890 u16 orig_tclass,
938 u32 orig_requested, 891 struct av_decision *avd)
939 struct av_decision *avd)
940{ 892{
941 u16 tclass; 893 u16 tclass;
942 u32 requested; 894 struct context *scontext = NULL, *tcontext = NULL;
943 int rc;
944 895
945 read_lock(&policy_rwlock); 896 read_lock(&policy_rwlock);
946 897 avd_init(avd);
947 if (!ss_initialized) 898 if (!ss_initialized)
948 goto allow; 899 goto allow;
949 900
950 requested = unmap_perm(orig_tclass, orig_requested); 901 scontext = sidtab_search(&sidtab, ssid);
902 if (!scontext) {
903 printk(KERN_ERR "SELinux: %s: unrecognized SID %d\n",
904 __func__, ssid);
905 goto out;
906 }
907
908 /* permissive domain? */
909 if (ebitmap_get_bit(&policydb.permissive_map, scontext->type))
910 avd->flags |= AVD_FLAGS_PERMISSIVE;
911
912 tcontext = sidtab_search(&sidtab, tsid);
913 if (!tcontext) {
914 printk(KERN_ERR "SELinux: %s: unrecognized SID %d\n",
915 __func__, tsid);
916 goto out;
917 }
918
951 tclass = unmap_class(orig_tclass); 919 tclass = unmap_class(orig_tclass);
952 if (unlikely(orig_tclass && !tclass)) { 920 if (unlikely(orig_tclass && !tclass)) {
953 if (policydb.allow_unknown) 921 if (policydb.allow_unknown)
954 goto allow; 922 goto allow;
955 rc = -EINVAL;
956 goto out; 923 goto out;
957 } 924 }
958 rc = security_compute_av_core(ssid, tsid, tclass, requested, avd); 925 context_struct_compute_av(scontext, tcontext, tclass, avd);
959 map_decision(orig_tclass, avd, policydb.allow_unknown); 926 map_decision(orig_tclass, avd, policydb.allow_unknown);
960out: 927out:
961 read_unlock(&policy_rwlock); 928 read_unlock(&policy_rwlock);
962 return rc; 929 return;
963allow: 930allow:
964 avd->allowed = 0xffffffff; 931 avd->allowed = 0xffffffff;
965 avd->auditallow = 0;
966 avd->auditdeny = 0xffffffff;
967 avd->seqno = latest_granting;
968 avd->flags = 0;
969 rc = 0;
970 goto out; 932 goto out;
971} 933}
972 934
973int security_compute_av_user(u32 ssid, 935void security_compute_av_user(u32 ssid,
974 u32 tsid, 936 u32 tsid,
975 u16 tclass, 937 u16 tclass,
976 u32 requested, 938 struct av_decision *avd)
977 struct av_decision *avd)
978{ 939{
979 int rc; 940 struct context *scontext = NULL, *tcontext = NULL;
980 941
981 if (!ss_initialized) { 942 read_lock(&policy_rwlock);
982 avd->allowed = 0xffffffff; 943 avd_init(avd);
983 avd->auditallow = 0; 944 if (!ss_initialized)
984 avd->auditdeny = 0xffffffff; 945 goto allow;
985 avd->seqno = latest_granting; 946
986 return 0; 947 scontext = sidtab_search(&sidtab, ssid);
948 if (!scontext) {
949 printk(KERN_ERR "SELinux: %s: unrecognized SID %d\n",
950 __func__, ssid);
951 goto out;
987 } 952 }
988 953
989 read_lock(&policy_rwlock); 954 /* permissive domain? */
990 rc = security_compute_av_core(ssid, tsid, tclass, requested, avd); 955 if (ebitmap_get_bit(&policydb.permissive_map, scontext->type))
956 avd->flags |= AVD_FLAGS_PERMISSIVE;
957
958 tcontext = sidtab_search(&sidtab, tsid);
959 if (!tcontext) {
960 printk(KERN_ERR "SELinux: %s: unrecognized SID %d\n",
961 __func__, tsid);
962 goto out;
963 }
964
965 if (unlikely(!tclass)) {
966 if (policydb.allow_unknown)
967 goto allow;
968 goto out;
969 }
970
971 context_struct_compute_av(scontext, tcontext, tclass, avd);
972 out:
991 read_unlock(&policy_rwlock); 973 read_unlock(&policy_rwlock);
992 return rc; 974 return;
975allow:
976 avd->allowed = 0xffffffff;
977 goto out;
993} 978}
994 979
995/* 980/*
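
The refactor replaces the old error-returning path with a fail-closed convention: avd_init() resets the decision to deny-everything/audit-every-denial up front, the permissive bit is taken from the source context as soon as it is found, and any lookup failure simply leaves the decision empty. From a caller's point of view (ssid, tsid, tclass and requested are placeholders):

        struct av_decision avd;

        security_compute_av(ssid, tsid, tclass, &avd);
        if (!(avd.allowed & requested)) {
                /* denied: either policy forbids the access, or one of the
                 * SIDs / the class was unrecognized and avd stayed empty */
        }
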
@@ -1565,7 +1550,10 @@ static int clone_sid(u32 sid,
1565{ 1550{
1566 struct sidtab *s = arg; 1551 struct sidtab *s = arg;
1567 1552
1568 return sidtab_insert(s, sid, context); 1553 if (sid > SECINITSID_NUM)
1554 return sidtab_insert(s, sid, context);
1555 else
1556 return 0;
1569} 1557}
1570 1558
1571static inline int convert_context_handle_invalid_context(struct context *context) 1559static inline int convert_context_handle_invalid_context(struct context *context)
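
clone_sid() skips the reserved range because the new SID table is seeded with the new policy's own initial SIDs by policydb_load_isids() (see the security_load_policy() hunk below); copying the old entries over them would undo that. Stated as a hypothetical helper:

/* Sketch: the reserved block makes "is this SID runtime-allocated?"
 * a single compare; only such SIDs are copied or converted. */
static inline bool sid_is_dynamic(u32 sid)
{
        return sid > SECINITSID_NUM;
}
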
@@ -1606,12 +1594,17 @@ static int convert_context(u32 key,
1606{ 1594{
1607 struct convert_context_args *args; 1595 struct convert_context_args *args;
1608 struct context oldc; 1596 struct context oldc;
1597 struct ocontext *oc;
1598 struct mls_range *range;
1609 struct role_datum *role; 1599 struct role_datum *role;
1610 struct type_datum *typdatum; 1600 struct type_datum *typdatum;
1611 struct user_datum *usrdatum; 1601 struct user_datum *usrdatum;
1612 char *s; 1602 char *s;
1613 u32 len; 1603 u32 len;
1614 int rc; 1604 int rc = 0;
1605
1606 if (key <= SECINITSID_NUM)
1607 goto out;
1615 1608
1616 args = p; 1609 args = p;
1617 1610
@@ -1673,9 +1666,39 @@ static int convert_context(u32 key,
1673 goto bad; 1666 goto bad;
1674 c->type = typdatum->value; 1667 c->type = typdatum->value;
1675 1668
1676 rc = mls_convert_context(args->oldp, args->newp, c); 1669 /* Convert the MLS fields if dealing with MLS policies */
1677 if (rc) 1670 if (args->oldp->mls_enabled && args->newp->mls_enabled) {
1678 goto bad; 1671 rc = mls_convert_context(args->oldp, args->newp, c);
1672 if (rc)
1673 goto bad;
1674 } else if (args->oldp->mls_enabled && !args->newp->mls_enabled) {
1675 /*
1676 * Switching between MLS and non-MLS policy:
1677 * free any storage used by the MLS fields in the
1678 * context for all existing entries in the sidtab.
1679 */
1680 mls_context_destroy(c);
1681 } else if (!args->oldp->mls_enabled && args->newp->mls_enabled) {
1682 /*
1683 * Switching between non-MLS and MLS policy:
1684 * ensure that the MLS fields of the context for all
1685 * existing entries in the sidtab are filled in with a
1686 * suitable default value, likely taken from one of the
1687 * initial SIDs.
1688 */
1689 oc = args->newp->ocontexts[OCON_ISID];
1690 while (oc && oc->sid[0] != SECINITSID_UNLABELED)
1691 oc = oc->next;
1692 if (!oc) {
1693 printk(KERN_ERR "SELinux: unable to look up"
1694 " the initial SIDs list\n");
1695 goto bad;
1696 }
1697 range = &oc->context[0].range;
1698 rc = mls_range_set(c, range);
1699 if (rc)
1700 goto bad;
1701 }
1679 1702
1680 /* Check the validity of the new context. */ 1703 /* Check the validity of the new context. */
1681 if (!policydb_context_isvalid(args->newp, c)) { 1704 if (!policydb_context_isvalid(args->newp, c)) {
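
convert_context() now covers all four reload combinations instead of assuming the MLS status never changes. Condensed into a sketch (old_mls and new_mls stand for the two policydbs' mls_enabled flags, default_range for the range looked up from SECINITSID_UNLABELED):

        if (old_mls && new_mls)
                rc = mls_convert_context(oldp, newp, c); /* remap sens/cats */
        else if (old_mls && !new_mls)
                mls_context_destroy(c);                  /* free MLS storage */
        else if (!old_mls && new_mls)
                rc = mls_range_set(c, default_range);    /* fill in default */
        /* else: non-MLS to non-MLS, nothing to convert */
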
@@ -1771,9 +1794,17 @@ int security_load_policy(void *data, size_t len)
1771 if (policydb_read(&newpolicydb, fp)) 1794 if (policydb_read(&newpolicydb, fp))
1772 return -EINVAL; 1795 return -EINVAL;
1773 1796
1774 if (sidtab_init(&newsidtab)) { 1797 /* If switching between different policy types, log MLS status */
1798 if (policydb.mls_enabled && !newpolicydb.mls_enabled)
1799 printk(KERN_INFO "SELinux: Disabling MLS support...\n");
1800 else if (!policydb.mls_enabled && newpolicydb.mls_enabled)
1801 printk(KERN_INFO "SELinux: Enabling MLS support...\n");
1802
1803 rc = policydb_load_isids(&newpolicydb, &newsidtab);
1804 if (rc) {
1805 printk(KERN_ERR "SELinux: unable to load the initial SIDs\n");
1775 policydb_destroy(&newpolicydb); 1806 policydb_destroy(&newpolicydb);
1776 return -ENOMEM; 1807 return rc;
1777 } 1808 }
1778 1809
1779 if (selinux_set_mapping(&newpolicydb, secclass_map, 1810 if (selinux_set_mapping(&newpolicydb, secclass_map,
@@ -1800,8 +1831,12 @@ int security_load_policy(void *data, size_t len)
1800 args.oldp = &policydb; 1831 args.oldp = &policydb;
1801 args.newp = &newpolicydb; 1832 args.newp = &newpolicydb;
1802 rc = sidtab_map(&newsidtab, convert_context, &args); 1833 rc = sidtab_map(&newsidtab, convert_context, &args);
1803 if (rc) 1834 if (rc) {
1835 printk(KERN_ERR "SELinux: unable to convert the internal"
1836 " representation of contexts in the new SID"
1837 " table\n");
1804 goto err; 1838 goto err;
1839 }
1805 1840
1806 /* Save the old policydb and SID table to free later. */ 1841 /* Save the old policydb and SID table to free later. */
1807 memcpy(&oldpolicydb, &policydb, sizeof policydb); 1842 memcpy(&oldpolicydb, &policydb, sizeof policydb);
@@ -2397,7 +2432,7 @@ int security_sid_mls_copy(u32 sid, u32 mls_sid, u32 *new_sid)
2397 u32 len; 2432 u32 len;
2398 int rc = 0; 2433 int rc = 0;
2399 2434
2400 if (!ss_initialized || !selinux_mls_enabled) { 2435 if (!ss_initialized || !policydb.mls_enabled) {
2401 *new_sid = sid; 2436 *new_sid = sid;
2402 goto out; 2437 goto out;
2403 } 2438 }
@@ -2498,7 +2533,7 @@ int security_net_peersid_resolve(u32 nlbl_sid, u32 nlbl_type,
2498 /* we don't need to check ss_initialized here since the only way both 2533 /* we don't need to check ss_initialized here since the only way both
2499 * nlbl_sid and xfrm_sid are not equal to SECSID_NULL would be if the 2534 * nlbl_sid and xfrm_sid are not equal to SECSID_NULL would be if the
2500 * security server was initialized and ss_initialized was true */ 2535 * security server was initialized and ss_initialized was true */
2501 if (!selinux_mls_enabled) { 2536 if (!policydb.mls_enabled) {
2502 *peer_sid = SECSID_NULL; 2537 *peer_sid = SECSID_NULL;
2503 return 0; 2538 return 0;
2504 } 2539 }
@@ -2555,7 +2590,7 @@ int security_get_classes(char ***classes, int *nclasses)
2555 read_lock(&policy_rwlock); 2590 read_lock(&policy_rwlock);
2556 2591
2557 *nclasses = policydb.p_classes.nprim; 2592 *nclasses = policydb.p_classes.nprim;
2558 *classes = kcalloc(*nclasses, sizeof(*classes), GFP_ATOMIC); 2593 *classes = kcalloc(*nclasses, sizeof(**classes), GFP_ATOMIC);
2559 if (!*classes) 2594 if (!*classes)
2560 goto out; 2595 goto out;
2561 2596
@@ -2602,7 +2637,7 @@ int security_get_permissions(char *class, char ***perms, int *nperms)
2602 } 2637 }
2603 2638
2604 *nperms = match->permissions.nprim; 2639 *nperms = match->permissions.nprim;
2605 *perms = kcalloc(*nperms, sizeof(*perms), GFP_ATOMIC); 2640 *perms = kcalloc(*nperms, sizeof(**perms), GFP_ATOMIC);
2606 if (!*perms) 2641 if (!*perms)
2607 goto out; 2642 goto out;
2608 2643
diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
index 529c9ca65878..5225e668dbf0 100644
--- a/security/smack/smack_lsm.c
+++ b/security/smack/smack_lsm.c
@@ -157,12 +157,12 @@ static int smack_ptrace_traceme(struct task_struct *ptp)
157 * 157 *
158 * Returns 0 on success, error code otherwise. 158 * Returns 0 on success, error code otherwise.
159 */ 159 */
160static int smack_syslog(int type) 160static int smack_syslog(int type, bool from_file)
161{ 161{
162 int rc; 162 int rc;
163 char *sp = current_security(); 163 char *sp = current_security();
164 164
165 rc = cap_syslog(type); 165 rc = cap_syslog(type, from_file);
166 if (rc != 0) 166 if (rc != 0)
167 return rc; 167 return rc;
168 168
@@ -387,7 +387,7 @@ static int smack_sb_umount(struct vfsmount *mnt, int flags)
387 struct smk_audit_info ad; 387 struct smk_audit_info ad;
388 388
389 smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_FS); 389 smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_FS);
390 smk_ad_setfield_u_fs_path_dentry(&ad, mnt->mnt_mountpoint); 390 smk_ad_setfield_u_fs_path_dentry(&ad, mnt->mnt_root);
391 smk_ad_setfield_u_fs_path_mnt(&ad, mnt); 391 smk_ad_setfield_u_fs_path_mnt(&ad, mnt);
392 392
393 sbp = mnt->mnt_sb->s_security; 393 sbp = mnt->mnt_sb->s_security;
diff --git a/security/tomoyo/Makefile b/security/tomoyo/Makefile
index 10ccd686b290..60a9e2002da1 100644
--- a/security/tomoyo/Makefile
+++ b/security/tomoyo/Makefile
@@ -1 +1 @@
obj-y = common.o realpath.o tomoyo.o domain.o file.o obj-y = common.o realpath.o tomoyo.o domain.o file.o gc.o
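
The one-line Makefile change pulls in a new gc.o, the counterpart of the locking conversion visible throughout the TOMOYO changes below: once readers walk policy lists under RCU, a deleted entry cannot be freed at the point of removal. A hypothetical sketch of the deferred-free step such a collector performs (the tomoyo_ss SRCU domain name is an assumption, not shown in this diff):

        list_del_rcu(&ptr->list);       /* unlink; readers may still see it */
        synchronize_srcu(&tomoyo_ss);   /* wait out the read-side sections */
        kfree(ptr);                     /* now provably unreachable */
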
diff --git a/security/tomoyo/common.c b/security/tomoyo/common.c
index e0d0354008b7..ff51f1026b57 100644
--- a/security/tomoyo/common.c
+++ b/security/tomoyo/common.c
@@ -12,9 +12,10 @@
12#include <linux/uaccess.h> 12#include <linux/uaccess.h>
13#include <linux/security.h> 13#include <linux/security.h>
14#include <linux/hardirq.h> 14#include <linux/hardirq.h>
15#include "realpath.h"
16#include "common.h" 15#include "common.h"
17#include "tomoyo.h" 16
17/* Lock for protecting policy. */
18DEFINE_MUTEX(tomoyo_policy_lock);
18 19
19/* Has loading policy done? */ 20/* Has loading policy done? */
20bool tomoyo_policy_loaded; 21bool tomoyo_policy_loaded;
@@ -178,14 +179,12 @@ static void tomoyo_normalize_line(unsigned char *buffer)
178 * 1 = must / -1 = must not / 0 = don't care 179 * 1 = must / -1 = must not / 0 = don't care
179 * @end_type: Should the pathname end with '/'? 180 * @end_type: Should the pathname end with '/'?
180 * 1 = must / -1 = must not / 0 = don't care 181 * 1 = must / -1 = must not / 0 = don't care
181 * @function: The name of function calling me.
182 * 182 *
183 * Check whether the given filename follows the naming rules. 183 * Check whether the given filename follows the naming rules.
184 * Returns true if @filename follows the naming rules, false otherwise. 184 * Returns true if @filename follows the naming rules, false otherwise.
185 */ 185 */
186bool tomoyo_is_correct_path(const char *filename, const s8 start_type, 186bool tomoyo_is_correct_path(const char *filename, const s8 start_type,
187 const s8 pattern_type, const s8 end_type, 187 const s8 pattern_type, const s8 end_type)
188 const char *function)
189{ 188{
190 const char *const start = filename; 189 const char *const start = filename;
191 bool in_repetition = false; 190 bool in_repetition = false;
@@ -193,7 +192,6 @@ bool tomoyo_is_correct_path(const char *filename, const s8 start_type,
193 unsigned char c; 192 unsigned char c;
194 unsigned char d; 193 unsigned char d;
195 unsigned char e; 194 unsigned char e;
196 const char *original_filename = filename;
197 195
198 if (!filename) 196 if (!filename)
199 goto out; 197 goto out;
@@ -282,25 +280,20 @@ bool tomoyo_is_correct_path(const char *filename, const s8 start_type,
282 goto out; 280 goto out;
283 return true; 281 return true;
284 out: 282 out:
285 printk(KERN_DEBUG "%s: Invalid pathname '%s'\n", function,
286 original_filename);
287 return false; 283 return false;
288} 284}
289 285
290/** 286/**
291 * tomoyo_is_correct_domain - Check whether the given domainname follows the naming rules. 287 * tomoyo_is_correct_domain - Check whether the given domainname follows the naming rules.
292 * @domainname: The domainname to check. 288 * @domainname: The domainname to check.
293 * @function: The name of function calling me.
294 * 289 *
295 * Returns true if @domainname follows the naming rules, false otherwise. 290 * Returns true if @domainname follows the naming rules, false otherwise.
296 */ 291 */
297bool tomoyo_is_correct_domain(const unsigned char *domainname, 292bool tomoyo_is_correct_domain(const unsigned char *domainname)
298 const char *function)
299{ 293{
300 unsigned char c; 294 unsigned char c;
301 unsigned char d; 295 unsigned char d;
302 unsigned char e; 296 unsigned char e;
303 const char *org_domainname = domainname;
304 297
305 if (!domainname || strncmp(domainname, TOMOYO_ROOT_NAME, 298 if (!domainname || strncmp(domainname, TOMOYO_ROOT_NAME,
306 TOMOYO_ROOT_NAME_LEN)) 299 TOMOYO_ROOT_NAME_LEN))
@@ -343,8 +336,6 @@ bool tomoyo_is_correct_domain(const unsigned char *domainname,
343 } while (*domainname); 336 } while (*domainname);
344 return true; 337 return true;
345 out: 338 out:
346 printk(KERN_DEBUG "%s: Invalid domainname '%s'\n", function,
347 org_domainname);
348 return false; 339 return false;
349} 340}
350 341
@@ -365,10 +356,9 @@ bool tomoyo_is_domain_def(const unsigned char *buffer)
365 * 356 *
366 * @domainname: The domainname to find. 357 * @domainname: The domainname to find.
367 * 358 *
368 * Caller must call down_read(&tomoyo_domain_list_lock); or
369 * down_write(&tomoyo_domain_list_lock); .
370 *
371 * Returns pointer to "struct tomoyo_domain_info" if found, NULL otherwise. 359 * Returns pointer to "struct tomoyo_domain_info" if found, NULL otherwise.
360 *
361 * Caller holds tomoyo_read_lock().
372 */ 362 */
373struct tomoyo_domain_info *tomoyo_find_domain(const char *domainname) 363struct tomoyo_domain_info *tomoyo_find_domain(const char *domainname)
374{ 364{
@@ -377,7 +367,7 @@ struct tomoyo_domain_info *tomoyo_find_domain(const char *domainname)
377 367
378 name.name = domainname; 368 name.name = domainname;
379 tomoyo_fill_path_info(&name); 369 tomoyo_fill_path_info(&name);
380 list_for_each_entry(domain, &tomoyo_domain_list, list) { 370 list_for_each_entry_rcu(domain, &tomoyo_domain_list, list) {
381 if (!domain->is_deleted && 371 if (!domain->is_deleted &&
382 !tomoyo_pathcmp(&name, domain->domainname)) 372 !tomoyo_pathcmp(&name, domain->domainname))
383 return domain; 373 return domain;
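
This is the reader half of the new locking scheme: the per-list rwsems disappear because lookups now run inside an RCU read-side critical section — the tomoyo_read_lock() the added comments keep referring to, presumably SRCU-based — while writers serialize on the new tomoyo_policy_lock. The reader pattern, sketched:

        /* Sketch of the read side; tomoyo_read_lock()/unlock() are assumed
         * to bracket an SRCU read-side critical section. */
        int idx = tomoyo_read_lock();

        list_for_each_entry_rcu(domain, &tomoyo_domain_list, list) {
                if (!domain->is_deleted &&
                    !tomoyo_pathcmp(&name, domain->domainname))
                        break;          /* valid until tomoyo_read_unlock() */
        }
        tomoyo_read_unlock(idx);
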
@@ -748,7 +738,7 @@ bool tomoyo_io_printf(struct tomoyo_io_buffer *head, const char *fmt, ...)
748 * 738 *
749 * Returns the tomoyo_realpath() of current process on success, NULL otherwise. 739 * Returns the tomoyo_realpath() of current process on success, NULL otherwise.
750 * 740 *
751 * This function uses tomoyo_alloc(), so the caller must call tomoyo_free() 741 * This function uses kzalloc(), so the caller must call kfree()
752 * if this function didn't return NULL. 742 * if this function didn't return NULL.
753 */ 743 */
754static const char *tomoyo_get_exe(void) 744static const char *tomoyo_get_exe(void)
@@ -829,6 +819,8 @@ bool tomoyo_verbose_mode(const struct tomoyo_domain_info *domain)
829 * @domain: Pointer to "struct tomoyo_domain_info". 819 * @domain: Pointer to "struct tomoyo_domain_info".
830 * 820 *
831 * Returns true if the domain is not exceeded quota, false otherwise. 821 * Returns true if the domain is not exceeded quota, false otherwise.
822 *
823 * Caller holds tomoyo_read_lock().
832 */ 824 */
833bool tomoyo_domain_quota_is_ok(struct tomoyo_domain_info * const domain) 825bool tomoyo_domain_quota_is_ok(struct tomoyo_domain_info * const domain)
834{ 826{
@@ -837,61 +829,29 @@ bool tomoyo_domain_quota_is_ok(struct tomoyo_domain_info * const domain)
837 829
838 if (!domain) 830 if (!domain)
839 return true; 831 return true;
840 down_read(&tomoyo_domain_acl_info_list_lock); 832 list_for_each_entry_rcu(ptr, &domain->acl_info_list, list) {
841 list_for_each_entry(ptr, &domain->acl_info_list, list) { 833 switch (ptr->type) {
842 if (ptr->type & TOMOYO_ACL_DELETED) 834 struct tomoyo_path_acl *acl;
843 continue; 835 u32 perm;
844 switch (tomoyo_acl_type2(ptr)) { 836 u8 i;
845 struct tomoyo_single_path_acl_record *acl1; 837 case TOMOYO_TYPE_PATH_ACL:
846 struct tomoyo_double_path_acl_record *acl2; 838 acl = container_of(ptr, struct tomoyo_path_acl, head);
847 u16 perm; 839 perm = acl->perm | (((u32) acl->perm_high) << 16);
848 case TOMOYO_TYPE_SINGLE_PATH_ACL: 840 for (i = 0; i < TOMOYO_MAX_PATH_OPERATION; i++)
849 acl1 = container_of(ptr, 841 if (perm & (1 << i))
850 struct tomoyo_single_path_acl_record, 842 count++;
851 head); 843 if (perm & (1 << TOMOYO_TYPE_READ_WRITE))
852 perm = acl1->perm; 844 count -= 2;
853 if (perm & (1 << TOMOYO_TYPE_EXECUTE_ACL))
854 count++;
855 if (perm &
856 ((1 << TOMOYO_TYPE_READ_ACL) |
857 (1 << TOMOYO_TYPE_WRITE_ACL)))
858 count++;
859 if (perm & (1 << TOMOYO_TYPE_CREATE_ACL))
860 count++;
861 if (perm & (1 << TOMOYO_TYPE_UNLINK_ACL))
862 count++;
863 if (perm & (1 << TOMOYO_TYPE_MKDIR_ACL))
864 count++;
865 if (perm & (1 << TOMOYO_TYPE_RMDIR_ACL))
866 count++;
867 if (perm & (1 << TOMOYO_TYPE_MKFIFO_ACL))
868 count++;
869 if (perm & (1 << TOMOYO_TYPE_MKSOCK_ACL))
870 count++;
871 if (perm & (1 << TOMOYO_TYPE_MKBLOCK_ACL))
872 count++;
873 if (perm & (1 << TOMOYO_TYPE_MKCHAR_ACL))
874 count++;
875 if (perm & (1 << TOMOYO_TYPE_TRUNCATE_ACL))
876 count++;
877 if (perm & (1 << TOMOYO_TYPE_SYMLINK_ACL))
878 count++;
879 if (perm & (1 << TOMOYO_TYPE_REWRITE_ACL))
880 count++;
881 break; 845 break;
882 case TOMOYO_TYPE_DOUBLE_PATH_ACL: 846 case TOMOYO_TYPE_PATH2_ACL:
883 acl2 = container_of(ptr, 847 perm = container_of(ptr, struct tomoyo_path2_acl, head)
884 struct tomoyo_double_path_acl_record, 848 ->perm;
885 head); 849 for (i = 0; i < TOMOYO_MAX_PATH2_OPERATION; i++)
886 perm = acl2->perm; 850 if (perm & (1 << i))
887 if (perm & (1 << TOMOYO_TYPE_LINK_ACL)) 851 count++;
888 count++;
889 if (perm & (1 << TOMOYO_TYPE_RENAME_ACL))
890 count++;
891 break; 852 break;
892 } 853 }
893 } 854 }
894 up_read(&tomoyo_domain_acl_info_list_lock);
895 if (count < tomoyo_check_flags(domain, TOMOYO_MAX_ACCEPT_ENTRY)) 855 if (count < tomoyo_check_flags(domain, TOMOYO_MAX_ACCEPT_ENTRY))
896 return true; 856 return true;
897 if (!domain->quota_warned) { 857 if (!domain->quota_warned) {
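
The rewritten quota walk replaces the per-permission if-cascade with a generic popcount over the ACL's permission bits, then subtracts two when the combined read/write bit is set so that allow_read/write still accounts as a single policy line. A stand-alone sketch of that rule (bit positions are illustrative):

#include <stdio.h>

#define TYPE_READ_WRITE 0       /* illustrative bit positions only */
#define TYPE_READ       1
#define TYPE_WRITE      2
#define MAX_PATH_OP     16

/* Count set permission bits, then compensate: a granted read/write
 * implies the read and write bits, which must not count again. */
static unsigned int count_perm_lines(unsigned int perm)
{
        unsigned int i, count = 0;

        for (i = 0; i < MAX_PATH_OP; i++)
                if (perm & (1u << i))
                        count++;
        if (perm & (1u << TYPE_READ_WRITE))
                count -= 2;
        return count;
}

int main(void)
{
        /* read_write + read + write is one policy line, not three */
        unsigned int perm = (1u << TYPE_READ_WRITE) |
                            (1u << TYPE_READ) | (1u << TYPE_WRITE);

        printf("%u\n", count_perm_lines(perm));         /* prints 1 */
        return 0;
}
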
@@ -923,9 +883,11 @@ static struct tomoyo_profile *tomoyo_find_or_assign_new_profile(const unsigned
923 ptr = tomoyo_profile_ptr[profile]; 883 ptr = tomoyo_profile_ptr[profile];
924 if (ptr) 884 if (ptr)
925 goto ok; 885 goto ok;
926 ptr = tomoyo_alloc_element(sizeof(*ptr)); 886 ptr = kmalloc(sizeof(*ptr), GFP_KERNEL);
927 if (!ptr) 887 if (!tomoyo_memory_ok(ptr)) {
888 kfree(ptr);
928 goto ok; 889 goto ok;
890 }
929 for (i = 0; i < TOMOYO_MAX_CONTROL_INDEX; i++) 891 for (i = 0; i < TOMOYO_MAX_CONTROL_INDEX; i++)
930 ptr->value[i] = tomoyo_control_array[i].current_value; 892 ptr->value[i] = tomoyo_control_array[i].current_value;
931 mb(); /* Avoid out-of-order execution. */ 893 mb(); /* Avoid out-of-order execution. */
@@ -966,7 +928,9 @@ static int tomoyo_write_profile(struct tomoyo_io_buffer *head)
966 return -EINVAL; 928 return -EINVAL;
967 *cp = '\0'; 929 *cp = '\0';
968 if (!strcmp(data, "COMMENT")) { 930 if (!strcmp(data, "COMMENT")) {
969 profile->comment = tomoyo_save_name(cp + 1); 931 const struct tomoyo_path_info *old_comment = profile->comment;
932 profile->comment = tomoyo_get_name(cp + 1);
933 tomoyo_put_name(old_comment);
970 return 0; 934 return 0;
971 } 935 }
972 for (i = 0; i < TOMOYO_MAX_CONTROL_INDEX; i++) { 936 for (i = 0; i < TOMOYO_MAX_CONTROL_INDEX; i++) {
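
The COMMENT handler shows the new reference-counted string API in miniature: tomoyo_get_name() returns a held reference and tomoyo_put_name() drops one, so replacing a pointer means taking the new reference first and releasing the old one afterwards:

        const struct tomoyo_path_info *old_comment = profile->comment;

        profile->comment = tomoyo_get_name(cp + 1);     /* take the new ref */
        tomoyo_put_name(old_comment);                   /* then drop the old */

Taking the new reference before dropping the old one keeps the count correct even when both resolve to the same interned string.
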
@@ -1061,27 +1025,6 @@ static int tomoyo_read_profile(struct tomoyo_io_buffer *head)
1061} 1025}
1062 1026
1063/* 1027/*
1064 * tomoyo_policy_manager_entry is a structure which is used for holding list of
1065 * domainnames or programs which are permitted to modify configuration via
1066 * /sys/kernel/security/tomoyo/ interface.
1067 * It has following fields.
1068 *
1069 * (1) "list" which is linked to tomoyo_policy_manager_list .
1070 * (2) "manager" is a domainname or a program's pathname.
1071 * (3) "is_domain" is a bool which is true if "manager" is a domainname, false
1072 * otherwise.
1073 * (4) "is_deleted" is a bool which is true if marked as deleted, false
1074 * otherwise.
1075 */
1076struct tomoyo_policy_manager_entry {
1077 struct list_head list;
1078 /* A path to program or a domainname. */
1079 const struct tomoyo_path_info *manager;
1080 bool is_domain; /* True if manager is a domainname. */
1081 bool is_deleted; /* True if this entry is deleted. */
1082};
1083
1084/*
1085 * tomoyo_policy_manager_list is used for holding list of domainnames or 1028 * tomoyo_policy_manager_list is used for holding list of domainnames or
1086 * programs which are permitted to modify configuration via 1029 * programs which are permitted to modify configuration via
1087 * /sys/kernel/security/tomoyo/ interface. 1030 * /sys/kernel/security/tomoyo/ interface.
@@ -1111,8 +1054,7 @@ struct tomoyo_policy_manager_entry {
1111 * 1054 *
1112 * # cat /sys/kernel/security/tomoyo/manager 1055 * # cat /sys/kernel/security/tomoyo/manager
1113 */ 1056 */
1114static LIST_HEAD(tomoyo_policy_manager_list); 1057LIST_HEAD(tomoyo_policy_manager_list);
1115static DECLARE_RWSEM(tomoyo_policy_manager_list_lock);
1116 1058
1117/** 1059/**
1118 * tomoyo_update_manager_entry - Add a manager entry. 1060 * tomoyo_update_manager_entry - Add a manager entry.
@@ -1121,48 +1063,50 @@ static DECLARE_RWSEM(tomoyo_policy_manager_list_lock);
1121 * @is_delete: True if it is a delete request. 1063 * @is_delete: True if it is a delete request.
1122 * 1064 *
1123 * Returns 0 on success, negative value otherwise. 1065 * Returns 0 on success, negative value otherwise.
1066 *
1067 * Caller holds tomoyo_read_lock().
1124 */ 1068 */
1125static int tomoyo_update_manager_entry(const char *manager, 1069static int tomoyo_update_manager_entry(const char *manager,
1126 const bool is_delete) 1070 const bool is_delete)
1127{ 1071{
1128 struct tomoyo_policy_manager_entry *new_entry; 1072 struct tomoyo_policy_manager_entry *entry = NULL;
1129 struct tomoyo_policy_manager_entry *ptr; 1073 struct tomoyo_policy_manager_entry *ptr;
1130 const struct tomoyo_path_info *saved_manager; 1074 const struct tomoyo_path_info *saved_manager;
1131 int error = -ENOMEM; 1075 int error = is_delete ? -ENOENT : -ENOMEM;
1132 bool is_domain = false; 1076 bool is_domain = false;
1133 1077
1134 if (tomoyo_is_domain_def(manager)) { 1078 if (tomoyo_is_domain_def(manager)) {
1135 if (!tomoyo_is_correct_domain(manager, __func__)) 1079 if (!tomoyo_is_correct_domain(manager))
1136 return -EINVAL; 1080 return -EINVAL;
1137 is_domain = true; 1081 is_domain = true;
1138 } else { 1082 } else {
1139 if (!tomoyo_is_correct_path(manager, 1, -1, -1, __func__)) 1083 if (!tomoyo_is_correct_path(manager, 1, -1, -1))
1140 return -EINVAL; 1084 return -EINVAL;
1141 } 1085 }
1142 saved_manager = tomoyo_save_name(manager); 1086 saved_manager = tomoyo_get_name(manager);
1143 if (!saved_manager) 1087 if (!saved_manager)
1144 return -ENOMEM; 1088 return -ENOMEM;
1145 down_write(&tomoyo_policy_manager_list_lock); 1089 if (!is_delete)
1146 list_for_each_entry(ptr, &tomoyo_policy_manager_list, list) { 1090 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
1091 mutex_lock(&tomoyo_policy_lock);
1092 list_for_each_entry_rcu(ptr, &tomoyo_policy_manager_list, list) {
1147 if (ptr->manager != saved_manager) 1093 if (ptr->manager != saved_manager)
1148 continue; 1094 continue;
1149 ptr->is_deleted = is_delete; 1095 ptr->is_deleted = is_delete;
1150 error = 0; 1096 error = 0;
1151 goto out; 1097 break;
1152 } 1098 }
1153 if (is_delete) { 1099 if (!is_delete && error && tomoyo_memory_ok(entry)) {
1154 error = -ENOENT; 1100 entry->manager = saved_manager;
1155 goto out; 1101 saved_manager = NULL;
1102 entry->is_domain = is_domain;
1103 list_add_tail_rcu(&entry->list, &tomoyo_policy_manager_list);
1104 entry = NULL;
1105 error = 0;
1156 } 1106 }
1157 new_entry = tomoyo_alloc_element(sizeof(*new_entry)); 1107 mutex_unlock(&tomoyo_policy_lock);
1158 if (!new_entry) 1108 tomoyo_put_name(saved_manager);
1159 goto out; 1109 kfree(entry);
1160 new_entry->manager = saved_manager;
1161 new_entry->is_domain = is_domain;
1162 list_add_tail(&new_entry->list, &tomoyo_policy_manager_list);
1163 error = 0;
1164 out:
1165 up_write(&tomoyo_policy_manager_list_lock);
1166 return error; 1110 return error;
1167} 1111}
1168 1112
@@ -1172,6 +1116,8 @@ static int tomoyo_update_manager_entry(const char *manager,
1172 * @head: Pointer to "struct tomoyo_io_buffer". 1116 * @head: Pointer to "struct tomoyo_io_buffer".
1173 * 1117 *
1174 * Returns 0 on success, negative value otherwise. 1118 * Returns 0 on success, negative value otherwise.
1119 *
1120 * Caller holds tomoyo_read_lock().
1175 */ 1121 */
1176static int tomoyo_write_manager_policy(struct tomoyo_io_buffer *head) 1122static int tomoyo_write_manager_policy(struct tomoyo_io_buffer *head)
1177{ 1123{
@@ -1191,6 +1137,8 @@ static int tomoyo_write_manager_policy(struct tomoyo_io_buffer *head)
1191 * @head: Pointer to "struct tomoyo_io_buffer". 1137 * @head: Pointer to "struct tomoyo_io_buffer".
1192 * 1138 *
1193 * Returns 0. 1139 * Returns 0.
1140 *
1141 * Caller holds tomoyo_read_lock().
1194 */ 1142 */
1195static int tomoyo_read_manager_policy(struct tomoyo_io_buffer *head) 1143static int tomoyo_read_manager_policy(struct tomoyo_io_buffer *head)
1196{ 1144{
@@ -1199,7 +1147,6 @@ static int tomoyo_read_manager_policy(struct tomoyo_io_buffer *head)
1199 1147
1200 if (head->read_eof) 1148 if (head->read_eof)
1201 return 0; 1149 return 0;
1202 down_read(&tomoyo_policy_manager_list_lock);
1203 list_for_each_cookie(pos, head->read_var2, 1150 list_for_each_cookie(pos, head->read_var2,
1204 &tomoyo_policy_manager_list) { 1151 &tomoyo_policy_manager_list) {
1205 struct tomoyo_policy_manager_entry *ptr; 1152 struct tomoyo_policy_manager_entry *ptr;
@@ -1211,7 +1158,6 @@ static int tomoyo_read_manager_policy(struct tomoyo_io_buffer *head)
1211 if (!done) 1158 if (!done)
1212 break; 1159 break;
1213 } 1160 }
1214 up_read(&tomoyo_policy_manager_list_lock);
1215 head->read_eof = done; 1161 head->read_eof = done;
1216 return 0; 1162 return 0;
1217} 1163}
@@ -1221,6 +1167,8 @@ static int tomoyo_read_manager_policy(struct tomoyo_io_buffer *head)
1221 * 1167 *
1222 * Returns true if the current process is permitted to modify policy 1168 * Returns true if the current process is permitted to modify policy
1223 * via /sys/kernel/security/tomoyo/ interface. 1169 * via /sys/kernel/security/tomoyo/ interface.
1170 *
1171 * Caller holds tomoyo_read_lock().
1224 */ 1172 */
1225static bool tomoyo_is_policy_manager(void) 1173static bool tomoyo_is_policy_manager(void)
1226{ 1174{
@@ -1234,29 +1182,25 @@ static bool tomoyo_is_policy_manager(void)
1234 return true; 1182 return true;
1235 if (!tomoyo_manage_by_non_root && (task->cred->uid || task->cred->euid)) 1183 if (!tomoyo_manage_by_non_root && (task->cred->uid || task->cred->euid))
1236 return false; 1184 return false;
1237 down_read(&tomoyo_policy_manager_list_lock); 1185 list_for_each_entry_rcu(ptr, &tomoyo_policy_manager_list, list) {
1238 list_for_each_entry(ptr, &tomoyo_policy_manager_list, list) {
1239 if (!ptr->is_deleted && ptr->is_domain 1186 if (!ptr->is_deleted && ptr->is_domain
1240 && !tomoyo_pathcmp(domainname, ptr->manager)) { 1187 && !tomoyo_pathcmp(domainname, ptr->manager)) {
1241 found = true; 1188 found = true;
1242 break; 1189 break;
1243 } 1190 }
1244 } 1191 }
1245 up_read(&tomoyo_policy_manager_list_lock);
1246 if (found) 1192 if (found)
1247 return true; 1193 return true;
1248 exe = tomoyo_get_exe(); 1194 exe = tomoyo_get_exe();
1249 if (!exe) 1195 if (!exe)
1250 return false; 1196 return false;
1251 down_read(&tomoyo_policy_manager_list_lock); 1197 list_for_each_entry_rcu(ptr, &tomoyo_policy_manager_list, list) {
1252 list_for_each_entry(ptr, &tomoyo_policy_manager_list, list) {
1253 if (!ptr->is_deleted && !ptr->is_domain 1198 if (!ptr->is_deleted && !ptr->is_domain
1254 && !strcmp(exe, ptr->manager->name)) { 1199 && !strcmp(exe, ptr->manager->name)) {
1255 found = true; 1200 found = true;
1256 break; 1201 break;
1257 } 1202 }
1258 } 1203 }
1259 up_read(&tomoyo_policy_manager_list_lock);
1260 if (!found) { /* Reduce error messages. */ 1204 if (!found) { /* Reduce error messages. */
1261 static pid_t last_pid; 1205 static pid_t last_pid;
1262 const pid_t pid = current->pid; 1206 const pid_t pid = current->pid;
@@ -1266,7 +1210,7 @@ static bool tomoyo_is_policy_manager(void)
1266 last_pid = pid; 1210 last_pid = pid;
1267 } 1211 }
1268 } 1212 }
1269 tomoyo_free(exe); 1213 kfree(exe);
1270 return found; 1214 return found;
1271} 1215}
1272 1216
@@ -1277,6 +1221,8 @@ static bool tomoyo_is_policy_manager(void)
1277 * @data: String to parse. 1221 * @data: String to parse.
1278 * 1222 *
1279 * Returns true on success, false otherwise. 1223 * Returns true on success, false otherwise.
1224 *
1225 * Caller holds tomoyo_read_lock().
1280 */ 1226 */
1281static bool tomoyo_is_select_one(struct tomoyo_io_buffer *head, 1227static bool tomoyo_is_select_one(struct tomoyo_io_buffer *head,
1282 const char *data) 1228 const char *data)
@@ -1286,17 +1232,16 @@ static bool tomoyo_is_select_one(struct tomoyo_io_buffer *head,
1286 1232
1287 if (sscanf(data, "pid=%u", &pid) == 1) { 1233 if (sscanf(data, "pid=%u", &pid) == 1) {
1288 struct task_struct *p; 1234 struct task_struct *p;
1235 rcu_read_lock();
1289 read_lock(&tasklist_lock); 1236 read_lock(&tasklist_lock);
1290 p = find_task_by_vpid(pid); 1237 p = find_task_by_vpid(pid);
1291 if (p) 1238 if (p)
1292 domain = tomoyo_real_domain(p); 1239 domain = tomoyo_real_domain(p);
1293 read_unlock(&tasklist_lock); 1240 read_unlock(&tasklist_lock);
1241 rcu_read_unlock();
1294 } else if (!strncmp(data, "domain=", 7)) { 1242 } else if (!strncmp(data, "domain=", 7)) {
1295 if (tomoyo_is_domain_def(data + 7)) { 1243 if (tomoyo_is_domain_def(data + 7))
1296 down_read(&tomoyo_domain_list_lock);
1297 domain = tomoyo_find_domain(data + 7); 1244 domain = tomoyo_find_domain(data + 7);
1298 up_read(&tomoyo_domain_list_lock);
1299 }
1300 } else 1245 } else
1301 return false; 1246 return false;
1302 head->write_var1 = domain; 1247 head->write_var1 = domain;
@@ -1310,13 +1255,11 @@ static bool tomoyo_is_select_one(struct tomoyo_io_buffer *head,
1310 if (domain) { 1255 if (domain) {
1311 struct tomoyo_domain_info *d; 1256 struct tomoyo_domain_info *d;
1312 head->read_var1 = NULL; 1257 head->read_var1 = NULL;
1313 down_read(&tomoyo_domain_list_lock); 1258 list_for_each_entry_rcu(d, &tomoyo_domain_list, list) {
1314 list_for_each_entry(d, &tomoyo_domain_list, list) {
1315 if (d == domain) 1259 if (d == domain)
1316 break; 1260 break;
1317 head->read_var1 = &d->list; 1261 head->read_var1 = &d->list;
1318 } 1262 }
1319 up_read(&tomoyo_domain_list_lock);
1320 head->read_var2 = NULL; 1263 head->read_var2 = NULL;
1321 head->read_bit = 0; 1264 head->read_bit = 0;
1322 head->read_step = 0; 1265 head->read_step = 0;
@@ -1332,6 +1275,8 @@ static bool tomoyo_is_select_one(struct tomoyo_io_buffer *head,
1332 * @domainname: The name of domain. 1275 * @domainname: The name of domain.
1333 * 1276 *
1334 * Returns 0. 1277 * Returns 0.
1278 *
1279 * Caller holds tomoyo_read_lock().
1335 */ 1280 */
1336static int tomoyo_delete_domain(char *domainname) 1281static int tomoyo_delete_domain(char *domainname)
1337{ 1282{
@@ -1340,9 +1285,9 @@ static int tomoyo_delete_domain(char *domainname)
1340 1285
1341 name.name = domainname; 1286 name.name = domainname;
1342 tomoyo_fill_path_info(&name); 1287 tomoyo_fill_path_info(&name);
1343 down_write(&tomoyo_domain_list_lock); 1288 mutex_lock(&tomoyo_policy_lock);
1344 /* Is there an active domain? */ 1289 /* Is there an active domain? */
1345 list_for_each_entry(domain, &tomoyo_domain_list, list) { 1290 list_for_each_entry_rcu(domain, &tomoyo_domain_list, list) {
1346 /* Never delete tomoyo_kernel_domain */ 1291 /* Never delete tomoyo_kernel_domain */
1347 if (domain == &tomoyo_kernel_domain) 1292 if (domain == &tomoyo_kernel_domain)
1348 continue; 1293 continue;
@@ -1352,7 +1297,7 @@ static int tomoyo_delete_domain(char *domainname)
1352 domain->is_deleted = true; 1297 domain->is_deleted = true;
1353 break; 1298 break;
1354 } 1299 }
1355 up_write(&tomoyo_domain_list_lock); 1300 mutex_unlock(&tomoyo_policy_lock);
1356 return 0; 1301 return 0;
1357} 1302}
1358 1303
@@ -1362,6 +1307,8 @@ static int tomoyo_delete_domain(char *domainname)
1362 * @head: Pointer to "struct tomoyo_io_buffer". 1307 * @head: Pointer to "struct tomoyo_io_buffer".
1363 * 1308 *
1364 * Returns 0 on success, negative value otherwise. 1309 * Returns 0 on success, negative value otherwise.
1310 *
1311 * Caller holds tomoyo_read_lock().
1365 */ 1312 */
1366static int tomoyo_write_domain_policy(struct tomoyo_io_buffer *head) 1313static int tomoyo_write_domain_policy(struct tomoyo_io_buffer *head)
1367{ 1314{
@@ -1384,11 +1331,9 @@ static int tomoyo_write_domain_policy(struct tomoyo_io_buffer *head)
1384 domain = NULL; 1331 domain = NULL;
1385 if (is_delete) 1332 if (is_delete)
1386 tomoyo_delete_domain(data); 1333 tomoyo_delete_domain(data);
1387 else if (is_select) { 1334 else if (is_select)
1388 down_read(&tomoyo_domain_list_lock);
1389 domain = tomoyo_find_domain(data); 1335 domain = tomoyo_find_domain(data);
1390 up_read(&tomoyo_domain_list_lock); 1336 else
1391 } else
1392 domain = tomoyo_find_or_assign_new_domain(data, 0); 1337 domain = tomoyo_find_or_assign_new_domain(data, 0);
1393 head->write_var1 = domain; 1338 head->write_var1 = domain;
1394 return 0; 1339 return 0;
@@ -1403,43 +1348,39 @@ static int tomoyo_write_domain_policy(struct tomoyo_io_buffer *head)
1403 return 0; 1348 return 0;
1404 } 1349 }
1405 if (!strcmp(data, TOMOYO_KEYWORD_IGNORE_GLOBAL_ALLOW_READ)) { 1350 if (!strcmp(data, TOMOYO_KEYWORD_IGNORE_GLOBAL_ALLOW_READ)) {
1406 tomoyo_set_domain_flag(domain, is_delete, 1351 domain->ignore_global_allow_read = !is_delete;
1407 TOMOYO_DOMAIN_FLAGS_IGNORE_GLOBAL_ALLOW_READ);
1408 return 0; 1352 return 0;
1409 } 1353 }
1410 return tomoyo_write_file_policy(data, domain, is_delete); 1354 return tomoyo_write_file_policy(data, domain, is_delete);
1411} 1355}
1412 1356
1413/** 1357/**
1414 * tomoyo_print_single_path_acl - Print a single path ACL entry. 1358 * tomoyo_print_path_acl - Print a single path ACL entry.
1415 * 1359 *
1416 * @head: Pointer to "struct tomoyo_io_buffer". 1360 * @head: Pointer to "struct tomoyo_io_buffer".
1417 * @ptr: Pointer to "struct tomoyo_single_path_acl_record". 1361 * @ptr: Pointer to "struct tomoyo_path_acl".
1418 * 1362 *
1419 * Returns true on success, false otherwise. 1363 * Returns true on success, false otherwise.
1420 */ 1364 */
1421static bool tomoyo_print_single_path_acl(struct tomoyo_io_buffer *head, 1365static bool tomoyo_print_path_acl(struct tomoyo_io_buffer *head,
1422 struct tomoyo_single_path_acl_record * 1366 struct tomoyo_path_acl *ptr)
1423 ptr)
1424{ 1367{
1425 int pos; 1368 int pos;
1426 u8 bit; 1369 u8 bit;
1427 const char *atmark = ""; 1370 const char *atmark = "";
1428 const char *filename; 1371 const char *filename;
1429 const u16 perm = ptr->perm; 1372 const u32 perm = ptr->perm | (((u32) ptr->perm_high) << 16);
1430 1373
1431 filename = ptr->filename->name; 1374 filename = ptr->filename->name;
1432 for (bit = head->read_bit; bit < TOMOYO_MAX_SINGLE_PATH_OPERATION; 1375 for (bit = head->read_bit; bit < TOMOYO_MAX_PATH_OPERATION; bit++) {
1433 bit++) {
1434 const char *msg; 1376 const char *msg;
1435 if (!(perm & (1 << bit))) 1377 if (!(perm & (1 << bit)))
1436 continue; 1378 continue;
1437 /* Print "read/write" instead of "read" and "write". */ 1379 /* Print "read/write" instead of "read" and "write". */
1438 if ((bit == TOMOYO_TYPE_READ_ACL || 1380 if ((bit == TOMOYO_TYPE_READ || bit == TOMOYO_TYPE_WRITE)
1439 bit == TOMOYO_TYPE_WRITE_ACL) 1381 && (perm & (1 << TOMOYO_TYPE_READ_WRITE)))
1440 && (perm & (1 << TOMOYO_TYPE_READ_WRITE_ACL)))
1441 continue; 1382 continue;
1442 msg = tomoyo_sp2keyword(bit); 1383 msg = tomoyo_path2keyword(bit);
1443 pos = head->read_avail; 1384 pos = head->read_avail;
1444 if (!tomoyo_io_printf(head, "allow_%s %s%s\n", msg, 1385 if (!tomoyo_io_printf(head, "allow_%s %s%s\n", msg,
1445 atmark, filename)) 1386 atmark, filename))
@@ -1454,16 +1395,15 @@ static bool tomoyo_print_single_path_acl(struct tomoyo_io_buffer *head,
1454} 1395}
1455 1396
1456/** 1397/**
1457 * tomoyo_print_double_path_acl - Print a double path ACL entry. 1398 * tomoyo_print_path2_acl - Print a double path ACL entry.
1458 * 1399 *
1459 * @head: Pointer to "struct tomoyo_io_buffer". 1400 * @head: Pointer to "struct tomoyo_io_buffer".
1460 * @ptr: Pointer to "struct tomoyo_double_path_acl_record". 1401 * @ptr: Pointer to "struct tomoyo_path2_acl".
1461 * 1402 *
1462 * Returns true on success, false otherwise. 1403 * Returns true on success, false otherwise.
1463 */ 1404 */
1464static bool tomoyo_print_double_path_acl(struct tomoyo_io_buffer *head, 1405static bool tomoyo_print_path2_acl(struct tomoyo_io_buffer *head,
1465 struct tomoyo_double_path_acl_record * 1406 struct tomoyo_path2_acl *ptr)
1466 ptr)
1467{ 1407{
1468 int pos; 1408 int pos;
1469 const char *atmark1 = ""; 1409 const char *atmark1 = "";
@@ -1475,12 +1415,11 @@ static bool tomoyo_print_double_path_acl(struct tomoyo_io_buffer *head,
1475 1415
1476 filename1 = ptr->filename1->name; 1416 filename1 = ptr->filename1->name;
1477 filename2 = ptr->filename2->name; 1417 filename2 = ptr->filename2->name;
1478 for (bit = head->read_bit; bit < TOMOYO_MAX_DOUBLE_PATH_OPERATION; 1418 for (bit = head->read_bit; bit < TOMOYO_MAX_PATH2_OPERATION; bit++) {
1479 bit++) {
1480 const char *msg; 1419 const char *msg;
1481 if (!(perm & (1 << bit))) 1420 if (!(perm & (1 << bit)))
1482 continue; 1421 continue;
1483 msg = tomoyo_dp2keyword(bit); 1422 msg = tomoyo_path22keyword(bit);
1484 pos = head->read_avail; 1423 pos = head->read_avail;
1485 if (!tomoyo_io_printf(head, "allow_%s %s%s %s%s\n", msg, 1424 if (!tomoyo_io_printf(head, "allow_%s %s%s %s%s\n", msg,
1486 atmark1, filename1, atmark2, filename2)) 1425 atmark1, filename1, atmark2, filename2))
@@ -1505,23 +1444,17 @@ static bool tomoyo_print_double_path_acl(struct tomoyo_io_buffer *head,
1505static bool tomoyo_print_entry(struct tomoyo_io_buffer *head, 1444static bool tomoyo_print_entry(struct tomoyo_io_buffer *head,
1506 struct tomoyo_acl_info *ptr) 1445 struct tomoyo_acl_info *ptr)
1507{ 1446{
1508 const u8 acl_type = tomoyo_acl_type2(ptr); 1447 const u8 acl_type = ptr->type;
1509 1448
1510 if (acl_type & TOMOYO_ACL_DELETED) 1449 if (acl_type == TOMOYO_TYPE_PATH_ACL) {
1511 return true; 1450 struct tomoyo_path_acl *acl
1512 if (acl_type == TOMOYO_TYPE_SINGLE_PATH_ACL) { 1451 = container_of(ptr, struct tomoyo_path_acl, head);
1513 struct tomoyo_single_path_acl_record *acl 1452 return tomoyo_print_path_acl(head, acl);
1514 = container_of(ptr,
1515 struct tomoyo_single_path_acl_record,
1516 head);
1517 return tomoyo_print_single_path_acl(head, acl);
1518 } 1453 }
1519 if (acl_type == TOMOYO_TYPE_DOUBLE_PATH_ACL) { 1454 if (acl_type == TOMOYO_TYPE_PATH2_ACL) {
1520 struct tomoyo_double_path_acl_record *acl 1455 struct tomoyo_path2_acl *acl
1521 = container_of(ptr, 1456 = container_of(ptr, struct tomoyo_path2_acl, head);
1522 struct tomoyo_double_path_acl_record, 1457 return tomoyo_print_path2_acl(head, acl);
1523 head);
1524 return tomoyo_print_double_path_acl(head, acl);
1525 } 1458 }
1526 BUG(); /* This must not happen. */ 1459 BUG(); /* This must not happen. */
1527 return false; 1460 return false;
@@ -1533,6 +1466,8 @@ static bool tomoyo_print_entry(struct tomoyo_io_buffer *head,
1533 * @head: Pointer to "struct tomoyo_io_buffer". 1466 * @head: Pointer to "struct tomoyo_io_buffer".
1534 * 1467 *
1535 * Returns 0. 1468 * Returns 0.
1469 *
1470 * Caller holds tomoyo_read_lock().
1536 */ 1471 */
1537static int tomoyo_read_domain_policy(struct tomoyo_io_buffer *head) 1472static int tomoyo_read_domain_policy(struct tomoyo_io_buffer *head)
1538{ 1473{
@@ -1544,7 +1479,6 @@ static int tomoyo_read_domain_policy(struct tomoyo_io_buffer *head)
1544 return 0; 1479 return 0;
1545 if (head->read_step == 0) 1480 if (head->read_step == 0)
1546 head->read_step = 1; 1481 head->read_step = 1;
1547 down_read(&tomoyo_domain_list_lock);
1548 list_for_each_cookie(dpos, head->read_var1, &tomoyo_domain_list) { 1482 list_for_each_cookie(dpos, head->read_var1, &tomoyo_domain_list) {
1549 struct tomoyo_domain_info *domain; 1483 struct tomoyo_domain_info *domain;
1550 const char *quota_exceeded = ""; 1484 const char *quota_exceeded = "";
@@ -1558,10 +1492,9 @@ static int tomoyo_read_domain_policy(struct tomoyo_io_buffer *head)
1558 /* Print domainname and flags. */ 1492 /* Print domainname and flags. */
1559 if (domain->quota_warned) 1493 if (domain->quota_warned)
1560 quota_exceeded = "quota_exceeded\n"; 1494 quota_exceeded = "quota_exceeded\n";
1561 if (domain->flags & TOMOYO_DOMAIN_FLAGS_TRANSITION_FAILED) 1495 if (domain->transition_failed)
1562 transition_failed = "transition_failed\n"; 1496 transition_failed = "transition_failed\n";
1563 if (domain->flags & 1497 if (domain->ignore_global_allow_read)
1564 TOMOYO_DOMAIN_FLAGS_IGNORE_GLOBAL_ALLOW_READ)
1565 ignore_global_allow_read 1498 ignore_global_allow_read
1566 = TOMOYO_KEYWORD_IGNORE_GLOBAL_ALLOW_READ "\n"; 1499 = TOMOYO_KEYWORD_IGNORE_GLOBAL_ALLOW_READ "\n";
1567 done = tomoyo_io_printf(head, "%s\n" TOMOYO_KEYWORD_USE_PROFILE 1500 done = tomoyo_io_printf(head, "%s\n" TOMOYO_KEYWORD_USE_PROFILE
@@ -1577,7 +1510,6 @@ acl_loop:
1577 if (head->read_step == 3) 1510 if (head->read_step == 3)
1578 goto tail_mark; 1511 goto tail_mark;
1579 /* Print ACL entries in the domain. */ 1512 /* Print ACL entries in the domain. */
1580 down_read(&tomoyo_domain_acl_info_list_lock);
1581 list_for_each_cookie(apos, head->read_var2, 1513 list_for_each_cookie(apos, head->read_var2,
1582 &domain->acl_info_list) { 1514 &domain->acl_info_list) {
1583 struct tomoyo_acl_info *ptr 1515 struct tomoyo_acl_info *ptr
@@ -1587,7 +1519,6 @@ acl_loop:
1587 if (!done) 1519 if (!done)
1588 break; 1520 break;
1589 } 1521 }
1590 up_read(&tomoyo_domain_acl_info_list_lock);
1591 if (!done) 1522 if (!done)
1592 break; 1523 break;
1593 head->read_step = 3; 1524 head->read_step = 3;
@@ -1599,7 +1530,6 @@ tail_mark:
1599 if (head->read_single_domain) 1530 if (head->read_single_domain)
1600 break; 1531 break;
1601 } 1532 }
1602 up_read(&tomoyo_domain_list_lock);
1603 head->read_eof = done; 1533 head->read_eof = done;
1604 return 0; 1534 return 0;
1605} 1535}
@@ -1615,6 +1545,8 @@ tail_mark:
1615 * 1545 *
1616 * ( echo "select " $domainname; echo "use_profile " $profile ) | 1546 * ( echo "select " $domainname; echo "use_profile " $profile ) |
1617 * /usr/lib/ccs/loadpolicy -d 1547 * /usr/lib/ccs/loadpolicy -d
1548 *
1549 * Caller holds tomoyo_read_lock().
1618 */ 1550 */
1619static int tomoyo_write_domain_profile(struct tomoyo_io_buffer *head) 1551static int tomoyo_write_domain_profile(struct tomoyo_io_buffer *head)
1620{ 1552{
@@ -1626,9 +1558,7 @@ static int tomoyo_write_domain_profile(struct tomoyo_io_buffer *head)
1626 if (!cp) 1558 if (!cp)
1627 return -EINVAL; 1559 return -EINVAL;
1628 *cp = '\0'; 1560 *cp = '\0';
1629 down_read(&tomoyo_domain_list_lock);
1630 domain = tomoyo_find_domain(cp + 1); 1561 domain = tomoyo_find_domain(cp + 1);
1631 up_read(&tomoyo_domain_list_lock);
1632 if (strict_strtoul(data, 10, &profile)) 1562 if (strict_strtoul(data, 10, &profile))
1633 return -EINVAL; 1563 return -EINVAL;
1634 if (domain && profile < TOMOYO_MAX_PROFILES 1564 if (domain && profile < TOMOYO_MAX_PROFILES
@@ -1650,6 +1580,8 @@ static int tomoyo_write_domain_profile(struct tomoyo_io_buffer *head)
1650 * awk ' { if ( domainname == "" ) { if ( $1 == "<kernel>" ) 1580 * awk ' { if ( domainname == "" ) { if ( $1 == "<kernel>" )
1651 * domainname = $0; } else if ( $1 == "use_profile" ) { 1581 * domainname = $0; } else if ( $1 == "use_profile" ) {
1652 * print $2 " " domainname; domainname = ""; } } ; ' 1582 * print $2 " " domainname; domainname = ""; } } ; '
1583 *
1584 * Caller holds tomoyo_read_lock().
1653 */ 1585 */
1654static int tomoyo_read_domain_profile(struct tomoyo_io_buffer *head) 1586static int tomoyo_read_domain_profile(struct tomoyo_io_buffer *head)
1655{ 1587{
@@ -1658,7 +1590,6 @@ static int tomoyo_read_domain_profile(struct tomoyo_io_buffer *head)
1658 1590
1659 if (head->read_eof) 1591 if (head->read_eof)
1660 return 0; 1592 return 0;
1661 down_read(&tomoyo_domain_list_lock);
1662 list_for_each_cookie(pos, head->read_var1, &tomoyo_domain_list) { 1593 list_for_each_cookie(pos, head->read_var1, &tomoyo_domain_list) {
1663 struct tomoyo_domain_info *domain; 1594 struct tomoyo_domain_info *domain;
1664 domain = list_entry(pos, struct tomoyo_domain_info, list); 1595 domain = list_entry(pos, struct tomoyo_domain_info, list);
@@ -1669,7 +1600,6 @@ static int tomoyo_read_domain_profile(struct tomoyo_io_buffer *head)
1669 if (!done) 1600 if (!done)
1670 break; 1601 break;
1671 } 1602 }
1672 up_read(&tomoyo_domain_list_lock);
1673 head->read_eof = done; 1603 head->read_eof = done;
1674 return 0; 1604 return 0;
1675} 1605}
@@ -1707,11 +1637,13 @@ static int tomoyo_read_pid(struct tomoyo_io_buffer *head)
1707 const int pid = head->read_step; 1637 const int pid = head->read_step;
1708 struct task_struct *p; 1638 struct task_struct *p;
1709 struct tomoyo_domain_info *domain = NULL; 1639 struct tomoyo_domain_info *domain = NULL;
1640 rcu_read_lock();
1710 read_lock(&tasklist_lock); 1641 read_lock(&tasklist_lock);
1711 p = find_task_by_vpid(pid); 1642 p = find_task_by_vpid(pid);
1712 if (p) 1643 if (p)
1713 domain = tomoyo_real_domain(p); 1644 domain = tomoyo_real_domain(p);
1714 read_unlock(&tasklist_lock); 1645 read_unlock(&tasklist_lock);
1646 rcu_read_unlock();
1715 if (domain) 1647 if (domain)
1716 tomoyo_io_printf(head, "%d %u %s", pid, domain->profile, 1648 tomoyo_io_printf(head, "%d %u %s", pid, domain->profile,
1717 domain->domainname->name); 1649 domain->domainname->name);
@@ -1726,6 +1658,8 @@ static int tomoyo_read_pid(struct tomoyo_io_buffer *head)
1726 * @head: Pointer to "struct tomoyo_io_buffer". 1658 * @head: Pointer to "struct tomoyo_io_buffer".
1727 * 1659 *
1728 * Returns 0 on success, negative value otherwise. 1660 * Returns 0 on success, negative value otherwise.
1661 *
1662 * Caller holds tomoyo_read_lock().
1729 */ 1663 */
1730static int tomoyo_write_exception_policy(struct tomoyo_io_buffer *head) 1664static int tomoyo_write_exception_policy(struct tomoyo_io_buffer *head)
1731{ 1665{
@@ -1760,6 +1694,8 @@ static int tomoyo_write_exception_policy(struct tomoyo_io_buffer *head)
1760 * @head: Pointer to "struct tomoyo_io_buffer". 1694 * @head: Pointer to "struct tomoyo_io_buffer".
1761 * 1695 *
1762 * Returns 0 on success, -EINVAL otherwise. 1696 * Returns 0 on success, -EINVAL otherwise.
1697 *
1698 * Caller holds tomoyo_read_lock().
1763 */ 1699 */
1764static int tomoyo_read_exception_policy(struct tomoyo_io_buffer *head) 1700static int tomoyo_read_exception_policy(struct tomoyo_io_buffer *head)
1765{ 1701{
@@ -1889,15 +1825,13 @@ void tomoyo_load_policy(const char *filename)
1889 tomoyo_policy_loaded = true; 1825 tomoyo_policy_loaded = true;
1890 { /* Check all profiles currently assigned to domains are defined. */ 1826 { /* Check all profiles currently assigned to domains are defined. */
1891 struct tomoyo_domain_info *domain; 1827 struct tomoyo_domain_info *domain;
1892 down_read(&tomoyo_domain_list_lock); 1828 list_for_each_entry_rcu(domain, &tomoyo_domain_list, list) {
1893 list_for_each_entry(domain, &tomoyo_domain_list, list) {
1894 const u8 profile = domain->profile; 1829 const u8 profile = domain->profile;
1895 if (tomoyo_profile_ptr[profile]) 1830 if (tomoyo_profile_ptr[profile])
1896 continue; 1831 continue;
1897 panic("Profile %u (used by '%s') not defined.\n", 1832 panic("Profile %u (used by '%s') not defined.\n",
1898 profile, domain->domainname->name); 1833 profile, domain->domainname->name);
1899 } 1834 }
1900 up_read(&tomoyo_domain_list_lock);
1901 } 1835 }
1902} 1836}
1903 1837
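For readers tracking the locking change in the hunk above: the tomoyo_domain_list rwsem is gone, and the list is now traversed with list_for_each_entry_rcu() under the SRCU lock that this patch introduces in common.h. A minimal sketch of the resulting reader-side pattern; tomoyo_count_domains() is a hypothetical helper invented for illustration, not part of this patch:

        /* Sketch only: walk tomoyo_domain_list as an SRCU-protected reader. */
        static int tomoyo_count_domains(void)
        {
                struct tomoyo_domain_info *domain;
                int count = 0;
                int idx = tomoyo_read_lock();   /* wraps srcu_read_lock(&tomoyo_ss) */

                list_for_each_entry_rcu(domain, &tomoyo_domain_list, list) {
                        if (!domain->is_deleted)
                                count++;
                }
                tomoyo_read_unlock(idx);
                return count;
        }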
@@ -1945,10 +1879,12 @@ static int tomoyo_read_self_domain(struct tomoyo_io_buffer *head)
1945 * @file: Pointer to "struct file". 1879 * @file: Pointer to "struct file".
1946 * 1880 *
1947 * Associates policy handler and returns 0 on success, -ENOMEM otherwise. 1881 * Associates policy handler and returns 0 on success, -ENOMEM otherwise.
1882 *
1883 * Caller acquires tomoyo_read_lock().
1948 */ 1884 */
1949static int tomoyo_open_control(const u8 type, struct file *file) 1885static int tomoyo_open_control(const u8 type, struct file *file)
1950{ 1886{
1951 struct tomoyo_io_buffer *head = tomoyo_alloc(sizeof(*head)); 1887 struct tomoyo_io_buffer *head = kzalloc(sizeof(*head), GFP_KERNEL);
1952 1888
1953 if (!head) 1889 if (!head)
1954 return -ENOMEM; 1890 return -ENOMEM;
@@ -2009,9 +1945,9 @@ static int tomoyo_open_control(const u8 type, struct file *file)
2009 } else { 1945 } else {
2010 if (!head->readbuf_size) 1946 if (!head->readbuf_size)
2011 head->readbuf_size = 4096 * 2; 1947 head->readbuf_size = 4096 * 2;
2012 head->read_buf = tomoyo_alloc(head->readbuf_size); 1948 head->read_buf = kzalloc(head->readbuf_size, GFP_KERNEL);
2013 if (!head->read_buf) { 1949 if (!head->read_buf) {
2014 tomoyo_free(head); 1950 kfree(head);
2015 return -ENOMEM; 1951 return -ENOMEM;
2016 } 1952 }
2017 } 1953 }
@@ -2023,13 +1959,14 @@ static int tomoyo_open_control(const u8 type, struct file *file)
2023 head->write = NULL; 1959 head->write = NULL;
2024 } else if (head->write) { 1960 } else if (head->write) {
2025 head->writebuf_size = 4096 * 2; 1961 head->writebuf_size = 4096 * 2;
2026 head->write_buf = tomoyo_alloc(head->writebuf_size); 1962 head->write_buf = kzalloc(head->writebuf_size, GFP_KERNEL);
2027 if (!head->write_buf) { 1963 if (!head->write_buf) {
2028 tomoyo_free(head->read_buf); 1964 kfree(head->read_buf);
2029 tomoyo_free(head); 1965 kfree(head);
2030 return -ENOMEM; 1966 return -ENOMEM;
2031 } 1967 }
2032 } 1968 }
1969 head->reader_idx = tomoyo_read_lock();
2033 file->private_data = head; 1970 file->private_data = head;
2034 /* 1971 /*
2035 * Call the handler now if the file is 1972 * Call the handler now if the file is
@@ -2051,6 +1988,8 @@ static int tomoyo_open_control(const u8 type, struct file *file)
2051 * @buffer_len: Size of @buffer. 1988 * @buffer_len: Size of @buffer.
2052 * 1989 *
2053 * Returns bytes read on success, negative value otherwise. 1990 * Returns bytes read on success, negative value otherwise.
1991 *
1992 * Caller holds tomoyo_read_lock().
2054 */ 1993 */
2055static int tomoyo_read_control(struct file *file, char __user *buffer, 1994static int tomoyo_read_control(struct file *file, char __user *buffer,
2056 const int buffer_len) 1995 const int buffer_len)
@@ -2094,6 +2033,8 @@ static int tomoyo_read_control(struct file *file, char __user *buffer,
2094 * @buffer_len: Size of @buffer. 2033 * @buffer_len: Size of @buffer.
2095 * 2034 *
2096 * Returns @buffer_len on success, negative value otherwise. 2035 * Returns @buffer_len on success, negative value otherwise.
2036 *
2037 * Caller holds tomoyo_read_lock().
2097 */ 2038 */
2098static int tomoyo_write_control(struct file *file, const char __user *buffer, 2039static int tomoyo_write_control(struct file *file, const char __user *buffer,
2099 const int buffer_len) 2040 const int buffer_len)
@@ -2144,52 +2085,29 @@ static int tomoyo_write_control(struct file *file, const char __user *buffer,
2144 * @file: Pointer to "struct file". 2085 * @file: Pointer to "struct file".
2145 * 2086 *
2146 * Releases memory and returns 0. 2087 * Releases memory and returns 0.
2088 *
2089 * Caller releases tomoyo_read_lock().
2147 */ 2090 */
2148static int tomoyo_close_control(struct file *file) 2091static int tomoyo_close_control(struct file *file)
2149{ 2092{
2150 struct tomoyo_io_buffer *head = file->private_data; 2093 struct tomoyo_io_buffer *head = file->private_data;
2094 const bool is_write = !!head->write_buf;
2151 2095
2096 tomoyo_read_unlock(head->reader_idx);
2152 /* Release memory used for policy I/O. */ 2097 /* Release memory used for policy I/O. */
2153 tomoyo_free(head->read_buf); 2098 kfree(head->read_buf);
2154 head->read_buf = NULL; 2099 head->read_buf = NULL;
2155 tomoyo_free(head->write_buf); 2100 kfree(head->write_buf);
2156 head->write_buf = NULL; 2101 head->write_buf = NULL;
2157 tomoyo_free(head); 2102 kfree(head);
2158 head = NULL; 2103 head = NULL;
2159 file->private_data = NULL; 2104 file->private_data = NULL;
2105 if (is_write)
2106 tomoyo_run_gc();
2160 return 0; 2107 return 0;
2161} 2108}
2162 2109
2163/** 2110/**
2164 * tomoyo_alloc_acl_element - Allocate permanent memory for ACL entry.
2165 *
2166 * @acl_type: Type of ACL entry.
2167 *
2168 * Returns pointer to the ACL entry on success, NULL otherwise.
2169 */
2170void *tomoyo_alloc_acl_element(const u8 acl_type)
2171{
2172 int len;
2173 struct tomoyo_acl_info *ptr;
2174
2175 switch (acl_type) {
2176 case TOMOYO_TYPE_SINGLE_PATH_ACL:
2177 len = sizeof(struct tomoyo_single_path_acl_record);
2178 break;
2179 case TOMOYO_TYPE_DOUBLE_PATH_ACL:
2180 len = sizeof(struct tomoyo_double_path_acl_record);
2181 break;
2182 default:
2183 return NULL;
2184 }
2185 ptr = tomoyo_alloc_element(len);
2186 if (!ptr)
2187 return NULL;
2188 ptr->type = acl_type;
2189 return ptr;
2190}
2191
2192/**
2193 * tomoyo_open - open() for /sys/kernel/security/tomoyo/ interface. 2111 * tomoyo_open - open() for /sys/kernel/security/tomoyo/ interface.
2194 * 2112 *
2195 * @inode: Pointer to "struct inode". 2113 * @inode: Pointer to "struct inode".
diff --git a/security/tomoyo/common.h b/security/tomoyo/common.h
index 92169d29b2db..67bd22dd3e68 100644
--- a/security/tomoyo/common.h
+++ b/security/tomoyo/common.h
@@ -1,12 +1,9 @@
1/* 1/*
2 * security/tomoyo/common.h 2 * security/tomoyo/common.h
3 * 3 *
4 * Common functions for TOMOYO. 4 * Header file for TOMOYO.
5 *
6 * Copyright (C) 2005-2009 NTT DATA CORPORATION
7 *
8 * Version: 2.2.0 2009/04/01
9 * 5 *
6 * Copyright (C) 2005-2010 NTT DATA CORPORATION
10 */ 7 */
11 8
12#ifndef _SECURITY_TOMOYO_COMMON_H 9#ifndef _SECURITY_TOMOYO_COMMON_H
@@ -22,9 +19,119 @@
22#include <linux/namei.h> 19#include <linux/namei.h>
23#include <linux/mount.h> 20#include <linux/mount.h>
24#include <linux/list.h> 21#include <linux/list.h>
22#include <linux/cred.h>
23struct linux_binprm;
24
25/********** Constants definitions. **********/
26
27/*
28 * TOMOYO uses this hash only when appending a string into the string
29 * table. Strings are appended infrequently, so we do not need a large
30 * (e.g. 64k) hash size; 256 buckets are sufficient.
31 */
32#define TOMOYO_HASH_BITS 8
33#define TOMOYO_MAX_HASH (1u<<TOMOYO_HASH_BITS)
34
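For concreteness, a name lands in one of the 256 buckets by folding its hash with TOMOYO_MAX_HASH - 1. The stand-alone sketch below demonstrates only that arithmetic; toy_hash() is made up for illustration and is not TOMOYO's real hash function:

        #include <stdio.h>

        #define TOMOYO_HASH_BITS 8
        #define TOMOYO_MAX_HASH (1u << TOMOYO_HASH_BITS)

        /* Toy string hash, for illustration only. */
        static unsigned int toy_hash(const char *s)
        {
                unsigned int h = 0;
                while (*s)
                        h = h * 31 + (unsigned char)*s++;
                return h;
        }

        int main(void)
        {
                /* Fold an arbitrary hash value into the 0..255 bucket range. */
                printf("bucket = %u\n", toy_hash("/bin/sh") & (TOMOYO_MAX_HASH - 1));
                return 0;
        }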
35/*
36 * This is the max length of a token.
37 *
38 * A token consists of only ASCII printable characters.
39 * Non-printable characters in a token are represented as \ooo style
40 * octal strings. Thus, \ itself is represented as \\.
41 */
42#define TOMOYO_MAX_PATHNAME_LEN 4000
43
44/* Profile number is an integer between 0 and 255. */
45#define TOMOYO_MAX_PROFILES 256
46
47/* Keywords for ACLs. */
48#define TOMOYO_KEYWORD_ALIAS "alias "
49#define TOMOYO_KEYWORD_ALLOW_READ "allow_read "
50#define TOMOYO_KEYWORD_DELETE "delete "
51#define TOMOYO_KEYWORD_DENY_REWRITE "deny_rewrite "
52#define TOMOYO_KEYWORD_FILE_PATTERN "file_pattern "
53#define TOMOYO_KEYWORD_INITIALIZE_DOMAIN "initialize_domain "
54#define TOMOYO_KEYWORD_KEEP_DOMAIN "keep_domain "
55#define TOMOYO_KEYWORD_NO_INITIALIZE_DOMAIN "no_initialize_domain "
56#define TOMOYO_KEYWORD_NO_KEEP_DOMAIN "no_keep_domain "
57#define TOMOYO_KEYWORD_SELECT "select "
58#define TOMOYO_KEYWORD_USE_PROFILE "use_profile "
59#define TOMOYO_KEYWORD_IGNORE_GLOBAL_ALLOW_READ "ignore_global_allow_read"
60/* A domain definition starts with <kernel>. */
61#define TOMOYO_ROOT_NAME "<kernel>"
62#define TOMOYO_ROOT_NAME_LEN (sizeof(TOMOYO_ROOT_NAME) - 1)
63
64/* Index numbers for Access Controls. */
65enum tomoyo_mac_index {
66 TOMOYO_MAC_FOR_FILE, /* domain_policy.conf */
67 TOMOYO_MAX_ACCEPT_ENTRY,
68 TOMOYO_VERBOSE,
69 TOMOYO_MAX_CONTROL_INDEX
70};
71
72/* Index numbers for Access Controls. */
73enum tomoyo_acl_entry_type_index {
74 TOMOYO_TYPE_PATH_ACL,
75 TOMOYO_TYPE_PATH2_ACL,
76};
77
78/* Index numbers for File Controls. */
79
80/*
81 * TOMOYO_TYPE_READ_WRITE is special: it is kept in sync with the pair
82 * TOMOYO_TYPE_READ / TOMOYO_TYPE_WRITE. Setting both READ and WRITE
83 * automatically sets READ_WRITE, and setting READ_WRITE automatically
84 * sets both READ and WRITE. Likewise, clearing either READ or WRITE
85 * automatically clears READ_WRITE, and clearing READ_WRITE automatically
86 * clears both READ and WRITE.
87 */
88
89enum tomoyo_path_acl_index {
90 TOMOYO_TYPE_READ_WRITE,
91 TOMOYO_TYPE_EXECUTE,
92 TOMOYO_TYPE_READ,
93 TOMOYO_TYPE_WRITE,
94 TOMOYO_TYPE_CREATE,
95 TOMOYO_TYPE_UNLINK,
96 TOMOYO_TYPE_MKDIR,
97 TOMOYO_TYPE_RMDIR,
98 TOMOYO_TYPE_MKFIFO,
99 TOMOYO_TYPE_MKSOCK,
100 TOMOYO_TYPE_MKBLOCK,
101 TOMOYO_TYPE_MKCHAR,
102 TOMOYO_TYPE_TRUNCATE,
103 TOMOYO_TYPE_SYMLINK,
104 TOMOYO_TYPE_REWRITE,
105 TOMOYO_TYPE_IOCTL,
106 TOMOYO_TYPE_CHMOD,
107 TOMOYO_TYPE_CHOWN,
108 TOMOYO_TYPE_CHGRP,
109 TOMOYO_TYPE_CHROOT,
110 TOMOYO_TYPE_MOUNT,
111 TOMOYO_TYPE_UMOUNT,
112 TOMOYO_MAX_PATH_OPERATION
113};
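The synchronization rule described in the comment above can be exercised in isolation. The stand-alone sketch below illustrates the derived-bit direction of that rule; sync_read_write_bit() is an invented name, and in the kernel the equivalent updates happen wherever the perm bitmask of "struct tomoyo_path_acl" is modified:

        #include <stdio.h>

        enum { TYPE_READ_WRITE, TYPE_EXECUTE, TYPE_READ, TYPE_WRITE };

        /* Keep the combined bit consistent with the READ and WRITE bits. */
        static unsigned int sync_read_write_bit(unsigned int perm)
        {
                const unsigned int rw = (1u << TYPE_READ) | (1u << TYPE_WRITE);

                if ((perm & rw) == rw)
                        perm |= 1u << TYPE_READ_WRITE;    /* both set -> combined set */
                else
                        perm &= ~(1u << TYPE_READ_WRITE); /* either cleared -> combined cleared */
                return perm;
        }

        int main(void)
        {
                unsigned int perm = (1u << TYPE_READ) | (1u << TYPE_WRITE);

                perm = sync_read_write_bit(perm);
                printf("%d\n", !!(perm & (1u << TYPE_READ_WRITE)));     /* prints 1 */
                perm &= ~(1u << TYPE_WRITE);
                perm = sync_read_write_bit(perm);
                printf("%d\n", !!(perm & (1u << TYPE_READ_WRITE)));     /* prints 0 */
                return 0;
        }

The opposite direction (setting or clearing READ_WRITE propagating to READ and WRITE) follows the same masking idea and is omitted for brevity.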
25 114
26struct dentry; 115enum tomoyo_path2_acl_index {
27struct vfsmount; 116 TOMOYO_TYPE_LINK,
117 TOMOYO_TYPE_RENAME,
118 TOMOYO_TYPE_PIVOT_ROOT,
119 TOMOYO_MAX_PATH2_OPERATION
120};
121
122enum tomoyo_securityfs_interface_index {
123 TOMOYO_DOMAINPOLICY,
124 TOMOYO_EXCEPTIONPOLICY,
125 TOMOYO_DOMAIN_STATUS,
126 TOMOYO_PROCESS_STATUS,
127 TOMOYO_MEMINFO,
128 TOMOYO_SELFDOMAIN,
129 TOMOYO_VERSION,
130 TOMOYO_PROFILE,
131 TOMOYO_MANAGER
132};
133
134/********** Structure definitions. **********/
28 135
29/* 136/*
30 * tomoyo_page_buffer is a structure which is used for holding a pathname 137 * tomoyo_page_buffer is a structure which is used for holding a pathname
@@ -66,13 +173,14 @@ struct tomoyo_path_info {
66}; 173};
67 174
68/* 175/*
69 * This is the max length of a token. 176 * tomoyo_name_entry is a structure which is used for linking
70 * 177 * "struct tomoyo_path_info" into tomoyo_name_list.
71 * A token consists of only ASCII printable characters.
72 * Non printable characters in a token is represented in \ooo style
73 * octal string. Thus, \ itself is represented as \\.
74 */ 178 */
75#define TOMOYO_MAX_PATHNAME_LEN 4000 179struct tomoyo_name_entry {
180 struct list_head list;
181 atomic_t users;
182 struct tomoyo_path_info entry;
183};
76 184
77/* 185/*
78 * tomoyo_path_info_with_data is a structure which is used for holding a 186 * tomoyo_path_info_with_data is a structure which is used for holding a
@@ -89,7 +197,7 @@ struct tomoyo_path_info {
89 * "struct tomoyo_path_info_with_data". 197 * "struct tomoyo_path_info_with_data".
90 */ 198 */
91struct tomoyo_path_info_with_data { 199struct tomoyo_path_info_with_data {
92 /* Keep "head" first, for this pointer is passed to tomoyo_free(). */ 200 /* Keep "head" first, for this pointer is passed to kfree(). */
93 struct tomoyo_path_info head; 201 struct tomoyo_path_info head;
94 char barrier1[16]; /* Safeguard for overrun. */ 202 char barrier1[16]; /* Safeguard for overrun. */
95 char body[TOMOYO_MAX_PATHNAME_LEN]; 203 char body[TOMOYO_MAX_PATHNAME_LEN];
@@ -101,30 +209,19 @@ struct tomoyo_path_info_with_data {
101 * 209 *
102 * (1) "list" which is linked to the ->acl_info_list of 210 * (1) "list" which is linked to the ->acl_info_list of
103 * "struct tomoyo_domain_info" 211 * "struct tomoyo_domain_info"
104 * (2) "type" which tells 212 * (2) "type" which tells the type of the entry (either
105 * (a) type & 0x7F : type of the entry (either 213 * "struct tomoyo_path_acl" or "struct tomoyo_path2_acl").
106 * "struct tomoyo_single_path_acl_record" or
107 * "struct tomoyo_double_path_acl_record")
108 * (b) type & 0x80 : whether the entry is marked as "deleted".
109 * 214 *
110 * Packing "struct tomoyo_acl_info" allows 215 * Packing "struct tomoyo_acl_info" allows
111 * "struct tomoyo_single_path_acl_record" to embed "u16" and 216 * "struct tomoyo_path_acl" to embed "u8" + "u16" and
112 * "struct tomoyo_double_path_acl_record" to embed "u8" 217 * "struct tomoyo_path2_acl" to embed "u8"
113 * without enlarging their structure size. 218 * without enlarging their structure size.
114 */ 219 */
115struct tomoyo_acl_info { 220struct tomoyo_acl_info {
116 struct list_head list; 221 struct list_head list;
117 /*
118 * Type of this ACL entry.
119 *
120 * MSB is is_deleted flag.
121 */
122 u8 type; 222 u8 type;
123} __packed; 223} __packed;
124 224
125/* This ACL entry is deleted. */
126#define TOMOYO_ACL_DELETED 0x80
127
128/* 225/*
129 * tomoyo_domain_info is a structure which is used for holding permissions 226 * tomoyo_domain_info is a structure which is used for holding permissions
130 * (e.g. "allow_read /lib/libc-2.5.so") given to each domain. 227 * (e.g. "allow_read /lib/libc-2.5.so") given to each domain.
@@ -138,7 +235,17 @@ struct tomoyo_acl_info {
138 * "deleted", false otherwise. 235 * "deleted", false otherwise.
139 * (6) "quota_warned" is a bool which is used for suppressing a warning message 236 * (6) "quota_warned" is a bool which is used for suppressing a warning message
140 * when learning mode has learned too many entries. 237 * when learning mode has learned too many entries.
141 * (7) "flags" which remembers this domain's attributes. 238 * (7) "ignore_global_allow_read" is a bool which is true if this domain
239 * should ignore the "allow_read" directive in the exception policy.
240 * (8) "transition_failed" is a bool which is set to true when this domain was
241 * unable to create a new domain at tomoyo_find_next_domain() because the
242 * name of the domain to be created was too long or it could not allocate
243 * memory. If set to true, one or more processes have continued execve()
244 * without a domain transition.
245 * (9) "users" is an atomic_t that holds how many "struct cred"->security
246 * are referring to this "struct tomoyo_domain_info". If is_deleted == true
247 * and users == 0, this struct will be kfree()d upon next garbage
248 * collection.
142 * 249 *
143 * A domain's lifecycle is an analogy of files on / directory. 250 * A domain's lifecycle is an analogy of files on / directory.
144 * Multiple domains with the same domainname cannot be created (as with 251 * Multiple domains with the same domainname cannot be created (as with
@@ -155,25 +262,13 @@ struct tomoyo_domain_info {
155 u8 profile; /* Profile number to use. */ 262 u8 profile; /* Profile number to use. */
156 bool is_deleted; /* Delete flag. */ 263 bool is_deleted; /* Delete flag. */
157 bool quota_warned; /* Quota warning flag. */ 264 bool quota_warned; /* Quota warning flag. */
158 /* DOMAIN_FLAGS_*. Use tomoyo_set_domain_flag() to modify. */ 265 bool ignore_global_allow_read; /* Ignore "allow_read" flag. */
159 u8 flags; 266 bool transition_failed; /* Domain transition failed flag. */
267 atomic_t users; /* Number of referring credentials. */
160}; 268};
161 269
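Taken together, fields (5) and (9) define when a domain may be reclaimed. A minimal sketch of that condition, assuming a hypothetical tomoyo_domain_collectable() helper; the real check lives in the garbage collector (tomoyo_run_gc()) added alongside this patch:

        /* Sketch: a domain is reclaimable once deleted and unreferenced. */
        static bool tomoyo_domain_collectable(const struct tomoyo_domain_info *domain)
        {
                return domain->is_deleted && !atomic_read(&domain->users);
        }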
162/* Profile number is an integer between 0 and 255. */
163#define TOMOYO_MAX_PROFILES 256
164
165/* Ignore "allow_read" directive in exception policy. */
166#define TOMOYO_DOMAIN_FLAGS_IGNORE_GLOBAL_ALLOW_READ 1
167/*
168 * This domain was unable to create a new domain at tomoyo_find_next_domain()
169 * because the name of the domain to be created was too long or
170 * it could not allocate memory.
171 * More than one process continued execve() without domain transition.
172 */
173#define TOMOYO_DOMAIN_FLAGS_TRANSITION_FAILED 2
174
175/* 270/*
176 * tomoyo_single_path_acl_record is a structure which is used for holding an 271 * tomoyo_path_acl is a structure which is used for holding an
177 * entry with a single-pathname operation (e.g. open(), mkdir()). 272 * entry with a single-pathname operation (e.g. open(), mkdir()).
178 * It has the following fields. 273 * It has the following fields.
179 * 274 *
@@ -184,18 +279,21 @@ struct tomoyo_domain_info {
184 * Directives held by this structure are "allow_read/write", "allow_execute", 279 * Directives held by this structure are "allow_read/write", "allow_execute",
185 * "allow_read", "allow_write", "allow_create", "allow_unlink", "allow_mkdir", 280 * "allow_read", "allow_write", "allow_create", "allow_unlink", "allow_mkdir",
186 * "allow_rmdir", "allow_mkfifo", "allow_mksock", "allow_mkblock", 281 * "allow_rmdir", "allow_mkfifo", "allow_mksock", "allow_mkblock",
187 * "allow_mkchar", "allow_truncate", "allow_symlink" and "allow_rewrite". 282 * "allow_mkchar", "allow_truncate", "allow_symlink", "allow_rewrite",
283 * "allow_chmod", "allow_chown", "allow_chgrp", "allow_chroot", "allow_mount"
284 * and "allow_unmount".
188 */ 285 */
189struct tomoyo_single_path_acl_record { 286struct tomoyo_path_acl {
190 struct tomoyo_acl_info head; /* type = TOMOYO_TYPE_SINGLE_PATH_ACL */ 287 struct tomoyo_acl_info head; /* type = TOMOYO_TYPE_PATH_ACL */
288 u8 perm_high;
191 u16 perm; 289 u16 perm;
192 /* Pointer to single pathname. */ 290 /* Pointer to single pathname. */
193 const struct tomoyo_path_info *filename; 291 const struct tomoyo_path_info *filename;
194}; 292};
195 293
196/* 294/*
197 * tomoyo_double_path_acl_record is a structure which is used for holding an 295 * tomoyo_path2_acl is a structure which is used for holding an
198 * entry with a two-pathname operation (i.e. link() and rename()). 296 * entry with a two-pathname operation (i.e. link(), rename() and pivot_root()).
199 * It has the following fields. 297 * It has the following fields.
200 * 298 *
201 * (1) "head" which is a "struct tomoyo_acl_info". 299 * (1) "head" which is a "struct tomoyo_acl_info".
@@ -203,10 +301,11 @@ struct tomoyo_single_path_acl_record {
203 * (3) "filename1" is the source/old pathname. 301 * (3) "filename1" is the source/old pathname.
204 * (4) "filename2" is the destination/new pathname. 302 * (4) "filename2" is the destination/new pathname.
205 * 303 *
206 * Directives held by this structure are "allow_rename" and "allow_link". 304 * Directives held by this structure are "allow_rename", "allow_link" and
305 * "allow_pivot_root".
207 */ 306 */
208struct tomoyo_double_path_acl_record { 307struct tomoyo_path2_acl {
209 struct tomoyo_acl_info head; /* type = TOMOYO_TYPE_DOUBLE_PATH_ACL */ 308 struct tomoyo_acl_info head; /* type = TOMOYO_TYPE_PATH2_ACL */
210 u8 perm; 309 u8 perm;
211 /* Pointer to single pathname. */ 310 /* Pointer to single pathname. */
212 const struct tomoyo_path_info *filename1; 311 const struct tomoyo_path_info *filename1;
@@ -214,29 +313,6 @@ struct tomoyo_double_path_acl_record {
214 const struct tomoyo_path_info *filename2; 313 const struct tomoyo_path_info *filename2;
215}; 314};
216 315
217/* Keywords for ACLs. */
218#define TOMOYO_KEYWORD_ALIAS "alias "
219#define TOMOYO_KEYWORD_ALLOW_READ "allow_read "
220#define TOMOYO_KEYWORD_DELETE "delete "
221#define TOMOYO_KEYWORD_DENY_REWRITE "deny_rewrite "
222#define TOMOYO_KEYWORD_FILE_PATTERN "file_pattern "
223#define TOMOYO_KEYWORD_INITIALIZE_DOMAIN "initialize_domain "
224#define TOMOYO_KEYWORD_KEEP_DOMAIN "keep_domain "
225#define TOMOYO_KEYWORD_NO_INITIALIZE_DOMAIN "no_initialize_domain "
226#define TOMOYO_KEYWORD_NO_KEEP_DOMAIN "no_keep_domain "
227#define TOMOYO_KEYWORD_SELECT "select "
228#define TOMOYO_KEYWORD_USE_PROFILE "use_profile "
229#define TOMOYO_KEYWORD_IGNORE_GLOBAL_ALLOW_READ "ignore_global_allow_read"
230/* A domain definition starts with <kernel>. */
231#define TOMOYO_ROOT_NAME "<kernel>"
232#define TOMOYO_ROOT_NAME_LEN (sizeof(TOMOYO_ROOT_NAME) - 1)
233
234/* Index numbers for Access Controls. */
235#define TOMOYO_MAC_FOR_FILE 0 /* domain_policy.conf */
236#define TOMOYO_MAX_ACCEPT_ENTRY 1
237#define TOMOYO_VERBOSE 2
238#define TOMOYO_MAX_CONTROL_INDEX 3
239
240/* 316/*
241 * tomoyo_io_buffer is a structure which is used for reading and modifying 317 * tomoyo_io_buffer is a structure which is used for reading and modifying
242 * configuration via /sys/kernel/security/tomoyo/ interface. 318 * configuration via /sys/kernel/security/tomoyo/ interface.
@@ -265,6 +341,8 @@ struct tomoyo_io_buffer {
265 int (*write) (struct tomoyo_io_buffer *); 341 int (*write) (struct tomoyo_io_buffer *);
266 /* Exclusive lock for this structure. */ 342 /* Exclusive lock for this structure. */
267 struct mutex io_sem; 343 struct mutex io_sem;
344 /* Index returned by tomoyo_read_lock(). */
345 int reader_idx;
268 /* The position currently reading from. */ 346 /* The position currently reading from. */
269 struct list_head *read_var1; 347 struct list_head *read_var1;
270 /* Extra variables for reading. */ 348 /* Extra variables for reading. */
@@ -293,18 +371,159 @@ struct tomoyo_io_buffer {
293 int writebuf_size; 371 int writebuf_size;
294}; 372};
295 373
374/*
375 * tomoyo_globally_readable_file_entry is a structure which is used for holding
376 * "allow_read" entries.
377 * It has the following fields.
378 *
379 * (1) "list" which is linked to tomoyo_globally_readable_list.
380 * (2) "filename" is a pathname which is allowed to open(O_RDONLY).
381 * (3) "is_deleted" is a bool which is true if marked as deleted, false
382 * otherwise.
383 */
384struct tomoyo_globally_readable_file_entry {
385 struct list_head list;
386 const struct tomoyo_path_info *filename;
387 bool is_deleted;
388};
389
390/*
391 * tomoyo_pattern_entry is a structure which is used for holding
392 * "tomoyo_pattern_list" entries.
393 * It has the following fields.
394 *
395 * (1) "list" which is linked to tomoyo_pattern_list.
396 * (2) "pattern" is a pathname pattern which is used for converting pathnames
397 * to pathname patterns during learning mode.
398 * (3) "is_deleted" is a bool which is true if marked as deleted, false
399 * otherwise.
400 */
401struct tomoyo_pattern_entry {
402 struct list_head list;
403 const struct tomoyo_path_info *pattern;
404 bool is_deleted;
405};
406
407/*
408 * tomoyo_no_rewrite_entry is a structure which is used for holding
409 * "deny_rewrite" entries.
410 * It has the following fields.
411 *
412 * (1) "list" which is linked to tomoyo_no_rewrite_list.
413 * (2) "pattern" is a pathname whose already existing content is not
414 * permitted to be modified by default.
415 * (3) "is_deleted" is a bool which is true if marked as deleted, false
416 * otherwise.
417 */
418struct tomoyo_no_rewrite_entry {
419 struct list_head list;
420 const struct tomoyo_path_info *pattern;
421 bool is_deleted;
422};
423
424/*
425 * tomoyo_domain_initializer_entry is a structure which is used for holding
426 * "initialize_domain" and "no_initialize_domain" entries.
427 * It has the following fields.
428 *
429 * (1) "list" which is linked to tomoyo_domain_initializer_list.
430 * (2) "domainname" which is "a domainname" or "the last component of a
431 * domainname". This field is NULL if "from" clause is not specified.
432 * (3) "program" which is a program's pathname.
433 * (4) "is_deleted" is a bool which is true if marked as deleted, false
434 * otherwise.
435 * (5) "is_not" is a bool which is true if "no_initialize_domain", false
436 * otherwise.
437 * (6) "is_last_name" is a bool which is true if "domainname" is "the last
438 * component of a domainname", false otherwise.
439 */
440struct tomoyo_domain_initializer_entry {
441 struct list_head list;
442 const struct tomoyo_path_info *domainname; /* This may be NULL */
443 const struct tomoyo_path_info *program;
444 bool is_deleted;
445 bool is_not; /* True if this entry is "no_initialize_domain". */
446 /* True if the domainname is tomoyo_get_last_name(). */
447 bool is_last_name;
448};
449
450/*
451 * tomoyo_domain_keeper_entry is a structure which is used for holding
452 * "keep_domain" and "no_keep_domain" entries.
453 * It has the following fields.
454 *
455 * (1) "list" which is linked to tomoyo_domain_keeper_list.
456 * (2) "domainname" which is "a domainname" or "the last component of a
457 * domainname".
458 * (3) "program" which is a program's pathname.
459 * This field is NULL if "from" clause is not specified.
460 * (4) "is_deleted" is a bool which is true if marked as deleted, false
461 * otherwise.
462 * (5) "is_not" is a bool which is true if "no_keep_domain", false
463 * otherwise.
464 * (6) "is_last_name" is a bool which is true if "domainname" is "the last
465 * component of a domainname", false otherwise.
466 */
467struct tomoyo_domain_keeper_entry {
468 struct list_head list;
469 const struct tomoyo_path_info *domainname;
470 const struct tomoyo_path_info *program; /* This may be NULL */
471 bool is_deleted;
472 bool is_not; /* True if this entry is "no_keep_domain". */
473 /* True if the domainname is tomoyo_get_last_name(). */
474 bool is_last_name;
475};
476
477/*
478 * tomoyo_alias_entry is a structure which is used for holding "alias" entries.
479 * It has the following fields.
480 *
481 * (1) "list" which is linked to tomoyo_alias_list.
482 * (2) "original_name" which is a dereferenced pathname.
483 * (3) "aliased_name" which is a symlink's pathname.
484 * (4) "is_deleted" is a bool which is true if marked as deleted, false
485 * otherwise.
486 */
487struct tomoyo_alias_entry {
488 struct list_head list;
489 const struct tomoyo_path_info *original_name;
490 const struct tomoyo_path_info *aliased_name;
491 bool is_deleted;
492};
493
494/*
495 * tomoyo_policy_manager_entry is a structure which is used for holding a list of
496 * domainnames or programs which are permitted to modify configuration via
497 * /sys/kernel/security/tomoyo/ interface.
498 * It has the following fields.
499 *
500 * (1) "list" which is linked to tomoyo_policy_manager_list.
501 * (2) "manager" is a domainname or a program's pathname.
502 * (3) "is_domain" is a bool which is true if "manager" is a domainname, false
503 * otherwise.
504 * (4) "is_deleted" is a bool which is true if marked as deleted, false
505 * otherwise.
506 */
507struct tomoyo_policy_manager_entry {
508 struct list_head list;
509 /* A path to program or a domainname. */
510 const struct tomoyo_path_info *manager;
511 bool is_domain; /* True if manager is a domainname. */
512 bool is_deleted; /* True if this entry is deleted. */
513};
514
515/********** Function prototypes. **********/
516
296/* Check whether the domain has too many ACL entries to hold. */ 517/* Check whether the domain has too many ACL entries to hold. */
297bool tomoyo_domain_quota_is_ok(struct tomoyo_domain_info * const domain); 518bool tomoyo_domain_quota_is_ok(struct tomoyo_domain_info * const domain);
298/* Transactional sprintf() for policy dump. */ 519/* Transactional sprintf() for policy dump. */
299bool tomoyo_io_printf(struct tomoyo_io_buffer *head, const char *fmt, ...) 520bool tomoyo_io_printf(struct tomoyo_io_buffer *head, const char *fmt, ...)
300 __attribute__ ((format(printf, 2, 3))); 521 __attribute__ ((format(printf, 2, 3)));
301/* Check whether the domainname is correct. */ 522/* Check whether the domainname is correct. */
302bool tomoyo_is_correct_domain(const unsigned char *domainname, 523bool tomoyo_is_correct_domain(const unsigned char *domainname);
303 const char *function);
304/* Check whether the token is correct. */ 524/* Check whether the token is correct. */
305bool tomoyo_is_correct_path(const char *filename, const s8 start_type, 525bool tomoyo_is_correct_path(const char *filename, const s8 start_type,
306 const s8 pattern_type, const s8 end_type, 526 const s8 pattern_type, const s8 end_type);
307 const char *function);
308/* Check whether the token can be a domainname. */ 527/* Check whether the token can be a domainname. */
309bool tomoyo_is_domain_def(const unsigned char *buffer); 528bool tomoyo_is_domain_def(const unsigned char *buffer);
310/* Check whether the given filename matches the given pattern. */ 529/* Check whether the given filename matches the given pattern. */
@@ -328,13 +547,13 @@ bool tomoyo_read_no_rewrite_policy(struct tomoyo_io_buffer *head);
328/* Write domain policy violation warning message to console? */ 547/* Write domain policy violation warning message to console? */
329bool tomoyo_verbose_mode(const struct tomoyo_domain_info *domain); 548bool tomoyo_verbose_mode(const struct tomoyo_domain_info *domain);
330/* Convert double path operation to operation name. */ 549/* Convert double path operation to operation name. */
331const char *tomoyo_dp2keyword(const u8 operation); 550const char *tomoyo_path22keyword(const u8 operation);
332/* Get the last component of the given domainname. */ 551/* Get the last component of the given domainname. */
333const char *tomoyo_get_last_name(const struct tomoyo_domain_info *domain); 552const char *tomoyo_get_last_name(const struct tomoyo_domain_info *domain);
334/* Get warning message. */ 553/* Get warning message. */
335const char *tomoyo_get_msg(const bool is_enforce); 554const char *tomoyo_get_msg(const bool is_enforce);
336/* Convert single path operation to operation name. */ 555/* Convert single path operation to operation name. */
337const char *tomoyo_sp2keyword(const u8 operation); 556const char *tomoyo_path2keyword(const u8 operation);
338/* Create "alias" entry in exception policy. */ 557/* Create "alias" entry in exception policy. */
339int tomoyo_write_alias_policy(char *data, const bool is_delete); 558int tomoyo_write_alias_policy(char *data, const bool is_delete);
340/* 559/*
@@ -370,33 +589,107 @@ struct tomoyo_domain_info *tomoyo_find_or_assign_new_domain(const char *
370/* Check mode for specified functionality. */ 589/* Check mode for specified functionality. */
371unsigned int tomoyo_check_flags(const struct tomoyo_domain_info *domain, 590unsigned int tomoyo_check_flags(const struct tomoyo_domain_info *domain,
372 const u8 index); 591 const u8 index);
373/* Allocate memory for structures. */
374void *tomoyo_alloc_acl_element(const u8 acl_type);
375/* Fill in "struct tomoyo_path_info" members. */ 592/* Fill in "struct tomoyo_path_info" members. */
376void tomoyo_fill_path_info(struct tomoyo_path_info *ptr); 593void tomoyo_fill_path_info(struct tomoyo_path_info *ptr);
377/* Run policy loader when /sbin/init starts. */ 594/* Run policy loader when /sbin/init starts. */
378void tomoyo_load_policy(const char *filename); 595void tomoyo_load_policy(const char *filename);
379/* Change "struct tomoyo_domain_info"->flags. */
380void tomoyo_set_domain_flag(struct tomoyo_domain_info *domain,
381 const bool is_delete, const u8 flags);
382 596
383/* strcmp() for "struct tomoyo_path_info" structure. */ 597/* Convert a binary string to an ASCII string. */
384static inline bool tomoyo_pathcmp(const struct tomoyo_path_info *a, 598int tomoyo_encode(char *buffer, int buflen, const char *str);
385 const struct tomoyo_path_info *b) 599
600/* Returns realpath(3) of the given pathname but ignores chroot'ed root. */
601int tomoyo_realpath_from_path2(struct path *path, char *newname,
602 int newname_len);
603
604/*
605 * Returns realpath(3) of the given pathname but ignores chroot'ed root.
606 * These functions use kzalloc(), so the caller must call kfree()
607 * if these functions didn't return NULL.
608 */
609char *tomoyo_realpath(const char *pathname);
610/*
611 * Same as tomoyo_realpath() except that it doesn't follow the final symlink.
612 */
613char *tomoyo_realpath_nofollow(const char *pathname);
614/* Same as tomoyo_realpath() except that the pathname is already resolved. */
615char *tomoyo_realpath_from_path(struct path *path);
616
617/* Check memory quota. */
618bool tomoyo_memory_ok(void *ptr);
619
620/*
621 * Keep the given name in memory.
622 * The memory is shared, so NEVER try to modify or kfree() the returned name.
623 */
624const struct tomoyo_path_info *tomoyo_get_name(const char *name);
625
626/* Check for memory usage. */
627int tomoyo_read_memory_counter(struct tomoyo_io_buffer *head);
628
629/* Set memory quota. */
630int tomoyo_write_memory_quota(struct tomoyo_io_buffer *head);
631
632/* Initialize realpath related code. */
633void __init tomoyo_realpath_init(void);
634int tomoyo_check_exec_perm(struct tomoyo_domain_info *domain,
635 const struct tomoyo_path_info *filename);
636int tomoyo_check_open_permission(struct tomoyo_domain_info *domain,
637 struct path *path, const int flag);
638int tomoyo_path_perm(const u8 operation, struct path *path);
639int tomoyo_path2_perm(const u8 operation, struct path *path1,
640 struct path *path2);
641int tomoyo_check_rewrite_permission(struct file *filp);
642int tomoyo_find_next_domain(struct linux_binprm *bprm);
643
644/* Run garbage collector. */
645void tomoyo_run_gc(void);
646
647void tomoyo_memory_free(void *ptr);
648
649/********** External variable definitions. **********/
650
651/* Lock for GC. */
652extern struct srcu_struct tomoyo_ss;
653
654/* The list for "struct tomoyo_domain_info". */
655extern struct list_head tomoyo_domain_list;
656
657extern struct list_head tomoyo_domain_initializer_list;
658extern struct list_head tomoyo_domain_keeper_list;
659extern struct list_head tomoyo_alias_list;
660extern struct list_head tomoyo_globally_readable_list;
661extern struct list_head tomoyo_pattern_list;
662extern struct list_head tomoyo_no_rewrite_list;
663extern struct list_head tomoyo_policy_manager_list;
664extern struct list_head tomoyo_name_list[TOMOYO_MAX_HASH];
665extern struct mutex tomoyo_name_list_lock;
666
667/* Lock for protecting policy. */
668extern struct mutex tomoyo_policy_lock;
669
670/* Has /sbin/init started? */
671extern bool tomoyo_policy_loaded;
672
673/* The kernel's domain. */
674extern struct tomoyo_domain_info tomoyo_kernel_domain;
675
676/********** Inlined functions. **********/
677
678static inline int tomoyo_read_lock(void)
386{ 679{
387 return a->hash != b->hash || strcmp(a->name, b->name); 680 return srcu_read_lock(&tomoyo_ss);
388} 681}
389 682
390/* Get type of an ACL entry. */ 683static inline void tomoyo_read_unlock(int idx)
391static inline u8 tomoyo_acl_type1(struct tomoyo_acl_info *ptr)
392{ 684{
393 return ptr->type & ~TOMOYO_ACL_DELETED; 685 srcu_read_unlock(&tomoyo_ss, idx);
394} 686}
395 687
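These two wrappers pair across function boundaries rather than within one function: tomoyo_open_control() stores the returned index in head->reader_idx, and tomoyo_close_control() hands it back. A condensed sketch of that plumbing; example_open()/example_close() are invented names standing in for the real open/close handlers shown earlier:

        static int example_open(struct tomoyo_io_buffer *head)
        {
                head->reader_idx = tomoyo_read_lock(); /* srcu_read_lock(&tomoyo_ss) */
                return 0;
        }

        static void example_close(struct tomoyo_io_buffer *head)
        {
                /* Any number of read()/write() calls may happen in between. */
                tomoyo_read_unlock(head->reader_idx); /* srcu_read_unlock(&tomoyo_ss, idx) */
        }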
396/* Get type of an ACL entry. */ 688/* strcmp() for "struct tomoyo_path_info" structure. */
397static inline u8 tomoyo_acl_type2(struct tomoyo_acl_info *ptr) 689static inline bool tomoyo_pathcmp(const struct tomoyo_path_info *a,
690 const struct tomoyo_path_info *b)
398{ 691{
399 return ptr->type; 692 return a->hash != b->hash || strcmp(a->name, b->name);
400} 693}
401 694
402/** 695/**
@@ -423,18 +716,25 @@ static inline bool tomoyo_is_invalid(const unsigned char c)
423 return c && (c <= ' ' || c >= 127); 716 return c && (c <= ' ' || c >= 127);
424} 717}
425 718
426/* The list for "struct tomoyo_domain_info". */ 719static inline void tomoyo_put_name(const struct tomoyo_path_info *name)
427extern struct list_head tomoyo_domain_list; 720{
428extern struct rw_semaphore tomoyo_domain_list_lock; 721 if (name) {
429 722 struct tomoyo_name_entry *ptr =
430/* Lock for domain->acl_info_list. */ 723 container_of(name, struct tomoyo_name_entry, entry);
431extern struct rw_semaphore tomoyo_domain_acl_info_list_lock; 724 atomic_dec(&ptr->users);
725 }
726}
432 727
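tomoyo_put_name() balances tomoyo_get_name(): every successful get must eventually be matched by exactly one put so the users count can fall back to zero. A minimal usage sketch; example_name_usage() is an invented function for illustration:

        static void example_name_usage(void)
        {
                const struct tomoyo_path_info *name = tomoyo_get_name("/bin/sh");

                if (!name)
                        return;
                /* ... use the interned string; never modify or kfree() it ... */
                tomoyo_put_name(name); /* drop the reference for the GC */
        }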
433/* Has /sbin/init started? */ 728static inline struct tomoyo_domain_info *tomoyo_domain(void)
434extern bool tomoyo_policy_loaded; 729{
730 return current_cred()->security;
731}
435 732
436/* The kernel's domain. */ 733static inline struct tomoyo_domain_info *tomoyo_real_domain(struct task_struct
437extern struct tomoyo_domain_info tomoyo_kernel_domain; 734 *task)
735{
736 return task_cred_xxx(task, security);
737}
438 738
439/** 739/**
440 * list_for_each_cookie - iterate over a list with cookie. 740 * list_for_each_cookie - iterate over a list with cookie.
@@ -442,16 +742,16 @@ extern struct tomoyo_domain_info tomoyo_kernel_domain;
442 * @cookie: the &struct list_head to use as a cookie. 742 * @cookie: the &struct list_head to use as a cookie.
443 * @head: the head for your list. 743 * @head: the head for your list.
444 * 744 *
445 * Same as list_for_each() except that this primitive uses @cookie 745 * Same as list_for_each_rcu() except that this primitive uses @cookie
446 * so that we can continue iteration. 746 * so that we can continue iteration.
447 * @cookie must be NULL when iteration starts, and @cookie will become 747 * @cookie must be NULL when iteration starts, and @cookie will become
448 * NULL when iteration finishes. 748 * NULL when iteration finishes.
449 */ 749 */
450#define list_for_each_cookie(pos, cookie, head) \ 750#define list_for_each_cookie(pos, cookie, head) \
451 for (({ if (!cookie) \ 751 for (({ if (!cookie) \
452 cookie = head; }), \ 752 cookie = head; }), \
453 pos = (cookie)->next; \ 753 pos = rcu_dereference((cookie)->next); \
454 prefetch(pos->next), pos != (head) || ((cookie) = NULL); \ 754 prefetch(pos->next), pos != (head) || ((cookie) = NULL); \
455 (cookie) = pos, pos = pos->next) 755 (cookie) = pos, pos = rcu_dereference(pos->next))
456 756
457#endif /* !defined(_SECURITY_TOMOYO_COMMON_H) */ 757#endif /* !defined(_SECURITY_TOMOYO_COMMON_H) */
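Because @cookie persists across calls, a policy dump can stop when the output buffer fills and resume at the same element on the next read(). A sketch of the intended usage, modeled on tomoyo_read_domain_profile() above; example_dump_one() is a made-up stand-in for the tomoyo_io_printf() call:

        /* Hypothetical emitter: returns false when the output buffer is full. */
        static bool example_dump_one(struct tomoyo_io_buffer *head,
                                     struct tomoyo_domain_info *domain);

        static void example_resumable_dump(struct tomoyo_io_buffer *head)
        {
                struct list_head *pos;

                list_for_each_cookie(pos, head->read_var1, &tomoyo_domain_list) {
                        struct tomoyo_domain_info *domain =
                                list_entry(pos, struct tomoyo_domain_info, list);

                        if (!example_dump_one(head, domain))
                                break; /* buffer full; read_var1 still points here */
                }
        }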
diff --git a/security/tomoyo/domain.c b/security/tomoyo/domain.c
index fcf52accce2b..66caaa1b842a 100644
--- a/security/tomoyo/domain.c
+++ b/security/tomoyo/domain.c
@@ -10,8 +10,6 @@
10 */ 10 */
11 11
12#include "common.h" 12#include "common.h"
13#include "tomoyo.h"
14#include "realpath.h"
15#include <linux/binfmts.h> 13#include <linux/binfmts.h>
16 14
17/* Variables definitions.*/ 15/* Variables definitions.*/
@@ -58,99 +56,6 @@ struct tomoyo_domain_info tomoyo_kernel_domain;
58 * exceptions. 56 * exceptions.
59 */ 57 */
60LIST_HEAD(tomoyo_domain_list); 58LIST_HEAD(tomoyo_domain_list);
61DECLARE_RWSEM(tomoyo_domain_list_lock);
62
63/*
64 * tomoyo_domain_initializer_entry is a structure which is used for holding
65 * "initialize_domain" and "no_initialize_domain" entries.
66 * It has following fields.
67 *
68 * (1) "list" which is linked to tomoyo_domain_initializer_list .
69 * (2) "domainname" which is "a domainname" or "the last component of a
70 * domainname". This field is NULL if "from" clause is not specified.
71 * (3) "program" which is a program's pathname.
72 * (4) "is_deleted" is a bool which is true if marked as deleted, false
73 * otherwise.
74 * (5) "is_not" is a bool which is true if "no_initialize_domain", false
75 * otherwise.
76 * (6) "is_last_name" is a bool which is true if "domainname" is "the last
77 * component of a domainname", false otherwise.
78 */
79struct tomoyo_domain_initializer_entry {
80 struct list_head list;
81 const struct tomoyo_path_info *domainname; /* This may be NULL */
82 const struct tomoyo_path_info *program;
83 bool is_deleted;
84 bool is_not; /* True if this entry is "no_initialize_domain". */
85 /* True if the domainname is tomoyo_get_last_name(). */
86 bool is_last_name;
87};
88
89/*
90 * tomoyo_domain_keeper_entry is a structure which is used for holding
91 * "keep_domain" and "no_keep_domain" entries.
92 * It has following fields.
93 *
94 * (1) "list" which is linked to tomoyo_domain_keeper_list .
95 * (2) "domainname" which is "a domainname" or "the last component of a
96 * domainname".
97 * (3) "program" which is a program's pathname.
98 * This field is NULL if "from" clause is not specified.
99 * (4) "is_deleted" is a bool which is true if marked as deleted, false
100 * otherwise.
101 * (5) "is_not" is a bool which is true if "no_initialize_domain", false
102 * otherwise.
103 * (6) "is_last_name" is a bool which is true if "domainname" is "the last
104 * component of a domainname", false otherwise.
105 */
106struct tomoyo_domain_keeper_entry {
107 struct list_head list;
108 const struct tomoyo_path_info *domainname;
109 const struct tomoyo_path_info *program; /* This may be NULL */
110 bool is_deleted;
111 bool is_not; /* True if this entry is "no_keep_domain". */
112 /* True if the domainname is tomoyo_get_last_name(). */
113 bool is_last_name;
114};
115
116/*
117 * tomoyo_alias_entry is a structure which is used for holding "alias" entries.
118 * It has following fields.
119 *
120 * (1) "list" which is linked to tomoyo_alias_list .
121 * (2) "original_name" which is a dereferenced pathname.
122 * (3) "aliased_name" which is a symlink's pathname.
123 * (4) "is_deleted" is a bool which is true if marked as deleted, false
124 * otherwise.
125 */
126struct tomoyo_alias_entry {
127 struct list_head list;
128 const struct tomoyo_path_info *original_name;
129 const struct tomoyo_path_info *aliased_name;
130 bool is_deleted;
131};
132
133/**
134 * tomoyo_set_domain_flag - Set or clear domain's attribute flags.
135 *
136 * @domain: Pointer to "struct tomoyo_domain_info".
137 * @is_delete: True if it is a delete request.
138 * @flags: Flags to set or clear.
139 *
140 * Returns nothing.
141 */
142void tomoyo_set_domain_flag(struct tomoyo_domain_info *domain,
143 const bool is_delete, const u8 flags)
144{
145 /* We need to serialize because this is bitfield operation. */
146 static DEFINE_SPINLOCK(lock);
147 spin_lock(&lock);
148 if (!is_delete)
149 domain->flags |= flags;
150 else
151 domain->flags &= ~flags;
152 spin_unlock(&lock);
153}
154 59
155/** 60/**
156 * tomoyo_get_last_name - Get last component of a domainname. 61 * tomoyo_get_last_name - Get last component of a domainname.
@@ -205,8 +110,7 @@ const char *tomoyo_get_last_name(const struct tomoyo_domain_info *domain)
205 * will cause "/usr/sbin/httpd" to belong to "<kernel> /usr/sbin/httpd" domain 110 * will cause "/usr/sbin/httpd" to belong to "<kernel> /usr/sbin/httpd" domain
206 * unless executed from "<kernel> /etc/rc.d/init.d/httpd" domain. 111 * unless executed from "<kernel> /etc/rc.d/init.d/httpd" domain.
207 */ 112 */
208static LIST_HEAD(tomoyo_domain_initializer_list); 113LIST_HEAD(tomoyo_domain_initializer_list);
209static DECLARE_RWSEM(tomoyo_domain_initializer_list_lock);
210 114
211/** 115/**
212 * tomoyo_update_domain_initializer_entry - Update "struct tomoyo_domain_initializer_entry" list. 116 * tomoyo_update_domain_initializer_entry - Update "struct tomoyo_domain_initializer_entry" list.
@@ -217,59 +121,65 @@ static DECLARE_RWSEM(tomoyo_domain_initializer_list_lock);
217 * @is_delete: True if it is a delete request. 121 * @is_delete: True if it is a delete request.
218 * 122 *
219 * Returns 0 on success, negative value otherwise. 123 * Returns 0 on success, negative value otherwise.
124 *
125 * Caller holds tomoyo_read_lock().
220 */ 126 */
221static int tomoyo_update_domain_initializer_entry(const char *domainname, 127static int tomoyo_update_domain_initializer_entry(const char *domainname,
222 const char *program, 128 const char *program,
223 const bool is_not, 129 const bool is_not,
224 const bool is_delete) 130 const bool is_delete)
225{ 131{
226 struct tomoyo_domain_initializer_entry *new_entry; 132 struct tomoyo_domain_initializer_entry *entry = NULL;
227 struct tomoyo_domain_initializer_entry *ptr; 133 struct tomoyo_domain_initializer_entry *ptr;
228 const struct tomoyo_path_info *saved_program; 134 const struct tomoyo_path_info *saved_program = NULL;
229 const struct tomoyo_path_info *saved_domainname = NULL; 135 const struct tomoyo_path_info *saved_domainname = NULL;
230 int error = -ENOMEM; 136 int error = is_delete ? -ENOENT : -ENOMEM;
231 bool is_last_name = false; 137 bool is_last_name = false;
232 138
233 if (!tomoyo_is_correct_path(program, 1, -1, -1, __func__)) 139 if (!tomoyo_is_correct_path(program, 1, -1, -1))
234 return -EINVAL; /* No patterns allowed. */ 140 return -EINVAL; /* No patterns allowed. */
235 if (domainname) { 141 if (domainname) {
236 if (!tomoyo_is_domain_def(domainname) && 142 if (!tomoyo_is_domain_def(domainname) &&
237 tomoyo_is_correct_path(domainname, 1, -1, -1, __func__)) 143 tomoyo_is_correct_path(domainname, 1, -1, -1))
238 is_last_name = true; 144 is_last_name = true;
239 else if (!tomoyo_is_correct_domain(domainname, __func__)) 145 else if (!tomoyo_is_correct_domain(domainname))
240 return -EINVAL; 146 return -EINVAL;
241 saved_domainname = tomoyo_save_name(domainname); 147 saved_domainname = tomoyo_get_name(domainname);
242 if (!saved_domainname) 148 if (!saved_domainname)
243 return -ENOMEM; 149 goto out;
244 } 150 }
245 saved_program = tomoyo_save_name(program); 151 saved_program = tomoyo_get_name(program);
246 if (!saved_program) 152 if (!saved_program)
247 return -ENOMEM; 153 goto out;
248 down_write(&tomoyo_domain_initializer_list_lock); 154 if (!is_delete)
249 list_for_each_entry(ptr, &tomoyo_domain_initializer_list, list) { 155 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
156 mutex_lock(&tomoyo_policy_lock);
157 list_for_each_entry_rcu(ptr, &tomoyo_domain_initializer_list, list) {
250 if (ptr->is_not != is_not || 158 if (ptr->is_not != is_not ||
251 ptr->domainname != saved_domainname || 159 ptr->domainname != saved_domainname ||
252 ptr->program != saved_program) 160 ptr->program != saved_program)
253 continue; 161 continue;
254 ptr->is_deleted = is_delete; 162 ptr->is_deleted = is_delete;
255 error = 0; 163 error = 0;
256 goto out; 164 break;
257 } 165 }
258 if (is_delete) { 166 if (!is_delete && error && tomoyo_memory_ok(entry)) {
259 error = -ENOENT; 167 entry->domainname = saved_domainname;
260 goto out; 168 saved_domainname = NULL;
169 entry->program = saved_program;
170 saved_program = NULL;
171 entry->is_not = is_not;
172 entry->is_last_name = is_last_name;
173 list_add_tail_rcu(&entry->list,
174 &tomoyo_domain_initializer_list);
175 entry = NULL;
176 error = 0;
261 } 177 }
262 new_entry = tomoyo_alloc_element(sizeof(*new_entry)); 178 mutex_unlock(&tomoyo_policy_lock);
263 if (!new_entry)
264 goto out;
265 new_entry->domainname = saved_domainname;
266 new_entry->program = saved_program;
267 new_entry->is_not = is_not;
268 new_entry->is_last_name = is_last_name;
269 list_add_tail(&new_entry->list, &tomoyo_domain_initializer_list);
270 error = 0;
271 out: 179 out:
272 up_write(&tomoyo_domain_initializer_list_lock); 180 tomoyo_put_name(saved_domainname);
181 tomoyo_put_name(saved_program);
182 kfree(entry);
273 return error; 183 return error;
274} 184}
275 185
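The rewritten updater above follows a shape repeated throughout this patch: take name references and allocate the candidate node before taking tomoyo_policy_lock, toggle is_deleted on a matching entry if one exists, otherwise publish the new node with list_add_tail_rcu(), and finally release whatever was not consumed. A stripped-down sketch of that shape; struct example_entry and example_update() are illustrative, not TOMOYO structures, and the real code additionally checks tomoyo_memory_ok():

        struct example_entry {
                struct list_head list;
                const struct tomoyo_path_info *key;
                bool is_deleted;
        };

        static int example_update(struct list_head *head, struct mutex *lock,
                                  const struct tomoyo_path_info *key,
                                  const bool is_delete)
        {
                struct example_entry *entry = NULL;
                struct example_entry *ptr;
                int error = is_delete ? -ENOENT : -ENOMEM;

                if (!is_delete)
                        entry = kzalloc(sizeof(*entry), GFP_KERNEL); /* outside the lock */
                mutex_lock(lock);
                list_for_each_entry_rcu(ptr, head, list) {
                        if (ptr->key != key)
                                continue;
                        ptr->is_deleted = is_delete; /* toggle in place */
                        error = 0;
                        break;
                }
                if (!is_delete && error && entry) {
                        entry->key = key;
                        list_add_tail_rcu(&entry->list, head); /* publish to readers */
                        entry = NULL; /* ownership moved to the list */
                        error = 0;
                }
                mutex_unlock(lock);
                kfree(entry); /* no-op when the entry was inserted */
                return error;
        }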
@@ -279,13 +189,14 @@ static int tomoyo_update_domain_initializer_entry(const char *domainname,
  * @head: Pointer to "struct tomoyo_io_buffer".
  *
  * Returns true on success, false otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
  */
 bool tomoyo_read_domain_initializer_policy(struct tomoyo_io_buffer *head)
 {
 	struct list_head *pos;
 	bool done = true;
 
-	down_read(&tomoyo_domain_initializer_list_lock);
 	list_for_each_cookie(pos, head->read_var2,
 			     &tomoyo_domain_initializer_list) {
 		const char *no;
@@ -308,7 +219,6 @@ bool tomoyo_read_domain_initializer_policy(struct tomoyo_io_buffer *head)
 		if (!done)
 			break;
 	}
-	up_read(&tomoyo_domain_initializer_list_lock);
 	return done;
 }
 
@@ -320,6 +230,8 @@ bool tomoyo_read_domain_initializer_policy(struct tomoyo_io_buffer *head)
  * @is_delete: True if it is a delete request.
  *
 * Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
  */
 int tomoyo_write_domain_initializer_policy(char *data, const bool is_not,
 					   const bool is_delete)
@@ -345,6 +257,8 @@ int tomoyo_write_domain_initializer_policy(char *data, const bool is_not,
  *
 * Returns true if executing @program reinitializes domain transition,
 * false otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
  */
 static bool tomoyo_is_domain_initializer(const struct tomoyo_path_info *
 					 domainname,
@@ -355,8 +269,7 @@ static bool tomoyo_is_domain_initializer(const struct tomoyo_path_info *
 	struct tomoyo_domain_initializer_entry *ptr;
 	bool flag = false;
 
-	down_read(&tomoyo_domain_initializer_list_lock);
-	list_for_each_entry(ptr, &tomoyo_domain_initializer_list, list) {
+	list_for_each_entry_rcu(ptr, &tomoyo_domain_initializer_list, list) {
 		if (ptr->is_deleted)
 			continue;
 		if (ptr->domainname) {
@@ -376,7 +289,6 @@ static bool tomoyo_is_domain_initializer(const struct tomoyo_path_info *
 		}
 		flag = true;
 	}
-	up_read(&tomoyo_domain_initializer_list_lock);
 	return flag;
 }
 
@@ -418,8 +330,7 @@ static bool tomoyo_is_domain_initializer(const struct tomoyo_path_info *
 * "<kernel> /usr/sbin/sshd /bin/bash /usr/bin/passwd" domain, unless
 * explicitly specified by "initialize_domain".
  */
-static LIST_HEAD(tomoyo_domain_keeper_list);
-static DECLARE_RWSEM(tomoyo_domain_keeper_list_lock);
+LIST_HEAD(tomoyo_domain_keeper_list);
 
 /**
 * tomoyo_update_domain_keeper_entry - Update "struct tomoyo_domain_keeper_entry" list.
@@ -430,59 +341,64 @@ static DECLARE_RWSEM(tomoyo_domain_keeper_list_lock);
  * @is_delete: True if it is a delete request.
  *
 * Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
  */
 static int tomoyo_update_domain_keeper_entry(const char *domainname,
 					     const char *program,
 					     const bool is_not,
 					     const bool is_delete)
 {
-	struct tomoyo_domain_keeper_entry *new_entry;
+	struct tomoyo_domain_keeper_entry *entry = NULL;
 	struct tomoyo_domain_keeper_entry *ptr;
-	const struct tomoyo_path_info *saved_domainname;
+	const struct tomoyo_path_info *saved_domainname = NULL;
 	const struct tomoyo_path_info *saved_program = NULL;
-	int error = -ENOMEM;
+	int error = is_delete ? -ENOENT : -ENOMEM;
 	bool is_last_name = false;
 
 	if (!tomoyo_is_domain_def(domainname) &&
-	    tomoyo_is_correct_path(domainname, 1, -1, -1, __func__))
+	    tomoyo_is_correct_path(domainname, 1, -1, -1))
 		is_last_name = true;
-	else if (!tomoyo_is_correct_domain(domainname, __func__))
+	else if (!tomoyo_is_correct_domain(domainname))
 		return -EINVAL;
 	if (program) {
-		if (!tomoyo_is_correct_path(program, 1, -1, -1, __func__))
+		if (!tomoyo_is_correct_path(program, 1, -1, -1))
 			return -EINVAL;
-		saved_program = tomoyo_save_name(program);
+		saved_program = tomoyo_get_name(program);
 		if (!saved_program)
-			return -ENOMEM;
+			goto out;
 	}
-	saved_domainname = tomoyo_save_name(domainname);
+	saved_domainname = tomoyo_get_name(domainname);
 	if (!saved_domainname)
-		return -ENOMEM;
-	down_write(&tomoyo_domain_keeper_list_lock);
-	list_for_each_entry(ptr, &tomoyo_domain_keeper_list, list) {
+		goto out;
+	if (!is_delete)
+		entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+	mutex_lock(&tomoyo_policy_lock);
+	list_for_each_entry_rcu(ptr, &tomoyo_domain_keeper_list, list) {
 		if (ptr->is_not != is_not ||
 		    ptr->domainname != saved_domainname ||
 		    ptr->program != saved_program)
 			continue;
 		ptr->is_deleted = is_delete;
 		error = 0;
-		goto out;
+		break;
 	}
-	if (is_delete) {
-		error = -ENOENT;
-		goto out;
+	if (!is_delete && error && tomoyo_memory_ok(entry)) {
+		entry->domainname = saved_domainname;
+		saved_domainname = NULL;
+		entry->program = saved_program;
+		saved_program = NULL;
+		entry->is_not = is_not;
+		entry->is_last_name = is_last_name;
+		list_add_tail_rcu(&entry->list, &tomoyo_domain_keeper_list);
+		entry = NULL;
+		error = 0;
 	}
-	new_entry = tomoyo_alloc_element(sizeof(*new_entry));
-	if (!new_entry)
-		goto out;
-	new_entry->domainname = saved_domainname;
-	new_entry->program = saved_program;
-	new_entry->is_not = is_not;
-	new_entry->is_last_name = is_last_name;
-	list_add_tail(&new_entry->list, &tomoyo_domain_keeper_list);
-	error = 0;
+	mutex_unlock(&tomoyo_policy_lock);
  out:
-	up_write(&tomoyo_domain_keeper_list_lock);
+	tomoyo_put_name(saved_domainname);
+	tomoyo_put_name(saved_program);
+	kfree(entry);
 	return error;
 }
 
@@ -493,6 +409,7 @@ static int tomoyo_update_domain_keeper_entry(const char *domainname,
 * @is_not: True if it is "no_keep_domain" entry.
 * @is_delete: True if it is a delete request.
  *
+ * Caller holds tomoyo_read_lock().
  */
 int tomoyo_write_domain_keeper_policy(char *data, const bool is_not,
 				      const bool is_delete)
@@ -513,13 +430,14 @@ int tomoyo_write_domain_keeper_policy(char *data, const bool is_not,
  * @head: Pointer to "struct tomoyo_io_buffer".
  *
 * Returns true on success, false otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
  */
 bool tomoyo_read_domain_keeper_policy(struct tomoyo_io_buffer *head)
 {
 	struct list_head *pos;
 	bool done = true;
 
-	down_read(&tomoyo_domain_keeper_list_lock);
 	list_for_each_cookie(pos, head->read_var2,
 			     &tomoyo_domain_keeper_list) {
 		struct tomoyo_domain_keeper_entry *ptr;
@@ -542,7 +460,6 @@ bool tomoyo_read_domain_keeper_policy(struct tomoyo_io_buffer *head)
 		if (!done)
 			break;
 	}
-	up_read(&tomoyo_domain_keeper_list_lock);
 	return done;
 }
 
@@ -555,6 +472,8 @@ bool tomoyo_read_domain_keeper_policy(struct tomoyo_io_buffer *head)
  *
 * Returns true if executing @program suppresses domain transition,
 * false otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
  */
 static bool tomoyo_is_domain_keeper(const struct tomoyo_path_info *domainname,
 				    const struct tomoyo_path_info *program,
@@ -563,8 +482,7 @@ static bool tomoyo_is_domain_keeper(const struct tomoyo_path_info *domainname,
 	struct tomoyo_domain_keeper_entry *ptr;
 	bool flag = false;
 
-	down_read(&tomoyo_domain_keeper_list_lock);
-	list_for_each_entry(ptr, &tomoyo_domain_keeper_list, list) {
+	list_for_each_entry_rcu(ptr, &tomoyo_domain_keeper_list, list) {
 		if (ptr->is_deleted)
 			continue;
 		if (!ptr->is_last_name) {
@@ -582,7 +500,6 @@ static bool tomoyo_is_domain_keeper(const struct tomoyo_path_info *domainname,
 		}
 		flag = true;
 	}
-	up_read(&tomoyo_domain_keeper_list_lock);
 	return flag;
 }
 
@@ -616,8 +533,7 @@ static bool tomoyo_is_domain_keeper(const struct tomoyo_path_info *domainname,
 * /bin/busybox and domainname which the current process will belong to after
 * execve() succeeds is calculated using /bin/cat rather than /bin/busybox .
  */
-static LIST_HEAD(tomoyo_alias_list);
-static DECLARE_RWSEM(tomoyo_alias_list_lock);
+LIST_HEAD(tomoyo_alias_list);
 
 /**
 * tomoyo_update_alias_entry - Update "struct tomoyo_alias_entry" list.
@@ -627,46 +543,51 @@ static DECLARE_RWSEM(tomoyo_alias_list_lock);
  * @is_delete: True if it is a delete request.
  *
 * Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
  */
 static int tomoyo_update_alias_entry(const char *original_name,
 				     const char *aliased_name,
 				     const bool is_delete)
 {
-	struct tomoyo_alias_entry *new_entry;
+	struct tomoyo_alias_entry *entry = NULL;
 	struct tomoyo_alias_entry *ptr;
 	const struct tomoyo_path_info *saved_original_name;
 	const struct tomoyo_path_info *saved_aliased_name;
-	int error = -ENOMEM;
+	int error = is_delete ? -ENOENT : -ENOMEM;
 
-	if (!tomoyo_is_correct_path(original_name, 1, -1, -1, __func__) ||
-	    !tomoyo_is_correct_path(aliased_name, 1, -1, -1, __func__))
+	if (!tomoyo_is_correct_path(original_name, 1, -1, -1) ||
+	    !tomoyo_is_correct_path(aliased_name, 1, -1, -1))
 		return -EINVAL; /* No patterns allowed. */
-	saved_original_name = tomoyo_save_name(original_name);
-	saved_aliased_name = tomoyo_save_name(aliased_name);
+	saved_original_name = tomoyo_get_name(original_name);
+	saved_aliased_name = tomoyo_get_name(aliased_name);
 	if (!saved_original_name || !saved_aliased_name)
-		return -ENOMEM;
-	down_write(&tomoyo_alias_list_lock);
-	list_for_each_entry(ptr, &tomoyo_alias_list, list) {
+		goto out;
+	if (!is_delete)
+		entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+	mutex_lock(&tomoyo_policy_lock);
+	list_for_each_entry_rcu(ptr, &tomoyo_alias_list, list) {
 		if (ptr->original_name != saved_original_name ||
 		    ptr->aliased_name != saved_aliased_name)
 			continue;
 		ptr->is_deleted = is_delete;
 		error = 0;
-		goto out;
+		break;
 	}
-	if (is_delete) {
-		error = -ENOENT;
-		goto out;
+	if (!is_delete && error && tomoyo_memory_ok(entry)) {
+		entry->original_name = saved_original_name;
+		saved_original_name = NULL;
+		entry->aliased_name = saved_aliased_name;
+		saved_aliased_name = NULL;
+		list_add_tail_rcu(&entry->list, &tomoyo_alias_list);
+		entry = NULL;
+		error = 0;
 	}
-	new_entry = tomoyo_alloc_element(sizeof(*new_entry));
-	if (!new_entry)
-		goto out;
-	new_entry->original_name = saved_original_name;
-	new_entry->aliased_name = saved_aliased_name;
-	list_add_tail(&new_entry->list, &tomoyo_alias_list);
-	error = 0;
+	mutex_unlock(&tomoyo_policy_lock);
  out:
-	up_write(&tomoyo_alias_list_lock);
+	tomoyo_put_name(saved_original_name);
+	tomoyo_put_name(saved_aliased_name);
+	kfree(entry);
 	return error;
 }
 
@@ -676,13 +597,14 @@ static int tomoyo_update_alias_entry(const char *original_name,
  * @head: Pointer to "struct tomoyo_io_buffer".
  *
 * Returns true on success, false otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
  */
 bool tomoyo_read_alias_policy(struct tomoyo_io_buffer *head)
 {
 	struct list_head *pos;
 	bool done = true;
 
-	down_read(&tomoyo_alias_list_lock);
 	list_for_each_cookie(pos, head->read_var2, &tomoyo_alias_list) {
 		struct tomoyo_alias_entry *ptr;
 
@@ -695,7 +617,6 @@ bool tomoyo_read_alias_policy(struct tomoyo_io_buffer *head)
 		if (!done)
 			break;
 	}
-	up_read(&tomoyo_alias_list_lock);
 	return done;
 }
 
@@ -706,6 +627,8 @@ bool tomoyo_read_alias_policy(struct tomoyo_io_buffer *head)
  * @is_delete: True if it is a delete request.
  *
 * Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
  */
 int tomoyo_write_alias_policy(char *data, const bool is_delete)
 {
@@ -724,63 +647,46 @@ int tomoyo_write_alias_policy(char *data, const bool is_delete)
 * @profile: Profile number to assign if the domain was newly created.
  *
 * Returns pointer to "struct tomoyo_domain_info" on success, NULL otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
  */
 struct tomoyo_domain_info *tomoyo_find_or_assign_new_domain(const char *
 							    domainname,
 							    const u8 profile)
 {
-	struct tomoyo_domain_info *domain = NULL;
+	struct tomoyo_domain_info *entry;
+	struct tomoyo_domain_info *domain;
 	const struct tomoyo_path_info *saved_domainname;
+	bool found = false;
 
-	down_write(&tomoyo_domain_list_lock);
-	domain = tomoyo_find_domain(domainname);
-	if (domain)
-		goto out;
-	if (!tomoyo_is_correct_domain(domainname, __func__))
-		goto out;
-	saved_domainname = tomoyo_save_name(domainname);
+	if (!tomoyo_is_correct_domain(domainname))
+		return NULL;
+	saved_domainname = tomoyo_get_name(domainname);
 	if (!saved_domainname)
-		goto out;
-	/* Can I reuse memory of deleted domain? */
-	list_for_each_entry(domain, &tomoyo_domain_list, list) {
-		struct task_struct *p;
-		struct tomoyo_acl_info *ptr;
-		bool flag;
-		if (!domain->is_deleted ||
-		    domain->domainname != saved_domainname)
+		return NULL;
+	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+	mutex_lock(&tomoyo_policy_lock);
+	list_for_each_entry_rcu(domain, &tomoyo_domain_list, list) {
+		if (domain->is_deleted ||
+		    tomoyo_pathcmp(saved_domainname, domain->domainname))
 			continue;
-		flag = false;
-		read_lock(&tasklist_lock);
-		for_each_process(p) {
-			if (tomoyo_real_domain(p) != domain)
-				continue;
-			flag = true;
-			break;
-		}
-		read_unlock(&tasklist_lock);
-		if (flag)
-			continue;
-		list_for_each_entry(ptr, &domain->acl_info_list, list) {
-			ptr->type |= TOMOYO_ACL_DELETED;
-		}
-		tomoyo_set_domain_flag(domain, true, domain->flags);
-		domain->profile = profile;
-		domain->quota_warned = false;
-		mb(); /* Avoid out-of-order execution. */
-		domain->is_deleted = false;
-		goto out;
+		found = true;
+		break;
 	}
-	/* No memory reusable. Create using new memory. */
-	domain = tomoyo_alloc_element(sizeof(*domain));
-	if (domain) {
-		INIT_LIST_HEAD(&domain->acl_info_list);
-		domain->domainname = saved_domainname;
-		domain->profile = profile;
-		list_add_tail(&domain->list, &tomoyo_domain_list);
+	if (!found && tomoyo_memory_ok(entry)) {
+		INIT_LIST_HEAD(&entry->acl_info_list);
+		entry->domainname = saved_domainname;
+		saved_domainname = NULL;
+		entry->profile = profile;
+		list_add_tail_rcu(&entry->list, &tomoyo_domain_list);
+		domain = entry;
+		entry = NULL;
+		found = true;
 	}
- out:
-	up_write(&tomoyo_domain_list_lock);
-	return domain;
+	mutex_unlock(&tomoyo_policy_lock);
+	tomoyo_put_name(saved_domainname);
+	kfree(entry);
+	return found ? domain : NULL;
 }
 
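[Editor's note -- not part of the patch] The read side loses its per-list rwsems entirely: lookups such as tomoyo_find_or_assign_new_domain() above walk the lists with list_for_each_entry_rcu() and rely on the caller holding tomoyo_read_lock(), per the doc comments added throughout. A sketch of what such a caller might look like, reusing the example_entry type from the earlier note and assuming tomoyo_read_lock()/tomoyo_read_unlock() are SRCU wrappers that hand back an index (an assumption; those helpers are defined elsewhere in this series):

static bool example_is_listed(const struct tomoyo_path_info *name)
{
	struct example_entry *ptr;
	bool found = false;
	const int idx = tomoyo_read_lock();	/* assumed srcu_read_lock() wrapper */

	list_for_each_entry_rcu(ptr, &example_list, list) {
		if (ptr->is_deleted || ptr->name != name)
			continue;
		found = true;
		break;
	}
	tomoyo_read_unlock(idx);		/* assumed srcu_read_unlock() wrapper */
	return found;
}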
 /**
@@ -789,6 +695,8 @@ struct tomoyo_domain_info *tomoyo_find_or_assign_new_domain(const char *
 * @bprm: Pointer to "struct linux_binprm".
  *
 * Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
  */
 int tomoyo_find_next_domain(struct linux_binprm *bprm)
 {
@@ -796,7 +704,7 @@ int tomoyo_find_next_domain(struct linux_binprm *bprm)
	 * This function assumes that the size of buffer returned by
	 * tomoyo_realpath() = TOMOYO_MAX_PATHNAME_LEN.
	 */
-	struct tomoyo_page_buffer *tmp = tomoyo_alloc(sizeof(*tmp));
+	struct tomoyo_page_buffer *tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
 	struct tomoyo_domain_info *old_domain = tomoyo_domain();
 	struct tomoyo_domain_info *domain = NULL;
 	const char *old_domain_name = old_domain->domainname->name;
@@ -849,8 +757,7 @@
 	if (tomoyo_pathcmp(&r, &s)) {
 		struct tomoyo_alias_entry *ptr;
 		/* Is this program allowed to be called via symbolic links? */
-		down_read(&tomoyo_alias_list_lock);
-		list_for_each_entry(ptr, &tomoyo_alias_list, list) {
+		list_for_each_entry_rcu(ptr, &tomoyo_alias_list, list) {
 			if (ptr->is_deleted ||
 			    tomoyo_pathcmp(&r, ptr->original_name) ||
 			    tomoyo_pathcmp(&s, ptr->aliased_name))
@@ -861,7 +768,6 @@ int tomoyo_find_next_domain(struct linux_binprm *bprm)
 			tomoyo_fill_path_info(&r);
 			break;
 		}
-		up_read(&tomoyo_alias_list_lock);
 	}
 
 	/* Check execute permission. */
@@ -892,9 +798,7 @@
 	}
 	if (domain || strlen(new_domain_name) >= TOMOYO_MAX_PATHNAME_LEN)
 		goto done;
-	down_read(&tomoyo_domain_list_lock);
 	domain = tomoyo_find_domain(new_domain_name);
-	up_read(&tomoyo_domain_list_lock);
 	if (domain)
 		goto done;
 	if (is_enforce)
@@ -909,14 +813,15 @@
 	if (is_enforce)
 		retval = -EPERM;
 	else
-		tomoyo_set_domain_flag(old_domain, false,
-				       TOMOYO_DOMAIN_FLAGS_TRANSITION_FAILED);
+		old_domain->transition_failed = true;
  out:
 	if (!domain)
 		domain = old_domain;
+	/* Update reference count on "struct tomoyo_domain_info". */
+	atomic_inc(&domain->users);
 	bprm->cred->security = domain;
-	tomoyo_free(real_program_name);
-	tomoyo_free(symlink_program_name);
-	tomoyo_free(tmp);
+	kfree(real_program_name);
+	kfree(symlink_program_name);
+	kfree(tmp);
 	return retval;
 }
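[Editor's note -- not part of the patch] With entries now plain kfree()-able memory, lifetime must be tracked explicitly: tomoyo_find_next_domain() pins the chosen domain with atomic_inc(&domain->users) before publishing it via bprm->cred->security. The matching decrement is not in this hunk; the pairing would look roughly like this hypothetical helper (the real release point is presumably the cred-free path):

static void example_put_domain(struct tomoyo_domain_info *domain)
{
	/* Drop the pin taken in tomoyo_find_next_domain(); a deleted
	 * domain whose user count reaches zero becomes reclaimable. */
	if (domain)
		atomic_dec(&domain->users);
}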
diff --git a/security/tomoyo/file.c b/security/tomoyo/file.c
index 9a6c58881c0a..1b24304edb7d 100644
--- a/security/tomoyo/file.c
+++ b/security/tomoyo/file.c
@@ -10,108 +10,64 @@
  */
 
 #include "common.h"
-#include "tomoyo.h"
-#include "realpath.h"
-
-/*
- * tomoyo_globally_readable_file_entry is a structure which is used for holding
- * "allow_read" entries.
- * It has following fields.
- *
- * (1) "list" which is linked to tomoyo_globally_readable_list .
- * (2) "filename" is a pathname which is allowed to open(O_RDONLY).
- * (3) "is_deleted" is a bool which is true if marked as deleted, false
- *     otherwise.
- */
-struct tomoyo_globally_readable_file_entry {
-	struct list_head list;
-	const struct tomoyo_path_info *filename;
-	bool is_deleted;
-};
-
-/*
- * tomoyo_pattern_entry is a structure which is used for holding
- * "tomoyo_pattern_list" entries.
- * It has following fields.
- *
- * (1) "list" which is linked to tomoyo_pattern_list .
- * (2) "pattern" is a pathname pattern which is used for converting pathnames
- *     to pathname patterns during learning mode.
- * (3) "is_deleted" is a bool which is true if marked as deleted, false
- *     otherwise.
- */
-struct tomoyo_pattern_entry {
-	struct list_head list;
-	const struct tomoyo_path_info *pattern;
-	bool is_deleted;
-};
-
-/*
- * tomoyo_no_rewrite_entry is a structure which is used for holding
- * "deny_rewrite" entries.
- * It has following fields.
- *
- * (1) "list" which is linked to tomoyo_no_rewrite_list .
- * (2) "pattern" is a pathname which is by default not permitted to modify
- *     already existing content.
- * (3) "is_deleted" is a bool which is true if marked as deleted, false
- *     otherwise.
- */
-struct tomoyo_no_rewrite_entry {
-	struct list_head list;
-	const struct tomoyo_path_info *pattern;
-	bool is_deleted;
-};
 
 /* Keyword array for single path operations. */
-static const char *tomoyo_sp_keyword[TOMOYO_MAX_SINGLE_PATH_OPERATION] = {
-	[TOMOYO_TYPE_READ_WRITE_ACL] = "read/write",
-	[TOMOYO_TYPE_EXECUTE_ACL] = "execute",
-	[TOMOYO_TYPE_READ_ACL] = "read",
-	[TOMOYO_TYPE_WRITE_ACL] = "write",
-	[TOMOYO_TYPE_CREATE_ACL] = "create",
-	[TOMOYO_TYPE_UNLINK_ACL] = "unlink",
-	[TOMOYO_TYPE_MKDIR_ACL] = "mkdir",
-	[TOMOYO_TYPE_RMDIR_ACL] = "rmdir",
-	[TOMOYO_TYPE_MKFIFO_ACL] = "mkfifo",
-	[TOMOYO_TYPE_MKSOCK_ACL] = "mksock",
-	[TOMOYO_TYPE_MKBLOCK_ACL] = "mkblock",
-	[TOMOYO_TYPE_MKCHAR_ACL] = "mkchar",
-	[TOMOYO_TYPE_TRUNCATE_ACL] = "truncate",
-	[TOMOYO_TYPE_SYMLINK_ACL] = "symlink",
-	[TOMOYO_TYPE_REWRITE_ACL] = "rewrite",
+static const char *tomoyo_path_keyword[TOMOYO_MAX_PATH_OPERATION] = {
+	[TOMOYO_TYPE_READ_WRITE] = "read/write",
+	[TOMOYO_TYPE_EXECUTE] = "execute",
+	[TOMOYO_TYPE_READ] = "read",
+	[TOMOYO_TYPE_WRITE] = "write",
+	[TOMOYO_TYPE_CREATE] = "create",
+	[TOMOYO_TYPE_UNLINK] = "unlink",
+	[TOMOYO_TYPE_MKDIR] = "mkdir",
+	[TOMOYO_TYPE_RMDIR] = "rmdir",
+	[TOMOYO_TYPE_MKFIFO] = "mkfifo",
+	[TOMOYO_TYPE_MKSOCK] = "mksock",
+	[TOMOYO_TYPE_MKBLOCK] = "mkblock",
+	[TOMOYO_TYPE_MKCHAR] = "mkchar",
+	[TOMOYO_TYPE_TRUNCATE] = "truncate",
+	[TOMOYO_TYPE_SYMLINK] = "symlink",
+	[TOMOYO_TYPE_REWRITE] = "rewrite",
+	[TOMOYO_TYPE_IOCTL] = "ioctl",
+	[TOMOYO_TYPE_CHMOD] = "chmod",
+	[TOMOYO_TYPE_CHOWN] = "chown",
+	[TOMOYO_TYPE_CHGRP] = "chgrp",
+	[TOMOYO_TYPE_CHROOT] = "chroot",
+	[TOMOYO_TYPE_MOUNT] = "mount",
+	[TOMOYO_TYPE_UMOUNT] = "unmount",
 };
 
 /* Keyword array for double path operations. */
-static const char *tomoyo_dp_keyword[TOMOYO_MAX_DOUBLE_PATH_OPERATION] = {
-	[TOMOYO_TYPE_LINK_ACL] = "link",
-	[TOMOYO_TYPE_RENAME_ACL] = "rename",
+static const char *tomoyo_path2_keyword[TOMOYO_MAX_PATH2_OPERATION] = {
+	[TOMOYO_TYPE_LINK] = "link",
+	[TOMOYO_TYPE_RENAME] = "rename",
+	[TOMOYO_TYPE_PIVOT_ROOT] = "pivot_root",
 };
 
 /**
- * tomoyo_sp2keyword - Get the name of single path operation.
+ * tomoyo_path2keyword - Get the name of single path operation.
  *
 * @operation: Type of operation.
  *
 * Returns the name of single path operation.
  */
-const char *tomoyo_sp2keyword(const u8 operation)
+const char *tomoyo_path2keyword(const u8 operation)
 {
-	return (operation < TOMOYO_MAX_SINGLE_PATH_OPERATION)
-		? tomoyo_sp_keyword[operation] : NULL;
+	return (operation < TOMOYO_MAX_PATH_OPERATION)
+		? tomoyo_path_keyword[operation] : NULL;
 }
 
 /**
- * tomoyo_dp2keyword - Get the name of double path operation.
+ * tomoyo_path22keyword - Get the name of double path operation.
  *
 * @operation: Type of operation.
  *
 * Returns the name of double path operation.
  */
-const char *tomoyo_dp2keyword(const u8 operation)
+const char *tomoyo_path22keyword(const u8 operation)
 {
-	return (operation < TOMOYO_MAX_DOUBLE_PATH_OPERATION)
-		? tomoyo_dp_keyword[operation] : NULL;
+	return (operation < TOMOYO_MAX_PATH2_OPERATION)
+		? tomoyo_path2_keyword[operation] : NULL;
 }
 
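[Editor's note -- not part of the patch] The renamed lookup helpers above simply map an operation index back to its policy keyword, returning NULL past the end of the table. A hypothetical caller, for illustration only:

static void example_print_keyword(const u8 operation)
{
	const char *msg = tomoyo_path2keyword(operation);

	/* NULL when operation >= TOMOYO_MAX_PATH_OPERATION */
	if (msg)
		printk(KERN_DEBUG "operation keyword: allow_%s\n", msg);
}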
 /**
@@ -142,7 +98,8 @@ static bool tomoyo_strendswith(const char *name, const char *tail)
 static struct tomoyo_path_info *tomoyo_get_path(struct path *path)
 {
 	int error;
-	struct tomoyo_path_info_with_data *buf = tomoyo_alloc(sizeof(*buf));
+	struct tomoyo_path_info_with_data *buf = kzalloc(sizeof(*buf),
+							 GFP_KERNEL);
 
 	if (!buf)
 		return NULL;
@@ -154,20 +111,17 @@
 		tomoyo_fill_path_info(&buf->head);
 		return &buf->head;
 	}
-	tomoyo_free(buf);
+	kfree(buf);
 	return NULL;
 }
 
-/* Lock for domain->acl_info_list. */
-DECLARE_RWSEM(tomoyo_domain_acl_info_list_lock);
-
-static int tomoyo_update_double_path_acl(const u8 type, const char *filename1,
-					 const char *filename2,
-					 struct tomoyo_domain_info *
-					 const domain, const bool is_delete);
-static int tomoyo_update_single_path_acl(const u8 type, const char *filename,
-					 struct tomoyo_domain_info *
-					 const domain, const bool is_delete);
+static int tomoyo_update_path2_acl(const u8 type, const char *filename1,
+				   const char *filename2,
+				   struct tomoyo_domain_info *const domain,
+				   const bool is_delete);
+static int tomoyo_update_path_acl(const u8 type, const char *filename,
+				  struct tomoyo_domain_info *const domain,
+				  const bool is_delete);
 
 /*
 * tomoyo_globally_readable_list is used for holding list of pathnames which
@@ -194,8 +148,7 @@ static int tomoyo_update_single_path_acl(const u8 type, const char *filename,
 * given "allow_read /lib/libc-2.5.so" to the domain which current process
 * belongs to.
  */
-static LIST_HEAD(tomoyo_globally_readable_list);
-static DECLARE_RWSEM(tomoyo_globally_readable_list_lock);
+LIST_HEAD(tomoyo_globally_readable_list);
 
 /**
 * tomoyo_update_globally_readable_entry - Update "struct tomoyo_globally_readable_file_entry" list.
@@ -204,40 +157,42 @@ static DECLARE_RWSEM(tomoyo_globally_readable_list_lock);
  * @is_delete: True if it is a delete request.
  *
 * Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
  */
 static int tomoyo_update_globally_readable_entry(const char *filename,
 						 const bool is_delete)
 {
-	struct tomoyo_globally_readable_file_entry *new_entry;
+	struct tomoyo_globally_readable_file_entry *entry = NULL;
 	struct tomoyo_globally_readable_file_entry *ptr;
 	const struct tomoyo_path_info *saved_filename;
-	int error = -ENOMEM;
+	int error = is_delete ? -ENOENT : -ENOMEM;
 
-	if (!tomoyo_is_correct_path(filename, 1, 0, -1, __func__))
+	if (!tomoyo_is_correct_path(filename, 1, 0, -1))
 		return -EINVAL;
-	saved_filename = tomoyo_save_name(filename);
+	saved_filename = tomoyo_get_name(filename);
 	if (!saved_filename)
 		return -ENOMEM;
-	down_write(&tomoyo_globally_readable_list_lock);
-	list_for_each_entry(ptr, &tomoyo_globally_readable_list, list) {
+	if (!is_delete)
+		entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+	mutex_lock(&tomoyo_policy_lock);
+	list_for_each_entry_rcu(ptr, &tomoyo_globally_readable_list, list) {
 		if (ptr->filename != saved_filename)
 			continue;
 		ptr->is_deleted = is_delete;
 		error = 0;
-		goto out;
+		break;
 	}
-	if (is_delete) {
-		error = -ENOENT;
-		goto out;
+	if (!is_delete && error && tomoyo_memory_ok(entry)) {
+		entry->filename = saved_filename;
+		saved_filename = NULL;
+		list_add_tail_rcu(&entry->list, &tomoyo_globally_readable_list);
+		entry = NULL;
+		error = 0;
 	}
-	new_entry = tomoyo_alloc_element(sizeof(*new_entry));
-	if (!new_entry)
-		goto out;
-	new_entry->filename = saved_filename;
-	list_add_tail(&new_entry->list, &tomoyo_globally_readable_list);
-	error = 0;
- out:
-	up_write(&tomoyo_globally_readable_list_lock);
+	mutex_unlock(&tomoyo_policy_lock);
+	tomoyo_put_name(saved_filename);
+	kfree(entry);
 	return error;
 }
 
@@ -247,21 +202,22 @@ static int tomoyo_update_globally_readable_entry(const char *filename,
 * @filename: The filename to check.
  *
 * Returns true if any domain can open @filename for reading, false otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
  */
 static bool tomoyo_is_globally_readable_file(const struct tomoyo_path_info *
 					     filename)
 {
 	struct tomoyo_globally_readable_file_entry *ptr;
 	bool found = false;
-	down_read(&tomoyo_globally_readable_list_lock);
-	list_for_each_entry(ptr, &tomoyo_globally_readable_list, list) {
+
+	list_for_each_entry_rcu(ptr, &tomoyo_globally_readable_list, list) {
 		if (!ptr->is_deleted &&
 		    tomoyo_path_matches_pattern(filename, ptr->filename)) {
 			found = true;
 			break;
 		}
 	}
-	up_read(&tomoyo_globally_readable_list_lock);
 	return found;
 }
 
@@ -272,6 +228,8 @@ static bool tomoyo_is_globally_readable_file(const struct tomoyo_path_info *
  * @is_delete: True if it is a delete request.
  *
 * Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
  */
 int tomoyo_write_globally_readable_policy(char *data, const bool is_delete)
 {
@@ -284,13 +242,14 @@ int tomoyo_write_globally_readable_policy(char *data, const bool is_delete)
  * @head: Pointer to "struct tomoyo_io_buffer".
  *
 * Returns true on success, false otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
  */
 bool tomoyo_read_globally_readable_policy(struct tomoyo_io_buffer *head)
 {
 	struct list_head *pos;
 	bool done = true;
 
-	down_read(&tomoyo_globally_readable_list_lock);
 	list_for_each_cookie(pos, head->read_var2,
 			     &tomoyo_globally_readable_list) {
 		struct tomoyo_globally_readable_file_entry *ptr;
@@ -304,7 +263,6 @@ bool tomoyo_read_globally_readable_policy(struct tomoyo_io_buffer *head)
 		if (!done)
 			break;
 	}
-	up_read(&tomoyo_globally_readable_list_lock);
 	return done;
 }
 
@@ -337,8 +295,7 @@
 * which pretends as if /proc/self/ is not a symlink; so that we can forbid
 * current process from accessing other process's information.
  */
-static LIST_HEAD(tomoyo_pattern_list);
-static DECLARE_RWSEM(tomoyo_pattern_list_lock);
+LIST_HEAD(tomoyo_pattern_list);
 
 /**
 * tomoyo_update_file_pattern_entry - Update "struct tomoyo_pattern_entry" list.
@@ -347,40 +304,43 @@ static DECLARE_RWSEM(tomoyo_pattern_list_lock);
  * @is_delete: True if it is a delete request.
  *
 * Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
  */
 static int tomoyo_update_file_pattern_entry(const char *pattern,
 					    const bool is_delete)
 {
-	struct tomoyo_pattern_entry *new_entry;
+	struct tomoyo_pattern_entry *entry = NULL;
 	struct tomoyo_pattern_entry *ptr;
 	const struct tomoyo_path_info *saved_pattern;
-	int error = -ENOMEM;
+	int error = is_delete ? -ENOENT : -ENOMEM;
 
-	if (!tomoyo_is_correct_path(pattern, 0, 1, 0, __func__))
-		return -EINVAL;
-	saved_pattern = tomoyo_save_name(pattern);
+	saved_pattern = tomoyo_get_name(pattern);
 	if (!saved_pattern)
-		return -ENOMEM;
-	down_write(&tomoyo_pattern_list_lock);
-	list_for_each_entry(ptr, &tomoyo_pattern_list, list) {
+		return error;
+	if (!saved_pattern->is_patterned)
+		goto out;
+	if (!is_delete)
+		entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+	mutex_lock(&tomoyo_policy_lock);
+	list_for_each_entry_rcu(ptr, &tomoyo_pattern_list, list) {
 		if (saved_pattern != ptr->pattern)
 			continue;
 		ptr->is_deleted = is_delete;
 		error = 0;
-		goto out;
+		break;
 	}
-	if (is_delete) {
-		error = -ENOENT;
-		goto out;
+	if (!is_delete && error && tomoyo_memory_ok(entry)) {
+		entry->pattern = saved_pattern;
+		saved_pattern = NULL;
+		list_add_tail_rcu(&entry->list, &tomoyo_pattern_list);
+		entry = NULL;
+		error = 0;
 	}
-	new_entry = tomoyo_alloc_element(sizeof(*new_entry));
-	if (!new_entry)
-		goto out;
-	new_entry->pattern = saved_pattern;
-	list_add_tail(&new_entry->list, &tomoyo_pattern_list);
-	error = 0;
+	mutex_unlock(&tomoyo_policy_lock);
  out:
-	up_write(&tomoyo_pattern_list_lock);
+	kfree(entry);
+	tomoyo_put_name(saved_pattern);
 	return error;
 }
 
@@ -390,6 +350,8 @@ static int tomoyo_update_file_pattern_entry(const char *pattern,
 * @filename: The filename to find patterned pathname.
  *
 * Returns pointer to pathname pattern if matched, @filename otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
  */
 static const struct tomoyo_path_info *
 tomoyo_get_file_pattern(const struct tomoyo_path_info *filename)
@@ -397,8 +359,7 @@ tomoyo_get_file_pattern(const struct tomoyo_path_info *filename)
 	struct tomoyo_pattern_entry *ptr;
 	const struct tomoyo_path_info *pattern = NULL;
 
-	down_read(&tomoyo_pattern_list_lock);
-	list_for_each_entry(ptr, &tomoyo_pattern_list, list) {
+	list_for_each_entry_rcu(ptr, &tomoyo_pattern_list, list) {
 		if (ptr->is_deleted)
 			continue;
 		if (!tomoyo_path_matches_pattern(filename, ptr->pattern))
@@ -411,7 +372,6 @@
 			break;
 		}
 	}
-	up_read(&tomoyo_pattern_list_lock);
 	if (pattern)
 		filename = pattern;
 	return filename;
@@ -424,6 +384,8 @@
  * @is_delete: True if it is a delete request.
  *
 * Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
  */
 int tomoyo_write_pattern_policy(char *data, const bool is_delete)
 {
@@ -436,13 +398,14 @@ int tomoyo_write_pattern_policy(char *data, const bool is_delete)
  * @head: Pointer to "struct tomoyo_io_buffer".
  *
 * Returns true on success, false otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
  */
 bool tomoyo_read_file_pattern(struct tomoyo_io_buffer *head)
 {
 	struct list_head *pos;
 	bool done = true;
 
-	down_read(&tomoyo_pattern_list_lock);
 	list_for_each_cookie(pos, head->read_var2, &tomoyo_pattern_list) {
 		struct tomoyo_pattern_entry *ptr;
 		ptr = list_entry(pos, struct tomoyo_pattern_entry, list);
@@ -453,7 +416,6 @@ bool tomoyo_read_file_pattern(struct tomoyo_io_buffer *head)
 		if (!done)
 			break;
 	}
-	up_read(&tomoyo_pattern_list_lock);
 	return done;
 }
 
@@ -486,8 +448,7 @@ bool tomoyo_read_file_pattern(struct tomoyo_io_buffer *head)
 * " (deleted)" suffix if the file is already unlink()ed; so that we don't
 * need to worry whether the file is already unlink()ed or not.
  */
-static LIST_HEAD(tomoyo_no_rewrite_list);
-static DECLARE_RWSEM(tomoyo_no_rewrite_list_lock);
+LIST_HEAD(tomoyo_no_rewrite_list);
 
 /**
 * tomoyo_update_no_rewrite_entry - Update "struct tomoyo_no_rewrite_entry" list.
@@ -496,39 +457,42 @@ static DECLARE_RWSEM(tomoyo_no_rewrite_list_lock);
  * @is_delete: True if it is a delete request.
  *
 * Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
  */
 static int tomoyo_update_no_rewrite_entry(const char *pattern,
 					  const bool is_delete)
 {
-	struct tomoyo_no_rewrite_entry *new_entry, *ptr;
+	struct tomoyo_no_rewrite_entry *entry = NULL;
+	struct tomoyo_no_rewrite_entry *ptr;
 	const struct tomoyo_path_info *saved_pattern;
-	int error = -ENOMEM;
+	int error = is_delete ? -ENOENT : -ENOMEM;
 
-	if (!tomoyo_is_correct_path(pattern, 0, 0, 0, __func__))
+	if (!tomoyo_is_correct_path(pattern, 0, 0, 0))
 		return -EINVAL;
-	saved_pattern = tomoyo_save_name(pattern);
+	saved_pattern = tomoyo_get_name(pattern);
 	if (!saved_pattern)
-		return -ENOMEM;
-	down_write(&tomoyo_no_rewrite_list_lock);
-	list_for_each_entry(ptr, &tomoyo_no_rewrite_list, list) {
+		return error;
+	if (!is_delete)
+		entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+	mutex_lock(&tomoyo_policy_lock);
+	list_for_each_entry_rcu(ptr, &tomoyo_no_rewrite_list, list) {
 		if (ptr->pattern != saved_pattern)
 			continue;
 		ptr->is_deleted = is_delete;
 		error = 0;
-		goto out;
+		break;
 	}
-	if (is_delete) {
-		error = -ENOENT;
-		goto out;
+	if (!is_delete && error && tomoyo_memory_ok(entry)) {
+		entry->pattern = saved_pattern;
+		saved_pattern = NULL;
+		list_add_tail_rcu(&entry->list, &tomoyo_no_rewrite_list);
+		entry = NULL;
+		error = 0;
 	}
-	new_entry = tomoyo_alloc_element(sizeof(*new_entry));
-	if (!new_entry)
-		goto out;
-	new_entry->pattern = saved_pattern;
-	list_add_tail(&new_entry->list, &tomoyo_no_rewrite_list);
-	error = 0;
- out:
-	up_write(&tomoyo_no_rewrite_list_lock);
+	mutex_unlock(&tomoyo_policy_lock);
+	tomoyo_put_name(saved_pattern);
+	kfree(entry);
 	return error;
 }
 
@@ -539,14 +503,15 @@ static int tomoyo_update_no_rewrite_entry(const char *pattern,
  *
 * Returns true if @filename is specified by "deny_rewrite" directive,
 * false otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
  */
 static bool tomoyo_is_no_rewrite_file(const struct tomoyo_path_info *filename)
 {
 	struct tomoyo_no_rewrite_entry *ptr;
 	bool found = false;
 
-	down_read(&tomoyo_no_rewrite_list_lock);
-	list_for_each_entry(ptr, &tomoyo_no_rewrite_list, list) {
+	list_for_each_entry_rcu(ptr, &tomoyo_no_rewrite_list, list) {
 		if (ptr->is_deleted)
 			continue;
 		if (!tomoyo_path_matches_pattern(filename, ptr->pattern))
@@ -554,7 +519,6 @@ static bool tomoyo_is_no_rewrite_file(const struct tomoyo_path_info *filename)
 		found = true;
 		break;
 	}
-	up_read(&tomoyo_no_rewrite_list_lock);
 	return found;
 }
 
@@ -565,6 +529,8 @@ static bool tomoyo_is_no_rewrite_file(const struct tomoyo_path_info *filename)
  * @is_delete: True if it is a delete request.
  *
 * Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
  */
 int tomoyo_write_no_rewrite_policy(char *data, const bool is_delete)
 {
@@ -577,13 +543,14 @@ int tomoyo_write_no_rewrite_policy(char *data, const bool is_delete)
  * @head: Pointer to "struct tomoyo_io_buffer".
  *
 * Returns true on success, false otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
  */
 bool tomoyo_read_no_rewrite_policy(struct tomoyo_io_buffer *head)
 {
 	struct list_head *pos;
 	bool done = true;
 
-	down_read(&tomoyo_no_rewrite_list_lock);
 	list_for_each_cookie(pos, head->read_var2, &tomoyo_no_rewrite_list) {
 		struct tomoyo_no_rewrite_entry *ptr;
 		ptr = list_entry(pos, struct tomoyo_no_rewrite_entry, list);
@@ -594,7 +561,6 @@ bool tomoyo_read_no_rewrite_policy(struct tomoyo_io_buffer *head)
 		if (!done)
 			break;
 	}
-	up_read(&tomoyo_no_rewrite_list_lock);
 	return done;
 }
 
@@ -612,6 +578,8 @@ bool tomoyo_read_no_rewrite_policy(struct tomoyo_io_buffer *head)
 * Current policy syntax uses "allow_read/write" instead of "6",
 * "allow_read" instead of "4", "allow_write" instead of "2",
 * "allow_execute" instead of "1".
+ *
+ * Caller holds tomoyo_read_lock().
  */
 static int tomoyo_update_file_acl(const char *filename, u8 perm,
 				  struct tomoyo_domain_info * const domain,
@@ -629,19 +597,19 @@
		 */
 		return 0;
 	if (perm & 4)
-		tomoyo_update_single_path_acl(TOMOYO_TYPE_READ_ACL, filename,
-					      domain, is_delete);
+		tomoyo_update_path_acl(TOMOYO_TYPE_READ, filename, domain,
+				       is_delete);
 	if (perm & 2)
-		tomoyo_update_single_path_acl(TOMOYO_TYPE_WRITE_ACL, filename,
-					      domain, is_delete);
+		tomoyo_update_path_acl(TOMOYO_TYPE_WRITE, filename, domain,
+				       is_delete);
 	if (perm & 1)
-		tomoyo_update_single_path_acl(TOMOYO_TYPE_EXECUTE_ACL,
-					      filename, domain, is_delete);
+		tomoyo_update_path_acl(TOMOYO_TYPE_EXECUTE, filename, domain,
+				       is_delete);
 	return 0;
 }
 
 /**
- * tomoyo_check_single_path_acl2 - Check permission for single path operation.
+ * tomoyo_path_acl2 - Check permission for single path operation.
  *
 * @domain: Pointer to "struct tomoyo_domain_info".
 * @filename: Filename to check.
@@ -649,26 +617,28 @@ static int tomoyo_update_file_acl(const char *filename, u8 perm,
 * @may_use_pattern: True if patterned ACL is permitted.
  *
 * Returns 0 on success, -EPERM otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
  */
-static int tomoyo_check_single_path_acl2(const struct tomoyo_domain_info *
-					 domain,
-					 const struct tomoyo_path_info *
-					 filename,
-					 const u16 perm,
-					 const bool may_use_pattern)
+static int tomoyo_path_acl2(const struct tomoyo_domain_info *domain,
+			    const struct tomoyo_path_info *filename,
+			    const u32 perm, const bool may_use_pattern)
 {
 	struct tomoyo_acl_info *ptr;
 	int error = -EPERM;
 
-	down_read(&tomoyo_domain_acl_info_list_lock);
-	list_for_each_entry(ptr, &domain->acl_info_list, list) {
-		struct tomoyo_single_path_acl_record *acl;
-		if (tomoyo_acl_type2(ptr) != TOMOYO_TYPE_SINGLE_PATH_ACL)
-			continue;
-		acl = container_of(ptr, struct tomoyo_single_path_acl_record,
-				   head);
-		if (!(acl->perm & perm))
+	list_for_each_entry_rcu(ptr, &domain->acl_info_list, list) {
+		struct tomoyo_path_acl *acl;
+		if (ptr->type != TOMOYO_TYPE_PATH_ACL)
 			continue;
+		acl = container_of(ptr, struct tomoyo_path_acl, head);
+		if (perm <= 0xFFFF) {
+			if (!(acl->perm & perm))
+				continue;
+		} else {
+			if (!(acl->perm_high & (perm >> 16)))
+				continue;
+		}
 		if (may_use_pattern || !acl->filename->is_patterned) {
 			if (!tomoyo_path_matches_pattern(filename,
 							 acl->filename))
@@ -679,7 +649,6 @@ static int tomoyo_check_single_path_acl2(const struct tomoyo_domain_info *
 		error = 0;
 		break;
 	}
-	up_read(&tomoyo_domain_acl_info_list_lock);
 	return error;
 }
 
@@ -691,27 +660,28 @@ static int tomoyo_check_single_path_acl2(const struct tomoyo_domain_info *
 * @operation: Mode ("read" or "write" or "read/write" or "execute").
  *
 * Returns 0 on success, -EPERM otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
  */
 static int tomoyo_check_file_acl(const struct tomoyo_domain_info *domain,
 				 const struct tomoyo_path_info *filename,
 				 const u8 operation)
 {
-	u16 perm = 0;
+	u32 perm = 0;
 
 	if (!tomoyo_check_flags(domain, TOMOYO_MAC_FOR_FILE))
 		return 0;
 	if (operation == 6)
-		perm = 1 << TOMOYO_TYPE_READ_WRITE_ACL;
+		perm = 1 << TOMOYO_TYPE_READ_WRITE;
 	else if (operation == 4)
-		perm = 1 << TOMOYO_TYPE_READ_ACL;
+		perm = 1 << TOMOYO_TYPE_READ;
 	else if (operation == 2)
-		perm = 1 << TOMOYO_TYPE_WRITE_ACL;
+		perm = 1 << TOMOYO_TYPE_WRITE;
 	else if (operation == 1)
-		perm = 1 << TOMOYO_TYPE_EXECUTE_ACL;
+		perm = 1 << TOMOYO_TYPE_EXECUTE;
 	else
 		BUG();
-	return tomoyo_check_single_path_acl2(domain, filename, perm,
-					     operation != 1);
+	return tomoyo_path_acl2(domain, filename, perm, operation != 1);
 }
 
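[Editor's note -- not part of the patch] The path-ACL bitmap now covers more than 16 operations (the keyword table above grew to 22 entries), so a single 16-bit mask no longer fits; tomoyo_path_acl2() therefore splits the u32 perm value at bit 16 and consults acl->perm or acl->perm_high accordingly. Standalone logic equivalent to that split, assuming 16-bit fields as the 0xFFFF test implies (the example_* names are invented):

#include <stdbool.h>
#include <stdint.h>

struct example_acl {
	uint16_t perm;		/* bits for operations 0..15 */
	uint16_t perm_high;	/* bits for operations 16 and up */
};

static bool example_test(const struct example_acl *acl, uint8_t type)
{
	const uint32_t perm = 1u << type;

	if (perm <= 0xFFFF)	/* type <= 15: low word */
		return acl->perm & perm;
	return acl->perm_high & (perm >> 16);	/* high word */
}

static void example_set(struct example_acl *acl, uint8_t type)
{
	const uint32_t perm = 1u << type;

	if (perm <= 0xFFFF)
		acl->perm |= perm;
	else
		acl->perm_high |= perm >> 16;
}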
 /**
@@ -724,6 +694,8 @@ static int tomoyo_check_file_acl(const struct tomoyo_domain_info *domain,
 * @mode: Access control mode.
  *
 * Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
  */
 static int tomoyo_check_file_perm2(struct tomoyo_domain_info * const domain,
 				   const struct tomoyo_path_info *filename,
@@ -737,18 +709,17 @@ static int tomoyo_check_file_perm2(struct tomoyo_domain_info * const domain,
 	if (!filename)
 		return 0;
 	error = tomoyo_check_file_acl(domain, filename, perm);
-	if (error && perm == 4 &&
-	    (domain->flags & TOMOYO_DOMAIN_FLAGS_IGNORE_GLOBAL_ALLOW_READ) == 0
+	if (error && perm == 4 && !domain->ignore_global_allow_read
 	    && tomoyo_is_globally_readable_file(filename))
 		error = 0;
 	if (perm == 6)
-		msg = tomoyo_sp2keyword(TOMOYO_TYPE_READ_WRITE_ACL);
+		msg = tomoyo_path2keyword(TOMOYO_TYPE_READ_WRITE);
 	else if (perm == 4)
-		msg = tomoyo_sp2keyword(TOMOYO_TYPE_READ_ACL);
+		msg = tomoyo_path2keyword(TOMOYO_TYPE_READ);
 	else if (perm == 2)
-		msg = tomoyo_sp2keyword(TOMOYO_TYPE_WRITE_ACL);
+		msg = tomoyo_path2keyword(TOMOYO_TYPE_WRITE);
 	else if (perm == 1)
-		msg = tomoyo_sp2keyword(TOMOYO_TYPE_EXECUTE_ACL);
+		msg = tomoyo_path2keyword(TOMOYO_TYPE_EXECUTE);
 	else
 		BUG();
 	if (!error)
@@ -777,6 +748,8 @@ static int tomoyo_check_file_perm2(struct tomoyo_domain_info * const domain,
  * @is_delete: True if it is a delete request.
  *
 * Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
  */
 int tomoyo_write_file_policy(char *data, struct tomoyo_domain_info *domain,
 			     const bool is_delete)
@@ -795,28 +768,28 @@ int tomoyo_write_file_policy(char *data, struct tomoyo_domain_info *domain,
 	if (strncmp(data, "allow_", 6))
 		goto out;
 	data += 6;
-	for (type = 0; type < TOMOYO_MAX_SINGLE_PATH_OPERATION; type++) {
-		if (strcmp(data, tomoyo_sp_keyword[type]))
+	for (type = 0; type < TOMOYO_MAX_PATH_OPERATION; type++) {
+		if (strcmp(data, tomoyo_path_keyword[type]))
 			continue;
-		return tomoyo_update_single_path_acl(type, filename,
-						     domain, is_delete);
+		return tomoyo_update_path_acl(type, filename, domain,
+					      is_delete);
 	}
 	filename2 = strchr(filename, ' ');
 	if (!filename2)
 		goto out;
 	*filename2++ = '\0';
-	for (type = 0; type < TOMOYO_MAX_DOUBLE_PATH_OPERATION; type++) {
-		if (strcmp(data, tomoyo_dp_keyword[type]))
+	for (type = 0; type < TOMOYO_MAX_PATH2_OPERATION; type++) {
+		if (strcmp(data, tomoyo_path2_keyword[type]))
 			continue;
-		return tomoyo_update_double_path_acl(type, filename, filename2,
-						     domain, is_delete);
+		return tomoyo_update_path2_acl(type, filename, filename2,
+					       domain, is_delete);
 	}
  out:
 	return -EINVAL;
 }
 
 /**
- * tomoyo_update_single_path_acl - Update "struct tomoyo_single_path_acl_record" list.
+ * tomoyo_update_path_acl - Update "struct tomoyo_path_acl" list.
  *
 * @type: Type of operation.
 * @filename: Filename.
@@ -824,85 +797,82 @@ int tomoyo_write_file_policy(char *data, struct tomoyo_domain_info *domain,
  * @is_delete: True if it is a delete request.
  *
 * Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
  */
-static int tomoyo_update_single_path_acl(const u8 type, const char *filename,
-					 struct tomoyo_domain_info *
-					 const domain, const bool is_delete)
+static int tomoyo_update_path_acl(const u8 type, const char *filename,
+				  struct tomoyo_domain_info *const domain,
+				  const bool is_delete)
 {
-	static const u16 rw_mask =
-		(1 << TOMOYO_TYPE_READ_ACL) | (1 << TOMOYO_TYPE_WRITE_ACL);
+	static const u32 rw_mask =
+		(1 << TOMOYO_TYPE_READ) | (1 << TOMOYO_TYPE_WRITE);
 	const struct tomoyo_path_info *saved_filename;
 	struct tomoyo_acl_info *ptr;
-	struct tomoyo_single_path_acl_record *acl;
-	int error = -ENOMEM;
-	const u16 perm = 1 << type;
+	struct tomoyo_path_acl *entry = NULL;
+	int error = is_delete ? -ENOENT : -ENOMEM;
+	const u32 perm = 1 << type;
 
 	if (!domain)
 		return -EINVAL;
-	if (!tomoyo_is_correct_path(filename, 0, 0, 0, __func__))
+	if (!tomoyo_is_correct_path(filename, 0, 0, 0))
 		return -EINVAL;
-	saved_filename = tomoyo_save_name(filename);
+	saved_filename = tomoyo_get_name(filename);
 	if (!saved_filename)
 		return -ENOMEM;
-	down_write(&tomoyo_domain_acl_info_list_lock);
-	if (is_delete)
-		goto delete;
-	list_for_each_entry(ptr, &domain->acl_info_list, list) {
-		if (tomoyo_acl_type1(ptr) != TOMOYO_TYPE_SINGLE_PATH_ACL)
+	if (!is_delete)
+		entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+	mutex_lock(&tomoyo_policy_lock);
+	list_for_each_entry_rcu(ptr, &domain->acl_info_list, list) {
+		struct tomoyo_path_acl *acl =
+			container_of(ptr, struct tomoyo_path_acl, head);
+		if (ptr->type != TOMOYO_TYPE_PATH_ACL)
 			continue;
-		acl = container_of(ptr, struct tomoyo_single_path_acl_record,
-				   head);
 		if (acl->filename != saved_filename)
 			continue;
-		/* Special case. Clear all bits if marked as deleted. */
-		if (ptr->type & TOMOYO_ACL_DELETED)
-			acl->perm = 0;
-		acl->perm |= perm;
-		if ((acl->perm & rw_mask) == rw_mask)
-			acl->perm |= 1 << TOMOYO_TYPE_READ_WRITE_ACL;
-		else if (acl->perm & (1 << TOMOYO_TYPE_READ_WRITE_ACL))
-			acl->perm |= rw_mask;
+		if (is_delete) {
+			if (perm <= 0xFFFF)
+				acl->perm &= ~perm;
+			else
+				acl->perm_high &= ~(perm >> 16);
+			if ((acl->perm & rw_mask) != rw_mask)
+				acl->perm &= ~(1 << TOMOYO_TYPE_READ_WRITE);
+			else if (!(acl->perm & (1 << TOMOYO_TYPE_READ_WRITE)))
865 ptr->type &= ~TOMOYO_ACL_DELETED; 840 acl->perm &= ~rw_mask;
841 } else {
842 if (perm <= 0xFFFF)
843 acl->perm |= perm;
844 else
845 acl->perm_high |= (perm >> 16);
846 if ((acl->perm & rw_mask) == rw_mask)
847 acl->perm |= 1 << TOMOYO_TYPE_READ_WRITE;
848 else if (acl->perm & (1 << TOMOYO_TYPE_READ_WRITE))
849 acl->perm |= rw_mask;
850 }
866 error = 0; 851 error = 0;
867 goto out; 852 break;
868 } 853 }
869 /* Not found. Append it to the tail. */ 854 if (!is_delete && error && tomoyo_memory_ok(entry)) {
870 acl = tomoyo_alloc_acl_element(TOMOYO_TYPE_SINGLE_PATH_ACL); 855 entry->head.type = TOMOYO_TYPE_PATH_ACL;
871 if (!acl) 856 if (perm <= 0xFFFF)
872 goto out; 857 entry->perm = perm;
873 acl->perm = perm; 858 else
874 if (perm == (1 << TOMOYO_TYPE_READ_WRITE_ACL)) 859 entry->perm_high = (perm >> 16);
875 acl->perm |= rw_mask; 860 if (perm == (1 << TOMOYO_TYPE_READ_WRITE))
876 acl->filename = saved_filename; 861 entry->perm |= rw_mask;
877 list_add_tail(&acl->head.list, &domain->acl_info_list); 862 entry->filename = saved_filename;
878 error = 0; 863 saved_filename = NULL;
879 goto out; 864 list_add_tail_rcu(&entry->head.list, &domain->acl_info_list);
880 delete: 865 entry = NULL;
881 error = -ENOENT;
882 list_for_each_entry(ptr, &domain->acl_info_list, list) {
883 if (tomoyo_acl_type2(ptr) != TOMOYO_TYPE_SINGLE_PATH_ACL)
884 continue;
885 acl = container_of(ptr, struct tomoyo_single_path_acl_record,
886 head);
887 if (acl->filename != saved_filename)
888 continue;
889 acl->perm &= ~perm;
890 if ((acl->perm & rw_mask) != rw_mask)
891 acl->perm &= ~(1 << TOMOYO_TYPE_READ_WRITE_ACL);
892 else if (!(acl->perm & (1 << TOMOYO_TYPE_READ_WRITE_ACL)))
893 acl->perm &= ~rw_mask;
894 if (!acl->perm)
895 ptr->type |= TOMOYO_ACL_DELETED;
896 error = 0; 866 error = 0;
897 break;
898 } 867 }
899 out: 868 mutex_unlock(&tomoyo_policy_lock);
900 up_write(&tomoyo_domain_acl_info_list_lock); 869 kfree(entry);
870 tomoyo_put_name(saved_filename);
901 return error; 871 return error;
902} 872}
903 873
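
The rewritten tomoyo_update_path_acl() above folds the old add and delete paths into a single walk: a replacement entry is preallocated outside the lock, the list is scanned once under tomoyo_policy_lock, and the new entry is published with list_add_tail_rcu() only if no existing record matched. The permission set is now 32 bits wide, split across a 16-bit perm and a 16-bit perm_high, with the read and write bits kept coupled to the combined read_write bit. Below is a standalone sketch of that bit bookkeeping; the TYPE_* bit positions are illustrative, not the kernel's actual enum values.

    /* Standalone model of the perm/perm_high bookkeeping in
     * tomoyo_update_path_acl().  Bit positions are illustrative only. */
    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    enum { TYPE_READ_WRITE, TYPE_EXECUTE, TYPE_READ, TYPE_WRITE };

    struct path_acl {
        uint16_t perm;        /* operations 0..15 */
        uint16_t perm_high;   /* operations 16..31 */
    };

    static const uint32_t rw_mask = (1u << TYPE_READ) | (1u << TYPE_WRITE);

    static void acl_grant(struct path_acl *acl, uint32_t perm)
    {
        if (perm <= 0xFFFF)
            acl->perm |= perm;
        else
            acl->perm_high |= perm >> 16;
        /* keep read+write coupled with the combined read_write bit */
        if ((acl->perm & rw_mask) == rw_mask)
            acl->perm |= 1u << TYPE_READ_WRITE;
        else if (acl->perm & (1u << TYPE_READ_WRITE))
            acl->perm |= rw_mask;
    }

    static void acl_revoke(struct path_acl *acl, uint32_t perm)
    {
        if (perm <= 0xFFFF)
            acl->perm &= ~perm;
        else
            acl->perm_high &= ~(perm >> 16);
        if ((acl->perm & rw_mask) != rw_mask)
            acl->perm &= ~(1u << TYPE_READ_WRITE);
        else if (!(acl->perm & (1u << TYPE_READ_WRITE)))
            acl->perm &= ~rw_mask;
    }

    int main(void)
    {
        struct path_acl acl = { 0, 0 };

        acl_grant(&acl, 1u << TYPE_READ);
        acl_grant(&acl, 1u << TYPE_WRITE);
        assert(acl.perm & (1u << TYPE_READ_WRITE));   /* implied by r+w */
        acl_revoke(&acl, 1u << TYPE_WRITE);
        assert(!(acl.perm & (1u << TYPE_READ_WRITE)));
        printf("perm=%#x perm_high=%#x\n", acl.perm, acl.perm_high);
        return 0;
    }
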
904/** 874/**
905 * tomoyo_update_double_path_acl - Update "struct tomoyo_double_path_acl_record" list. 875 * tomoyo_update_path2_acl - Update "struct tomoyo_path2_acl" list.
906 * 876 *
907 * @type: Type of operation. 877 * @type: Type of operation.
908 * @filename1: First filename. 878 * @filename1: First filename.
@@ -911,98 +881,88 @@ static int tomoyo_update_single_path_acl(const u8 type, const char *filename,
911 * @is_delete: True if it is a delete request. 881 * @is_delete: True if it is a delete request.
912 * 882 *
913 * Returns 0 on success, negative value otherwise. 883 * Returns 0 on success, negative value otherwise.
884 *
885 * Caller holds tomoyo_read_lock().
914 */ 886 */
915static int tomoyo_update_double_path_acl(const u8 type, const char *filename1, 887static int tomoyo_update_path2_acl(const u8 type, const char *filename1,
916 const char *filename2, 888 const char *filename2,
917 struct tomoyo_domain_info * 889 struct tomoyo_domain_info *const domain,
918 const domain, const bool is_delete) 890 const bool is_delete)
919{ 891{
920 const struct tomoyo_path_info *saved_filename1; 892 const struct tomoyo_path_info *saved_filename1;
921 const struct tomoyo_path_info *saved_filename2; 893 const struct tomoyo_path_info *saved_filename2;
922 struct tomoyo_acl_info *ptr; 894 struct tomoyo_acl_info *ptr;
923 struct tomoyo_double_path_acl_record *acl; 895 struct tomoyo_path2_acl *entry = NULL;
924 int error = -ENOMEM; 896 int error = is_delete ? -ENOENT : -ENOMEM;
925 const u8 perm = 1 << type; 897 const u8 perm = 1 << type;
926 898
927 if (!domain) 899 if (!domain)
928 return -EINVAL; 900 return -EINVAL;
929 if (!tomoyo_is_correct_path(filename1, 0, 0, 0, __func__) || 901 if (!tomoyo_is_correct_path(filename1, 0, 0, 0) ||
930 !tomoyo_is_correct_path(filename2, 0, 0, 0, __func__)) 902 !tomoyo_is_correct_path(filename2, 0, 0, 0))
931 return -EINVAL; 903 return -EINVAL;
932 saved_filename1 = tomoyo_save_name(filename1); 904 saved_filename1 = tomoyo_get_name(filename1);
933 saved_filename2 = tomoyo_save_name(filename2); 905 saved_filename2 = tomoyo_get_name(filename2);
934 if (!saved_filename1 || !saved_filename2) 906 if (!saved_filename1 || !saved_filename2)
935 return -ENOMEM;
936 down_write(&tomoyo_domain_acl_info_list_lock);
937 if (is_delete)
938 goto delete;
939 list_for_each_entry(ptr, &domain->acl_info_list, list) {
940 if (tomoyo_acl_type1(ptr) != TOMOYO_TYPE_DOUBLE_PATH_ACL)
941 continue;
942 acl = container_of(ptr, struct tomoyo_double_path_acl_record,
943 head);
944 if (acl->filename1 != saved_filename1 ||
945 acl->filename2 != saved_filename2)
946 continue;
947 /* Special case. Clear all bits if marked as deleted. */
948 if (ptr->type & TOMOYO_ACL_DELETED)
949 acl->perm = 0;
950 acl->perm |= perm;
951 ptr->type &= ~TOMOYO_ACL_DELETED;
952 error = 0;
953 goto out; 907 goto out;
954 } 908 if (!is_delete)
955 /* Not found. Append it to the tail. */ 909 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
956 acl = tomoyo_alloc_acl_element(TOMOYO_TYPE_DOUBLE_PATH_ACL); 910 mutex_lock(&tomoyo_policy_lock);
957 if (!acl) 911 list_for_each_entry_rcu(ptr, &domain->acl_info_list, list) {
958 goto out; 912 struct tomoyo_path2_acl *acl =
959 acl->perm = perm; 913 container_of(ptr, struct tomoyo_path2_acl, head);
960 acl->filename1 = saved_filename1; 914 if (ptr->type != TOMOYO_TYPE_PATH2_ACL)
961 acl->filename2 = saved_filename2;
962 list_add_tail(&acl->head.list, &domain->acl_info_list);
963 error = 0;
964 goto out;
965 delete:
966 error = -ENOENT;
967 list_for_each_entry(ptr, &domain->acl_info_list, list) {
968 if (tomoyo_acl_type2(ptr) != TOMOYO_TYPE_DOUBLE_PATH_ACL)
969 continue; 915 continue;
970 acl = container_of(ptr, struct tomoyo_double_path_acl_record,
971 head);
972 if (acl->filename1 != saved_filename1 || 916 if (acl->filename1 != saved_filename1 ||
973 acl->filename2 != saved_filename2) 917 acl->filename2 != saved_filename2)
974 continue; 918 continue;
975 acl->perm &= ~perm; 919 if (is_delete)
976 if (!acl->perm) 920 acl->perm &= ~perm;
977 ptr->type |= TOMOYO_ACL_DELETED; 921 else
922 acl->perm |= perm;
978 error = 0; 923 error = 0;
979 break; 924 break;
980 } 925 }
926 if (!is_delete && error && tomoyo_memory_ok(entry)) {
927 entry->head.type = TOMOYO_TYPE_PATH2_ACL;
928 entry->perm = perm;
929 entry->filename1 = saved_filename1;
930 saved_filename1 = NULL;
931 entry->filename2 = saved_filename2;
932 saved_filename2 = NULL;
933 list_add_tail_rcu(&entry->head.list, &domain->acl_info_list);
934 entry = NULL;
935 error = 0;
936 }
937 mutex_unlock(&tomoyo_policy_lock);
981 out: 938 out:
982 up_write(&tomoyo_domain_acl_info_list_lock); 939 tomoyo_put_name(saved_filename1);
940 tomoyo_put_name(saved_filename2);
941 kfree(entry);
983 return error; 942 return error;
984} 943}
985 944
986/** 945/**
987 * tomoyo_check_single_path_acl - Check permission for single path operation. 946 * tomoyo_path_acl - Check permission for single path operation.
988 * 947 *
989 * @domain: Pointer to "struct tomoyo_domain_info". 948 * @domain: Pointer to "struct tomoyo_domain_info".
990 * @type: Type of operation. 949 * @type: Type of operation.
991 * @filename: Filename to check. 950 * @filename: Filename to check.
992 * 951 *
993 * Returns 0 on success, negative value otherwise. 952 * Returns 0 on success, negative value otherwise.
953 *
954 * Caller holds tomoyo_read_lock().
994 */ 955 */
995static int tomoyo_check_single_path_acl(struct tomoyo_domain_info *domain, 956static int tomoyo_path_acl(struct tomoyo_domain_info *domain, const u8 type,
996 const u8 type, 957 const struct tomoyo_path_info *filename)
997 const struct tomoyo_path_info *filename)
998{ 958{
999 if (!tomoyo_check_flags(domain, TOMOYO_MAC_FOR_FILE)) 959 if (!tomoyo_check_flags(domain, TOMOYO_MAC_FOR_FILE))
1000 return 0; 960 return 0;
1001 return tomoyo_check_single_path_acl2(domain, filename, 1 << type, 1); 961 return tomoyo_path_acl2(domain, filename, 1 << type, 1);
1002} 962}
1003 963
1004/** 964/**
1005 * tomoyo_check_double_path_acl - Check permission for double path operation. 965 * tomoyo_path2_acl - Check permission for double path operation.
1006 * 966 *
1007 * @domain: Pointer to "struct tomoyo_domain_info". 967 * @domain: Pointer to "struct tomoyo_domain_info".
1008 * @type: Type of operation. 968 * @type: Type of operation.
@@ -1010,13 +970,13 @@ static int tomoyo_check_single_path_acl(struct tomoyo_domain_info *domain,
1010 * @filename2: Second filename to check. 970 * @filename2: Second filename to check.
1011 * 971 *
1012 * Returns 0 on success, -EPERM otherwise. 972 * Returns 0 on success, -EPERM otherwise.
973 *
974 * Caller holds tomoyo_read_lock().
1013 */ 975 */
1014static int tomoyo_check_double_path_acl(const struct tomoyo_domain_info *domain, 976static int tomoyo_path2_acl(const struct tomoyo_domain_info *domain,
1015 const u8 type, 977 const u8 type,
1016 const struct tomoyo_path_info * 978 const struct tomoyo_path_info *filename1,
1017 filename1, 979 const struct tomoyo_path_info *filename2)
1018 const struct tomoyo_path_info *
1019 filename2)
1020{ 980{
1021 struct tomoyo_acl_info *ptr; 981 struct tomoyo_acl_info *ptr;
1022 const u8 perm = 1 << type; 982 const u8 perm = 1 << type;
@@ -1024,13 +984,11 @@ static int tomoyo_check_double_path_acl(const struct tomoyo_domain_info *domain,
1024 984
1025 if (!tomoyo_check_flags(domain, TOMOYO_MAC_FOR_FILE)) 985 if (!tomoyo_check_flags(domain, TOMOYO_MAC_FOR_FILE))
1026 return 0; 986 return 0;
1027 down_read(&tomoyo_domain_acl_info_list_lock); 987 list_for_each_entry_rcu(ptr, &domain->acl_info_list, list) {
1028 list_for_each_entry(ptr, &domain->acl_info_list, list) { 988 struct tomoyo_path2_acl *acl;
1029 struct tomoyo_double_path_acl_record *acl; 989 if (ptr->type != TOMOYO_TYPE_PATH2_ACL)
1030 if (tomoyo_acl_type2(ptr) != TOMOYO_TYPE_DOUBLE_PATH_ACL)
1031 continue; 990 continue;
1032 acl = container_of(ptr, struct tomoyo_double_path_acl_record, 991 acl = container_of(ptr, struct tomoyo_path2_acl, head);
1033 head);
1034 if (!(acl->perm & perm)) 992 if (!(acl->perm & perm))
1035 continue; 993 continue;
1036 if (!tomoyo_path_matches_pattern(filename1, acl->filename1)) 994 if (!tomoyo_path_matches_pattern(filename1, acl->filename1))
@@ -1040,12 +998,11 @@ static int tomoyo_check_double_path_acl(const struct tomoyo_domain_info *domain,
1040 error = 0; 998 error = 0;
1041 break; 999 break;
1042 } 1000 }
1043 up_read(&tomoyo_domain_acl_info_list_lock);
1044 return error; 1001 return error;
1045} 1002}
1046 1003
1047/** 1004/**
1048 * tomoyo_check_single_path_permission2 - Check permission for single path operation. 1005 * tomoyo_path_permission2 - Check permission for single path operation.
1049 * 1006 *
1050 * @domain: Pointer to "struct tomoyo_domain_info". 1007 * @domain: Pointer to "struct tomoyo_domain_info".
1051 * @operation: Type of operation. 1008 * @operation: Type of operation.
@@ -1053,11 +1010,13 @@ static int tomoyo_check_double_path_acl(const struct tomoyo_domain_info *domain,
1053 * @mode: Access control mode. 1010 * @mode: Access control mode.
1054 * 1011 *
1055 * Returns 0 on success, negative value otherwise. 1012 * Returns 0 on success, negative value otherwise.
1013 *
1014 * Caller holds tomoyo_read_lock().
1056 */ 1015 */
1057static int tomoyo_check_single_path_permission2(struct tomoyo_domain_info * 1016static int tomoyo_path_permission2(struct tomoyo_domain_info *const domain,
1058 const domain, u8 operation, 1017 u8 operation,
1059 const struct tomoyo_path_info * 1018 const struct tomoyo_path_info *filename,
1060 filename, const u8 mode) 1019 const u8 mode)
1061{ 1020{
1062 const char *msg; 1021 const char *msg;
1063 int error; 1022 int error;
@@ -1066,8 +1025,8 @@ static int tomoyo_check_single_path_permission2(struct tomoyo_domain_info *
1066 if (!mode) 1025 if (!mode)
1067 return 0; 1026 return 0;
1068 next: 1027 next:
1069 error = tomoyo_check_single_path_acl(domain, operation, filename); 1028 error = tomoyo_path_acl(domain, operation, filename);
1070 msg = tomoyo_sp2keyword(operation); 1029 msg = tomoyo_path2keyword(operation);
1071 if (!error) 1030 if (!error)
1072 goto ok; 1031 goto ok;
1073 if (tomoyo_verbose_mode(domain)) 1032 if (tomoyo_verbose_mode(domain))
@@ -1076,7 +1035,7 @@ static int tomoyo_check_single_path_permission2(struct tomoyo_domain_info *
1076 tomoyo_get_last_name(domain)); 1035 tomoyo_get_last_name(domain));
1077 if (mode == 1 && tomoyo_domain_quota_is_ok(domain)) { 1036 if (mode == 1 && tomoyo_domain_quota_is_ok(domain)) {
1078 const char *name = tomoyo_get_file_pattern(filename)->name; 1037 const char *name = tomoyo_get_file_pattern(filename)->name;
1079 tomoyo_update_single_path_acl(operation, name, domain, false); 1038 tomoyo_update_path_acl(operation, name, domain, false);
1080 } 1039 }
1081 if (!is_enforce) 1040 if (!is_enforce)
1082 error = 0; 1041 error = 0;
@@ -1086,9 +1045,9 @@ static int tomoyo_check_single_path_permission2(struct tomoyo_domain_info *
1086 * we need to check "allow_rewrite" permission if the filename is 1045 * we need to check "allow_rewrite" permission if the filename is
1087 * specified by "deny_rewrite" keyword. 1046 * specified by "deny_rewrite" keyword.
1088 */ 1047 */
1089 if (!error && operation == TOMOYO_TYPE_TRUNCATE_ACL && 1048 if (!error && operation == TOMOYO_TYPE_TRUNCATE &&
1090 tomoyo_is_no_rewrite_file(filename)) { 1049 tomoyo_is_no_rewrite_file(filename)) {
1091 operation = TOMOYO_TYPE_REWRITE_ACL; 1050 operation = TOMOYO_TYPE_REWRITE;
1092 goto next; 1051 goto next;
1093 } 1052 }
1094 return error; 1053 return error;
@@ -1101,6 +1060,8 @@ static int tomoyo_check_single_path_permission2(struct tomoyo_domain_info *
1101 * @filename: Check permission for "execute". 1060 * @filename: Check permission for "execute".
1102 * 1061 *
1103 * Returns 0 on success, negative value otherwise. 1062
1063 *
1064 * Caller holds tomoyo_read_lock().
1104 */ 1065 */
1105int tomoyo_check_exec_perm(struct tomoyo_domain_info *domain, 1066int tomoyo_check_exec_perm(struct tomoyo_domain_info *domain,
1106 const struct tomoyo_path_info *filename) 1067 const struct tomoyo_path_info *filename)
@@ -1129,6 +1090,7 @@ int tomoyo_check_open_permission(struct tomoyo_domain_info *domain,
1129 struct tomoyo_path_info *buf; 1090 struct tomoyo_path_info *buf;
1130 const u8 mode = tomoyo_check_flags(domain, TOMOYO_MAC_FOR_FILE); 1091 const u8 mode = tomoyo_check_flags(domain, TOMOYO_MAC_FOR_FILE);
1131 const bool is_enforce = (mode == 3); 1092 const bool is_enforce = (mode == 3);
1093 int idx;
1132 1094
1133 if (!mode || !path->mnt) 1095 if (!mode || !path->mnt)
1134 return 0; 1096 return 0;
@@ -1140,6 +1102,7 @@ int tomoyo_check_open_permission(struct tomoyo_domain_info *domain,
1140 * don't call me. 1102 * don't call me.
1141 */ 1103 */
1142 return 0; 1104 return 0;
1105 idx = tomoyo_read_lock();
1143 buf = tomoyo_get_path(path); 1106 buf = tomoyo_get_path(path);
1144 if (!buf) 1107 if (!buf)
1145 goto out; 1108 goto out;
@@ -1152,49 +1115,50 @@ int tomoyo_check_open_permission(struct tomoyo_domain_info *domain,
1152 if ((acc_mode & MAY_WRITE) && 1115 if ((acc_mode & MAY_WRITE) &&
1153 ((flag & O_TRUNC) || !(flag & O_APPEND)) && 1116 ((flag & O_TRUNC) || !(flag & O_APPEND)) &&
1154 (tomoyo_is_no_rewrite_file(buf))) { 1117 (tomoyo_is_no_rewrite_file(buf))) {
1155 error = tomoyo_check_single_path_permission2(domain, 1118 error = tomoyo_path_permission2(domain, TOMOYO_TYPE_REWRITE,
1156 TOMOYO_TYPE_REWRITE_ACL, 1119 buf, mode);
1157 buf, mode);
1158 } 1120 }
1159 if (!error) 1121 if (!error)
1160 error = tomoyo_check_file_perm2(domain, buf, acc_mode, "open", 1122 error = tomoyo_check_file_perm2(domain, buf, acc_mode, "open",
1161 mode); 1123 mode);
1162 if (!error && (flag & O_TRUNC)) 1124 if (!error && (flag & O_TRUNC))
1163 error = tomoyo_check_single_path_permission2(domain, 1125 error = tomoyo_path_permission2(domain, TOMOYO_TYPE_TRUNCATE,
1164 TOMOYO_TYPE_TRUNCATE_ACL, 1126 buf, mode);
1165 buf, mode);
1166 out: 1127 out:
1167 tomoyo_free(buf); 1128 kfree(buf);
1129 tomoyo_read_unlock(idx);
1168 if (!is_enforce) 1130 if (!is_enforce)
1169 error = 0; 1131 error = 0;
1170 return error; 1132 return error;
1171} 1133}
1172 1134
1173/** 1135/**
1174 * tomoyo_check_1path_perm - Check permission for "create", "unlink", "mkdir", "rmdir", "mkfifo", "mksock", "mkblock", "mkchar", "truncate" and "symlink". 1136 * tomoyo_path_perm - Check permission for "create", "unlink", "mkdir", "rmdir", "mkfifo", "mksock", "mkblock", "mkchar", "truncate", "symlink", "ioctl", "chmod", "chown", "chgrp", "chroot", "mount" and "unmount".
1175 * 1137 *
1176 * @domain: Pointer to "struct tomoyo_domain_info".
1177 * @operation: Type of operation. 1138 * @operation: Type of operation.
1178 * @path: Pointer to "struct path". 1139 * @path: Pointer to "struct path".
1179 * 1140 *
1180 * Returns 0 on success, negative value otherwise. 1141 * Returns 0 on success, negative value otherwise.
1181 */ 1142 */
1182int tomoyo_check_1path_perm(struct tomoyo_domain_info *domain, 1143int tomoyo_path_perm(const u8 operation, struct path *path)
1183 const u8 operation, struct path *path)
1184{ 1144{
1185 int error = -ENOMEM; 1145 int error = -ENOMEM;
1186 struct tomoyo_path_info *buf; 1146 struct tomoyo_path_info *buf;
1147 struct tomoyo_domain_info *domain = tomoyo_domain();
1187 const u8 mode = tomoyo_check_flags(domain, TOMOYO_MAC_FOR_FILE); 1148 const u8 mode = tomoyo_check_flags(domain, TOMOYO_MAC_FOR_FILE);
1188 const bool is_enforce = (mode == 3); 1149 const bool is_enforce = (mode == 3);
1150 int idx;
1189 1151
1190 if (!mode || !path->mnt) 1152 if (!mode || !path->mnt)
1191 return 0; 1153 return 0;
1154 idx = tomoyo_read_lock();
1192 buf = tomoyo_get_path(path); 1155 buf = tomoyo_get_path(path);
1193 if (!buf) 1156 if (!buf)
1194 goto out; 1157 goto out;
1195 switch (operation) { 1158 switch (operation) {
1196 case TOMOYO_TYPE_MKDIR_ACL: 1159 case TOMOYO_TYPE_MKDIR:
1197 case TOMOYO_TYPE_RMDIR_ACL: 1160 case TOMOYO_TYPE_RMDIR:
1161 case TOMOYO_TYPE_CHROOT:
1198 if (!buf->is_dir) { 1162 if (!buf->is_dir) {
1199 /* 1163 /*
1200 * tomoyo_get_path() reserves space for appending "/." 1164 * tomoyo_get_path() reserves space for appending "/."
@@ -1203,10 +1167,10 @@ int tomoyo_check_1path_perm(struct tomoyo_domain_info *domain,
1203 tomoyo_fill_path_info(buf); 1167 tomoyo_fill_path_info(buf);
1204 } 1168 }
1205 } 1169 }
1206 error = tomoyo_check_single_path_permission2(domain, operation, buf, 1170 error = tomoyo_path_permission2(domain, operation, buf, mode);
1207 mode);
1208 out: 1171 out:
1209 tomoyo_free(buf); 1172 kfree(buf);
1173 tomoyo_read_unlock(idx);
1210 if (!is_enforce) 1174 if (!is_enforce)
1211 error = 0; 1175 error = 0;
1212 return error; 1176 return error;
@@ -1215,21 +1179,23 @@ int tomoyo_check_1path_perm(struct tomoyo_domain_info *domain,
1215/** 1179/**
1216 * tomoyo_check_rewrite_permission - Check permission for "rewrite". 1180 * tomoyo_check_rewrite_permission - Check permission for "rewrite".
1217 * 1181 *
1218 * @domain: Pointer to "struct tomoyo_domain_info".
1219 * @filp: Pointer to "struct file". 1182 * @filp: Pointer to "struct file".
1220 * 1183 *
1221 * Returns 0 on success, negative value otherwise. 1184 * Returns 0 on success, negative value otherwise.
1222 */ 1185 */
1223int tomoyo_check_rewrite_permission(struct tomoyo_domain_info *domain, 1186int tomoyo_check_rewrite_permission(struct file *filp)
1224 struct file *filp)
1225{ 1187{
1226 int error = -ENOMEM; 1188 int error = -ENOMEM;
1189 struct tomoyo_domain_info *domain = tomoyo_domain();
1227 const u8 mode = tomoyo_check_flags(domain, TOMOYO_MAC_FOR_FILE); 1190 const u8 mode = tomoyo_check_flags(domain, TOMOYO_MAC_FOR_FILE);
1228 const bool is_enforce = (mode == 3); 1191 const bool is_enforce = (mode == 3);
1229 struct tomoyo_path_info *buf; 1192 struct tomoyo_path_info *buf;
1193 int idx;
1230 1194
1231 if (!mode || !filp->f_path.mnt) 1195 if (!mode || !filp->f_path.mnt)
1232 return 0; 1196 return 0;
1197
1198 idx = tomoyo_read_lock();
1233 buf = tomoyo_get_path(&filp->f_path); 1199 buf = tomoyo_get_path(&filp->f_path);
1234 if (!buf) 1200 if (!buf)
1235 goto out; 1201 goto out;
@@ -1237,38 +1203,38 @@ int tomoyo_check_rewrite_permission(struct tomoyo_domain_info *domain,
1237 error = 0; 1203 error = 0;
1238 goto out; 1204 goto out;
1239 } 1205 }
1240 error = tomoyo_check_single_path_permission2(domain, 1206 error = tomoyo_path_permission2(domain, TOMOYO_TYPE_REWRITE, buf, mode);
1241 TOMOYO_TYPE_REWRITE_ACL,
1242 buf, mode);
1243 out: 1207 out:
1244 tomoyo_free(buf); 1208 kfree(buf);
1209 tomoyo_read_unlock(idx);
1245 if (!is_enforce) 1210 if (!is_enforce)
1246 error = 0; 1211 error = 0;
1247 return error; 1212 return error;
1248} 1213}
1249 1214
1250/** 1215/**
1251 * tomoyo_check_2path_perm - Check permission for "rename" and "link". 1216 * tomoyo_path2_perm - Check permission for "rename", "link" and "pivot_root".
1252 * 1217 *
1253 * @domain: Pointer to "struct tomoyo_domain_info".
1254 * @operation: Type of operation. 1218 * @operation: Type of operation.
1255 * @path1: Pointer to "struct path". 1219 * @path1: Pointer to "struct path".
1256 * @path2: Pointer to "struct path". 1220 * @path2: Pointer to "struct path".
1257 * 1221 *
1258 * Returns 0 on success, negative value otherwise. 1222 * Returns 0 on success, negative value otherwise.
1259 */ 1223 */
1260int tomoyo_check_2path_perm(struct tomoyo_domain_info * const domain, 1224int tomoyo_path2_perm(const u8 operation, struct path *path1,
1261 const u8 operation, struct path *path1, 1225 struct path *path2)
1262 struct path *path2)
1263{ 1226{
1264 int error = -ENOMEM; 1227 int error = -ENOMEM;
1265 struct tomoyo_path_info *buf1, *buf2; 1228 struct tomoyo_path_info *buf1, *buf2;
1229 struct tomoyo_domain_info *domain = tomoyo_domain();
1266 const u8 mode = tomoyo_check_flags(domain, TOMOYO_MAC_FOR_FILE); 1230 const u8 mode = tomoyo_check_flags(domain, TOMOYO_MAC_FOR_FILE);
1267 const bool is_enforce = (mode == 3); 1231 const bool is_enforce = (mode == 3);
1268 const char *msg; 1232 const char *msg;
1233 int idx;
1269 1234
1270 if (!mode || !path1->mnt || !path2->mnt) 1235 if (!mode || !path1->mnt || !path2->mnt)
1271 return 0; 1236 return 0;
1237 idx = tomoyo_read_lock();
1272 buf1 = tomoyo_get_path(path1); 1238 buf1 = tomoyo_get_path(path1);
1273 buf2 = tomoyo_get_path(path2); 1239 buf2 = tomoyo_get_path(path2);
1274 if (!buf1 || !buf2) 1240 if (!buf1 || !buf2)
@@ -1289,8 +1255,8 @@ int tomoyo_check_2path_perm(struct tomoyo_domain_info * const domain,
1289 } 1255 }
1290 } 1256 }
1291 } 1257 }
1292 error = tomoyo_check_double_path_acl(domain, operation, buf1, buf2); 1258 error = tomoyo_path2_acl(domain, operation, buf1, buf2);
1293 msg = tomoyo_dp2keyword(operation); 1259 msg = tomoyo_path22keyword(operation);
1294 if (!error) 1260 if (!error)
1295 goto out; 1261 goto out;
1296 if (tomoyo_verbose_mode(domain)) 1262 if (tomoyo_verbose_mode(domain))
@@ -1301,12 +1267,13 @@ int tomoyo_check_2path_perm(struct tomoyo_domain_info * const domain,
1301 if (mode == 1 && tomoyo_domain_quota_is_ok(domain)) { 1267 if (mode == 1 && tomoyo_domain_quota_is_ok(domain)) {
1302 const char *name1 = tomoyo_get_file_pattern(buf1)->name; 1268 const char *name1 = tomoyo_get_file_pattern(buf1)->name;
1303 const char *name2 = tomoyo_get_file_pattern(buf2)->name; 1269 const char *name2 = tomoyo_get_file_pattern(buf2)->name;
1304 tomoyo_update_double_path_acl(operation, name1, name2, domain, 1270 tomoyo_update_path2_acl(operation, name1, name2, domain,
1305 false); 1271 false);
1306 } 1272 }
1307 out: 1273 out:
1308 tomoyo_free(buf1); 1274 kfree(buf1);
1309 tomoyo_free(buf2); 1275 kfree(buf2);
1276 tomoyo_read_unlock(idx);
1310 if (!is_enforce) 1277 if (!is_enforce)
1311 error = 0; 1278 error = 0;
1312 return error; 1279 return error;
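
Every converted entry point in file.c now follows the same read-side bracket: idx = tomoyo_read_lock(), an RCU list traversal, then tomoyo_read_unlock(idx), replacing the old tomoyo_domain_acl_info_list_lock rwsem. The index matters because SRCU counts readers per epoch. The following deliberately simplified userspace model (C11 atomics, omitting the memory barriers and second flip that real SRCU performs) sketches why the lock returns a value the caller must hand back.

    /* Simplified model of the SRCU bracket: readers register in the
     * current epoch and return the epoch index on unlock; the writer
     * flips the epoch and waits for old-epoch readers to drain. */
    #include <stdatomic.h>
    #include <stdio.h>

    static _Atomic int epoch;       /* current epoch: 0 or 1 */
    static _Atomic int readers[2];  /* readers in flight per epoch */

    static int model_read_lock(void)
    {
        int idx = atomic_load(&epoch);
        atomic_fetch_add(&readers[idx], 1);
        return idx;                 /* caller must hand this back */
    }

    static void model_read_unlock(int idx)
    {
        atomic_fetch_sub(&readers[idx], 1);
    }

    static void model_synchronize(void)
    {
        int old = atomic_fetch_xor(&epoch, 1);  /* flip the epoch */
        while (atomic_load(&readers[old]))
            ;                       /* wait out pre-existing readers */
    }

    int main(void)
    {
        int idx = model_read_lock();
        /* ... RCU-style list traversal happens here ... */
        model_read_unlock(idx);
        model_synchronize();        /* stale entries may now be freed */
        printf("grace period elapsed\n");
        return 0;
    }
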
diff --git a/security/tomoyo/gc.c b/security/tomoyo/gc.c
new file mode 100644
index 000000000000..9645525ccdd4
--- /dev/null
+++ b/security/tomoyo/gc.c
@@ -0,0 +1,370 @@
1/*
2 * security/tomoyo/gc.c
3 *
4 * Implementation of the Domain-Based Mandatory Access Control.
5 *
6 * Copyright (C) 2005-2010 NTT DATA CORPORATION
7 *
8 */
9
10#include "common.h"
11#include <linux/kthread.h>
12
13enum tomoyo_gc_id {
14 TOMOYO_ID_DOMAIN_INITIALIZER,
15 TOMOYO_ID_DOMAIN_KEEPER,
16 TOMOYO_ID_ALIAS,
17 TOMOYO_ID_GLOBALLY_READABLE,
18 TOMOYO_ID_PATTERN,
19 TOMOYO_ID_NO_REWRITE,
20 TOMOYO_ID_MANAGER,
21 TOMOYO_ID_NAME,
22 TOMOYO_ID_ACL,
23 TOMOYO_ID_DOMAIN
24};
25
26struct tomoyo_gc_entry {
27 struct list_head list;
28 int type;
29 void *element;
30};
31static LIST_HEAD(tomoyo_gc_queue);
32static DEFINE_MUTEX(tomoyo_gc_mutex);
33
34/* Caller holds tomoyo_policy_lock mutex. */
35static bool tomoyo_add_to_gc(const int type, void *element)
36{
37 struct tomoyo_gc_entry *entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
38 if (!entry)
39 return false;
40 entry->type = type;
41 entry->element = element;
42 list_add(&entry->list, &tomoyo_gc_queue);
43 return true;
44}
45
46static void tomoyo_del_allow_read
47(struct tomoyo_globally_readable_file_entry *ptr)
48{
49 tomoyo_put_name(ptr->filename);
50}
51
52static void tomoyo_del_file_pattern(struct tomoyo_pattern_entry *ptr)
53{
54 tomoyo_put_name(ptr->pattern);
55}
56
57static void tomoyo_del_no_rewrite(struct tomoyo_no_rewrite_entry *ptr)
58{
59 tomoyo_put_name(ptr->pattern);
60}
61
62static void tomoyo_del_domain_initializer
63(struct tomoyo_domain_initializer_entry *ptr)
64{
65 tomoyo_put_name(ptr->domainname);
66 tomoyo_put_name(ptr->program);
67}
68
69static void tomoyo_del_domain_keeper(struct tomoyo_domain_keeper_entry *ptr)
70{
71 tomoyo_put_name(ptr->domainname);
72 tomoyo_put_name(ptr->program);
73}
74
75static void tomoyo_del_alias(struct tomoyo_alias_entry *ptr)
76{
77 tomoyo_put_name(ptr->original_name);
78 tomoyo_put_name(ptr->aliased_name);
79}
80
81static void tomoyo_del_manager(struct tomoyo_policy_manager_entry *ptr)
82{
83 tomoyo_put_name(ptr->manager);
84}
85
86static void tomoyo_del_acl(struct tomoyo_acl_info *acl)
87{
88 switch (acl->type) {
89 case TOMOYO_TYPE_PATH_ACL:
90 {
91 struct tomoyo_path_acl *entry
92 = container_of(acl, typeof(*entry), head);
93 tomoyo_put_name(entry->filename);
94 }
95 break;
96 case TOMOYO_TYPE_PATH2_ACL:
97 {
98 struct tomoyo_path2_acl *entry
99 = container_of(acl, typeof(*entry), head);
100 tomoyo_put_name(entry->filename1);
101 tomoyo_put_name(entry->filename2);
102 }
103 break;
104 default:
105 printk(KERN_WARNING "Unknown type\n");
106 break;
107 }
108}
109
110static bool tomoyo_del_domain(struct tomoyo_domain_info *domain)
111{
112 struct tomoyo_acl_info *acl;
113 struct tomoyo_acl_info *tmp;
114 /*
115 * Since we don't protect whole execve() operation using SRCU,
116 * we need to recheck domain->users at this point.
117 *
118 * (1) Reader starts SRCU section upon execve().
119 * (2) Reader traverses tomoyo_domain_list and finds this domain.
120 * (3) Writer marks this domain as deleted.
121 * (4) Garbage collector removes this domain from tomoyo_domain_list
122 * because this domain is marked as deleted and used by nobody.
123 * (5) Reader saves reference to this domain into
124 * "struct linux_binprm"->cred->security .
125 * (6) Reader finishes SRCU section, although execve() operation has
126 * not finished yet.
127 * (7) Garbage collector waits for SRCU synchronization.
128 * (8) Garbage collector kfree()s this domain because this domain is
129 * used by nobody.
130 * (9) Reader finishes execve() operation and restores this domain from
131 * "struct linux_binprm"->cred->security.
132 *
133 * By updating domain->users at (5), we can solve this race problem
134 * by rechecking domain->users at (8).
135 */
136 if (atomic_read(&domain->users))
137 return false;
138 list_for_each_entry_safe(acl, tmp, &domain->acl_info_list, list) {
139 tomoyo_del_acl(acl);
140 tomoyo_memory_free(acl);
141 }
142 tomoyo_put_name(domain->domainname);
143 return true;
144}
145
146
147static void tomoyo_del_name(const struct tomoyo_name_entry *ptr)
148{
149}
150
151static void tomoyo_collect_entry(void)
152{
153 mutex_lock(&tomoyo_policy_lock);
154 {
155 struct tomoyo_globally_readable_file_entry *ptr;
156 list_for_each_entry_rcu(ptr, &tomoyo_globally_readable_list,
157 list) {
158 if (!ptr->is_deleted)
159 continue;
160 if (tomoyo_add_to_gc(TOMOYO_ID_GLOBALLY_READABLE, ptr))
161 list_del_rcu(&ptr->list);
162 else
163 break;
164 }
165 }
166 {
167 struct tomoyo_pattern_entry *ptr;
168 list_for_each_entry_rcu(ptr, &tomoyo_pattern_list, list) {
169 if (!ptr->is_deleted)
170 continue;
171 if (tomoyo_add_to_gc(TOMOYO_ID_PATTERN, ptr))
172 list_del_rcu(&ptr->list);
173 else
174 break;
175 }
176 }
177 {
178 struct tomoyo_no_rewrite_entry *ptr;
179 list_for_each_entry_rcu(ptr, &tomoyo_no_rewrite_list, list) {
180 if (!ptr->is_deleted)
181 continue;
182 if (tomoyo_add_to_gc(TOMOYO_ID_NO_REWRITE, ptr))
183 list_del_rcu(&ptr->list);
184 else
185 break;
186 }
187 }
188 {
189 struct tomoyo_domain_initializer_entry *ptr;
190 list_for_each_entry_rcu(ptr, &tomoyo_domain_initializer_list,
191 list) {
192 if (!ptr->is_deleted)
193 continue;
194 if (tomoyo_add_to_gc(TOMOYO_ID_DOMAIN_INITIALIZER, ptr))
195 list_del_rcu(&ptr->list);
196 else
197 break;
198 }
199 }
200 {
201 struct tomoyo_domain_keeper_entry *ptr;
202 list_for_each_entry_rcu(ptr, &tomoyo_domain_keeper_list, list) {
203 if (!ptr->is_deleted)
204 continue;
205 if (tomoyo_add_to_gc(TOMOYO_ID_DOMAIN_KEEPER, ptr))
206 list_del_rcu(&ptr->list);
207 else
208 break;
209 }
210 }
211 {
212 struct tomoyo_alias_entry *ptr;
213 list_for_each_entry_rcu(ptr, &tomoyo_alias_list, list) {
214 if (!ptr->is_deleted)
215 continue;
216 if (tomoyo_add_to_gc(TOMOYO_ID_ALIAS, ptr))
217 list_del_rcu(&ptr->list);
218 else
219 break;
220 }
221 }
222 {
223 struct tomoyo_policy_manager_entry *ptr;
224 list_for_each_entry_rcu(ptr, &tomoyo_policy_manager_list,
225 list) {
226 if (!ptr->is_deleted)
227 continue;
228 if (tomoyo_add_to_gc(TOMOYO_ID_MANAGER, ptr))
229 list_del_rcu(&ptr->list);
230 else
231 break;
232 }
233 }
234 {
235 struct tomoyo_domain_info *domain;
236 list_for_each_entry_rcu(domain, &tomoyo_domain_list, list) {
237 struct tomoyo_acl_info *acl;
238 list_for_each_entry_rcu(acl, &domain->acl_info_list,
239 list) {
240 switch (acl->type) {
241 case TOMOYO_TYPE_PATH_ACL:
242 if (container_of(acl,
243 struct tomoyo_path_acl,
244 head)->perm ||
245 container_of(acl,
246 struct tomoyo_path_acl,
247 head)->perm_high)
248 continue;
249 break;
250 case TOMOYO_TYPE_PATH2_ACL:
251 if (container_of(acl,
252 struct tomoyo_path2_acl,
253 head)->perm)
254 continue;
255 break;
256 default:
257 continue;
258 }
259 if (tomoyo_add_to_gc(TOMOYO_ID_ACL, acl))
260 list_del_rcu(&acl->list);
261 else
262 break;
263 }
264 if (!domain->is_deleted || atomic_read(&domain->users))
265 continue;
266 /*
266 * Nobody is referring to this domain. But somebody may
267 * refer to this domain after successful execve().
269 * We recheck domain->users after SRCU synchronization.
270 */
271 if (tomoyo_add_to_gc(TOMOYO_ID_DOMAIN, domain))
272 list_del_rcu(&domain->list);
273 else
274 break;
275 }
276 }
277 mutex_unlock(&tomoyo_policy_lock);
278 mutex_lock(&tomoyo_name_list_lock);
279 {
280 int i;
281 for (i = 0; i < TOMOYO_MAX_HASH; i++) {
282 struct tomoyo_name_entry *ptr;
283 list_for_each_entry_rcu(ptr, &tomoyo_name_list[i],
284 list) {
285 if (atomic_read(&ptr->users))
286 continue;
287 if (tomoyo_add_to_gc(TOMOYO_ID_NAME, ptr))
288 list_del_rcu(&ptr->list);
289 else {
290 i = TOMOYO_MAX_HASH;
291 break;
292 }
293 }
294 }
295 }
296 mutex_unlock(&tomoyo_name_list_lock);
297}
298
299static void tomoyo_kfree_entry(void)
300{
301 struct tomoyo_gc_entry *p;
302 struct tomoyo_gc_entry *tmp;
303
304 list_for_each_entry_safe(p, tmp, &tomoyo_gc_queue, list) {
305 switch (p->type) {
306 case TOMOYO_ID_DOMAIN_INITIALIZER:
307 tomoyo_del_domain_initializer(p->element);
308 break;
309 case TOMOYO_ID_DOMAIN_KEEPER:
310 tomoyo_del_domain_keeper(p->element);
311 break;
312 case TOMOYO_ID_ALIAS:
313 tomoyo_del_alias(p->element);
314 break;
315 case TOMOYO_ID_GLOBALLY_READABLE:
316 tomoyo_del_allow_read(p->element);
317 break;
318 case TOMOYO_ID_PATTERN:
319 tomoyo_del_file_pattern(p->element);
320 break;
321 case TOMOYO_ID_NO_REWRITE:
322 tomoyo_del_no_rewrite(p->element);
323 break;
324 case TOMOYO_ID_MANAGER:
325 tomoyo_del_manager(p->element);
326 break;
327 case TOMOYO_ID_NAME:
328 tomoyo_del_name(p->element);
329 break;
330 case TOMOYO_ID_ACL:
331 tomoyo_del_acl(p->element);
332 break;
333 case TOMOYO_ID_DOMAIN:
334 if (!tomoyo_del_domain(p->element))
335 continue;
336 break;
337 default:
338 printk(KERN_WARNING "Unknown type\n");
339 break;
340 }
341 tomoyo_memory_free(p->element);
342 list_del(&p->list);
343 kfree(p);
344 }
345}
346
347static int tomoyo_gc_thread(void *unused)
348{
349 daemonize("GC for TOMOYO");
350 if (mutex_trylock(&tomoyo_gc_mutex)) {
351 int i;
352 for (i = 0; i < 10; i++) {
353 tomoyo_collect_entry();
354 if (list_empty(&tomoyo_gc_queue))
355 break;
356 synchronize_srcu(&tomoyo_ss);
357 tomoyo_kfree_entry();
358 }
359 mutex_unlock(&tomoyo_gc_mutex);
360 }
361 do_exit(0);
362}
363
364void tomoyo_run_gc(void)
365{
366 struct task_struct *task = kthread_create(tomoyo_gc_thread, NULL,
367 "GC for TOMOYO");
368 if (!IS_ERR(task))
369 wake_up_process(task);
370}
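
gc.c reclaims policy in two phases: tomoyo_collect_entry() unlinks dead entries from their RCU lists under tomoyo_policy_lock and queues them, and only after synchronize_srcu() does tomoyo_kfree_entry() drain and free the queue. Domains get an extra users recheck because an execve() in flight may still take a reference after its SRCU section ends. A minimal userspace model of the defer/reap split follows; names are illustrative and the grace-period wait between the phases is represented only by a comment.

    /* Userspace model of the collect/reap split in gc.c. */
    #include <stdbool.h>
    #include <stdlib.h>

    struct dead_entry {
        struct dead_entry *next;
        void *element;
        bool (*release)(void *element);  /* false: still in use, retry */
    };

    static struct dead_entry *gc_queue;

    /* Phase 1: the caller has already unlinked element from its list. */
    static bool gc_defer(void *element, bool (*release)(void *))
    {
        struct dead_entry *e = malloc(sizeof(*e));

        if (!e)
            return false;   /* as in the diff: leave it listed, retry */
        e->element = element;
        e->release = release;
        e->next = gc_queue;
        gc_queue = e;
        return true;
    }

    /* Phase 2: runs only after the grace period has elapsed. */
    static void gc_reap(void)
    {
        struct dead_entry **pp = &gc_queue;

        while (*pp) {
            struct dead_entry *e = *pp;

            /* tomoyo_del_domain()-style recheck: keep it queued if the
             * element gained users during the grace period. */
            if (e->release && !e->release(e->element)) {
                pp = &e->next;
                continue;
            }
            *pp = e->next;
            free(e->element);
            free(e);
        }
    }

    static bool always_unused(void *element)
    {
        (void)element;
        return true;
    }

    int main(void)
    {
        gc_defer(malloc(16), always_unused);
        /* ... synchronize with readers here ... */
        gc_reap();
        return 0;
    }
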
diff --git a/security/tomoyo/realpath.c b/security/tomoyo/realpath.c
index 18369d497eb8..cf7d61f781b9 100644
--- a/security/tomoyo/realpath.c
+++ b/security/tomoyo/realpath.c
@@ -14,9 +14,8 @@
14#include <linux/mnt_namespace.h> 14#include <linux/mnt_namespace.h>
15#include <linux/fs_struct.h> 15#include <linux/fs_struct.h>
16#include <linux/hash.h> 16#include <linux/hash.h>
17 17#include <linux/magic.h>
18#include "common.h" 18#include "common.h"
19#include "realpath.h"
20 19
21/** 20/**
22 * tomoyo_encode: Convert binary string to ascii string. 21 * tomoyo_encode: Convert binary string to ascii string.
@@ -89,30 +88,15 @@ int tomoyo_realpath_from_path2(struct path *path, char *newname,
89 sp = dentry->d_op->d_dname(dentry, newname + offset, 88 sp = dentry->d_op->d_dname(dentry, newname + offset,
90 newname_len - offset); 89 newname_len - offset);
91 } else { 90 } else {
92 /* Taken from d_namespace_path(). */ 91 struct path ns_root = {.mnt = NULL, .dentry = NULL};
93 struct path root;
94 struct path ns_root = { };
95 struct path tmp;
96 92
97 read_lock(&current->fs->lock);
98 root = current->fs->root;
99 path_get(&root);
100 read_unlock(&current->fs->lock);
101 spin_lock(&vfsmount_lock);
102 if (root.mnt && root.mnt->mnt_ns)
103 ns_root.mnt = mntget(root.mnt->mnt_ns->root);
104 if (ns_root.mnt)
105 ns_root.dentry = dget(ns_root.mnt->mnt_root);
106 spin_unlock(&vfsmount_lock);
107 spin_lock(&dcache_lock); 93 spin_lock(&dcache_lock);
108 tmp = ns_root; 94 /* go to whatever namespace root we are under */
109 sp = __d_path(path, &tmp, newname, newname_len); 95 sp = __d_path(path, &ns_root, newname, newname_len);
110 spin_unlock(&dcache_lock); 96 spin_unlock(&dcache_lock);
111 path_put(&root);
112 path_put(&ns_root);
113 /* Prepend "/proc" prefix if using internal proc vfs mount. */ 97 /* Prepend "/proc" prefix if using internal proc vfs mount. */
114 if (!IS_ERR(sp) && (path->mnt->mnt_parent == path->mnt) && 98 if (!IS_ERR(sp) && (path->mnt->mnt_flags & MNT_INTERNAL) &&
115 (strcmp(path->mnt->mnt_sb->s_type->name, "proc") == 0)) { 99 (path->mnt->mnt_sb->s_magic == PROC_SUPER_MAGIC)) {
116 sp -= 5; 100 sp -= 5;
117 if (sp >= newname) 101 if (sp >= newname)
118 memcpy(sp, "/proc", 5); 102 memcpy(sp, "/proc", 5);
@@ -149,12 +133,12 @@ int tomoyo_realpath_from_path2(struct path *path, char *newname,
149 * 133 *
150 * Returns the realpath of the given @path on success, NULL otherwise. 134 * Returns the realpath of the given @path on success, NULL otherwise.
151 * 135 *
152 * These functions use tomoyo_alloc(), so the caller must call tomoyo_free() 136 * These functions use kzalloc(), so the caller must call kfree()
153 * if these functions didn't return NULL. 137 * if these functions didn't return NULL.
154 */ 138 */
155char *tomoyo_realpath_from_path(struct path *path) 139char *tomoyo_realpath_from_path(struct path *path)
156{ 140{
157 char *buf = tomoyo_alloc(sizeof(struct tomoyo_page_buffer)); 141 char *buf = kzalloc(sizeof(struct tomoyo_page_buffer), GFP_KERNEL);
158 142
159 BUILD_BUG_ON(sizeof(struct tomoyo_page_buffer) 143 BUILD_BUG_ON(sizeof(struct tomoyo_page_buffer)
160 <= TOMOYO_MAX_PATHNAME_LEN - 1); 144 <= TOMOYO_MAX_PATHNAME_LEN - 1);
@@ -163,7 +147,7 @@ char *tomoyo_realpath_from_path(struct path *path)
163 if (tomoyo_realpath_from_path2(path, buf, 147 if (tomoyo_realpath_from_path2(path, buf,
164 TOMOYO_MAX_PATHNAME_LEN - 1) == 0) 148 TOMOYO_MAX_PATHNAME_LEN - 1) == 0)
165 return buf; 149 return buf;
166 tomoyo_free(buf); 150 kfree(buf);
167 return NULL; 151 return NULL;
168} 152}
169 153
@@ -206,98 +190,47 @@ char *tomoyo_realpath_nofollow(const char *pathname)
206} 190}
207 191
208/* Memory allocated for non-string data. */ 192/* Memory allocated for non-string data. */
209static unsigned int tomoyo_allocated_memory_for_elements; 193static atomic_t tomoyo_policy_memory_size;
210/* Quota for holding non-string data. */ 194/* Quota for holding policy. */
211static unsigned int tomoyo_quota_for_elements; 195static unsigned int tomoyo_quota_for_policy;
212 196
213/** 197/**
214 * tomoyo_alloc_element - Allocate permanent memory for structures. 198 * tomoyo_memory_ok - Check memory quota.
215 * 199 *
216 * @size: Size in bytes. 200 * @ptr: Pointer to allocated memory.
217 * 201 *
218 * Returns pointer to allocated memory on success, NULL otherwise. 202 * Returns true on success, false otherwise.
219 * 203 *
220 * Memory has to be zeroed. 204 * Caller holds tomoyo_policy_lock.
221 * The RAM is chunked, so NEVER try to kfree() the returned pointer. 205 * Memory pointed by @ptr will be zeroed on success.
222 */ 206 */
223void *tomoyo_alloc_element(const unsigned int size) 207bool tomoyo_memory_ok(void *ptr)
224{ 208{
225 static char *buf; 209 int allocated_len = ptr ? ksize(ptr) : 0;
226 static DEFINE_MUTEX(lock); 210 atomic_add(allocated_len, &tomoyo_policy_memory_size);
227 static unsigned int buf_used_len = PATH_MAX; 211 if (ptr && (!tomoyo_quota_for_policy ||
228 char *ptr = NULL; 212 atomic_read(&tomoyo_policy_memory_size)
229 /*Assumes sizeof(void *) >= sizeof(long) is true. */ 213 <= tomoyo_quota_for_policy)) {
230 const unsigned int word_aligned_size 214 memset(ptr, 0, allocated_len);
231 = roundup(size, max(sizeof(void *), sizeof(long))); 215 return true;
232 if (word_aligned_size > PATH_MAX)
233 return NULL;
234 mutex_lock(&lock);
235 if (buf_used_len + word_aligned_size > PATH_MAX) {
236 if (!tomoyo_quota_for_elements ||
237 tomoyo_allocated_memory_for_elements
238 + PATH_MAX <= tomoyo_quota_for_elements)
239 ptr = kzalloc(PATH_MAX, GFP_KERNEL);
240 if (!ptr) {
241 printk(KERN_WARNING "ERROR: Out of memory "
242 "for tomoyo_alloc_element().\n");
243 if (!tomoyo_policy_loaded)
244 panic("MAC Initialization failed.\n");
245 } else {
246 buf = ptr;
247 tomoyo_allocated_memory_for_elements += PATH_MAX;
248 buf_used_len = word_aligned_size;
249 ptr = buf;
250 }
251 } else if (word_aligned_size) {
252 int i;
253 ptr = buf + buf_used_len;
254 buf_used_len += word_aligned_size;
255 for (i = 0; i < word_aligned_size; i++) {
256 if (!ptr[i])
257 continue;
258 printk(KERN_ERR "WARNING: Reserved memory was tainted! "
259 "The system might go wrong.\n");
260 ptr[i] = '\0';
261 }
262 } 216 }
263 mutex_unlock(&lock); 217 printk(KERN_WARNING "ERROR: Out of memory "
264 return ptr; 218 "for tomoyo_memory_ok().\n");
219 if (!tomoyo_policy_loaded)
220 panic("MAC Initialization failed.\n");
221 return false;
265} 222}
266 223
267/* Memory allocated for string data in bytes. */ 224/**
268static unsigned int tomoyo_allocated_memory_for_savename; 225 * tomoyo_memory_free - Free memory for elements.
269/* Quota for holding string data in bytes. */
270static unsigned int tomoyo_quota_for_savename;
271
272/*
273 * TOMOYO uses this hash only when appending a string into the string
274 * table. Frequency of appending strings is very low. So we don't need
275 * large (e.g. 64k) hash size. 256 will be sufficient.
276 */
277#define TOMOYO_HASH_BITS 8
278#define TOMOYO_MAX_HASH (1u<<TOMOYO_HASH_BITS)
279
280/*
281 * tomoyo_name_entry is a structure which is used for linking
282 * "struct tomoyo_path_info" into tomoyo_name_list .
283 * 226 *
284 * Since tomoyo_name_list manages a list of strings which are shared by 227 * @ptr: Pointer to allocated memory.
285 * multiple processes (whereas "struct tomoyo_path_info" inside
286 * "struct tomoyo_path_info_with_data" is not shared), a reference counter will
287 * be added to "struct tomoyo_name_entry" rather than "struct tomoyo_path_info"
288 * when TOMOYO starts supporting garbage collector.
289 */ 228 */
290struct tomoyo_name_entry { 229void tomoyo_memory_free(void *ptr)
291 struct list_head list; 230{
292 struct tomoyo_path_info entry; 231 atomic_sub(ksize(ptr), &tomoyo_policy_memory_size);
293}; 232 kfree(ptr);
294 233}
295/* Structure for available memory region. */
296struct tomoyo_free_memory_block_list {
297 struct list_head list;
298 char *ptr; /* Pointer to a free area. */
299 int len; /* Length of the area. */
300};
301 234
302/* 235/*
303 * tomoyo_name_list is used for holding string data used by TOMOYO. 236 * tomoyo_name_list is used for holding string data used by TOMOYO.
@@ -305,87 +238,58 @@ struct tomoyo_free_memory_block_list {
305 * "/lib/libc-2.5.so"), TOMOYO shares string data in the form of 238 * "/lib/libc-2.5.so"), TOMOYO shares string data in the form of
306 * "const struct tomoyo_path_info *". 239 * "const struct tomoyo_path_info *".
307 */ 240 */
308static struct list_head tomoyo_name_list[TOMOYO_MAX_HASH]; 241struct list_head tomoyo_name_list[TOMOYO_MAX_HASH];
242/* Lock for protecting tomoyo_name_list . */
243DEFINE_MUTEX(tomoyo_name_list_lock);
309 244
310/** 245/**
311 * tomoyo_save_name - Allocate permanent memory for string data. 246 * tomoyo_get_name - Allocate permanent memory for string data.
312 * 247 *
313 * @name: The string to store into the permanent memory. 248
314 * 249 *
315 * Returns pointer to "struct tomoyo_path_info" on success, NULL otherwise. 250 * Returns pointer to "struct tomoyo_path_info" on success, NULL otherwise.
316 *
317 * The RAM is shared, so NEVER try to modify or kfree() the returned name.
318 */ 251 */
319const struct tomoyo_path_info *tomoyo_save_name(const char *name) 252const struct tomoyo_path_info *tomoyo_get_name(const char *name)
320{ 253{
321 static LIST_HEAD(fmb_list);
322 static DEFINE_MUTEX(lock);
323 struct tomoyo_name_entry *ptr; 254 struct tomoyo_name_entry *ptr;
324 unsigned int hash; 255 unsigned int hash;
325 /* fmb contains available size in bytes.
326 fmb is removed from the fmb_list when fmb->len becomes 0. */
327 struct tomoyo_free_memory_block_list *fmb;
328 int len; 256 int len;
329 char *cp; 257 int allocated_len;
330 struct list_head *head; 258 struct list_head *head;
331 259
332 if (!name) 260 if (!name)
333 return NULL; 261 return NULL;
334 len = strlen(name) + 1; 262 len = strlen(name) + 1;
335 if (len > TOMOYO_MAX_PATHNAME_LEN) {
336 printk(KERN_WARNING "ERROR: Name too long "
337 "for tomoyo_save_name().\n");
338 return NULL;
339 }
340 hash = full_name_hash((const unsigned char *) name, len - 1); 263 hash = full_name_hash((const unsigned char *) name, len - 1);
341 head = &tomoyo_name_list[hash_long(hash, TOMOYO_HASH_BITS)]; 264 head = &tomoyo_name_list[hash_long(hash, TOMOYO_HASH_BITS)];
342 265 mutex_lock(&tomoyo_name_list_lock);
343 mutex_lock(&lock);
344 list_for_each_entry(ptr, head, list) { 266 list_for_each_entry(ptr, head, list) {
345 if (hash == ptr->entry.hash && !strcmp(name, ptr->entry.name)) 267 if (hash != ptr->entry.hash || strcmp(name, ptr->entry.name))
346 goto out; 268 continue;
347 } 269 atomic_inc(&ptr->users);
348 list_for_each_entry(fmb, &fmb_list, list) { 270 goto out;
349 if (len <= fmb->len)
350 goto ready;
351 } 271 }
352 if (!tomoyo_quota_for_savename || 272 ptr = kzalloc(sizeof(*ptr) + len, GFP_KERNEL);
353 tomoyo_allocated_memory_for_savename + PATH_MAX 273 allocated_len = ptr ? ksize(ptr) : 0;
354 <= tomoyo_quota_for_savename) 274 if (!ptr || (tomoyo_quota_for_policy &&
355 cp = kzalloc(PATH_MAX, GFP_KERNEL); 275 atomic_read(&tomoyo_policy_memory_size) + allocated_len
356 else 276 > tomoyo_quota_for_policy)) {
357 cp = NULL; 277 kfree(ptr);
358 fmb = kzalloc(sizeof(*fmb), GFP_KERNEL);
359 if (!cp || !fmb) {
360 kfree(cp);
361 kfree(fmb);
362 printk(KERN_WARNING "ERROR: Out of memory " 278 printk(KERN_WARNING "ERROR: Out of memory "
363 "for tomoyo_save_name().\n"); 279 "for tomoyo_get_name().\n");
364 if (!tomoyo_policy_loaded) 280 if (!tomoyo_policy_loaded)
365 panic("MAC Initialization failed.\n"); 281 panic("MAC Initialization failed.\n");
366 ptr = NULL; 282 ptr = NULL;
367 goto out; 283 goto out;
368 } 284 }
369 tomoyo_allocated_memory_for_savename += PATH_MAX; 285 atomic_add(allocated_len, &tomoyo_policy_memory_size);
370 list_add(&fmb->list, &fmb_list); 286 ptr->entry.name = ((char *) ptr) + sizeof(*ptr);
371 fmb->ptr = cp; 287 memmove((char *) ptr->entry.name, name, len);
372 fmb->len = PATH_MAX; 288 atomic_set(&ptr->users, 1);
373 ready:
374 ptr = tomoyo_alloc_element(sizeof(*ptr));
375 if (!ptr)
376 goto out;
377 ptr->entry.name = fmb->ptr;
378 memmove(fmb->ptr, name, len);
379 tomoyo_fill_path_info(&ptr->entry); 289 tomoyo_fill_path_info(&ptr->entry);
380 fmb->ptr += len;
381 fmb->len -= len;
382 list_add_tail(&ptr->list, head); 290 list_add_tail(&ptr->list, head);
383 if (fmb->len == 0) {
384 list_del(&fmb->list);
385 kfree(fmb);
386 }
387 out: 291 out:
388 mutex_unlock(&lock); 292 mutex_unlock(&tomoyo_name_list_lock);
389 return ptr ? &ptr->entry : NULL; 293 return ptr ? &ptr->entry : NULL;
390} 294}
391 295
@@ -400,45 +304,14 @@ void __init tomoyo_realpath_init(void)
400 for (i = 0; i < TOMOYO_MAX_HASH; i++) 304 for (i = 0; i < TOMOYO_MAX_HASH; i++)
401 INIT_LIST_HEAD(&tomoyo_name_list[i]); 305 INIT_LIST_HEAD(&tomoyo_name_list[i]);
402 INIT_LIST_HEAD(&tomoyo_kernel_domain.acl_info_list); 306 INIT_LIST_HEAD(&tomoyo_kernel_domain.acl_info_list);
403 tomoyo_kernel_domain.domainname = tomoyo_save_name(TOMOYO_ROOT_NAME); 307 tomoyo_kernel_domain.domainname = tomoyo_get_name(TOMOYO_ROOT_NAME);
404 list_add_tail(&tomoyo_kernel_domain.list, &tomoyo_domain_list); 308 /*
405 down_read(&tomoyo_domain_list_lock); 309 * tomoyo_read_lock() is not needed because this function is
310 * called before the first "delete" request.
311 */
312 list_add_tail_rcu(&tomoyo_kernel_domain.list, &tomoyo_domain_list);
406 if (tomoyo_find_domain(TOMOYO_ROOT_NAME) != &tomoyo_kernel_domain) 313 if (tomoyo_find_domain(TOMOYO_ROOT_NAME) != &tomoyo_kernel_domain)
407 panic("Can't register tomoyo_kernel_domain"); 314 panic("Can't register tomoyo_kernel_domain");
408 up_read(&tomoyo_domain_list_lock);
409}
410
411/* Memory allocated for temporary purpose. */
412static atomic_t tomoyo_dynamic_memory_size;
413
414/**
415 * tomoyo_alloc - Allocate memory for temporary purpose.
416 *
417 * @size: Size in bytes.
418 *
419 * Returns pointer to allocated memory on success, NULL otherwise.
420 */
421void *tomoyo_alloc(const size_t size)
422{
423 void *p = kzalloc(size, GFP_KERNEL);
424 if (p)
425 atomic_add(ksize(p), &tomoyo_dynamic_memory_size);
426 return p;
427}
428
429/**
430 * tomoyo_free - Release memory allocated by tomoyo_alloc().
431 *
432 * @p: Pointer returned by tomoyo_alloc(). May be NULL.
433 *
434 * Returns nothing.
435 */
436void tomoyo_free(const void *p)
437{
438 if (p) {
439 atomic_sub(ksize(p), &tomoyo_dynamic_memory_size);
440 kfree(p);
441 }
442} 315}
443 316
444/** 317/**
@@ -451,32 +324,19 @@ void tomoyo_free(const void *p)
451int tomoyo_read_memory_counter(struct tomoyo_io_buffer *head) 324int tomoyo_read_memory_counter(struct tomoyo_io_buffer *head)
452{ 325{
453 if (!head->read_eof) { 326 if (!head->read_eof) {
454 const unsigned int shared 327 const unsigned int policy
455 = tomoyo_allocated_memory_for_savename; 328 = atomic_read(&tomoyo_policy_memory_size);
456 const unsigned int private
457 = tomoyo_allocated_memory_for_elements;
458 const unsigned int dynamic
459 = atomic_read(&tomoyo_dynamic_memory_size);
460 char buffer[64]; 329 char buffer[64];
461 330
462 memset(buffer, 0, sizeof(buffer)); 331 memset(buffer, 0, sizeof(buffer));
463 if (tomoyo_quota_for_savename) 332 if (tomoyo_quota_for_policy)
464 snprintf(buffer, sizeof(buffer) - 1,
465 " (Quota: %10u)",
466 tomoyo_quota_for_savename);
467 else
468 buffer[0] = '\0';
469 tomoyo_io_printf(head, "Shared: %10u%s\n", shared, buffer);
470 if (tomoyo_quota_for_elements)
471 snprintf(buffer, sizeof(buffer) - 1, 333 snprintf(buffer, sizeof(buffer) - 1,
472 " (Quota: %10u)", 334 " (Quota: %10u)",
473 tomoyo_quota_for_elements); 335 tomoyo_quota_for_policy);
474 else 336 else
475 buffer[0] = '\0'; 337 buffer[0] = '\0';
476 tomoyo_io_printf(head, "Private: %10u%s\n", private, buffer); 338 tomoyo_io_printf(head, "Policy: %10u%s\n", policy, buffer);
477 tomoyo_io_printf(head, "Dynamic: %10u\n", dynamic); 339 tomoyo_io_printf(head, "Total: %10u\n", policy);
478 tomoyo_io_printf(head, "Total: %10u\n",
479 shared + private + dynamic);
480 head->read_eof = true; 340 head->read_eof = true;
481 } 341 }
482 return 0; 342 return 0;
@@ -494,9 +354,7 @@ int tomoyo_write_memory_quota(struct tomoyo_io_buffer *head)
494 char *data = head->write_buf; 354 char *data = head->write_buf;
495 unsigned int size; 355 unsigned int size;
496 356
497 if (sscanf(data, "Shared: %u", &size) == 1) 357 if (sscanf(data, "Policy: %u", &size) == 1)
498 tomoyo_quota_for_savename = size; 358 tomoyo_quota_for_policy = size;
499 else if (sscanf(data, "Private: %u", &size) == 1)
500 tomoyo_quota_for_elements = size;
501 return 0; 359 return 0;
502} 360}
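
realpath.c now interns strings with a per-entry reference count (tomoyo_get_name()/tomoyo_put_name()) and charges a single quota counter by the allocation size reported by ksize(). The sketch below is a compact userspace model of the scheme under stated simplifications: names are illustrative, the quota is charged by requested size where the kernel charges the actual slab size, and freeing of zero-user entries is left to the collector as in the diff.

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct name_entry {
        struct name_entry *next;
        unsigned int users;
        char name[];            /* string stored inline, as in the diff */
    };

    static struct name_entry *name_list;
    static size_t memory_used;
    static size_t memory_quota; /* 0 means unlimited */

    static const char *get_name(const char *name)
    {
        size_t len = strlen(name) + 1;
        size_t charged = sizeof(struct name_entry) + len;
        struct name_entry *p;

        for (p = name_list; p; p = p->next)
            if (!strcmp(p->name, name)) {
                p->users++;    /* share the single stored copy */
                return p->name;
            }
        if (memory_quota && memory_used + charged > memory_quota)
            return NULL;       /* over quota: refuse the insert */
        p = malloc(charged);
        if (!p)
            return NULL;
        p->next = name_list;
        p->users = 1;
        memcpy(p->name, name, len);
        name_list = p;
        memory_used += charged;
        return p->name;
    }

    static void put_name(const char *name)
    {
        struct name_entry *p;

        /* Only the count drops here; freeing entries whose count hit
         * zero is the collector's job, as in tomoyo_collect_entry(). */
        for (p = name_list; p; p = p->next)
            if (p->name == name) {
                p->users--;
                return;
            }
    }

    int main(void)
    {
        const char *a = get_name("/lib/libc-2.5.so");
        const char *b = get_name("/lib/libc-2.5.so");

        printf("shared=%s used=%zu\n", a == b ? "yes" : "no", memory_used);
        put_name(a);
        put_name(b);
        return 0;
    }
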
diff --git a/security/tomoyo/realpath.h b/security/tomoyo/realpath.h
deleted file mode 100644
index 78217a37960b..000000000000
--- a/security/tomoyo/realpath.h
+++ /dev/null
@@ -1,66 +0,0 @@
1/*
2 * security/tomoyo/realpath.h
3 *
4 * Get the canonicalized absolute pathnames. The basis for TOMOYO.
5 *
6 * Copyright (C) 2005-2009 NTT DATA CORPORATION
7 *
8 * Version: 2.2.0 2009/04/01
9 *
10 */
11
12#ifndef _SECURITY_TOMOYO_REALPATH_H
13#define _SECURITY_TOMOYO_REALPATH_H
14
15struct path;
16struct tomoyo_path_info;
17struct tomoyo_io_buffer;
18
19/* Convert binary string to ascii string. */
20int tomoyo_encode(char *buffer, int buflen, const char *str);
21
22/* Returns realpath(3) of the given pathname but ignores chroot'ed root. */
23int tomoyo_realpath_from_path2(struct path *path, char *newname,
24 int newname_len);
25
26/*
27 * Returns realpath(3) of the given pathname but ignores chroot'ed root.
28 * These functions use tomoyo_alloc(), so the caller must call tomoyo_free()
29 * if these functions didn't return NULL.
30 */
31char *tomoyo_realpath(const char *pathname);
32/*
33 * Same with tomoyo_realpath() except that it doesn't follow the final symlink.
34 */
35char *tomoyo_realpath_nofollow(const char *pathname);
36/* Same with tomoyo_realpath() except that the pathname is already solved. */
37char *tomoyo_realpath_from_path(struct path *path);
38
39/*
40 * Allocate memory for ACL entry.
41 * The RAM is chunked, so NEVER try to kfree() the returned pointer.
42 */
43void *tomoyo_alloc_element(const unsigned int size);
44
45/*
46 * Keep the given name on the RAM.
47 * The RAM is shared, so NEVER try to modify or kfree() the returned name.
48 */
49const struct tomoyo_path_info *tomoyo_save_name(const char *name);
50
51/* Allocate memory for temporary use (e.g. permission checks). */
52void *tomoyo_alloc(const size_t size);
53
54/* Free memory allocated by tomoyo_alloc(). */
55void tomoyo_free(const void *p);
56
57/* Check for memory usage. */
58int tomoyo_read_memory_counter(struct tomoyo_io_buffer *head);
59
60/* Set memory quota. */
61int tomoyo_write_memory_quota(struct tomoyo_io_buffer *head);
62
63/* Initialize realpath related code. */
64void __init tomoyo_realpath_init(void);
65
66#endif /* !defined(_SECURITY_TOMOYO_REALPATH_H) */
diff --git a/security/tomoyo/tomoyo.c b/security/tomoyo/tomoyo.c
index 2aceebf5f354..dedd97d0c163 100644
--- a/security/tomoyo/tomoyo.c
+++ b/security/tomoyo/tomoyo.c
@@ -11,8 +11,6 @@
11 11
12#include <linux/security.h> 12#include <linux/security.h>
13#include "common.h" 13#include "common.h"
14#include "tomoyo.h"
15#include "realpath.h"
16 14
17static int tomoyo_cred_alloc_blank(struct cred *new, gfp_t gfp) 15static int tomoyo_cred_alloc_blank(struct cred *new, gfp_t gfp)
18{ 16{
@@ -23,21 +21,23 @@ static int tomoyo_cred_alloc_blank(struct cred *new, gfp_t gfp)
23static int tomoyo_cred_prepare(struct cred *new, const struct cred *old, 21static int tomoyo_cred_prepare(struct cred *new, const struct cred *old,
24 gfp_t gfp) 22 gfp_t gfp)
25{ 23{
26 /* 24 struct tomoyo_domain_info *domain = old->security;
27 * Since "struct tomoyo_domain_info *" is a sharable pointer, 25 new->security = domain;
28 * we don't need to duplicate. 26 if (domain)
29 */ 27 atomic_inc(&domain->users);
30 new->security = old->security;
31 return 0; 28 return 0;
32} 29}
33 30
34static void tomoyo_cred_transfer(struct cred *new, const struct cred *old) 31static void tomoyo_cred_transfer(struct cred *new, const struct cred *old)
35{ 32{
36 /* 33 tomoyo_cred_prepare(new, old, 0);
37 * Since "struct tomoyo_domain_info *" is a sharable pointer, 34}
38 * we don't need to duplicate. 35
39 */ 36static void tomoyo_cred_free(struct cred *cred)
40 new->security = old->security; 37{
38 struct tomoyo_domain_info *domain = cred->security;
39 if (domain)
40 atomic_dec(&domain->users);
41} 41}
42 42
43static int tomoyo_bprm_set_creds(struct linux_binprm *bprm) 43static int tomoyo_bprm_set_creds(struct linux_binprm *bprm)
@@ -61,6 +61,14 @@ static int tomoyo_bprm_set_creds(struct linux_binprm *bprm)
61 if (!tomoyo_policy_loaded) 61 if (!tomoyo_policy_loaded)
62 tomoyo_load_policy(bprm->filename); 62 tomoyo_load_policy(bprm->filename);
63 /* 63 /*
 64 * Release the reference to "struct tomoyo_domain_info" stored inside
 65 * "bprm->cred->security". A new reference to "struct tomoyo_domain_info"
 66 * will be stored there and acquired later, inside
 67 * tomoyo_find_next_domain().
68 */
69 atomic_dec(&((struct tomoyo_domain_info *)
70 bprm->cred->security)->users);
71 /*
64 * Tell tomoyo_bprm_check_security() is called for the first time of an 72 * Tell tomoyo_bprm_check_security() is called for the first time of an
65 * execve operation. 73 * execve operation.
66 */ 74 */
@@ -76,8 +84,12 @@ static int tomoyo_bprm_check_security(struct linux_binprm *bprm)
76 * Execute permission is checked against pathname passed to do_execve() 84 * Execute permission is checked against pathname passed to do_execve()
77 * using current domain. 85 * using current domain.
78 */ 86 */
79 if (!domain) 87 if (!domain) {
80 return tomoyo_find_next_domain(bprm); 88 const int idx = tomoyo_read_lock();
89 const int err = tomoyo_find_next_domain(bprm);
90 tomoyo_read_unlock(idx);
91 return err;
92 }
81 /* 93 /*
82 * Read permission is checked against interpreters using next domain. 94 * Read permission is checked against interpreters using next domain.
83 */ 95 */
@@ -87,67 +99,56 @@ static int tomoyo_bprm_check_security(struct linux_binprm *bprm)
87static int tomoyo_path_truncate(struct path *path, loff_t length, 99static int tomoyo_path_truncate(struct path *path, loff_t length,
88 unsigned int time_attrs) 100 unsigned int time_attrs)
89{ 101{
90 return tomoyo_check_1path_perm(tomoyo_domain(), 102 return tomoyo_path_perm(TOMOYO_TYPE_TRUNCATE, path);
91 TOMOYO_TYPE_TRUNCATE_ACL,
92 path);
93} 103}
94 104
95static int tomoyo_path_unlink(struct path *parent, struct dentry *dentry) 105static int tomoyo_path_unlink(struct path *parent, struct dentry *dentry)
96{ 106{
97 struct path path = { parent->mnt, dentry }; 107 struct path path = { parent->mnt, dentry };
98 return tomoyo_check_1path_perm(tomoyo_domain(), 108 return tomoyo_path_perm(TOMOYO_TYPE_UNLINK, &path);
99 TOMOYO_TYPE_UNLINK_ACL,
100 &path);
101} 109}
102 110
103static int tomoyo_path_mkdir(struct path *parent, struct dentry *dentry, 111static int tomoyo_path_mkdir(struct path *parent, struct dentry *dentry,
104 int mode) 112 int mode)
105{ 113{
106 struct path path = { parent->mnt, dentry }; 114 struct path path = { parent->mnt, dentry };
107 return tomoyo_check_1path_perm(tomoyo_domain(), 115 return tomoyo_path_perm(TOMOYO_TYPE_MKDIR, &path);
108 TOMOYO_TYPE_MKDIR_ACL,
109 &path);
110} 116}
111 117
112static int tomoyo_path_rmdir(struct path *parent, struct dentry *dentry) 118static int tomoyo_path_rmdir(struct path *parent, struct dentry *dentry)
113{ 119{
114 struct path path = { parent->mnt, dentry }; 120 struct path path = { parent->mnt, dentry };
115 return tomoyo_check_1path_perm(tomoyo_domain(), 121 return tomoyo_path_perm(TOMOYO_TYPE_RMDIR, &path);
116 TOMOYO_TYPE_RMDIR_ACL,
117 &path);
118} 122}
119 123
120static int tomoyo_path_symlink(struct path *parent, struct dentry *dentry, 124static int tomoyo_path_symlink(struct path *parent, struct dentry *dentry,
121 const char *old_name) 125 const char *old_name)
122{ 126{
123 struct path path = { parent->mnt, dentry }; 127 struct path path = { parent->mnt, dentry };
124 return tomoyo_check_1path_perm(tomoyo_domain(), 128 return tomoyo_path_perm(TOMOYO_TYPE_SYMLINK, &path);
125 TOMOYO_TYPE_SYMLINK_ACL,
126 &path);
127} 129}
128 130
129static int tomoyo_path_mknod(struct path *parent, struct dentry *dentry, 131static int tomoyo_path_mknod(struct path *parent, struct dentry *dentry,
130 int mode, unsigned int dev) 132 int mode, unsigned int dev)
131{ 133{
132 struct path path = { parent->mnt, dentry }; 134 struct path path = { parent->mnt, dentry };
133 int type = TOMOYO_TYPE_CREATE_ACL; 135 int type = TOMOYO_TYPE_CREATE;
134 136
135 switch (mode & S_IFMT) { 137 switch (mode & S_IFMT) {
136 case S_IFCHR: 138 case S_IFCHR:
137 type = TOMOYO_TYPE_MKCHAR_ACL; 139 type = TOMOYO_TYPE_MKCHAR;
138 break; 140 break;
139 case S_IFBLK: 141 case S_IFBLK:
140 type = TOMOYO_TYPE_MKBLOCK_ACL; 142 type = TOMOYO_TYPE_MKBLOCK;
141 break; 143 break;
142 case S_IFIFO: 144 case S_IFIFO:
143 type = TOMOYO_TYPE_MKFIFO_ACL; 145 type = TOMOYO_TYPE_MKFIFO;
144 break; 146 break;
145 case S_IFSOCK: 147 case S_IFSOCK:
146 type = TOMOYO_TYPE_MKSOCK_ACL; 148 type = TOMOYO_TYPE_MKSOCK;
147 break; 149 break;
148 } 150 }
149 return tomoyo_check_1path_perm(tomoyo_domain(), 151 return tomoyo_path_perm(type, &path);
150 type, &path);
151} 152}
152 153
153static int tomoyo_path_link(struct dentry *old_dentry, struct path *new_dir, 154static int tomoyo_path_link(struct dentry *old_dentry, struct path *new_dir,
@@ -155,9 +156,7 @@ static int tomoyo_path_link(struct dentry *old_dentry, struct path *new_dir,
155{ 156{
156 struct path path1 = { new_dir->mnt, old_dentry }; 157 struct path path1 = { new_dir->mnt, old_dentry };
157 struct path path2 = { new_dir->mnt, new_dentry }; 158 struct path path2 = { new_dir->mnt, new_dentry };
158 return tomoyo_check_2path_perm(tomoyo_domain(), 159 return tomoyo_path2_perm(TOMOYO_TYPE_LINK, &path1, &path2);
159 TOMOYO_TYPE_LINK_ACL,
160 &path1, &path2);
161} 160}
162 161
163static int tomoyo_path_rename(struct path *old_parent, 162static int tomoyo_path_rename(struct path *old_parent,
@@ -167,16 +166,14 @@ static int tomoyo_path_rename(struct path *old_parent,
167{ 166{
168 struct path path1 = { old_parent->mnt, old_dentry }; 167 struct path path1 = { old_parent->mnt, old_dentry };
169 struct path path2 = { new_parent->mnt, new_dentry }; 168 struct path path2 = { new_parent->mnt, new_dentry };
170 return tomoyo_check_2path_perm(tomoyo_domain(), 169 return tomoyo_path2_perm(TOMOYO_TYPE_RENAME, &path1, &path2);
171 TOMOYO_TYPE_RENAME_ACL,
172 &path1, &path2);
173} 170}
174 171
175static int tomoyo_file_fcntl(struct file *file, unsigned int cmd, 172static int tomoyo_file_fcntl(struct file *file, unsigned int cmd,
176 unsigned long arg) 173 unsigned long arg)
177{ 174{
178 if (cmd == F_SETFL && ((arg ^ file->f_flags) & O_APPEND)) 175 if (cmd == F_SETFL && ((arg ^ file->f_flags) & O_APPEND))
179 return tomoyo_check_rewrite_permission(tomoyo_domain(), file); 176 return tomoyo_check_rewrite_permission(file);
180 return 0; 177 return 0;
181} 178}
182 179
@@ -189,6 +186,51 @@ static int tomoyo_dentry_open(struct file *f, const struct cred *cred)
189 return tomoyo_check_open_permission(tomoyo_domain(), &f->f_path, flags); 186 return tomoyo_check_open_permission(tomoyo_domain(), &f->f_path, flags);
190} 187}
191 188
189static int tomoyo_file_ioctl(struct file *file, unsigned int cmd,
190 unsigned long arg)
191{
192 return tomoyo_path_perm(TOMOYO_TYPE_IOCTL, &file->f_path);
193}
194
195static int tomoyo_path_chmod(struct dentry *dentry, struct vfsmount *mnt,
196 mode_t mode)
197{
198 struct path path = { mnt, dentry };
199 return tomoyo_path_perm(TOMOYO_TYPE_CHMOD, &path);
200}
201
202static int tomoyo_path_chown(struct path *path, uid_t uid, gid_t gid)
203{
204 int error = 0;
205 if (uid != (uid_t) -1)
206 error = tomoyo_path_perm(TOMOYO_TYPE_CHOWN, path);
207 if (!error && gid != (gid_t) -1)
208 error = tomoyo_path_perm(TOMOYO_TYPE_CHGRP, path);
209 return error;
210}
211
212static int tomoyo_path_chroot(struct path *path)
213{
214 return tomoyo_path_perm(TOMOYO_TYPE_CHROOT, path);
215}
216
217static int tomoyo_sb_mount(char *dev_name, struct path *path,
218 char *type, unsigned long flags, void *data)
219{
220 return tomoyo_path_perm(TOMOYO_TYPE_MOUNT, path);
221}
222
223static int tomoyo_sb_umount(struct vfsmount *mnt, int flags)
224{
225 struct path path = { mnt, mnt->mnt_root };
226 return tomoyo_path_perm(TOMOYO_TYPE_UMOUNT, &path);
227}
228
229static int tomoyo_sb_pivotroot(struct path *old_path, struct path *new_path)
230{
231 return tomoyo_path2_perm(TOMOYO_TYPE_PIVOT_ROOT, new_path, old_path);
232}
233
192/* 234/*
193 * tomoyo_security_ops is a "struct security_operations" which is used for 235 * tomoyo_security_ops is a "struct security_operations" which is used for
194 * registering TOMOYO. 236 * registering TOMOYO.
@@ -198,6 +240,7 @@ static struct security_operations tomoyo_security_ops = {
198 .cred_alloc_blank = tomoyo_cred_alloc_blank, 240 .cred_alloc_blank = tomoyo_cred_alloc_blank,
199 .cred_prepare = tomoyo_cred_prepare, 241 .cred_prepare = tomoyo_cred_prepare,
200 .cred_transfer = tomoyo_cred_transfer, 242 .cred_transfer = tomoyo_cred_transfer,
243 .cred_free = tomoyo_cred_free,
201 .bprm_set_creds = tomoyo_bprm_set_creds, 244 .bprm_set_creds = tomoyo_bprm_set_creds,
202 .bprm_check_security = tomoyo_bprm_check_security, 245 .bprm_check_security = tomoyo_bprm_check_security,
203 .file_fcntl = tomoyo_file_fcntl, 246 .file_fcntl = tomoyo_file_fcntl,
@@ -210,8 +253,18 @@ static struct security_operations tomoyo_security_ops = {
210 .path_mknod = tomoyo_path_mknod, 253 .path_mknod = tomoyo_path_mknod,
211 .path_link = tomoyo_path_link, 254 .path_link = tomoyo_path_link,
212 .path_rename = tomoyo_path_rename, 255 .path_rename = tomoyo_path_rename,
256 .file_ioctl = tomoyo_file_ioctl,
257 .path_chmod = tomoyo_path_chmod,
258 .path_chown = tomoyo_path_chown,
259 .path_chroot = tomoyo_path_chroot,
260 .sb_mount = tomoyo_sb_mount,
261 .sb_umount = tomoyo_sb_umount,
262 .sb_pivotroot = tomoyo_sb_pivotroot,
213}; 263};
214 264
265/* Lock for GC. */
266struct srcu_struct tomoyo_ss;
267
215static int __init tomoyo_init(void) 268static int __init tomoyo_init(void)
216{ 269{
217 struct cred *cred = (struct cred *) current_cred(); 270 struct cred *cred = (struct cred *) current_cred();
@@ -219,7 +272,8 @@ static int __init tomoyo_init(void)
219 if (!security_module_enable(&tomoyo_security_ops)) 272 if (!security_module_enable(&tomoyo_security_ops))
220 return 0; 273 return 0;
221 /* register ourselves with the security framework */ 274 /* register ourselves with the security framework */
222 if (register_security(&tomoyo_security_ops)) 275 if (register_security(&tomoyo_security_ops) ||
276 init_srcu_struct(&tomoyo_ss))
223 panic("Failure registering TOMOYO Linux"); 277 panic("Failure registering TOMOYO Linux");
224 printk(KERN_INFO "TOMOYO Linux initialized\n"); 278 printk(KERN_INFO "TOMOYO Linux initialized\n");
225 cred->security = &tomoyo_kernel_domain; 279 cred->security = &tomoyo_kernel_domain;
diff --git a/security/tomoyo/tomoyo.h b/security/tomoyo/tomoyo.h
deleted file mode 100644
index ed758325b1ae..000000000000
--- a/security/tomoyo/tomoyo.h
+++ /dev/null
@@ -1,94 +0,0 @@
1/*
2 * security/tomoyo/tomoyo.h
3 *
4 * Implementation of the Domain-Based Mandatory Access Control.
5 *
6 * Copyright (C) 2005-2009 NTT DATA CORPORATION
7 *
8 * Version: 2.2.0 2009/04/01
9 *
10 */
11
12#ifndef _SECURITY_TOMOYO_TOMOYO_H
13#define _SECURITY_TOMOYO_TOMOYO_H
14
15struct tomoyo_path_info;
16struct path;
17struct inode;
18struct linux_binprm;
19struct pt_regs;
20
21int tomoyo_check_exec_perm(struct tomoyo_domain_info *domain,
22 const struct tomoyo_path_info *filename);
23int tomoyo_check_open_permission(struct tomoyo_domain_info *domain,
24 struct path *path, const int flag);
25int tomoyo_check_1path_perm(struct tomoyo_domain_info *domain,
26 const u8 operation, struct path *path);
27int tomoyo_check_2path_perm(struct tomoyo_domain_info *domain,
28 const u8 operation, struct path *path1,
29 struct path *path2);
30int tomoyo_check_rewrite_permission(struct tomoyo_domain_info *domain,
31 struct file *filp);
32int tomoyo_find_next_domain(struct linux_binprm *bprm);
33
34/* Index numbers for Access Controls. */
35
36#define TOMOYO_TYPE_SINGLE_PATH_ACL 0
37#define TOMOYO_TYPE_DOUBLE_PATH_ACL 1
38
39/* Index numbers for File Controls. */
40
41/*
42 * TYPE_READ_WRITE_ACL is special. TYPE_READ_WRITE_ACL is automatically set
43 * if both TYPE_READ_ACL and TYPE_WRITE_ACL are set. Both TYPE_READ_ACL and
44 * TYPE_WRITE_ACL are automatically set if TYPE_READ_WRITE_ACL is set.
45 * TYPE_READ_WRITE_ACL is automatically cleared if either TYPE_READ_ACL or
46 * TYPE_WRITE_ACL is cleared. Both TYPE_READ_ACL and TYPE_WRITE_ACL are
47 * automatically cleared if TYPE_READ_WRITE_ACL is cleared.
48 */
49
50#define TOMOYO_TYPE_READ_WRITE_ACL 0
51#define TOMOYO_TYPE_EXECUTE_ACL 1
52#define TOMOYO_TYPE_READ_ACL 2
53#define TOMOYO_TYPE_WRITE_ACL 3
54#define TOMOYO_TYPE_CREATE_ACL 4
55#define TOMOYO_TYPE_UNLINK_ACL 5
56#define TOMOYO_TYPE_MKDIR_ACL 6
57#define TOMOYO_TYPE_RMDIR_ACL 7
58#define TOMOYO_TYPE_MKFIFO_ACL 8
59#define TOMOYO_TYPE_MKSOCK_ACL 9
60#define TOMOYO_TYPE_MKBLOCK_ACL 10
61#define TOMOYO_TYPE_MKCHAR_ACL 11
62#define TOMOYO_TYPE_TRUNCATE_ACL 12
63#define TOMOYO_TYPE_SYMLINK_ACL 13
64#define TOMOYO_TYPE_REWRITE_ACL 14
65#define TOMOYO_MAX_SINGLE_PATH_OPERATION 15
66
67#define TOMOYO_TYPE_LINK_ACL 0
68#define TOMOYO_TYPE_RENAME_ACL 1
69#define TOMOYO_MAX_DOUBLE_PATH_OPERATION 2
70
71#define TOMOYO_DOMAINPOLICY 0
72#define TOMOYO_EXCEPTIONPOLICY 1
73#define TOMOYO_DOMAIN_STATUS 2
74#define TOMOYO_PROCESS_STATUS 3
75#define TOMOYO_MEMINFO 4
76#define TOMOYO_SELFDOMAIN 5
77#define TOMOYO_VERSION 6
78#define TOMOYO_PROFILE 7
79#define TOMOYO_MANAGER 8
80
81extern struct tomoyo_domain_info tomoyo_kernel_domain;
82
83static inline struct tomoyo_domain_info *tomoyo_domain(void)
84{
85 return current_cred()->security;
86}
87
88static inline struct tomoyo_domain_info *tomoyo_real_domain(struct task_struct
89 *task)
90{
91 return task_cred_xxx(task, security);
92}
93
94#endif /* !defined(_SECURITY_TOMOYO_TOMOYO_H) */
diff --git a/tools/perf/Documentation/perf-probe.txt b/tools/perf/Documentation/perf-probe.txt
index 2de34075f6a4..34202b1be0bb 100644
--- a/tools/perf/Documentation/perf-probe.txt
+++ b/tools/perf/Documentation/perf-probe.txt
@@ -41,7 +41,8 @@ OPTIONS
41 41
42-d:: 42-d::
43--del=:: 43--del=::
 44	Delete a probe event. 44	Delete probe events. This accepts glob wildcards ('*', '?') and character
 45	classes (e.g. [a-z], [!A-Z]).
45 46
46-l:: 47-l::
47--list:: 48--list::
@@ -50,17 +51,29 @@ OPTIONS
50-L:: 51-L::
51--line=:: 52--line=::
52 Show source code lines which can be probed. This needs an argument 53 Show source code lines which can be probed. This needs an argument
 53	which specifies a range of the source code. 54	which specifies a range of the source code (see LINE SYNTAX for details).
55
56-f::
57--force::
 58	Forcibly add events with existing names.
54 59
55PROBE SYNTAX 60PROBE SYNTAX
56------------ 61------------
57Probe points are defined by the following syntax. 62Probe points are defined by the following syntax.
58 63
59 "[EVENT=]FUNC[+OFFS|:RLN|%return][@SRC]|SRC:ALN [ARG ...]" 64 1) Define event based on function name
65 [EVENT=]FUNC[@SRC][:RLN|+OFFS|%return|;PTN] [ARG ...]
66
67 2) Define event based on source file with line number
68 [EVENT=]SRC:ALN [ARG ...]
69
70 3) Define event based on source file with lazy pattern
71 [EVENT=]SRC;PTN [ARG ...]
72
60 73
61'EVENT' specifies the name of the new event; if omitted, it will be set to the name of the probed function. Currently, the event group name is set to 'probe'. 74'EVENT' specifies the name of the new event; if omitted, it will be set to the name of the probed function. Currently, the event group name is set to 'probe'.
62'FUNC' specifies a probed function name, and it may have one of the following options; '+OFFS' is the offset from function entry address in bytes, 'RLN' is the relative-line number from function entry line, and '%return' means that it probes function return. In addition, 'SRC' specifies a source file which has that function. 75'FUNC' specifies a probed function name, and it may have one of the following options: '+OFFS' is the offset from the function entry address in bytes, ':RLN' is the relative line number from the function entry line, and '%return' means that it probes the function return. ';PTN' means a lazy matching pattern (see LAZY MATCHING); note that ';PTN' must be at the end of the probe point definition. In addition, '@SRC' specifies a source file which has that function.
63It is also possible to specify a probe point by the source line number by using 'SRC:ALN' syntax, where 'SRC' is the source file path and 'ALN' is the line number. 76It is also possible to specify a probe point by the source line number or lazy matching by using 'SRC:ALN' or 'SRC;PTN' syntax, where 'SRC' is the source file path, ':ALN' is the line number and ';PTN' is the lazy matching pattern.
64'ARG' specifies the arguments of this probe point. You can use the name of a local variable, or the kprobe-tracer argument format (e.g. $retval, %ax, etc). 77'ARG' specifies the arguments of this probe point. You can use the name of a local variable, or the kprobe-tracer argument format (e.g. $retval, %ax, etc).
65 78
66LINE SYNTAX 79LINE SYNTAX
@@ -76,6 +89,41 @@ and 'ALN2' is end line number in the file. It is also possible to specify how
76many lines to show by using 'NUM'. 89many lines to show by using 'NUM'.
77So, "source.c:100-120" shows lines between 100th to l20th in source.c file. And "func:10+20" shows 20 lines from 10th line of func function. 90So, "source.c:100-120" shows lines between 100th to l20th in source.c file. And "func:10+20" shows 20 lines from 10th line of func function.
78 91
92LAZY MATCHING
93-------------
 94 Lazy line matching is similar to glob matching, but it ignores spaces in both the pattern and the target. So this accepts wildcards ('*', '?') and character classes (e.g. [a-z], [!A-Z]).
95
96e.g.
 97 'a=*' can match 'a=b', 'a = b', 'a == b' and so on.
98
 99This provides some flexibility and robustness to probe point definitions against minor code changes. For example, the actual 10th line of schedule() can easily move when schedule() is modified, but a line matching 'rq=cpu_rq*' may still exist in the function.
100
101
102EXAMPLES
103--------
104Display which lines in schedule() can be probed:
105
106 ./perf probe --line schedule
107
108Add a probe at the 12th line of the schedule() function, recording the local variable 'cpu':
109
110 ./perf probe schedule:12 cpu
111 or
112 ./perf probe --add='schedule:12 cpu'
113
 114 This will add one or more probes whose names start with "schedule".
115
 116Add probes on lines in the schedule() function which call update_rq_clock():
117
118 ./perf probe 'schedule;update_rq_clock*'
119 or
120 ./perf probe --add='schedule;update_rq_clock*'
121
 122Delete all probes on schedule():
123
124 ./perf probe --del='schedule*'
125
126
79SEE ALSO 127SEE ALSO
80-------- 128--------
81linkperf:perf-trace[1], linkperf:perf-record[1] 129linkperf:perf-trace[1], linkperf:perf-record[1]
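As an illustrative sketch of the syntax documented above (assuming a kernel with kprobes support and matching debuginfo), '%return' can be combined with the kprobe-tracer '$retval' argument to record a function's return value:

	./perf probe 'schedule%return $retval'

The resulting event is then named after the probed function under the default 'probe' group, as described in PROBE SYNTAX.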
diff --git a/tools/perf/Makefile b/tools/perf/Makefile
index 54a5b50ff312..2d537382c686 100644
--- a/tools/perf/Makefile
+++ b/tools/perf/Makefile
@@ -500,12 +500,12 @@ else
500 msg := $(error No libelf.h/libelf found, please install libelf-dev/elfutils-libelf-devel and glibc-dev[el]); 500 msg := $(error No libelf.h/libelf found, please install libelf-dev/elfutils-libelf-devel and glibc-dev[el]);
501endif 501endif
502 502
503ifneq ($(shell sh -c "(echo '\#ifndef _MIPS_SZLONG'; echo '\#define _MIPS_SZLONG 0'; echo '\#endif'; echo '\#include <dwarf.h>'; echo '\#include <libdwarf.h>'; echo 'int main(void) { Dwarf_Debug dbg; Dwarf_Error err; Dwarf_Ranges *rng; dwarf_init(0, DW_DLC_READ, 0, 0, &dbg, &err); dwarf_get_ranges(dbg, 0, &rng, 0, 0, &err); return (long)dbg; }') | $(CC) -x c - $(ALL_CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -I/usr/include/libdwarf -ldwarf -lelf -o $(BITBUCKET) $(ALL_LDFLAGS) $(EXTLIBS) "$(QUIET_STDERR)" && echo y"), y) 503ifneq ($(shell sh -c "(echo '\#include <dwarf.h>'; echo '\#include <libdw.h>'; echo 'int main(void) { Dwarf *dbg; dbg = dwarf_begin(0, DWARF_C_READ); return (long)dbg; }') | $(CC) -x c - $(ALL_CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -I/usr/include/elfutils -ldw -lelf -o $(BITBUCKET) $(ALL_LDFLAGS) $(EXTLIBS) "$(QUIET_STDERR)" && echo y"), y)
504 msg := $(warning No libdwarf.h found or old libdwarf.h found, disables dwarf support. Please install libdwarf-dev/libdwarf-devel >= 20081231); 504 msg := $(warning No libdw.h found or old libdw.h found, disables dwarf support. Please install elfutils-devel/elfutils-dev);
505 BASIC_CFLAGS += -DNO_LIBDWARF 505 BASIC_CFLAGS += -DNO_DWARF_SUPPORT
506else 506else
507 BASIC_CFLAGS += -I/usr/include/libdwarf 507 BASIC_CFLAGS += -I/usr/include/elfutils
508 EXTLIBS += -lelf -ldwarf 508 EXTLIBS += -lelf -ldw
509 LIB_OBJS += util/probe-finder.o 509 LIB_OBJS += util/probe-finder.o
510endif 510endif
511 511
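Unpacked for readability, the feature test that the ifneq line above pipes through $(CC) amounts to the following C program; it is compiled with -I/usr/include/elfutils and linked with -ldw -lelf, so the dwarf branch is taken only when a usable elfutils libdw is installed:

	#include <dwarf.h>
	#include <libdw.h>

	int main(void)
	{
		/* dwarf_begin() is provided by libdw; only successful
		 * compilation and linking matter for this probe. */
		Dwarf *dbg;

		dbg = dwarf_begin(0, DWARF_C_READ);
		return (long)dbg;
	}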
diff --git a/tools/perf/builtin-probe.c b/tools/perf/builtin-probe.c
index ad47bd4c50ef..c30a33592340 100644
--- a/tools/perf/builtin-probe.c
+++ b/tools/perf/builtin-probe.c
@@ -128,7 +128,7 @@ static void evaluate_probe_point(struct probe_point *pp)
128 pp->function); 128 pp->function);
129} 129}
130 130
131#ifndef NO_LIBDWARF 131#ifndef NO_DWARF_SUPPORT
132static int open_vmlinux(void) 132static int open_vmlinux(void)
133{ 133{
134 if (map__load(session.kmaps[MAP__FUNCTION], NULL) < 0) { 134 if (map__load(session.kmaps[MAP__FUNCTION], NULL) < 0) {
@@ -156,14 +156,16 @@ static const char * const probe_usage[] = {
156 "perf probe [<options>] --add 'PROBEDEF' [--add 'PROBEDEF' ...]", 156 "perf probe [<options>] --add 'PROBEDEF' [--add 'PROBEDEF' ...]",
157 "perf probe [<options>] --del '[GROUP:]EVENT' ...", 157 "perf probe [<options>] --del '[GROUP:]EVENT' ...",
158 "perf probe --list", 158 "perf probe --list",
159#ifndef NO_DWARF_SUPPORT
159 "perf probe --line 'LINEDESC'", 160 "perf probe --line 'LINEDESC'",
161#endif
160 NULL 162 NULL
161}; 163};
162 164
163static const struct option options[] = { 165static const struct option options[] = {
164 OPT_BOOLEAN('v', "verbose", &verbose, 166 OPT_BOOLEAN('v', "verbose", &verbose,
165 "be more verbose (show parsed arguments, etc)"), 167 "be more verbose (show parsed arguments, etc)"),
166#ifndef NO_LIBDWARF 168#ifndef NO_DWARF_SUPPORT
167 OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name, 169 OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name,
168 "file", "vmlinux pathname"), 170 "file", "vmlinux pathname"),
169#endif 171#endif
@@ -172,30 +174,32 @@ static const struct option options[] = {
172 OPT_CALLBACK('d', "del", NULL, "[GROUP:]EVENT", "delete a probe event.", 174 OPT_CALLBACK('d', "del", NULL, "[GROUP:]EVENT", "delete a probe event.",
173 opt_del_probe_event), 175 opt_del_probe_event),
174 OPT_CALLBACK('a', "add", NULL, 176 OPT_CALLBACK('a', "add", NULL,
175#ifdef NO_LIBDWARF 177#ifdef NO_DWARF_SUPPORT
176 "[EVENT=]FUNC[+OFFS|%return] [ARG ...]", 178 "[EVENT=]FUNC[+OFF|%return] [ARG ...]",
177#else 179#else
178 "[EVENT=]FUNC[+OFFS|%return|:RLN][@SRC]|SRC:ALN [ARG ...]", 180 "[EVENT=]FUNC[@SRC][+OFF|%return|:RL|;PT]|SRC:AL|SRC;PT"
181 " [ARG ...]",
179#endif 182#endif
180 "probe point definition, where\n" 183 "probe point definition, where\n"
181 "\t\tGROUP:\tGroup name (optional)\n" 184 "\t\tGROUP:\tGroup name (optional)\n"
182 "\t\tEVENT:\tEvent name\n" 185 "\t\tEVENT:\tEvent name\n"
183 "\t\tFUNC:\tFunction name\n" 186 "\t\tFUNC:\tFunction name\n"
184 "\t\tOFFS:\tOffset from function entry (in byte)\n" 187 "\t\tOFF:\tOffset from function entry (in byte)\n"
185 "\t\t%return:\tPut the probe at function return\n" 188 "\t\t%return:\tPut the probe at function return\n"
186#ifdef NO_LIBDWARF 189#ifdef NO_DWARF_SUPPORT
187 "\t\tARG:\tProbe argument (only \n" 190 "\t\tARG:\tProbe argument (only \n"
188#else 191#else
189 "\t\tSRC:\tSource code path\n" 192 "\t\tSRC:\tSource code path\n"
190 "\t\tRLN:\tRelative line number from function entry.\n" 193 "\t\tRL:\tRelative line number from function entry.\n"
191 "\t\tALN:\tAbsolute line number in file.\n" 194 "\t\tAL:\tAbsolute line number in file.\n"
195 "\t\tPT:\tLazy expression of line code.\n"
192 "\t\tARG:\tProbe argument (local variable name or\n" 196 "\t\tARG:\tProbe argument (local variable name or\n"
193#endif 197#endif
194 "\t\t\tkprobe-tracer argument format.)\n", 198 "\t\t\tkprobe-tracer argument format.)\n",
195 opt_add_probe_event), 199 opt_add_probe_event),
196 OPT_BOOLEAN('f', "force", &session.force_add, "forcibly add events" 200 OPT_BOOLEAN('f', "force", &session.force_add, "forcibly add events"
197 " with existing name"), 201 " with existing name"),
198#ifndef NO_LIBDWARF 202#ifndef NO_DWARF_SUPPORT
199 OPT_CALLBACK('L', "line", NULL, 203 OPT_CALLBACK('L', "line", NULL,
200 "FUNC[:RLN[+NUM|:RLN2]]|SRC:ALN[+NUM|:ALN2]", 204 "FUNC[:RLN[+NUM|:RLN2]]|SRC:ALN[+NUM|:ALN2]",
201 "Show source code lines.", opt_show_lines), 205 "Show source code lines.", opt_show_lines),
@@ -223,7 +227,7 @@ static void init_vmlinux(void)
223int cmd_probe(int argc, const char **argv, const char *prefix __used) 227int cmd_probe(int argc, const char **argv, const char *prefix __used)
224{ 228{
225 int i, ret; 229 int i, ret;
226#ifndef NO_LIBDWARF 230#ifndef NO_DWARF_SUPPORT
227 int fd; 231 int fd;
228#endif 232#endif
229 struct probe_point *pp; 233 struct probe_point *pp;
@@ -259,7 +263,7 @@ int cmd_probe(int argc, const char **argv, const char *prefix __used)
259 return 0; 263 return 0;
260 } 264 }
261 265
262#ifndef NO_LIBDWARF 266#ifndef NO_DWARF_SUPPORT
263 if (session.show_lines) { 267 if (session.show_lines) {
264 if (session.nr_probe != 0 || session.dellist) { 268 if (session.nr_probe != 0 || session.dellist) {
265 pr_warning(" Error: Don't use --line with" 269 pr_warning(" Error: Don't use --line with"
@@ -290,9 +294,9 @@ int cmd_probe(int argc, const char **argv, const char *prefix __used)
290 init_vmlinux(); 294 init_vmlinux();
291 295
292 if (session.need_dwarf) 296 if (session.need_dwarf)
293#ifdef NO_LIBDWARF 297#ifdef NO_DWARF_SUPPORT
294 die("Debuginfo-analysis is not supported"); 298 die("Debuginfo-analysis is not supported");
295#else /* !NO_LIBDWARF */ 299#else /* !NO_DWARF_SUPPORT */
296 pr_debug("Some probes require debuginfo.\n"); 300 pr_debug("Some probes require debuginfo.\n");
297 301
298 fd = open_vmlinux(); 302 fd = open_vmlinux();
@@ -312,7 +316,7 @@ int cmd_probe(int argc, const char **argv, const char *prefix __used)
312 continue; 316 continue;
313 317
314 lseek(fd, SEEK_SET, 0); 318 lseek(fd, SEEK_SET, 0);
315 ret = find_probepoint(fd, pp); 319 ret = find_probe_point(fd, pp);
316 if (ret > 0) 320 if (ret > 0)
317 continue; 321 continue;
318 if (ret == 0) { /* No error but failed to find probe point. */ 322 if (ret == 0) { /* No error but failed to find probe point. */
@@ -333,7 +337,7 @@ int cmd_probe(int argc, const char **argv, const char *prefix __used)
333 close(fd); 337 close(fd);
334 338
335end_dwarf: 339end_dwarf:
336#endif /* !NO_LIBDWARF */ 340#endif /* !NO_DWARF_SUPPORT */
337 341
338 /* Synthesize probes without dwarf */ 342 /* Synthesize probes without dwarf */
339 for (i = 0; i < session.nr_probe; i++) { 343 for (i = 0; i < session.nr_probe; i++) {
diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
index 8f0568849691..c971e81e9cbf 100644
--- a/tools/perf/util/probe-event.c
+++ b/tools/perf/util/probe-event.c
@@ -119,14 +119,14 @@ static void parse_perf_probe_probepoint(char *arg, struct probe_point *pp)
119 char c, nc = 0; 119 char c, nc = 0;
120 /* 120 /*
121 * <Syntax> 121 * <Syntax>
122 * perf probe [EVENT=]SRC:LN 122 * perf probe [EVENT=]SRC[:LN|;PTN]
 123 * perf probe [EVENT=]FUNC[+OFFS|%return][@SRC] 123 * perf probe [EVENT=]FUNC[@SRC][+OFFS|%return|:LN|;PTN]
124 * 124 *
125 * TODO:Group name support 125 * TODO:Group name support
126 */ 126 */
127 127
128 ptr = strchr(arg, '='); 128 ptr = strpbrk(arg, ";=@+%");
129 if (ptr) { /* Event name */ 129 if (ptr && *ptr == '=') { /* Event name */
130 *ptr = '\0'; 130 *ptr = '\0';
131 tmp = ptr + 1; 131 tmp = ptr + 1;
132 ptr = strchr(arg, ':'); 132 ptr = strchr(arg, ':');
@@ -139,7 +139,7 @@ static void parse_perf_probe_probepoint(char *arg, struct probe_point *pp)
139 arg = tmp; 139 arg = tmp;
140 } 140 }
141 141
142 ptr = strpbrk(arg, ":+@%"); 142 ptr = strpbrk(arg, ";:+@%");
143 if (ptr) { 143 if (ptr) {
144 nc = *ptr; 144 nc = *ptr;
145 *ptr++ = '\0'; 145 *ptr++ = '\0';
@@ -156,7 +156,11 @@ static void parse_perf_probe_probepoint(char *arg, struct probe_point *pp)
156 while (ptr) { 156 while (ptr) {
157 arg = ptr; 157 arg = ptr;
158 c = nc; 158 c = nc;
159 ptr = strpbrk(arg, ":+@%"); 159 if (c == ';') { /* Lazy pattern must be the last part */
160 pp->lazy_line = strdup(arg);
161 break;
162 }
163 ptr = strpbrk(arg, ";:+@%");
160 if (ptr) { 164 if (ptr) {
161 nc = *ptr; 165 nc = *ptr;
162 *ptr++ = '\0'; 166 *ptr++ = '\0';
@@ -165,13 +169,13 @@ static void parse_perf_probe_probepoint(char *arg, struct probe_point *pp)
165 case ':': /* Line number */ 169 case ':': /* Line number */
166 pp->line = strtoul(arg, &tmp, 0); 170 pp->line = strtoul(arg, &tmp, 0);
167 if (*tmp != '\0') 171 if (*tmp != '\0')
168 semantic_error("There is non-digit charactor" 172 semantic_error("There is non-digit char"
169 " in line number."); 173 " in line number.");
170 break; 174 break;
171 case '+': /* Byte offset from a symbol */ 175 case '+': /* Byte offset from a symbol */
172 pp->offset = strtoul(arg, &tmp, 0); 176 pp->offset = strtoul(arg, &tmp, 0);
173 if (*tmp != '\0') 177 if (*tmp != '\0')
174 semantic_error("There is non-digit charactor" 178 semantic_error("There is non-digit character"
175 " in offset."); 179 " in offset.");
176 break; 180 break;
177 case '@': /* File name */ 181 case '@': /* File name */
@@ -179,9 +183,6 @@ static void parse_perf_probe_probepoint(char *arg, struct probe_point *pp)
179 semantic_error("SRC@SRC is not allowed."); 183 semantic_error("SRC@SRC is not allowed.");
180 pp->file = strdup(arg); 184 pp->file = strdup(arg);
181 DIE_IF(pp->file == NULL); 185 DIE_IF(pp->file == NULL);
182 if (ptr)
183 semantic_error("@SRC must be the last "
184 "option.");
185 break; 186 break;
186 case '%': /* Probe places */ 187 case '%': /* Probe places */
187 if (strcmp(arg, "return") == 0) { 188 if (strcmp(arg, "return") == 0) {
@@ -196,11 +197,18 @@ static void parse_perf_probe_probepoint(char *arg, struct probe_point *pp)
196 } 197 }
197 198
198 /* Exclusion check */ 199 /* Exclusion check */
200 if (pp->lazy_line && pp->line)
201 semantic_error("Lazy pattern can't be used with line number.");
202
203 if (pp->lazy_line && pp->offset)
204 semantic_error("Lazy pattern can't be used with offset.");
205
199 if (pp->line && pp->offset) 206 if (pp->line && pp->offset)
200 semantic_error("Offset can't be used with line number."); 207 semantic_error("Offset can't be used with line number.");
201 208
202 if (!pp->line && pp->file && !pp->function) 209 if (!pp->line && !pp->lazy_line && pp->file && !pp->function)
203 semantic_error("File always requires line number."); 210 semantic_error("File always requires line number or "
211 "lazy pattern.");
204 212
205 if (pp->offset && !pp->function) 213 if (pp->offset && !pp->function)
206 semantic_error("Offset requires an entry function."); 214 semantic_error("Offset requires an entry function.");
@@ -208,11 +216,13 @@ static void parse_perf_probe_probepoint(char *arg, struct probe_point *pp)
208 if (pp->retprobe && !pp->function) 216 if (pp->retprobe && !pp->function)
209 semantic_error("Return probe requires an entry function."); 217 semantic_error("Return probe requires an entry function.");
210 218
211 if ((pp->offset || pp->line) && pp->retprobe) 219 if ((pp->offset || pp->line || pp->lazy_line) && pp->retprobe)
212 semantic_error("Offset/Line can't be used with return probe."); 220 semantic_error("Offset/Line/Lazy pattern can't be used with "
221 "return probe.");
213 222
214 pr_debug("symbol:%s file:%s line:%d offset:%d, return:%d\n", 223 pr_debug("symbol:%s file:%s line:%d offset:%d return:%d lazy:%s\n",
215 pp->function, pp->file, pp->line, pp->offset, pp->retprobe); 224 pp->function, pp->file, pp->line, pp->offset, pp->retprobe,
225 pp->lazy_line);
216} 226}
217 227
218/* Parse perf-probe event definition */ 228/* Parse perf-probe event definition */
@@ -458,6 +468,8 @@ static void clear_probe_point(struct probe_point *pp)
458 free(pp->function); 468 free(pp->function);
459 if (pp->file) 469 if (pp->file)
460 free(pp->file); 470 free(pp->file);
471 if (pp->lazy_line)
472 free(pp->lazy_line);
461 for (i = 0; i < pp->nr_args; i++) 473 for (i = 0; i < pp->nr_args; i++)
462 free(pp->args[i]); 474 free(pp->args[i]);
463 if (pp->args) 475 if (pp->args)
@@ -719,6 +731,7 @@ void del_trace_kprobe_events(struct strlist *dellist)
719} 731}
720 732
721#define LINEBUF_SIZE 256 733#define LINEBUF_SIZE 256
734#define NR_ADDITIONAL_LINES 2
722 735
723static void show_one_line(FILE *fp, unsigned int l, bool skip, bool show_num) 736static void show_one_line(FILE *fp, unsigned int l, bool skip, bool show_num)
724{ 737{
@@ -779,5 +792,11 @@ void show_line_range(struct line_range *lr)
779 show_one_line(fp, (l++) - lr->offset, false, false); 792 show_one_line(fp, (l++) - lr->offset, false, false);
780 show_one_line(fp, (l++) - lr->offset, false, true); 793 show_one_line(fp, (l++) - lr->offset, false, true);
781 } 794 }
795
796 if (lr->end == INT_MAX)
797 lr->end = l + NR_ADDITIONAL_LINES;
798 while (l < lr->end && !feof(fp))
799 show_one_line(fp, (l++) - lr->offset, false, false);
800
782 fclose(fp); 801 fclose(fp);
783} 802}
diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c
index 1b2124d12f68..e77dc886760e 100644
--- a/tools/perf/util/probe-finder.c
+++ b/tools/perf/util/probe-finder.c
@@ -32,21 +32,13 @@
32#include <stdarg.h> 32#include <stdarg.h>
33#include <ctype.h> 33#include <ctype.h>
34 34
35#include "string.h"
35#include "event.h" 36#include "event.h"
36#include "debug.h" 37#include "debug.h"
37#include "util.h" 38#include "util.h"
38#include "probe-finder.h" 39#include "probe-finder.h"
39 40
40 41
41/* Dwarf_Die Linkage to parent Die */
42struct die_link {
43 struct die_link *parent; /* Parent die */
44 Dwarf_Die die; /* Current die */
45};
46
47static Dwarf_Debug __dw_debug;
48static Dwarf_Error __dw_error;
49
50/* 42/*
51 * Generic dwarf analysis helpers 43 * Generic dwarf analysis helpers
52 */ 44 */
@@ -113,281 +105,190 @@ static int strtailcmp(const char *s1, const char *s2)
113 return 0; 105 return 0;
114} 106}
115 107
116/* Find the fileno of the target file. */ 108/* Line number list operations */
117static Dwarf_Unsigned cu_find_fileno(Dwarf_Die cu_die, const char *fname)
118{
119 Dwarf_Signed cnt, i;
120 Dwarf_Unsigned found = 0;
121 char **srcs;
122 int ret;
123 109
124 if (!fname) 110/* Add a line to line number list */
125 return 0; 111static void line_list__add_line(struct list_head *head, unsigned int line)
112{
113 struct line_node *ln;
114 struct list_head *p;
126 115
127 ret = dwarf_srcfiles(cu_die, &srcs, &cnt, &__dw_error); 116 /* Reverse search, because new line will be the last one */
128 if (ret == DW_DLV_OK) { 117 list_for_each_entry_reverse(ln, head, list) {
129 for (i = 0; i < cnt && !found; i++) { 118 if (ln->line < line) {
130 if (strtailcmp(srcs[i], fname) == 0) 119 p = &ln->list;
131 found = i + 1; 120 goto found;
132 dwarf_dealloc(__dw_debug, srcs[i], DW_DLA_STRING); 121 } else if (ln->line == line) /* Already exist */
133 } 122 return ;
134 for (; i < cnt; i++)
135 dwarf_dealloc(__dw_debug, srcs[i], DW_DLA_STRING);
136 dwarf_dealloc(__dw_debug, srcs, DW_DLA_LIST);
137 } 123 }
138 if (found) 124 /* List is empty, or the smallest entry */
139 pr_debug("found fno: %d\n", (int)found); 125 p = head;
140 return found; 126found:
127 pr_debug("line list: add a line %u\n", line);
128 ln = zalloc(sizeof(struct line_node));
129 DIE_IF(ln == NULL);
130 ln->line = line;
131 INIT_LIST_HEAD(&ln->list);
132 list_add(&ln->list, p);
141} 133}
142 134
 143static int cu_get_filename(Dwarf_Die cu_die, Dwarf_Unsigned fno, char **buf) 135/* Check if the line is in the line number list */
136static int line_list__has_line(struct list_head *head, unsigned int line)
144{ 137{
145 Dwarf_Signed cnt, i; 138 struct line_node *ln;
146 char **srcs; 139
 147 int ret = 0; 140 /* Linear search over the sorted line list */
148 141 list_for_each_entry(ln, head, list)
149 if (!buf || !fno) 142 if (ln->line == line)
150 return -EINVAL; 143 return 1;
151 144
152 ret = dwarf_srcfiles(cu_die, &srcs, &cnt, &__dw_error); 145 return 0;
153 if (ret == DW_DLV_OK) {
154 if ((Dwarf_Unsigned)cnt > fno - 1) {
155 *buf = strdup(srcs[fno - 1]);
156 ret = 0;
157 pr_debug("found filename: %s\n", *buf);
158 } else
159 ret = -ENOENT;
160 for (i = 0; i < cnt; i++)
161 dwarf_dealloc(__dw_debug, srcs[i], DW_DLA_STRING);
162 dwarf_dealloc(__dw_debug, srcs, DW_DLA_LIST);
163 } else
164 ret = -EINVAL;
165 return ret;
166} 146}
167 147
168/* Compare diename and tname */ 148/* Init line number list */
169static int die_compare_name(Dwarf_Die dw_die, const char *tname) 149static void line_list__init(struct list_head *head)
170{ 150{
171 char *name; 151 INIT_LIST_HEAD(head);
172 int ret;
173 ret = dwarf_diename(dw_die, &name, &__dw_error);
174 DIE_IF(ret == DW_DLV_ERROR);
175 if (ret == DW_DLV_OK) {
176 ret = strcmp(tname, name);
177 dwarf_dealloc(__dw_debug, name, DW_DLA_STRING);
178 } else
179 ret = -1;
180 return ret;
181} 152}
182 153
183/* Check the address is in the subprogram(function). */ 154/* Free line number list */
184static int die_within_subprogram(Dwarf_Die sp_die, Dwarf_Addr addr, 155static void line_list__free(struct list_head *head)
185 Dwarf_Signed *offs)
186{ 156{
187 Dwarf_Addr lopc, hipc; 157 struct line_node *ln;
188 int ret; 158 while (!list_empty(head)) {
189 159 ln = list_first_entry(head, struct line_node, list);
190 /* TODO: check ranges */ 160 list_del(&ln->list);
191 ret = dwarf_lowpc(sp_die, &lopc, &__dw_error); 161 free(ln);
192 DIE_IF(ret == DW_DLV_ERROR); 162 }
193 if (ret == DW_DLV_NO_ENTRY)
194 return 0;
195 ret = dwarf_highpc(sp_die, &hipc, &__dw_error);
196 DIE_IF(ret != DW_DLV_OK);
197 if (lopc <= addr && addr < hipc) {
198 *offs = addr - lopc;
199 return 1;
200 } else
201 return 0;
202} 163}
203 164
204/* Check the die is inlined function */ 165/* Dwarf wrappers */
205static Dwarf_Bool die_inlined_subprogram(Dwarf_Die dw_die) 166
167/* Find the realpath of the target file. */
168static const char *cu_find_realpath(Dwarf_Die *cu_die, const char *fname)
206{ 169{
207 /* TODO: check strictly */ 170 Dwarf_Files *files;
208 Dwarf_Bool inl; 171 size_t nfiles, i;
172 const char *src;
209 int ret; 173 int ret;
210 174
211 ret = dwarf_hasattr(dw_die, DW_AT_inline, &inl, &__dw_error); 175 if (!fname)
212 DIE_IF(ret == DW_DLV_ERROR); 176 return NULL;
213 return inl;
214}
215 177
 216/* Get the offset of abstract_origin */ 178 ret = dwarf_getsrcfiles(cu_die, &files, &nfiles);
217static Dwarf_Off die_get_abstract_origin(Dwarf_Die dw_die) 179 if (ret != 0)
218{ 180 return NULL;
219 Dwarf_Attribute attr;
220 Dwarf_Off cu_offs;
221 int ret;
222 181
223 ret = dwarf_attr(dw_die, DW_AT_abstract_origin, &attr, &__dw_error); 182 for (i = 0; i < nfiles; i++) {
224 DIE_IF(ret != DW_DLV_OK); 183 src = dwarf_filesrc(files, i, NULL, NULL);
225 ret = dwarf_formref(attr, &cu_offs, &__dw_error); 184 if (strtailcmp(src, fname) == 0)
226 DIE_IF(ret != DW_DLV_OK); 185 break;
227 dwarf_dealloc(__dw_debug, attr, DW_DLA_ATTR); 186 }
228 return cu_offs; 187 return src;
229} 188}
230 189
231/* Get entry pc(or low pc, 1st entry of ranges) of the die */ 190struct __addr_die_search_param {
232static Dwarf_Addr die_get_entrypc(Dwarf_Die dw_die) 191 Dwarf_Addr addr;
192 Dwarf_Die *die_mem;
193};
194
195static int __die_search_func_cb(Dwarf_Die *fn_die, void *data)
233{ 196{
234 Dwarf_Attribute attr; 197 struct __addr_die_search_param *ad = data;
235 Dwarf_Addr addr;
236 Dwarf_Off offs;
237 Dwarf_Ranges *ranges;
238 Dwarf_Signed cnt;
239 int ret;
240 198
241 /* Try to get entry pc */ 199 if (dwarf_tag(fn_die) == DW_TAG_subprogram &&
242 ret = dwarf_attr(dw_die, DW_AT_entry_pc, &attr, &__dw_error); 200 dwarf_haspc(fn_die, ad->addr)) {
243 DIE_IF(ret == DW_DLV_ERROR); 201 memcpy(ad->die_mem, fn_die, sizeof(Dwarf_Die));
244 if (ret == DW_DLV_OK) { 202 return DWARF_CB_ABORT;
245 ret = dwarf_formaddr(attr, &addr, &__dw_error);
246 DIE_IF(ret != DW_DLV_OK);
247 dwarf_dealloc(__dw_debug, attr, DW_DLA_ATTR);
248 return addr;
249 } 203 }
204 return DWARF_CB_OK;
205}
250 206
 251 /* Try to get low pc */ 207/* Search a real subprogram including this line. */
252 ret = dwarf_lowpc(dw_die, &addr, &__dw_error); 208static Dwarf_Die *die_get_real_subprogram(Dwarf_Die *cu_die, Dwarf_Addr addr,
253 DIE_IF(ret == DW_DLV_ERROR); 209 Dwarf_Die *die_mem)
254 if (ret == DW_DLV_OK) 210{
255 return addr; 211 struct __addr_die_search_param ad;
256 212 ad.addr = addr;
257 /* Try to get ranges */ 213 ad.die_mem = die_mem;
258 ret = dwarf_attr(dw_die, DW_AT_ranges, &attr, &__dw_error); 214 /* dwarf_getscopes can't find subprogram. */
259 DIE_IF(ret != DW_DLV_OK); 215 if (!dwarf_getfuncs(cu_die, __die_search_func_cb, &ad, 0))
260 ret = dwarf_formref(attr, &offs, &__dw_error); 216 return NULL;
261 DIE_IF(ret != DW_DLV_OK); 217 else
262 ret = dwarf_get_ranges(__dw_debug, offs, &ranges, &cnt, NULL, 218 return die_mem;
263 &__dw_error);
264 DIE_IF(ret != DW_DLV_OK);
265 addr = ranges[0].dwr_addr1;
266 dwarf_ranges_dealloc(__dw_debug, ranges, cnt);
267 return addr;
268} 219}
269 220
 270/* 221/* Similar to dwarf_getfuncs, but returns inlined_subroutine if it exists. */
271 * Search a Die from Die tree. 222static Dwarf_Die *die_get_inlinefunc(Dwarf_Die *sp_die, Dwarf_Addr addr,
272 * Note: cur_link->die should be deallocated in this function. 223 Dwarf_Die *die_mem)
273 */
274static int __search_die_tree(struct die_link *cur_link,
275 int (*die_cb)(struct die_link *, void *),
276 void *data)
277{ 224{
278 Dwarf_Die new_die; 225 Dwarf_Die child_die;
279 struct die_link new_link;
280 int ret; 226 int ret;
281 227
282 if (!die_cb) 228 ret = dwarf_child(sp_die, die_mem);
283 return 0; 229 if (ret != 0)
284 230 return NULL;
285 /* Check current die */
286 while (!(ret = die_cb(cur_link, data))) {
287 /* Check child die */
288 ret = dwarf_child(cur_link->die, &new_die, &__dw_error);
289 DIE_IF(ret == DW_DLV_ERROR);
290 if (ret == DW_DLV_OK) {
291 new_link.parent = cur_link;
292 new_link.die = new_die;
293 ret = __search_die_tree(&new_link, die_cb, data);
294 if (ret)
295 break;
296 }
297 231
298 /* Move to next sibling */ 232 do {
299 ret = dwarf_siblingof(__dw_debug, cur_link->die, &new_die, 233 if (dwarf_tag(die_mem) == DW_TAG_inlined_subroutine &&
300 &__dw_error); 234 dwarf_haspc(die_mem, addr))
301 DIE_IF(ret == DW_DLV_ERROR); 235 return die_mem;
302 dwarf_dealloc(__dw_debug, cur_link->die, DW_DLA_DIE);
303 cur_link->die = new_die;
304 if (ret == DW_DLV_NO_ENTRY)
305 return 0;
306 }
307 dwarf_dealloc(__dw_debug, cur_link->die, DW_DLA_DIE);
308 return ret;
309}
310 236
311/* Search a die in its children's die tree */ 237 if (die_get_inlinefunc(die_mem, addr, &child_die)) {
312static int search_die_from_children(Dwarf_Die parent_die, 238 memcpy(die_mem, &child_die, sizeof(Dwarf_Die));
313 int (*die_cb)(struct die_link *, void *), 239 return die_mem;
314 void *data) 240 }
315{ 241 } while (dwarf_siblingof(die_mem, die_mem) == 0);
316 struct die_link new_link;
317 int ret;
318 242
319 new_link.parent = NULL; 243 return NULL;
320 ret = dwarf_child(parent_die, &new_link.die, &__dw_error);
321 DIE_IF(ret == DW_DLV_ERROR);
322 if (ret == DW_DLV_OK)
323 return __search_die_tree(&new_link, die_cb, data);
324 else
325 return 0;
326} 244}
327 245
328/* Find a locdesc corresponding to the address */ 246/* Compare diename and tname */
329static int attr_get_locdesc(Dwarf_Attribute attr, Dwarf_Locdesc *desc, 247static bool die_compare_name(Dwarf_Die *dw_die, const char *tname)
330 Dwarf_Addr addr)
331{ 248{
332 Dwarf_Signed lcnt; 249 const char *name;
333 Dwarf_Locdesc **llbuf; 250 name = dwarf_diename(dw_die);
334 int ret, i; 251 DIE_IF(name == NULL);
335 252 return strcmp(tname, name);
336 ret = dwarf_loclist_n(attr, &llbuf, &lcnt, &__dw_error);
337 DIE_IF(ret != DW_DLV_OK);
338 ret = DW_DLV_NO_ENTRY;
339 for (i = 0; i < lcnt; ++i) {
340 if (llbuf[i]->ld_lopc <= addr &&
341 llbuf[i]->ld_hipc > addr) {
342 memcpy(desc, llbuf[i], sizeof(Dwarf_Locdesc));
343 desc->ld_s =
344 malloc(sizeof(Dwarf_Loc) * llbuf[i]->ld_cents);
345 DIE_IF(desc->ld_s == NULL);
346 memcpy(desc->ld_s, llbuf[i]->ld_s,
347 sizeof(Dwarf_Loc) * llbuf[i]->ld_cents);
348 ret = DW_DLV_OK;
349 break;
350 }
351 dwarf_dealloc(__dw_debug, llbuf[i]->ld_s, DW_DLA_LOC_BLOCK);
352 dwarf_dealloc(__dw_debug, llbuf[i], DW_DLA_LOCDESC);
353 }
354 /* Releasing loop */
355 for (; i < lcnt; ++i) {
356 dwarf_dealloc(__dw_debug, llbuf[i]->ld_s, DW_DLA_LOC_BLOCK);
357 dwarf_dealloc(__dw_debug, llbuf[i], DW_DLA_LOCDESC);
358 }
359 dwarf_dealloc(__dw_debug, llbuf, DW_DLA_LIST);
360 return ret;
361} 253}
362 254
363/* Get decl_file attribute value (file number) */ 255/* Get entry pc(or low pc, 1st entry of ranges) of the die */
364static Dwarf_Unsigned die_get_decl_file(Dwarf_Die sp_die) 256static Dwarf_Addr die_get_entrypc(Dwarf_Die *dw_die)
365{ 257{
366 Dwarf_Attribute attr; 258 Dwarf_Addr epc;
367 Dwarf_Unsigned fno;
368 int ret; 259 int ret;
369 260
370 ret = dwarf_attr(sp_die, DW_AT_decl_file, &attr, &__dw_error); 261 ret = dwarf_entrypc(dw_die, &epc);
371 DIE_IF(ret != DW_DLV_OK); 262 DIE_IF(ret == -1);
372 dwarf_formudata(attr, &fno, &__dw_error); 263 return epc;
373 DIE_IF(ret != DW_DLV_OK);
374 dwarf_dealloc(__dw_debug, attr, DW_DLA_ATTR);
375 return fno;
376} 264}
377 265
378/* Get decl_line attribute value (line number) */ 266/* Get a variable die */
379static Dwarf_Unsigned die_get_decl_line(Dwarf_Die sp_die) 267static Dwarf_Die *die_find_variable(Dwarf_Die *sp_die, const char *name,
268 Dwarf_Die *die_mem)
380{ 269{
381 Dwarf_Attribute attr; 270 Dwarf_Die child_die;
382 Dwarf_Unsigned lno; 271 int tag;
383 int ret; 272 int ret;
384 273
385 ret = dwarf_attr(sp_die, DW_AT_decl_line, &attr, &__dw_error); 274 ret = dwarf_child(sp_die, die_mem);
386 DIE_IF(ret != DW_DLV_OK); 275 if (ret != 0)
387 dwarf_formudata(attr, &lno, &__dw_error); 276 return NULL;
388 DIE_IF(ret != DW_DLV_OK); 277
389 dwarf_dealloc(__dw_debug, attr, DW_DLA_ATTR); 278 do {
390 return lno; 279 tag = dwarf_tag(die_mem);
280 if ((tag == DW_TAG_formal_parameter ||
281 tag == DW_TAG_variable) &&
282 (die_compare_name(die_mem, name) == 0))
283 return die_mem;
284
285 if (die_find_variable(die_mem, name, &child_die)) {
286 memcpy(die_mem, &child_die, sizeof(Dwarf_Die));
287 return die_mem;
288 }
289 } while (dwarf_siblingof(die_mem, die_mem) == 0);
290
291 return NULL;
391} 292}
392 293
393/* 294/*
@@ -395,47 +296,45 @@ static Dwarf_Unsigned die_get_decl_line(Dwarf_Die sp_die)
395 */ 296 */
396 297
397/* Show a location */ 298/* Show a location */
398static void show_location(Dwarf_Loc *loc, struct probe_finder *pf) 299static void show_location(Dwarf_Op *op, struct probe_finder *pf)
399{ 300{
400 Dwarf_Small op; 301 unsigned int regn;
401 Dwarf_Unsigned regn; 302 Dwarf_Word offs = 0;
402 Dwarf_Signed offs;
403 int deref = 0, ret; 303 int deref = 0, ret;
404 const char *regs; 304 const char *regs;
405 305
406 op = loc->lr_atom; 306 /* TODO: support CFA */
407
408 /* If this is based on frame buffer, set the offset */ 307 /* If this is based on frame buffer, set the offset */
409 if (op == DW_OP_fbreg) { 308 if (op->atom == DW_OP_fbreg) {
309 if (pf->fb_ops == NULL)
310 die("The attribute of frame base is not supported.\n");
410 deref = 1; 311 deref = 1;
411 offs = (Dwarf_Signed)loc->lr_number; 312 offs = op->number;
412 op = pf->fbloc.ld_s[0].lr_atom; 313 op = &pf->fb_ops[0];
413 loc = &pf->fbloc.ld_s[0]; 314 }
414 } else
415 offs = 0;
416 315
417 if (op >= DW_OP_breg0 && op <= DW_OP_breg31) { 316 if (op->atom >= DW_OP_breg0 && op->atom <= DW_OP_breg31) {
418 regn = op - DW_OP_breg0; 317 regn = op->atom - DW_OP_breg0;
419 offs += (Dwarf_Signed)loc->lr_number; 318 offs += op->number;
420 deref = 1; 319 deref = 1;
421 } else if (op >= DW_OP_reg0 && op <= DW_OP_reg31) { 320 } else if (op->atom >= DW_OP_reg0 && op->atom <= DW_OP_reg31) {
422 regn = op - DW_OP_reg0; 321 regn = op->atom - DW_OP_reg0;
423 } else if (op == DW_OP_bregx) { 322 } else if (op->atom == DW_OP_bregx) {
424 regn = loc->lr_number; 323 regn = op->number;
425 offs += (Dwarf_Signed)loc->lr_number2; 324 offs += op->number2;
426 deref = 1; 325 deref = 1;
427 } else if (op == DW_OP_regx) { 326 } else if (op->atom == DW_OP_regx) {
428 regn = loc->lr_number; 327 regn = op->number;
429 } else 328 } else
430 die("Dwarf_OP %d is not supported.", op); 329 die("DW_OP %d is not supported.", op->atom);
431 330
432 regs = get_arch_regstr(regn); 331 regs = get_arch_regstr(regn);
433 if (!regs) 332 if (!regs)
434 die("%lld exceeds max register number.", regn); 333 die("%u exceeds max register number.", regn);
435 334
436 if (deref) 335 if (deref)
437 ret = snprintf(pf->buf, pf->len, 336 ret = snprintf(pf->buf, pf->len, " %s=+%ju(%s)",
438 " %s=%+lld(%s)", pf->var, offs, regs); 337 pf->var, (uintmax_t)offs, regs);
439 else 338 else
440 ret = snprintf(pf->buf, pf->len, " %s=%s", pf->var, regs); 339 ret = snprintf(pf->buf, pf->len, " %s=%s", pf->var, regs);
441 DIE_IF(ret < 0); 340 DIE_IF(ret < 0);
@@ -443,52 +342,37 @@ static void show_location(Dwarf_Loc *loc, struct probe_finder *pf)
443} 342}
444 343
445/* Show a variables in kprobe event format */ 344/* Show a variables in kprobe event format */
446static void show_variable(Dwarf_Die vr_die, struct probe_finder *pf) 345static void show_variable(Dwarf_Die *vr_die, struct probe_finder *pf)
447{ 346{
448 Dwarf_Attribute attr; 347 Dwarf_Attribute attr;
449 Dwarf_Locdesc ld; 348 Dwarf_Op *expr;
349 size_t nexpr;
450 int ret; 350 int ret;
451 351
452 ret = dwarf_attr(vr_die, DW_AT_location, &attr, &__dw_error); 352 if (dwarf_attr(vr_die, DW_AT_location, &attr) == NULL)
453 if (ret != DW_DLV_OK)
454 goto error; 353 goto error;
455 ret = attr_get_locdesc(attr, &ld, (pf->addr - pf->cu_base)); 354 /* TODO: handle more than 1 exprs */
456 if (ret != DW_DLV_OK) 355 ret = dwarf_getlocation_addr(&attr, (pf->addr - pf->cu_base),
356 &expr, &nexpr, 1);
357 if (ret <= 0 || nexpr == 0)
457 goto error; 358 goto error;
458 /* TODO? */ 359
459 DIE_IF(ld.ld_cents != 1); 360 show_location(expr, pf);
460 show_location(&ld.ld_s[0], pf); 361 /* *expr will be cached in libdw. Don't free it. */
461 free(ld.ld_s);
462 dwarf_dealloc(__dw_debug, attr, DW_DLA_ATTR);
463 return ; 362 return ;
464error: 363error:
364 /* TODO: Support const_value */
465 die("Failed to find the location of %s at this address.\n" 365 die("Failed to find the location of %s at this address.\n"
466 " Perhaps, it has been optimized out.", pf->var); 366 " Perhaps, it has been optimized out.", pf->var);
467} 367}
468 368
469static int variable_callback(struct die_link *dlink, void *data)
470{
471 struct probe_finder *pf = (struct probe_finder *)data;
472 Dwarf_Half tag;
473 int ret;
474
475 ret = dwarf_tag(dlink->die, &tag, &__dw_error);
476 DIE_IF(ret == DW_DLV_ERROR);
477 if ((tag == DW_TAG_formal_parameter ||
478 tag == DW_TAG_variable) &&
479 (die_compare_name(dlink->die, pf->var) == 0)) {
480 show_variable(dlink->die, pf);
481 return 1;
482 }
483 /* TODO: Support struct members and arrays */
484 return 0;
485}
486
487/* Find a variable in a subprogram die */ 369/* Find a variable in a subprogram die */
488static void find_variable(Dwarf_Die sp_die, struct probe_finder *pf) 370static void find_variable(Dwarf_Die *sp_die, struct probe_finder *pf)
489{ 371{
490 int ret; 372 int ret;
373 Dwarf_Die vr_die;
491 374
375 /* TODO: Support struct members and arrays */
492 if (!is_c_varname(pf->var)) { 376 if (!is_c_varname(pf->var)) {
493 /* Output raw parameters */ 377 /* Output raw parameters */
494 ret = snprintf(pf->buf, pf->len, " %s", pf->var); 378 ret = snprintf(pf->buf, pf->len, " %s", pf->var);
@@ -499,58 +383,51 @@ static void find_variable(Dwarf_Die sp_die, struct probe_finder *pf)
499 383
500 pr_debug("Searching '%s' variable in context.\n", pf->var); 384 pr_debug("Searching '%s' variable in context.\n", pf->var);
501 /* Search child die for local variables and parameters. */ 385 /* Search child die for local variables and parameters. */
502 ret = search_die_from_children(sp_die, variable_callback, pf); 386 if (!die_find_variable(sp_die, pf->var, &vr_die))
503 if (!ret)
504 die("Failed to find '%s' in this function.", pf->var); 387 die("Failed to find '%s' in this function.", pf->var);
505}
506
507/* Get a frame base on the address */
508static void get_current_frame_base(Dwarf_Die sp_die, struct probe_finder *pf)
509{
510 Dwarf_Attribute attr;
511 int ret;
512 388
513 ret = dwarf_attr(sp_die, DW_AT_frame_base, &attr, &__dw_error); 389 show_variable(&vr_die, pf);
514 DIE_IF(ret != DW_DLV_OK);
515 ret = attr_get_locdesc(attr, &pf->fbloc, (pf->addr - pf->cu_base));
516 DIE_IF(ret != DW_DLV_OK);
517 dwarf_dealloc(__dw_debug, attr, DW_DLA_ATTR);
518}
519
520static void free_current_frame_base(struct probe_finder *pf)
521{
522 free(pf->fbloc.ld_s);
523 memset(&pf->fbloc, 0, sizeof(Dwarf_Locdesc));
524} 390}
525 391
526/* Show a probe point to output buffer */ 392/* Show a probe point to output buffer */
527static void show_probepoint(Dwarf_Die sp_die, Dwarf_Signed offs, 393static void show_probe_point(Dwarf_Die *sp_die, struct probe_finder *pf)
528 struct probe_finder *pf)
529{ 394{
530 struct probe_point *pp = pf->pp; 395 struct probe_point *pp = pf->pp;
531 char *name; 396 Dwarf_Addr eaddr;
397 Dwarf_Die die_mem;
398 const char *name;
532 char tmp[MAX_PROBE_BUFFER]; 399 char tmp[MAX_PROBE_BUFFER];
533 int ret, i, len; 400 int ret, i, len;
401 Dwarf_Attribute fb_attr;
402 size_t nops;
403
404 /* If no real subprogram, find a real one */
405 if (!sp_die || dwarf_tag(sp_die) != DW_TAG_subprogram) {
406 sp_die = die_get_real_subprogram(&pf->cu_die,
407 pf->addr, &die_mem);
408 if (!sp_die)
409 die("Probe point is not found in subprograms.");
410 }
534 411
535 /* Output name of probe point */ 412 /* Output name of probe point */
536 ret = dwarf_diename(sp_die, &name, &__dw_error); 413 name = dwarf_diename(sp_die);
537 DIE_IF(ret == DW_DLV_ERROR); 414 if (name) {
538 if (ret == DW_DLV_OK) { 415 dwarf_entrypc(sp_die, &eaddr);
539 ret = snprintf(tmp, MAX_PROBE_BUFFER, "%s+%u", name, 416 ret = snprintf(tmp, MAX_PROBE_BUFFER, "%s+%lu", name,
540 (unsigned int)offs); 417 (unsigned long)(pf->addr - eaddr));
541 /* Copy the function name if possible */ 418 /* Copy the function name if possible */
542 if (!pp->function) { 419 if (!pp->function) {
543 pp->function = strdup(name); 420 pp->function = strdup(name);
544 pp->offset = offs; 421 pp->offset = (size_t)(pf->addr - eaddr);
545 } 422 }
546 dwarf_dealloc(__dw_debug, name, DW_DLA_STRING);
547 } else { 423 } else {
548 /* This function has no name. */ 424 /* This function has no name. */
549 ret = snprintf(tmp, MAX_PROBE_BUFFER, "0x%llx", pf->addr); 425 ret = snprintf(tmp, MAX_PROBE_BUFFER, "0x%jx",
426 (uintmax_t)pf->addr);
550 if (!pp->function) { 427 if (!pp->function) {
551 /* TODO: Use _stext */ 428 /* TODO: Use _stext */
552 pp->function = strdup(""); 429 pp->function = strdup("");
553 pp->offset = (int)pf->addr; 430 pp->offset = (size_t)pf->addr;
554 } 431 }
555 } 432 }
556 DIE_IF(ret < 0); 433 DIE_IF(ret < 0);
@@ -558,8 +435,15 @@ static void show_probepoint(Dwarf_Die sp_die, Dwarf_Signed offs,
558 len = ret; 435 len = ret;
559 pr_debug("Probe point found: %s\n", tmp); 436 pr_debug("Probe point found: %s\n", tmp);
560 437
438 /* Get the frame base attribute/ops */
439 dwarf_attr(sp_die, DW_AT_frame_base, &fb_attr);
440 ret = dwarf_getlocation_addr(&fb_attr, (pf->addr - pf->cu_base),
441 &pf->fb_ops, &nops, 1);
442 if (ret <= 0 || nops == 0)
443 pf->fb_ops = NULL;
444
561 /* Find each argument */ 445 /* Find each argument */
562 get_current_frame_base(sp_die, pf); 446 /* TODO: use dwarf_cfi_addrframe */
563 for (i = 0; i < pp->nr_args; i++) { 447 for (i = 0; i < pp->nr_args; i++) {
564 pf->var = pp->args[i]; 448 pf->var = pp->args[i];
565 pf->buf = &tmp[len]; 449 pf->buf = &tmp[len];
@@ -567,289 +451,327 @@ static void show_probepoint(Dwarf_Die sp_die, Dwarf_Signed offs,
567 find_variable(sp_die, pf); 451 find_variable(sp_die, pf);
568 len += strlen(pf->buf); 452 len += strlen(pf->buf);
569 } 453 }
570 free_current_frame_base(pf); 454
455 /* *pf->fb_ops will be cached in libdw. Don't free it. */
456 pf->fb_ops = NULL;
571 457
572 pp->probes[pp->found] = strdup(tmp); 458 pp->probes[pp->found] = strdup(tmp);
573 pp->found++; 459 pp->found++;
574} 460}
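
Note on the frame-base handling above: with libdw, DW_AT_frame_base is fetched as an attribute and expanded into location operations with dwarf_getlocation_addr(); the returned Dwarf_Op array is owned and cached by libdw, which is why the code only clears the pointer instead of freeing it. A minimal standalone sketch of that lookup (hypothetical function name, error handling reduced to early returns; assumes a valid subprogram DIE and an address already rebased against the CU):

    /* Sketch only, not part of the patch. */
    #include <elfutils/libdw.h>
    #include <dwarf.h>
    #include <stdio.h>

    static void show_frame_base(Dwarf_Die *sp_die, Dwarf_Addr pc)
    {
            Dwarf_Attribute fb_attr;
            Dwarf_Op *fb_ops;
            size_t nops;

            if (dwarf_attr(sp_die, DW_AT_frame_base, &fb_attr) == NULL)
                    return;         /* no frame base recorded */
            /* Ask for at most one location expression (last argument). */
            if (dwarf_getlocation_addr(&fb_attr, pc, &fb_ops, &nops, 1) <= 0 ||
                nops == 0)
                    return;
            printf("frame base: %zu op(s), first atom 0x%x\n",
                   nops, fb_ops[0].atom);
            /* fb_ops is cached inside libdw, so it must not be freed. */
    }
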
575 461
576static int probeaddr_callback(struct die_link *dlink, void *data) 462/* Find probe point from its line number */
463static void find_probe_point_by_line(struct probe_finder *pf)
577{ 464{
578 struct probe_finder *pf = (struct probe_finder *)data; 465 Dwarf_Lines *lines;
579 Dwarf_Half tag; 466 Dwarf_Line *line;
580 Dwarf_Signed offs; 467 size_t nlines, i;
468 Dwarf_Addr addr;
469 int lineno;
581 int ret; 470 int ret;
582 471
583 ret = dwarf_tag(dlink->die, &tag, &__dw_error); 472 ret = dwarf_getsrclines(&pf->cu_die, &lines, &nlines);
584 DIE_IF(ret == DW_DLV_ERROR); 473 DIE_IF(ret != 0);
585 /* Check the address is in this subprogram */ 474
586 if (tag == DW_TAG_subprogram && 475 for (i = 0; i < nlines; i++) {
587 die_within_subprogram(dlink->die, pf->addr, &offs)) { 476 line = dwarf_onesrcline(lines, i);
588 show_probepoint(dlink->die, offs, pf); 477 dwarf_lineno(line, &lineno);
589 return 1; 478 if (lineno != pf->lno)
479 continue;
480
481 /* TODO: Get fileno from line, but how? */
482 if (strtailcmp(dwarf_linesrc(line, NULL, NULL), pf->fname) != 0)
483 continue;
484
485 ret = dwarf_lineaddr(line, &addr);
486 DIE_IF(ret != 0);
487 pr_debug("Probe line found: line[%d]:%d addr:0x%jx\n",
488 (int)i, lineno, (uintmax_t)addr);
489 pf->addr = addr;
490
491 show_probe_point(NULL, pf);
492 /* Continuing, because target line might be inlined. */
590 } 493 }
591 return 0;
592} 494}
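
The line-table walk above is the standard libdw pattern: dwarf_getsrclines() materializes the whole table for a CU, dwarf_onesrcline() indexes into it, and dwarf_lineno()/dwarf_lineaddr()/dwarf_linesrc() decode one row. A condensed sketch under the same assumptions (CU DIE already in hand, errors handled by skipping):

    #include <elfutils/libdw.h>
    #include <stdio.h>

    static void list_matching_lines(Dwarf_Die *cu_die, int wanted)
    {
            Dwarf_Lines *lines;
            Dwarf_Line *line;
            size_t nlines, i;
            Dwarf_Addr addr;
            const char *src;
            int lineno;

            if (dwarf_getsrclines(cu_die, &lines, &nlines) != 0)
                    return;         /* no line table for this CU */
            for (i = 0; i < nlines; i++) {
                    line = dwarf_onesrcline(lines, i);
                    if (dwarf_lineno(line, &lineno) != 0 || lineno != wanted)
                            continue;
                    if (dwarf_lineaddr(line, &addr) != 0)
                            continue;
                    src = dwarf_linesrc(line, NULL, NULL);
                    printf("%s:%d -> 0x%llx\n", src ? src : "?",
                           lineno, (unsigned long long)addr);
            }
            /* 'lines' is owned by libdw and released with the Dwarf handle,
             * which answers the "deallocate lines, but how?" TODO above. */
    }
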
593 495
594/* Find probe point from its line number */ 496/* Find lines which match lazy pattern */
595static void find_probe_point_by_line(struct probe_finder *pf) 497static int find_lazy_match_lines(struct list_head *head,
498 const char *fname, const char *pat)
596{ 499{
597 Dwarf_Signed cnt, i, clm; 500 char *fbuf, *p1, *p2;
598 Dwarf_Line *lines; 501 int fd, line, nlines = 0;
599 Dwarf_Unsigned lineno = 0; 502 struct stat st;
503
504 fd = open(fname, O_RDONLY);
505 if (fd < 0)
506 die("failed to open %s", fname);
507 DIE_IF(fstat(fd, &st) < 0);
508 fbuf = malloc(st.st_size + 2);
509 DIE_IF(fbuf == NULL);
510 DIE_IF(read(fd, fbuf, st.st_size) < 0);
511 close(fd);
512 fbuf[st.st_size] = '\n'; /* Dummy line */
513 fbuf[st.st_size + 1] = '\0';
514 p1 = fbuf;
515 line = 1;
516 while ((p2 = strchr(p1, '\n')) != NULL) {
517 *p2 = '\0';
518 if (strlazymatch(p1, pat)) {
519 line_list__add_line(head, line);
520 nlines++;
521 }
522 line++;
523 p1 = p2 + 1;
524 }
525 free(fbuf);
526 return nlines;
527}
528
529/* Find probe points from lazy pattern */
530static void find_probe_point_lazy(Dwarf_Die *sp_die, struct probe_finder *pf)
531{
532 Dwarf_Lines *lines;
533 Dwarf_Line *line;
534 size_t nlines, i;
600 Dwarf_Addr addr; 535 Dwarf_Addr addr;
601 Dwarf_Unsigned fno; 536 Dwarf_Die die_mem;
537 int lineno;
602 int ret; 538 int ret;
603 539
604 ret = dwarf_srclines(pf->cu_die, &lines, &cnt, &__dw_error); 540 if (list_empty(&pf->lcache)) {
605 DIE_IF(ret != DW_DLV_OK); 541 /* Matching lazy line pattern */
542 ret = find_lazy_match_lines(&pf->lcache, pf->fname,
543 pf->pp->lazy_line);
544 if (ret <= 0)
545 die("No matched lines found in %s.", pf->fname);
546 }
547
548 ret = dwarf_getsrclines(&pf->cu_die, &lines, &nlines);
549 DIE_IF(ret != 0);
550 for (i = 0; i < nlines; i++) {
551 line = dwarf_onesrcline(lines, i);
606 552
607 for (i = 0; i < cnt; i++) { 553 dwarf_lineno(line, &lineno);
608 ret = dwarf_line_srcfileno(lines[i], &fno, &__dw_error); 554 if (!line_list__has_line(&pf->lcache, lineno))
609 DIE_IF(ret != DW_DLV_OK);
610 if (fno != pf->fno)
611 continue; 555 continue;
612 556
613 ret = dwarf_lineno(lines[i], &lineno, &__dw_error); 557 /* TODO: Get fileno from line, but how? */
614 DIE_IF(ret != DW_DLV_OK); 558 if (strtailcmp(dwarf_linesrc(line, NULL, NULL), pf->fname) != 0)
615 if (lineno != pf->lno)
616 continue; 559 continue;
617 560
618 ret = dwarf_lineoff(lines[i], &clm, &__dw_error); 561 ret = dwarf_lineaddr(line, &addr);
619 DIE_IF(ret != DW_DLV_OK); 562 DIE_IF(ret != 0);
563 if (sp_die) {
564 /* Address filtering 1: does sp_die include addr? */
565 if (!dwarf_haspc(sp_die, addr))
566 continue;
567 /* Address filtering 2: No child include addr? */
568 if (die_get_inlinefunc(sp_die, addr, &die_mem))
569 continue;
570 }
620 571
621 ret = dwarf_lineaddr(lines[i], &addr, &__dw_error); 572 pr_debug("Probe line found: line[%d]:%d addr:0x%llx\n",
622 DIE_IF(ret != DW_DLV_OK); 573 (int)i, lineno, (unsigned long long)addr);
623 pr_debug("Probe line found: line[%d]:%u,%d addr:0x%llx\n",
624 (int)i, (unsigned)lineno, (int)clm, addr);
625 pf->addr = addr; 574 pf->addr = addr;
626 /* Search a real subprogram including this line, */ 575
627 ret = search_die_from_children(pf->cu_die, 576 show_probe_point(sp_die, pf);
628 probeaddr_callback, pf);
629 if (ret == 0)
630 die("Probe point is not found in subprograms.");
631 /* Continuing, because target line might be inlined. */ 577 /* Continuing, because target line might be inlined. */
632 } 578 }
633 dwarf_srclines_dealloc(__dw_debug, lines, cnt); 579 /* TODO: deallocate lines, but how? */
580}
581
582static int probe_point_inline_cb(Dwarf_Die *in_die, void *data)
583{
584 struct probe_finder *pf = (struct probe_finder *)data;
585 struct probe_point *pp = pf->pp;
586
587 if (pp->lazy_line)
588 find_probe_point_lazy(in_die, pf);
589 else {
590 /* Get probe address */
591 pf->addr = die_get_entrypc(in_die);
592 pf->addr += pp->offset;
593 pr_debug("found inline addr: 0x%jx\n",
594 (uintmax_t)pf->addr);
595
596 show_probe_point(in_die, pf);
597 }
598
599 return DWARF_CB_OK;
634} 600}
635 601
636/* Search function from function name */ 602/* Search function from function name */
637static int probefunc_callback(struct die_link *dlink, void *data) 603static int probe_point_search_cb(Dwarf_Die *sp_die, void *data)
638{ 604{
639 struct probe_finder *pf = (struct probe_finder *)data; 605 struct probe_finder *pf = (struct probe_finder *)data;
640 struct probe_point *pp = pf->pp; 606 struct probe_point *pp = pf->pp;
641 struct die_link *lk;
642 Dwarf_Signed offs;
643 Dwarf_Half tag;
644 int ret;
645 607
646 ret = dwarf_tag(dlink->die, &tag, &__dw_error); 608 /* Check tag and diename */
647 DIE_IF(ret == DW_DLV_ERROR); 609 if (dwarf_tag(sp_die) != DW_TAG_subprogram ||
648 if (tag == DW_TAG_subprogram) { 610 die_compare_name(sp_die, pp->function) != 0)
649 if (die_compare_name(dlink->die, pp->function) == 0) { 611 return 0;
650 if (pp->line) { /* Function relative line */ 612
651 pf->fno = die_get_decl_file(dlink->die); 613 pf->fname = dwarf_decl_file(sp_die);
652 pf->lno = die_get_decl_line(dlink->die) 614 if (pp->line) { /* Function relative line */
653 + pp->line; 615 dwarf_decl_line(sp_die, &pf->lno);
654 find_probe_point_by_line(pf); 616 pf->lno += pp->line;
655 return 1; 617 find_probe_point_by_line(pf);
656 } 618 } else if (!dwarf_func_inline(sp_die)) {
657 if (die_inlined_subprogram(dlink->die)) { 619 /* Real function */
658 /* Inlined function, save it. */ 620 if (pp->lazy_line)
659 ret = dwarf_die_CU_offset(dlink->die, 621 find_probe_point_lazy(sp_die, pf);
660 &pf->inl_offs, 622 else {
661 &__dw_error); 623 pf->addr = die_get_entrypc(sp_die);
662 DIE_IF(ret != DW_DLV_OK);
663 pr_debug("inline definition offset %lld\n",
664 pf->inl_offs);
665 return 0; /* Continue to search */
666 }
667 /* Get probe address */
668 pf->addr = die_get_entrypc(dlink->die);
669 pf->addr += pp->offset; 624 pf->addr += pp->offset;
670 /* TODO: Check the address in this function */ 625 /* TODO: Check the address in this function */
671 show_probepoint(dlink->die, pp->offset, pf); 626 show_probe_point(sp_die, pf);
672 return 1; /* Exit; no same symbol in this CU. */
673 }
674 } else if (tag == DW_TAG_inlined_subroutine && pf->inl_offs) {
675 if (die_get_abstract_origin(dlink->die) == pf->inl_offs) {
676 /* Get probe address */
677 pf->addr = die_get_entrypc(dlink->die);
678 pf->addr += pp->offset;
679 pr_debug("found inline addr: 0x%llx\n", pf->addr);
680 /* Inlined function. Get a real subprogram */
681 for (lk = dlink->parent; lk != NULL; lk = lk->parent) {
682 tag = 0;
683 dwarf_tag(lk->die, &tag, &__dw_error);
684 DIE_IF(ret == DW_DLV_ERROR);
685 if (tag == DW_TAG_subprogram &&
686 !die_inlined_subprogram(lk->die))
687 goto found;
688 }
689 die("Failed to find real subprogram.");
690found:
691 /* Get offset from subprogram */
692 ret = die_within_subprogram(lk->die, pf->addr, &offs);
693 DIE_IF(!ret);
694 show_probepoint(lk->die, offs, pf);
695 /* Continue to search */
696 } 627 }
697 } 628 } else
698 return 0; 629 /* Inlined function: search instances */
630 dwarf_func_inline_instances(sp_die, probe_point_inline_cb, pf);
631
632 return 1; /* Exit; no same symbol in this CU. */
699} 633}
700 634
701static void find_probe_point_by_func(struct probe_finder *pf) 635static void find_probe_point_by_func(struct probe_finder *pf)
702{ 636{
703 search_die_from_children(pf->cu_die, probefunc_callback, pf); 637 dwarf_getfuncs(&pf->cu_die, probe_point_search_cb, pf, 0);
704} 638}
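
dwarf_getfuncs() replaces the old hand-rolled die_link recursion: libdw visits each function DIE of the CU and invokes the callback, which steers the walk through its return value (DWARF_CB_OK to continue, DWARF_CB_ABORT to stop). A self-contained sketch of the callback shape (hypothetical names; assumes a CU DIE obtained as in find_probe_point() below):

    #include <elfutils/libdw.h>
    #include <dwarf.h>
    #include <string.h>
    #include <stdio.h>

    static int show_func_cb(Dwarf_Die *sp_die, void *data)
    {
            const char *wanted = data;
            const char *name = dwarf_diename(sp_die);

            if (dwarf_tag(sp_die) != DW_TAG_subprogram || !name ||
                strcmp(name, wanted) != 0)
                    return DWARF_CB_OK;     /* keep walking */
            printf("found %s (inline: %d)\n", name, dwarf_func_inline(sp_die));
            return DWARF_CB_ABORT;          /* stop the walk */
    }

    /* Usage: dwarf_getfuncs(&cu_die, show_func_cb, "schedule", 0); */
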
705 639
706/* Find a probe point */ 640/* Find a probe point */
707int find_probepoint(int fd, struct probe_point *pp) 641int find_probe_point(int fd, struct probe_point *pp)
708{ 642{
709 Dwarf_Half addr_size = 0;
710 Dwarf_Unsigned next_cuh = 0;
711 int cu_number = 0, ret;
712 struct probe_finder pf = {.pp = pp}; 643 struct probe_finder pf = {.pp = pp};
644 int ret;
645 Dwarf_Off off, noff;
646 size_t cuhl;
647 Dwarf_Die *diep;
648 Dwarf *dbg;
713 649
714 ret = dwarf_init(fd, DW_DLC_READ, 0, 0, &__dw_debug, &__dw_error); 650 dbg = dwarf_begin(fd, DWARF_C_READ);
715 if (ret != DW_DLV_OK) 651 if (!dbg)
716 return -ENOENT; 652 return -ENOENT;
717 653
718 pp->found = 0; 654 pp->found = 0;
719 while (++cu_number) { 655 off = 0;
720 /* Search CU (Compilation Unit) */ 656 line_list__init(&pf.lcache);
721 ret = dwarf_next_cu_header(__dw_debug, NULL, NULL, NULL, 657 /* Loop on CUs (Compilation Unit) */
722 &addr_size, &next_cuh, &__dw_error); 658 while (!dwarf_nextcu(dbg, off, &noff, &cuhl, NULL, NULL, NULL)) {
723 DIE_IF(ret == DW_DLV_ERROR);
724 if (ret == DW_DLV_NO_ENTRY)
725 break;
726
727 /* Get the DIE(Debugging Information Entry) of this CU */ 659 /* Get the DIE(Debugging Information Entry) of this CU */
728 ret = dwarf_siblingof(__dw_debug, 0, &pf.cu_die, &__dw_error); 660 diep = dwarf_offdie(dbg, off + cuhl, &pf.cu_die);
729 DIE_IF(ret != DW_DLV_OK); 661 if (!diep)
662 continue;
730 663
731 /* Check if target file is included. */ 664 /* Check if target file is included. */
732 if (pp->file) 665 if (pp->file)
733 pf.fno = cu_find_fileno(pf.cu_die, pp->file); 666 pf.fname = cu_find_realpath(&pf.cu_die, pp->file);
667 else
668 pf.fname = NULL;
734 669
735 if (!pp->file || pf.fno) { 670 if (!pp->file || pf.fname) {
736 /* Save CU base address (for frame_base) */ 671 /* Save CU base address (for frame_base) */
737 ret = dwarf_lowpc(pf.cu_die, &pf.cu_base, &__dw_error); 672 ret = dwarf_lowpc(&pf.cu_die, &pf.cu_base);
738 DIE_IF(ret == DW_DLV_ERROR); 673 if (ret != 0)
739 if (ret == DW_DLV_NO_ENTRY)
740 pf.cu_base = 0; 674 pf.cu_base = 0;
741 if (pp->function) 675 if (pp->function)
742 find_probe_point_by_func(&pf); 676 find_probe_point_by_func(&pf);
677 else if (pp->lazy_line)
678 find_probe_point_lazy(NULL, &pf);
743 else { 679 else {
744 pf.lno = pp->line; 680 pf.lno = pp->line;
745 find_probe_point_by_line(&pf); 681 find_probe_point_by_line(&pf);
746 } 682 }
747 } 683 }
748 dwarf_dealloc(__dw_debug, pf.cu_die, DW_DLA_DIE); 684 off = noff;
749 } 685 }
750 ret = dwarf_finish(__dw_debug, &__dw_error); 686 line_list__free(&pf.lcache);
751 DIE_IF(ret != DW_DLV_OK); 687 dwarf_end(dbg);
752 688
753 return pp->found; 689 return pp->found;
754} 690}
755 691
756
757static void line_range_add_line(struct line_range *lr, unsigned int line)
758{
759 struct line_node *ln;
760 struct list_head *p;
761
762 /* Reverse search, because new line will be the last one */
763 list_for_each_entry_reverse(ln, &lr->line_list, list) {
764 if (ln->line < line) {
765 p = &ln->list;
766 goto found;
767 } else if (ln->line == line) /* Already exist */
768 return ;
769 }
770 /* List is empty, or the smallest entry */
771 p = &lr->line_list;
772found:
773 pr_debug("Debug: add a line %u\n", line);
774 ln = zalloc(sizeof(struct line_node));
775 DIE_IF(ln == NULL);
776 ln->line = line;
777 INIT_LIST_HEAD(&ln->list);
778 list_add(&ln->list, p);
779}
780
781/* Find line range from its line number */ 692/* Find line range from its line number */
782static void find_line_range_by_line(struct line_finder *lf) 693static void find_line_range_by_line(Dwarf_Die *sp_die, struct line_finder *lf)
783{ 694{
784 Dwarf_Signed cnt, i; 695 Dwarf_Lines *lines;
785 Dwarf_Line *lines; 696 Dwarf_Line *line;
786 Dwarf_Unsigned lineno = 0; 697 size_t nlines, i;
787 Dwarf_Unsigned fno;
788 Dwarf_Addr addr; 698 Dwarf_Addr addr;
699 int lineno;
789 int ret; 700 int ret;
701 const char *src;
702 Dwarf_Die die_mem;
790 703
791 ret = dwarf_srclines(lf->cu_die, &lines, &cnt, &__dw_error); 704 line_list__init(&lf->lr->line_list);
792 DIE_IF(ret != DW_DLV_OK); 705 ret = dwarf_getsrclines(&lf->cu_die, &lines, &nlines);
706 DIE_IF(ret != 0);
793 707
794 for (i = 0; i < cnt; i++) { 708 for (i = 0; i < nlines; i++) {
795 ret = dwarf_line_srcfileno(lines[i], &fno, &__dw_error); 709 line = dwarf_onesrcline(lines, i);
796 DIE_IF(ret != DW_DLV_OK); 710 ret = dwarf_lineno(line, &lineno);
797 if (fno != lf->fno) 711 DIE_IF(ret != 0);
798 continue;
799
800 ret = dwarf_lineno(lines[i], &lineno, &__dw_error);
801 DIE_IF(ret != DW_DLV_OK);
802 if (lf->lno_s > lineno || lf->lno_e < lineno) 712 if (lf->lno_s > lineno || lf->lno_e < lineno)
803 continue; 713 continue;
804 714
805 /* Filter line in the function address range */ 715 if (sp_die) {
806 if (lf->addr_s && lf->addr_e) { 716 /* Address filtering 1: does sp_die include addr? */
807 ret = dwarf_lineaddr(lines[i], &addr, &__dw_error); 717 ret = dwarf_lineaddr(line, &addr);
808 DIE_IF(ret != DW_DLV_OK); 718 DIE_IF(ret != 0);
809 if (lf->addr_s > addr || lf->addr_e <= addr) 719 if (!dwarf_haspc(sp_die, addr))
720 continue;
721
722 /* Address filtering 2: No child include addr? */
723 if (die_get_inlinefunc(sp_die, addr, &die_mem))
810 continue; 724 continue;
811 } 725 }
812 line_range_add_line(lf->lr, (unsigned int)lineno); 726
727 /* TODO: Get fileno from line, but how? */
728 src = dwarf_linesrc(line, NULL, NULL);
729 if (strtailcmp(src, lf->fname) != 0)
730 continue;
731
732 /* Copy real path */
733 if (!lf->lr->path)
734 lf->lr->path = strdup(src);
735 line_list__add_line(&lf->lr->line_list, (unsigned int)lineno);
813 } 736 }
814 dwarf_srclines_dealloc(__dw_debug, lines, cnt); 737 /* Update status */
815 if (!list_empty(&lf->lr->line_list)) 738 if (!list_empty(&lf->lr->line_list))
816 lf->found = 1; 739 lf->found = 1;
740 else {
741 free(lf->lr->path);
742 lf->lr->path = NULL;
743 }
744}
745
746static int line_range_inline_cb(Dwarf_Die *in_die, void *data)
747{
748 find_line_range_by_line(in_die, (struct line_finder *)data);
749 return DWARF_CB_ABORT; /* No need to find other instances */
817} 750}
818 751
819/* Search function from function name */ 752/* Search function from function name */
820static int linefunc_callback(struct die_link *dlink, void *data) 753static int line_range_search_cb(Dwarf_Die *sp_die, void *data)
821{ 754{
822 struct line_finder *lf = (struct line_finder *)data; 755 struct line_finder *lf = (struct line_finder *)data;
823 struct line_range *lr = lf->lr; 756 struct line_range *lr = lf->lr;
824 Dwarf_Half tag;
825 int ret;
826 757
827 ret = dwarf_tag(dlink->die, &tag, &__dw_error); 758 if (dwarf_tag(sp_die) == DW_TAG_subprogram &&
828 DIE_IF(ret == DW_DLV_ERROR); 759 die_compare_name(sp_die, lr->function) == 0) {
829 if (tag == DW_TAG_subprogram && 760 lf->fname = dwarf_decl_file(sp_die);
830 die_compare_name(dlink->die, lr->function) == 0) { 761 dwarf_decl_line(sp_die, &lr->offset);
831 /* Get the address range of this function */ 762 pr_debug("fname: %s, lineno:%d\n", lf->fname, lr->offset);
832 ret = dwarf_highpc(dlink->die, &lf->addr_e, &__dw_error);
833 if (ret == DW_DLV_OK)
834 ret = dwarf_lowpc(dlink->die, &lf->addr_s, &__dw_error);
835 DIE_IF(ret == DW_DLV_ERROR);
836 if (ret == DW_DLV_NO_ENTRY) {
837 lf->addr_s = 0;
838 lf->addr_e = 0;
839 }
840
841 lf->fno = die_get_decl_file(dlink->die);
842 lr->offset = die_get_decl_line(dlink->die);;
843 lf->lno_s = lr->offset + lr->start; 763 lf->lno_s = lr->offset + lr->start;
844 if (!lr->end) 764 if (!lr->end)
845 lf->lno_e = (Dwarf_Unsigned)-1; 765 lf->lno_e = INT_MAX;
846 else 766 else
847 lf->lno_e = lr->offset + lr->end; 767 lf->lno_e = lr->offset + lr->end;
848 lr->start = lf->lno_s; 768 lr->start = lf->lno_s;
849 lr->end = lf->lno_e; 769 lr->end = lf->lno_e;
850 find_line_range_by_line(lf); 770 if (dwarf_func_inline(sp_die))
851 /* If we find a target function, this should be end. */ 771 dwarf_func_inline_instances(sp_die,
852 lf->found = 1; 772 line_range_inline_cb, lf);
773 else
774 find_line_range_by_line(sp_die, lf);
853 return 1; 775 return 1;
854 } 776 }
855 return 0; 777 return 0;
@@ -857,55 +779,55 @@ static int linefunc_callback(struct die_link *dlink, void *data)
857 779
858static void find_line_range_by_func(struct line_finder *lf) 780static void find_line_range_by_func(struct line_finder *lf)
859{ 781{
860 search_die_from_children(lf->cu_die, linefunc_callback, lf); 782 dwarf_getfuncs(&lf->cu_die, line_range_search_cb, lf, 0);
861} 783}
862 784
863int find_line_range(int fd, struct line_range *lr) 785int find_line_range(int fd, struct line_range *lr)
864{ 786{
865 Dwarf_Half addr_size = 0; 787 struct line_finder lf = {.lr = lr, .found = 0};
866 Dwarf_Unsigned next_cuh = 0;
867 int ret; 788 int ret;
868 struct line_finder lf = {.lr = lr}; 789 Dwarf_Off off = 0, noff;
790 size_t cuhl;
791 Dwarf_Die *diep;
792 Dwarf *dbg;
869 793
870 ret = dwarf_init(fd, DW_DLC_READ, 0, 0, &__dw_debug, &__dw_error); 794 dbg = dwarf_begin(fd, DWARF_C_READ);
871 if (ret != DW_DLV_OK) 795 if (!dbg)
872 return -ENOENT; 796 return -ENOENT;
873 797
798 /* Loop on CUs (Compilation Unit) */
874 while (!lf.found) { 799 while (!lf.found) {
875 /* Search CU (Compilation Unit) */ 800 ret = dwarf_nextcu(dbg, off, &noff, &cuhl, NULL, NULL, NULL);
876 ret = dwarf_next_cu_header(__dw_debug, NULL, NULL, NULL, 801 if (ret != 0)
877 &addr_size, &next_cuh, &__dw_error);
878 DIE_IF(ret == DW_DLV_ERROR);
879 if (ret == DW_DLV_NO_ENTRY)
880 break; 802 break;
881 803
882 /* Get the DIE(Debugging Information Entry) of this CU */ 804 /* Get the DIE(Debugging Information Entry) of this CU */
883 ret = dwarf_siblingof(__dw_debug, 0, &lf.cu_die, &__dw_error); 805 diep = dwarf_offdie(dbg, off + cuhl, &lf.cu_die);
884 DIE_IF(ret != DW_DLV_OK); 806 if (!diep)
807 continue;
885 808
886 /* Check if target file is included. */ 809 /* Check if target file is included. */
887 if (lr->file) 810 if (lr->file)
888 lf.fno = cu_find_fileno(lf.cu_die, lr->file); 811 lf.fname = cu_find_realpath(&lf.cu_die, lr->file);
812 else
813 lf.fname = 0;
889 814
890 if (!lr->file || lf.fno) { 815 if (!lr->file || lf.fname) {
891 if (lr->function) 816 if (lr->function)
892 find_line_range_by_func(&lf); 817 find_line_range_by_func(&lf);
893 else { 818 else {
894 lf.lno_s = lr->start; 819 lf.lno_s = lr->start;
895 if (!lr->end) 820 if (!lr->end)
896 lf.lno_e = (Dwarf_Unsigned)-1; 821 lf.lno_e = INT_MAX;
897 else 822 else
898 lf.lno_e = lr->end; 823 lf.lno_e = lr->end;
899 find_line_range_by_line(&lf); 824 find_line_range_by_line(NULL, &lf);
900 } 825 }
901 /* Get the real file path */
902 if (lf.found)
903 cu_get_filename(lf.cu_die, lf.fno, &lr->path);
904 } 826 }
905 dwarf_dealloc(__dw_debug, lf.cu_die, DW_DLA_DIE); 827 off = noff;
906 } 828 }
907 ret = dwarf_finish(__dw_debug, &__dw_error); 829 pr_debug("path: %lx\n", (unsigned long)lr->path);
908 DIE_IF(ret != DW_DLV_OK); 830 dwarf_end(dbg);
909 return lf.found; 831 return lf.found;
910} 832}
911 833
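
Both entry points now share the same libdw compilation-unit loop: dwarf_begin() opens the descriptor, dwarf_nextcu() steps through the CU headers, and dwarf_offdie() turns "header offset plus header size" into the CU DIE. A standalone sketch of just that loop (assumes fd refers to an ELF file with DWARF info; link with -ldw):

    #include <elfutils/libdw.h>
    #include <stdio.h>

    static int list_cus(int fd)
    {
            Dwarf_Off off = 0, noff;
            size_t cuhl;
            Dwarf_Die cu_die;
            Dwarf *dbg;

            dbg = dwarf_begin(fd, DWARF_C_READ);
            if (!dbg)
                    return -1;
            while (dwarf_nextcu(dbg, off, &noff, &cuhl, NULL, NULL, NULL) == 0) {
                    /* The CU DIE sits right behind the CU header. */
                    if (dwarf_offdie(dbg, off + cuhl, &cu_die)) {
                            const char *name = dwarf_diename(&cu_die);
                            printf("CU: %s\n", name ? name : "<unnamed>");
                    }
                    off = noff;
            }
            dwarf_end(dbg);
            return 0;
    }
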
diff --git a/tools/perf/util/probe-finder.h b/tools/perf/util/probe-finder.h
index 972b386116f1..d1a651793ba6 100644
--- a/tools/perf/util/probe-finder.h
+++ b/tools/perf/util/probe-finder.h
@@ -1,6 +1,7 @@
1#ifndef _PROBE_FINDER_H 1#ifndef _PROBE_FINDER_H
2#define _PROBE_FINDER_H 2#define _PROBE_FINDER_H
3 3
4#include <stdbool.h>
4#include "util.h" 5#include "util.h"
5 6
6#define MAX_PATH_LEN 256 7#define MAX_PATH_LEN 256
@@ -20,6 +21,7 @@ struct probe_point {
20 /* Inputs */ 21 /* Inputs */
21 char *file; /* File name */ 22 char *file; /* File name */
22 int line; /* Line number */ 23 int line; /* Line number */
24 char *lazy_line; /* Lazy line pattern */
23 25
24 char *function; /* Function name */ 26 char *function; /* Function name */
25 int offset; /* Offset bytes */ 27 int offset; /* Offset bytes */
@@ -46,53 +48,46 @@ struct line_range {
46 char *function; /* Function name */ 48 char *function; /* Function name */
47 unsigned int start; /* Start line number */ 49 unsigned int start; /* Start line number */
48 unsigned int end; /* End line number */ 50 unsigned int end; /* End line number */
49 unsigned int offset; /* Start line offset */ 51 int offset; /* Start line offset */
50 char *path; /* Real path name */ 52 char *path; /* Real path name */
51 struct list_head line_list; /* Visible lines */ 53 struct list_head line_list; /* Visible lines */
52}; 54};
53 55
54#ifndef NO_LIBDWARF 56#ifndef NO_DWARF_SUPPORT
55extern int find_probepoint(int fd, struct probe_point *pp); 57extern int find_probe_point(int fd, struct probe_point *pp);
56extern int find_line_range(int fd, struct line_range *lr); 58extern int find_line_range(int fd, struct line_range *lr);
57 59
58/* Workaround for undefined _MIPS_SZLONG bug in libdwarf.h: */
59#ifndef _MIPS_SZLONG
60# define _MIPS_SZLONG 0
61#endif
62
63#include <dwarf.h> 60#include <dwarf.h>
64#include <libdwarf.h> 61#include <libdw.h>
65 62
66struct probe_finder { 63struct probe_finder {
67 struct probe_point *pp; /* Target probe point */ 64 struct probe_point *pp; /* Target probe point */
68 65
69 /* For function searching */ 66 /* For function searching */
70 Dwarf_Addr addr; /* Address */ 67 Dwarf_Addr addr; /* Address */
71 Dwarf_Unsigned fno; /* File number */ 68 const char *fname; /* File name */
72 Dwarf_Unsigned lno; /* Line number */ 69 int lno; /* Line number */
73 Dwarf_Off inl_offs; /* Inline offset */ 70 Dwarf_Die cu_die; /* Current CU */
74 Dwarf_Die cu_die; /* Current CU */
75 71
76 /* For variable searching */ 72 /* For variable searching */
77 Dwarf_Addr cu_base; /* Current CU base address */ 73 Dwarf_Op *fb_ops; /* Frame base attribute */
78 Dwarf_Locdesc fbloc; /* Location of Current Frame Base */ 74 Dwarf_Addr cu_base; /* Current CU base address */
79 const char *var; /* Current variable name */ 75 const char *var; /* Current variable name */
80 char *buf; /* Current output buffer */ 76 char *buf; /* Current output buffer */
81 int len; /* Length of output buffer */ 77 int len; /* Length of output buffer */
78 struct list_head lcache; /* Line cache for lazy match */
82}; 79};
83 80
84struct line_finder { 81struct line_finder {
85 struct line_range *lr; /* Target line range */ 82 struct line_range *lr; /* Target line range */
86 83
87 Dwarf_Unsigned fno; /* File number */ 84 const char *fname; /* File name */
88 Dwarf_Unsigned lno_s; /* Start line number */ 85 int lno_s; /* Start line number */
89 Dwarf_Unsigned lno_e; /* End line number */ 86 int lno_e; /* End line number */
90 Dwarf_Addr addr_s; /* Start address */ 87 Dwarf_Die cu_die; /* Current CU */
91 Dwarf_Addr addr_e; /* End address */
92 Dwarf_Die cu_die; /* Current CU */
93 int found; 88 int found;
94}; 89};
95 90
96#endif /* NO_LIBDWARF */ 91#endif /* NO_DWARF_SUPPORT */
97 92
98#endif /*_PROBE_FINDER_H */ 93#endif /*_PROBE_FINDER_H */
diff --git a/tools/perf/util/string.c b/tools/perf/util/string.c
index c397d4f6f748..a175949ed216 100644
--- a/tools/perf/util/string.c
+++ b/tools/perf/util/string.c
@@ -265,21 +265,21 @@ error:
265 return false; 265 return false;
266} 266}
267 267
268/** 268/* Glob/lazy pattern matching */
269 * strglobmatch - glob expression pattern matching 269static bool __match_glob(const char *str, const char *pat, bool ignore_space)
270 * @str: the target string to match
271 * @pat: the pattern string to match
272 *
273 * This returns true if the @str matches @pat. @pat can include wildcards
274 * ('*','?') and character classes ([CHARS]; complementation and ranges are
275 * also supported). It also supports the escape character ('\') to use
276 * special characters as normal characters.
277 *
278 * Note: if @pat syntax is broken, this always returns false.
279 */
280bool strglobmatch(const char *str, const char *pat)
281{ 270{
282 while (*str && *pat && *pat != '*') { 271 while (*str && *pat && *pat != '*') {
272 if (ignore_space) {
273 /* Ignore spaces for lazy matching */
274 if (isspace(*str)) {
275 str++;
276 continue;
277 }
278 if (isspace(*pat)) {
279 pat++;
280 continue;
281 }
282 }
283 if (*pat == '?') { /* Matches any single character */ 283 if (*pat == '?') { /* Matches any single character */
284 str++; 284 str++;
285 pat++; 285 pat++;
@@ -308,3 +308,32 @@ bool strglobmatch(const char *str, const char *pat)
308 return !*str && !*pat; 308 return !*str && !*pat;
309} 309}
310 310
311/**
312 * strglobmatch - glob expression pattern matching
313 * @str: the target string to match
314 * @pat: the pattern string to match
315 *
316 * This returns true if the @str matches @pat. @pat can include wildcards
317 * ('*','?') and character classes ([CHARS]; complementation and ranges are
318 * also supported). It also supports the escape character ('\') to use
319 * special characters as normal characters.
320 *
321 * Note: if @pat syntax is broken, this always returns false.
322 */
323bool strglobmatch(const char *str, const char *pat)
324{
325 return __match_glob(str, pat, false);
326}
327
328/**
329 * strlazymatch - matching pattern strings lazily with glob pattern
330 * @str: the target string to match
331 * @pat: the pattern string to match
332 *
333 * This is similar to strglobmatch, except this ignores spaces in
334 * the target string.
335 */
336bool strlazymatch(const char *str, const char *pat)
337{
338 return __match_glob(str, pat, true);
339}
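
The refactoring folds both matchers into __match_glob(), whose ignore_space switch is what makes strlazymatch() skip whitespace while matching. Illustrative expectations (a sketch assuming linkage against this file; not part of the patch):

    #include <assert.h>
    #include <stdbool.h>

    bool strglobmatch(const char *str, const char *pat);
    bool strlazymatch(const char *str, const char *pat);

    int main(void)
    {
            assert(strglobmatch("kvm_create_vm", "kvm_*_vm"));
            assert(!strglobmatch("a b", "ab"));     /* spaces are significant */
            assert(strlazymatch("a b", "ab"));      /* spaces are skipped */
            assert(strlazymatch("if (ret < 0)", "if(ret<0)"));
            return 0;
    }
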
diff --git a/tools/perf/util/string.h b/tools/perf/util/string.h
index 02ede58c54b4..542e44de3719 100644
--- a/tools/perf/util/string.h
+++ b/tools/perf/util/string.h
@@ -10,6 +10,7 @@ s64 perf_atoll(const char *str);
10char **argv_split(const char *str, int *argcp); 10char **argv_split(const char *str, int *argcp);
11void argv_free(char **argv); 11void argv_free(char **argv);
12bool strglobmatch(const char *str, const char *pat); 12bool strglobmatch(const char *str, const char *pat);
13bool strlazymatch(const char *str, const char *pat);
13 14
14#define _STR(x) #x 15#define _STR(x) #x
15#define STR(x) _STR(x) 16#define STR(x) _STR(x)
diff --git a/virt/kvm/Kconfig b/virt/kvm/Kconfig
index daece36c0a57..7f1178f6b839 100644
--- a/virt/kvm/Kconfig
+++ b/virt/kvm/Kconfig
@@ -12,3 +12,6 @@ config HAVE_KVM_EVENTFD
12 12
13config KVM_APIC_ARCHITECTURE 13config KVM_APIC_ARCHITECTURE
14 bool 14 bool
15
16config KVM_MMIO
17 bool
diff --git a/virt/kvm/assigned-dev.c b/virt/kvm/assigned-dev.c
index f73de631e3ee..057e2cca6af5 100644
--- a/virt/kvm/assigned-dev.c
+++ b/virt/kvm/assigned-dev.c
@@ -504,12 +504,12 @@ out:
504static int kvm_vm_ioctl_assign_device(struct kvm *kvm, 504static int kvm_vm_ioctl_assign_device(struct kvm *kvm,
505 struct kvm_assigned_pci_dev *assigned_dev) 505 struct kvm_assigned_pci_dev *assigned_dev)
506{ 506{
507 int r = 0; 507 int r = 0, idx;
508 struct kvm_assigned_dev_kernel *match; 508 struct kvm_assigned_dev_kernel *match;
509 struct pci_dev *dev; 509 struct pci_dev *dev;
510 510
511 mutex_lock(&kvm->lock); 511 mutex_lock(&kvm->lock);
512 down_read(&kvm->slots_lock); 512 idx = srcu_read_lock(&kvm->srcu);
513 513
514 match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head, 514 match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
515 assigned_dev->assigned_dev_id); 515 assigned_dev->assigned_dev_id);
@@ -526,7 +526,8 @@ static int kvm_vm_ioctl_assign_device(struct kvm *kvm,
526 r = -ENOMEM; 526 r = -ENOMEM;
527 goto out; 527 goto out;
528 } 528 }
529 dev = pci_get_bus_and_slot(assigned_dev->busnr, 529 dev = pci_get_domain_bus_and_slot(assigned_dev->segnr,
530 assigned_dev->busnr,
530 assigned_dev->devfn); 531 assigned_dev->devfn);
531 if (!dev) { 532 if (!dev) {
532 printk(KERN_INFO "%s: host device not found\n", __func__); 533 printk(KERN_INFO "%s: host device not found\n", __func__);
@@ -548,6 +549,7 @@ static int kvm_vm_ioctl_assign_device(struct kvm *kvm,
548 pci_reset_function(dev); 549 pci_reset_function(dev);
549 550
550 match->assigned_dev_id = assigned_dev->assigned_dev_id; 551 match->assigned_dev_id = assigned_dev->assigned_dev_id;
552 match->host_segnr = assigned_dev->segnr;
551 match->host_busnr = assigned_dev->busnr; 553 match->host_busnr = assigned_dev->busnr;
552 match->host_devfn = assigned_dev->devfn; 554 match->host_devfn = assigned_dev->devfn;
553 match->flags = assigned_dev->flags; 555 match->flags = assigned_dev->flags;
@@ -573,7 +575,7 @@ static int kvm_vm_ioctl_assign_device(struct kvm *kvm,
573 } 575 }
574 576
575out: 577out:
576 up_read(&kvm->slots_lock); 578 srcu_read_unlock(&kvm->srcu, idx);
577 mutex_unlock(&kvm->lock); 579 mutex_unlock(&kvm->lock);
578 return r; 580 return r;
579out_list_del: 581out_list_del:
@@ -585,7 +587,7 @@ out_put:
585 pci_dev_put(dev); 587 pci_dev_put(dev);
586out_free: 588out_free:
587 kfree(match); 589 kfree(match);
588 up_read(&kvm->slots_lock); 590 srcu_read_unlock(&kvm->srcu, idx);
589 mutex_unlock(&kvm->lock); 591 mutex_unlock(&kvm->lock);
590 return r; 592 return r;
591} 593}
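
The locking change here is representative of the whole series: the read side of slots_lock becomes an SRCU read-side critical section, which, unlike a classic RCU read section, may sleep. The general pattern, reduced to a sketch with a hypothetical srcu_struct:

    #include <linux/srcu.h>

    static struct srcu_struct my_srcu;  /* hypothetical domain; run
                                         * init_srcu_struct(&my_srcu) first */

    static void reader(void)
    {
            int idx;

            idx = srcu_read_lock(&my_srcu);
            /* Dereference SRCU-protected pointers here; sleeping
             * (mutex_lock, copy_to_user, ...) is allowed, unlike
             * under classic RCU. */
            srcu_read_unlock(&my_srcu, idx);
    }

    static void updater(void)
    {
            /* ...publish the new pointer with rcu_assign_pointer()... */
            synchronize_srcu(&my_srcu);     /* wait out current readers */
            /* ...then it is safe to free the old data... */
    }
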
diff --git a/virt/kvm/coalesced_mmio.c b/virt/kvm/coalesced_mmio.c
index 04d69cd7049b..5169736377a3 100644
--- a/virt/kvm/coalesced_mmio.c
+++ b/virt/kvm/coalesced_mmio.c
@@ -92,41 +92,64 @@ static const struct kvm_io_device_ops coalesced_mmio_ops = {
92int kvm_coalesced_mmio_init(struct kvm *kvm) 92int kvm_coalesced_mmio_init(struct kvm *kvm)
93{ 93{
94 struct kvm_coalesced_mmio_dev *dev; 94 struct kvm_coalesced_mmio_dev *dev;
95 struct page *page;
95 int ret; 96 int ret;
96 97
98 ret = -ENOMEM;
99 page = alloc_page(GFP_KERNEL | __GFP_ZERO);
100 if (!page)
101 goto out_err;
102 kvm->coalesced_mmio_ring = page_address(page);
103
104 ret = -ENOMEM;
97 dev = kzalloc(sizeof(struct kvm_coalesced_mmio_dev), GFP_KERNEL); 105 dev = kzalloc(sizeof(struct kvm_coalesced_mmio_dev), GFP_KERNEL);
98 if (!dev) 106 if (!dev)
99 return -ENOMEM; 107 goto out_free_page;
100 spin_lock_init(&dev->lock); 108 spin_lock_init(&dev->lock);
101 kvm_iodevice_init(&dev->dev, &coalesced_mmio_ops); 109 kvm_iodevice_init(&dev->dev, &coalesced_mmio_ops);
102 dev->kvm = kvm; 110 dev->kvm = kvm;
103 kvm->coalesced_mmio_dev = dev; 111 kvm->coalesced_mmio_dev = dev;
104 112
105 ret = kvm_io_bus_register_dev(kvm, &kvm->mmio_bus, &dev->dev); 113 mutex_lock(&kvm->slots_lock);
114 ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, &dev->dev);
115 mutex_unlock(&kvm->slots_lock);
106 if (ret < 0) 116 if (ret < 0)
107 kfree(dev); 117 goto out_free_dev;
118
119 return ret;
108 120
121out_free_dev:
122 kfree(dev);
123out_free_page:
124 __free_page(page);
125out_err:
109 return ret; 126 return ret;
110} 127}
111 128
129void kvm_coalesced_mmio_free(struct kvm *kvm)
130{
131 if (kvm->coalesced_mmio_ring)
132 free_page((unsigned long)kvm->coalesced_mmio_ring);
133}
134
112int kvm_vm_ioctl_register_coalesced_mmio(struct kvm *kvm, 135int kvm_vm_ioctl_register_coalesced_mmio(struct kvm *kvm,
113 struct kvm_coalesced_mmio_zone *zone) 136 struct kvm_coalesced_mmio_zone *zone)
114{ 137{
115 struct kvm_coalesced_mmio_dev *dev = kvm->coalesced_mmio_dev; 138 struct kvm_coalesced_mmio_dev *dev = kvm->coalesced_mmio_dev;
116 139
117 if (dev == NULL) 140 if (dev == NULL)
118 return -EINVAL; 141 return -EINVAL;
119 142
120 down_write(&kvm->slots_lock); 143 mutex_lock(&kvm->slots_lock);
121 if (dev->nb_zones >= KVM_COALESCED_MMIO_ZONE_MAX) { 144 if (dev->nb_zones >= KVM_COALESCED_MMIO_ZONE_MAX) {
122 up_write(&kvm->slots_lock); 145 mutex_unlock(&kvm->slots_lock);
123 return -ENOBUFS; 146 return -ENOBUFS;
124 } 147 }
125 148
126 dev->zone[dev->nb_zones] = *zone; 149 dev->zone[dev->nb_zones] = *zone;
127 dev->nb_zones++; 150 dev->nb_zones++;
128 151
129 up_write(&kvm->slots_lock); 152 mutex_unlock(&kvm->slots_lock);
130 return 0; 153 return 0;
131} 154}
132 155
@@ -140,10 +163,10 @@ int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm,
140 if (dev == NULL) 163 if (dev == NULL)
141 return -EINVAL; 164 return -EINVAL;
142 165
143 down_write(&kvm->slots_lock); 166 mutex_lock(&kvm->slots_lock);
144 167
145 i = dev->nb_zones; 168 i = dev->nb_zones;
146 while(i) { 169 while (i) {
147 z = &dev->zone[i - 1]; 170 z = &dev->zone[i - 1];
148 171
149 /* unregister all zones 172 /* unregister all zones
@@ -158,7 +181,7 @@ int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm,
158 i--; 181 i--;
159 } 182 }
160 183
161 up_write(&kvm->slots_lock); 184 mutex_unlock(&kvm->slots_lock);
162 185
163 return 0; 186 return 0;
164} 187}
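
kvm_coalesced_mmio_init() above now follows the conventional kernel goto-unwind error style: each acquisition gets a label that releases everything acquired after it, so the success path stays linear. A condensed illustration (hypothetical names, not the committed code):

    #include <linux/gfp.h>
    #include <linux/slab.h>

    extern int register_it(void *dev);          /* hypothetical helper */

    static int my_init(void)
    {
            struct page *page;
            void *dev;
            int ret = -ENOMEM;

            page = alloc_page(GFP_KERNEL | __GFP_ZERO);
            if (!page)
                    goto out_err;
            dev = kzalloc(64, GFP_KERNEL);
            if (!dev)
                    goto out_free_page;
            ret = register_it(dev);
            if (ret < 0)
                    goto out_free_dev;
            return 0;

    out_free_dev:
            kfree(dev);
    out_free_page:
            __free_page(page);
    out_err:
            return ret;
    }
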
diff --git a/virt/kvm/coalesced_mmio.h b/virt/kvm/coalesced_mmio.h
index 4b49f27fa31e..8a5959e3535f 100644
--- a/virt/kvm/coalesced_mmio.h
+++ b/virt/kvm/coalesced_mmio.h
@@ -1,3 +1,6 @@
1#ifndef __KVM_COALESCED_MMIO_H__
2#define __KVM_COALESCED_MMIO_H__
3
1/* 4/*
2 * KVM coalesced MMIO 5 * KVM coalesced MMIO
3 * 6 *
@@ -7,6 +10,8 @@
7 * 10 *
8 */ 11 */
9 12
13#ifdef CONFIG_KVM_MMIO
14
10#define KVM_COALESCED_MMIO_ZONE_MAX 100 15#define KVM_COALESCED_MMIO_ZONE_MAX 100
11 16
12struct kvm_coalesced_mmio_dev { 17struct kvm_coalesced_mmio_dev {
@@ -18,7 +23,17 @@ struct kvm_coalesced_mmio_dev {
18}; 23};
19 24
20int kvm_coalesced_mmio_init(struct kvm *kvm); 25int kvm_coalesced_mmio_init(struct kvm *kvm);
26void kvm_coalesced_mmio_free(struct kvm *kvm);
21int kvm_vm_ioctl_register_coalesced_mmio(struct kvm *kvm, 27int kvm_vm_ioctl_register_coalesced_mmio(struct kvm *kvm,
22 struct kvm_coalesced_mmio_zone *zone); 28 struct kvm_coalesced_mmio_zone *zone);
23int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm, 29int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm,
24 struct kvm_coalesced_mmio_zone *zone); 30 struct kvm_coalesced_mmio_zone *zone);
31
32#else
33
34static inline int kvm_coalesced_mmio_init(struct kvm *kvm) { return 0; }
35static inline void kvm_coalesced_mmio_free(struct kvm *kvm) { }
36
37#endif
38
39#endif
diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c
index a9d3fc6c681c..7016319b1ec0 100644
--- a/virt/kvm/eventfd.c
+++ b/virt/kvm/eventfd.c
@@ -47,7 +47,6 @@ struct _irqfd {
47 int gsi; 47 int gsi;
48 struct list_head list; 48 struct list_head list;
49 poll_table pt; 49 poll_table pt;
50 wait_queue_head_t *wqh;
51 wait_queue_t wait; 50 wait_queue_t wait;
52 struct work_struct inject; 51 struct work_struct inject;
53 struct work_struct shutdown; 52 struct work_struct shutdown;
@@ -159,8 +158,6 @@ irqfd_ptable_queue_proc(struct file *file, wait_queue_head_t *wqh,
159 poll_table *pt) 158 poll_table *pt)
160{ 159{
161 struct _irqfd *irqfd = container_of(pt, struct _irqfd, pt); 160 struct _irqfd *irqfd = container_of(pt, struct _irqfd, pt);
162
163 irqfd->wqh = wqh;
164 add_wait_queue(wqh, &irqfd->wait); 161 add_wait_queue(wqh, &irqfd->wait);
165} 162}
166 163
@@ -463,7 +460,7 @@ static int
463kvm_assign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args) 460kvm_assign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
464{ 461{
465 int pio = args->flags & KVM_IOEVENTFD_FLAG_PIO; 462 int pio = args->flags & KVM_IOEVENTFD_FLAG_PIO;
466 struct kvm_io_bus *bus = pio ? &kvm->pio_bus : &kvm->mmio_bus; 463 enum kvm_bus bus_idx = pio ? KVM_PIO_BUS : KVM_MMIO_BUS;
467 struct _ioeventfd *p; 464 struct _ioeventfd *p;
468 struct eventfd_ctx *eventfd; 465 struct eventfd_ctx *eventfd;
469 int ret; 466 int ret;
@@ -508,7 +505,7 @@ kvm_assign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
508 else 505 else
509 p->wildcard = true; 506 p->wildcard = true;
510 507
511 down_write(&kvm->slots_lock); 508 mutex_lock(&kvm->slots_lock);
512 509
513 /* Verify that there isn't a match already */ 510 /* Verify that there isn't a match already */
514 if (ioeventfd_check_collision(kvm, p)) { 511 if (ioeventfd_check_collision(kvm, p)) {
@@ -518,18 +515,18 @@ kvm_assign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
518 515
519 kvm_iodevice_init(&p->dev, &ioeventfd_ops); 516 kvm_iodevice_init(&p->dev, &ioeventfd_ops);
520 517
521 ret = __kvm_io_bus_register_dev(bus, &p->dev); 518 ret = kvm_io_bus_register_dev(kvm, bus_idx, &p->dev);
522 if (ret < 0) 519 if (ret < 0)
523 goto unlock_fail; 520 goto unlock_fail;
524 521
525 list_add_tail(&p->list, &kvm->ioeventfds); 522 list_add_tail(&p->list, &kvm->ioeventfds);
526 523
527 up_write(&kvm->slots_lock); 524 mutex_unlock(&kvm->slots_lock);
528 525
529 return 0; 526 return 0;
530 527
531unlock_fail: 528unlock_fail:
532 up_write(&kvm->slots_lock); 529 mutex_unlock(&kvm->slots_lock);
533 530
534fail: 531fail:
535 kfree(p); 532 kfree(p);
@@ -542,7 +539,7 @@ static int
542kvm_deassign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args) 539kvm_deassign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
543{ 540{
544 int pio = args->flags & KVM_IOEVENTFD_FLAG_PIO; 541 int pio = args->flags & KVM_IOEVENTFD_FLAG_PIO;
545 struct kvm_io_bus *bus = pio ? &kvm->pio_bus : &kvm->mmio_bus; 542 enum kvm_bus bus_idx = pio ? KVM_PIO_BUS : KVM_MMIO_BUS;
546 struct _ioeventfd *p, *tmp; 543 struct _ioeventfd *p, *tmp;
547 struct eventfd_ctx *eventfd; 544 struct eventfd_ctx *eventfd;
548 int ret = -ENOENT; 545 int ret = -ENOENT;
@@ -551,7 +548,7 @@ kvm_deassign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
551 if (IS_ERR(eventfd)) 548 if (IS_ERR(eventfd))
552 return PTR_ERR(eventfd); 549 return PTR_ERR(eventfd);
553 550
554 down_write(&kvm->slots_lock); 551 mutex_lock(&kvm->slots_lock);
555 552
556 list_for_each_entry_safe(p, tmp, &kvm->ioeventfds, list) { 553 list_for_each_entry_safe(p, tmp, &kvm->ioeventfds, list) {
557 bool wildcard = !(args->flags & KVM_IOEVENTFD_FLAG_DATAMATCH); 554 bool wildcard = !(args->flags & KVM_IOEVENTFD_FLAG_DATAMATCH);
@@ -565,13 +562,13 @@ kvm_deassign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
565 if (!p->wildcard && p->datamatch != args->datamatch) 562 if (!p->wildcard && p->datamatch != args->datamatch)
566 continue; 563 continue;
567 564
568 __kvm_io_bus_unregister_dev(bus, &p->dev); 565 kvm_io_bus_unregister_dev(kvm, bus_idx, &p->dev);
569 ioeventfd_release(p); 566 ioeventfd_release(p);
570 ret = 0; 567 ret = 0;
571 break; 568 break;
572 } 569 }
573 570
574 up_write(&kvm->slots_lock); 571 mutex_unlock(&kvm->slots_lock);
575 572
576 eventfd_ctx_put(eventfd); 573 eventfd_ctx_put(eventfd);
577 574
diff --git a/virt/kvm/ioapic.c b/virt/kvm/ioapic.c
index 38a2d20b89de..3db15a807f80 100644
--- a/virt/kvm/ioapic.c
+++ b/virt/kvm/ioapic.c
@@ -100,6 +100,19 @@ static int ioapic_service(struct kvm_ioapic *ioapic, unsigned int idx)
100 return injected; 100 return injected;
101} 101}
102 102
103static void update_handled_vectors(struct kvm_ioapic *ioapic)
104{
105 DECLARE_BITMAP(handled_vectors, 256);
106 int i;
107
108 memset(handled_vectors, 0, sizeof(handled_vectors));
109 for (i = 0; i < IOAPIC_NUM_PINS; ++i)
110 __set_bit(ioapic->redirtbl[i].fields.vector, handled_vectors);
111 memcpy(ioapic->handled_vectors, handled_vectors,
112 sizeof(handled_vectors));
113 smp_wmb();
114}
115
103static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val) 116static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
104{ 117{
105 unsigned index; 118 unsigned index;
@@ -134,6 +147,7 @@ static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
134 e->bits |= (u32) val; 147 e->bits |= (u32) val;
135 e->fields.remote_irr = 0; 148 e->fields.remote_irr = 0;
136 } 149 }
150 update_handled_vectors(ioapic);
137 mask_after = e->fields.mask; 151 mask_after = e->fields.mask;
138 if (mask_before != mask_after) 152 if (mask_before != mask_after)
139 kvm_fire_mask_notifiers(ioapic->kvm, index, mask_after); 153 kvm_fire_mask_notifiers(ioapic->kvm, index, mask_after);
@@ -241,6 +255,9 @@ void kvm_ioapic_update_eoi(struct kvm *kvm, int vector, int trigger_mode)
241{ 255{
242 struct kvm_ioapic *ioapic = kvm->arch.vioapic; 256 struct kvm_ioapic *ioapic = kvm->arch.vioapic;
243 257
258 smp_rmb();
259 if (!test_bit(vector, ioapic->handled_vectors))
260 return;
244 mutex_lock(&ioapic->lock); 261 mutex_lock(&ioapic->lock);
245 __kvm_ioapic_update_eoi(ioapic, vector, trigger_mode); 262 __kvm_ioapic_update_eoi(ioapic, vector, trigger_mode);
246 mutex_unlock(&ioapic->lock); 263 mutex_unlock(&ioapic->lock);
@@ -352,6 +369,7 @@ void kvm_ioapic_reset(struct kvm_ioapic *ioapic)
352 ioapic->ioregsel = 0; 369 ioapic->ioregsel = 0;
353 ioapic->irr = 0; 370 ioapic->irr = 0;
354 ioapic->id = 0; 371 ioapic->id = 0;
372 update_handled_vectors(ioapic);
355} 373}
356 374
357static const struct kvm_io_device_ops ioapic_mmio_ops = { 375static const struct kvm_io_device_ops ioapic_mmio_ops = {
@@ -372,13 +390,28 @@ int kvm_ioapic_init(struct kvm *kvm)
372 kvm_ioapic_reset(ioapic); 390 kvm_ioapic_reset(ioapic);
373 kvm_iodevice_init(&ioapic->dev, &ioapic_mmio_ops); 391 kvm_iodevice_init(&ioapic->dev, &ioapic_mmio_ops);
374 ioapic->kvm = kvm; 392 ioapic->kvm = kvm;
375 ret = kvm_io_bus_register_dev(kvm, &kvm->mmio_bus, &ioapic->dev); 393 mutex_lock(&kvm->slots_lock);
376 if (ret < 0) 394 ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, &ioapic->dev);
395 mutex_unlock(&kvm->slots_lock);
396 if (ret < 0) {
397 kvm->arch.vioapic = NULL;
377 kfree(ioapic); 398 kfree(ioapic);
399 }
378 400
379 return ret; 401 return ret;
380} 402}
381 403
404void kvm_ioapic_destroy(struct kvm *kvm)
405{
406 struct kvm_ioapic *ioapic = kvm->arch.vioapic;
407
408 if (ioapic) {
409 kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &ioapic->dev);
410 kvm->arch.vioapic = NULL;
411 kfree(ioapic);
412 }
413}
414
382int kvm_get_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state) 415int kvm_get_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state)
383{ 416{
384 struct kvm_ioapic *ioapic = ioapic_irqchip(kvm); 417 struct kvm_ioapic *ioapic = ioapic_irqchip(kvm);
@@ -399,6 +432,7 @@ int kvm_set_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state)
399 432
400 mutex_lock(&ioapic->lock); 433 mutex_lock(&ioapic->lock);
401 memcpy(ioapic, state, sizeof(struct kvm_ioapic_state)); 434 memcpy(ioapic, state, sizeof(struct kvm_ioapic_state));
435 update_handled_vectors(ioapic);
402 mutex_unlock(&ioapic->lock); 436 mutex_unlock(&ioapic->lock);
403 return 0; 437 return 0;
404} 438}
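
update_handled_vectors() builds the bitmap in a local copy under the ioapic lock and only then publishes it with memcpy() plus smp_wmb(); the EOI fast path pairs that with smp_rmb() and test_bit(), so it can filter out unhandled vectors without taking the lock. The shape of that publish/consume pattern, as a sketch with hypothetical names:

    #include <linux/types.h>
    #include <linux/bitmap.h>
    #include <linux/string.h>

    #define NVEC 256

    static DECLARE_BITMAP(published, NVEC);

    /* Writer: rebuild in a local copy under the device lock, then
     * publish the whole bitmap at once. */
    static void publish(const u8 *vecs, int n)
    {
            DECLARE_BITMAP(tmp, NVEC);
            int i;

            bitmap_zero(tmp, NVEC);
            for (i = 0; i < n; i++)
                    __set_bit(vecs[i], tmp);
            memcpy(published, tmp, sizeof(tmp));
            smp_wmb();                  /* pairs with smp_rmb() below */
    }

    /* Lock-free reader: a momentarily stale view is tolerated by
     * design; only hits proceed to the slow (locked) path. */
    static bool is_published(int vec)
    {
            smp_rmb();
            return test_bit(vec, published);
    }
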
diff --git a/virt/kvm/ioapic.h b/virt/kvm/ioapic.h
index 419c43b667ab..8a751b78a430 100644
--- a/virt/kvm/ioapic.h
+++ b/virt/kvm/ioapic.h
@@ -46,6 +46,7 @@ struct kvm_ioapic {
46 struct kvm *kvm; 46 struct kvm *kvm;
47 void (*ack_notifier)(void *opaque, int irq); 47 void (*ack_notifier)(void *opaque, int irq);
48 struct mutex lock; 48 struct mutex lock;
49 DECLARE_BITMAP(handled_vectors, 256);
49}; 50};
50 51
51#ifdef DEBUG 52#ifdef DEBUG
@@ -71,6 +72,7 @@ int kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
71int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2); 72int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2);
72void kvm_ioapic_update_eoi(struct kvm *kvm, int vector, int trigger_mode); 73void kvm_ioapic_update_eoi(struct kvm *kvm, int vector, int trigger_mode);
73int kvm_ioapic_init(struct kvm *kvm); 74int kvm_ioapic_init(struct kvm *kvm);
75void kvm_ioapic_destroy(struct kvm *kvm);
74int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int level); 76int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int level);
75void kvm_ioapic_reset(struct kvm_ioapic *ioapic); 77void kvm_ioapic_reset(struct kvm_ioapic *ioapic);
76int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src, 78int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src,
diff --git a/virt/kvm/iommu.c b/virt/kvm/iommu.c
index 15147583abd1..80fd3ad3b2de 100644
--- a/virt/kvm/iommu.c
+++ b/virt/kvm/iommu.c
@@ -32,10 +32,10 @@ static int kvm_iommu_unmap_memslots(struct kvm *kvm);
32static void kvm_iommu_put_pages(struct kvm *kvm, 32static void kvm_iommu_put_pages(struct kvm *kvm,
33 gfn_t base_gfn, unsigned long npages); 33 gfn_t base_gfn, unsigned long npages);
34 34
35int kvm_iommu_map_pages(struct kvm *kvm, 35int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
36 gfn_t base_gfn, unsigned long npages)
37{ 36{
38 gfn_t gfn = base_gfn; 37 gfn_t gfn = slot->base_gfn;
38 unsigned long npages = slot->npages;
39 pfn_t pfn; 39 pfn_t pfn;
40 int i, r = 0; 40 int i, r = 0;
41 struct iommu_domain *domain = kvm->arch.iommu_domain; 41 struct iommu_domain *domain = kvm->arch.iommu_domain;
@@ -54,7 +54,7 @@ int kvm_iommu_map_pages(struct kvm *kvm,
54 if (iommu_iova_to_phys(domain, gfn_to_gpa(gfn))) 54 if (iommu_iova_to_phys(domain, gfn_to_gpa(gfn)))
55 continue; 55 continue;
56 56
57 pfn = gfn_to_pfn(kvm, gfn); 57 pfn = gfn_to_pfn_memslot(kvm, slot, gfn);
58 r = iommu_map_range(domain, 58 r = iommu_map_range(domain,
59 gfn_to_gpa(gfn), 59 gfn_to_gpa(gfn),
60 pfn_to_hpa(pfn), 60 pfn_to_hpa(pfn),
@@ -69,17 +69,19 @@ int kvm_iommu_map_pages(struct kvm *kvm,
69 return 0; 69 return 0;
70 70
71unmap_pages: 71unmap_pages:
72 kvm_iommu_put_pages(kvm, base_gfn, i); 72 kvm_iommu_put_pages(kvm, slot->base_gfn, i);
73 return r; 73 return r;
74} 74}
75 75
76static int kvm_iommu_map_memslots(struct kvm *kvm) 76static int kvm_iommu_map_memslots(struct kvm *kvm)
77{ 77{
78 int i, r = 0; 78 int i, r = 0;
79 struct kvm_memslots *slots;
80
81 slots = rcu_dereference(kvm->memslots);
79 82
80 for (i = 0; i < kvm->nmemslots; i++) { 83 for (i = 0; i < slots->nmemslots; i++) {
81 r = kvm_iommu_map_pages(kvm, kvm->memslots[i].base_gfn, 84 r = kvm_iommu_map_pages(kvm, &slots->memslots[i]);
82 kvm->memslots[i].npages);
83 if (r) 85 if (r)
84 break; 86 break;
85 } 87 }
@@ -104,7 +106,8 @@ int kvm_assign_device(struct kvm *kvm,
104 106
105 r = iommu_attach_device(domain, &pdev->dev); 107 r = iommu_attach_device(domain, &pdev->dev);
106 if (r) { 108 if (r) {
107 printk(KERN_ERR "assign device %x:%x.%x failed", 109 printk(KERN_ERR "assign device %x:%x:%x.%x failed",
110 pci_domain_nr(pdev->bus),
108 pdev->bus->number, 111 pdev->bus->number,
109 PCI_SLOT(pdev->devfn), 112 PCI_SLOT(pdev->devfn),
110 PCI_FUNC(pdev->devfn)); 113 PCI_FUNC(pdev->devfn));
@@ -125,7 +128,8 @@ int kvm_assign_device(struct kvm *kvm,
125 goto out_unmap; 128 goto out_unmap;
126 } 129 }
127 130
128 printk(KERN_DEBUG "assign device: host bdf = %x:%x:%x\n", 131 printk(KERN_DEBUG "assign device %x:%x:%x.%x\n",
132 assigned_dev->host_segnr,
129 assigned_dev->host_busnr, 133 assigned_dev->host_busnr,
130 PCI_SLOT(assigned_dev->host_devfn), 134 PCI_SLOT(assigned_dev->host_devfn),
131 PCI_FUNC(assigned_dev->host_devfn)); 135 PCI_FUNC(assigned_dev->host_devfn));
@@ -152,7 +156,8 @@ int kvm_deassign_device(struct kvm *kvm,
152 156
153 iommu_detach_device(domain, &pdev->dev); 157 iommu_detach_device(domain, &pdev->dev);
154 158
155 printk(KERN_DEBUG "deassign device: host bdf = %x:%x:%x\n", 159 printk(KERN_DEBUG "deassign device %x:%x:%x.%x\n",
160 assigned_dev->host_segnr,
156 assigned_dev->host_busnr, 161 assigned_dev->host_busnr,
157 PCI_SLOT(assigned_dev->host_devfn), 162 PCI_SLOT(assigned_dev->host_devfn),
158 PCI_FUNC(assigned_dev->host_devfn)); 163 PCI_FUNC(assigned_dev->host_devfn));
@@ -210,10 +215,13 @@ static void kvm_iommu_put_pages(struct kvm *kvm,
210static int kvm_iommu_unmap_memslots(struct kvm *kvm) 215static int kvm_iommu_unmap_memslots(struct kvm *kvm)
211{ 216{
212 int i; 217 int i;
218 struct kvm_memslots *slots;
219
220 slots = rcu_dereference(kvm->memslots);
213 221
214 for (i = 0; i < kvm->nmemslots; i++) { 222 for (i = 0; i < slots->nmemslots; i++) {
215 kvm_iommu_put_pages(kvm, kvm->memslots[i].base_gfn, 223 kvm_iommu_put_pages(kvm, slots->memslots[i].base_gfn,
216 kvm->memslots[i].npages); 224 slots->memslots[i].npages);
217 } 225 }
218 226
219 return 0; 227 return 0;
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index a944be392d6e..548f9253c195 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -44,6 +44,8 @@
44#include <linux/bitops.h> 44#include <linux/bitops.h>
45#include <linux/spinlock.h> 45#include <linux/spinlock.h>
46#include <linux/compat.h> 46#include <linux/compat.h>
47#include <linux/srcu.h>
48#include <linux/hugetlb.h>
47 49
48#include <asm/processor.h> 50#include <asm/processor.h>
49#include <asm/io.h> 51#include <asm/io.h>
@@ -51,9 +53,7 @@
51#include <asm/pgtable.h> 53#include <asm/pgtable.h>
52#include <asm-generic/bitops/le.h> 54#include <asm-generic/bitops/le.h>
53 55
54#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
55#include "coalesced_mmio.h" 56#include "coalesced_mmio.h"
56#endif
57 57
58#define CREATE_TRACE_POINTS 58#define CREATE_TRACE_POINTS
59#include <trace/events/kvm.h> 59#include <trace/events/kvm.h>
@@ -86,6 +86,8 @@ static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
86static int hardware_enable_all(void); 86static int hardware_enable_all(void);
87static void hardware_disable_all(void); 87static void hardware_disable_all(void);
88 88
89static void kvm_io_bus_destroy(struct kvm_io_bus *bus);
90
89static bool kvm_rebooting; 91static bool kvm_rebooting;
90 92
91static bool largepages_enabled = true; 93static bool largepages_enabled = true;
@@ -136,7 +138,7 @@ static bool make_all_cpus_request(struct kvm *kvm, unsigned int req)
136 138
137 zalloc_cpumask_var(&cpus, GFP_ATOMIC); 139 zalloc_cpumask_var(&cpus, GFP_ATOMIC);
138 140
139 spin_lock(&kvm->requests_lock); 141 raw_spin_lock(&kvm->requests_lock);
140 me = smp_processor_id(); 142 me = smp_processor_id();
141 kvm_for_each_vcpu(i, vcpu, kvm) { 143 kvm_for_each_vcpu(i, vcpu, kvm) {
142 if (test_and_set_bit(req, &vcpu->requests)) 144 if (test_and_set_bit(req, &vcpu->requests))
@@ -151,7 +153,7 @@ static bool make_all_cpus_request(struct kvm *kvm, unsigned int req)
151 smp_call_function_many(cpus, ack_flush, NULL, 1); 153 smp_call_function_many(cpus, ack_flush, NULL, 1);
152 else 154 else
153 called = false; 155 called = false;
154 spin_unlock(&kvm->requests_lock); 156 raw_spin_unlock(&kvm->requests_lock);
155 free_cpumask_var(cpus); 157 free_cpumask_var(cpus);
156 return called; 158 return called;
157} 159}
@@ -215,7 +217,7 @@ static void kvm_mmu_notifier_invalidate_page(struct mmu_notifier *mn,
215 unsigned long address) 217 unsigned long address)
216{ 218{
217 struct kvm *kvm = mmu_notifier_to_kvm(mn); 219 struct kvm *kvm = mmu_notifier_to_kvm(mn);
218 int need_tlb_flush; 220 int need_tlb_flush, idx;
219 221
220 /* 222 /*
221 * When ->invalidate_page runs, the linux pte has been zapped 223 * When ->invalidate_page runs, the linux pte has been zapped
@@ -235,10 +237,12 @@ static void kvm_mmu_notifier_invalidate_page(struct mmu_notifier *mn,
235 * pte after kvm_unmap_hva returned, without noticing the page 237 * pte after kvm_unmap_hva returned, without noticing the page
236 * is going to be freed. 238 * is going to be freed.
237 */ 239 */
240 idx = srcu_read_lock(&kvm->srcu);
238 spin_lock(&kvm->mmu_lock); 241 spin_lock(&kvm->mmu_lock);
239 kvm->mmu_notifier_seq++; 242 kvm->mmu_notifier_seq++;
240 need_tlb_flush = kvm_unmap_hva(kvm, address); 243 need_tlb_flush = kvm_unmap_hva(kvm, address);
241 spin_unlock(&kvm->mmu_lock); 244 spin_unlock(&kvm->mmu_lock);
245 srcu_read_unlock(&kvm->srcu, idx);
242 246
243 /* we have to flush the tlb before the pages can be freed */ 247 /* we have to flush the tlb before the pages can be freed */
244 if (need_tlb_flush) 248 if (need_tlb_flush)
@@ -252,11 +256,14 @@ static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn,
252 pte_t pte) 256 pte_t pte)
253{ 257{
254 struct kvm *kvm = mmu_notifier_to_kvm(mn); 258 struct kvm *kvm = mmu_notifier_to_kvm(mn);
259 int idx;
255 260
261 idx = srcu_read_lock(&kvm->srcu);
256 spin_lock(&kvm->mmu_lock); 262 spin_lock(&kvm->mmu_lock);
257 kvm->mmu_notifier_seq++; 263 kvm->mmu_notifier_seq++;
258 kvm_set_spte_hva(kvm, address, pte); 264 kvm_set_spte_hva(kvm, address, pte);
259 spin_unlock(&kvm->mmu_lock); 265 spin_unlock(&kvm->mmu_lock);
266 srcu_read_unlock(&kvm->srcu, idx);
260} 267}
261 268
262static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn, 269static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
@@ -265,8 +272,9 @@ static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
265 unsigned long end) 272 unsigned long end)
266{ 273{
267 struct kvm *kvm = mmu_notifier_to_kvm(mn); 274 struct kvm *kvm = mmu_notifier_to_kvm(mn);
268 int need_tlb_flush = 0; 275 int need_tlb_flush = 0, idx;
269 276
277 idx = srcu_read_lock(&kvm->srcu);
270 spin_lock(&kvm->mmu_lock); 278 spin_lock(&kvm->mmu_lock);
271 /* 279 /*
272 * The count increase must become visible at unlock time as no 280 * The count increase must become visible at unlock time as no
@@ -277,6 +285,7 @@ static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
277 for (; start < end; start += PAGE_SIZE) 285 for (; start < end; start += PAGE_SIZE)
278 need_tlb_flush |= kvm_unmap_hva(kvm, start); 286 need_tlb_flush |= kvm_unmap_hva(kvm, start);
279 spin_unlock(&kvm->mmu_lock); 287 spin_unlock(&kvm->mmu_lock);
288 srcu_read_unlock(&kvm->srcu, idx);
280 289
281 /* we have to flush the tlb before the pages can be freed */ 290 /* we have to flush the tlb before the pages can be freed */
282 if (need_tlb_flush) 291 if (need_tlb_flush)
@@ -314,11 +323,13 @@ static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
314 unsigned long address) 323 unsigned long address)
315{ 324{
316 struct kvm *kvm = mmu_notifier_to_kvm(mn); 325 struct kvm *kvm = mmu_notifier_to_kvm(mn);
317 int young; 326 int young, idx;
318 327
328 idx = srcu_read_lock(&kvm->srcu);
319 spin_lock(&kvm->mmu_lock); 329 spin_lock(&kvm->mmu_lock);
320 young = kvm_age_hva(kvm, address); 330 young = kvm_age_hva(kvm, address);
321 spin_unlock(&kvm->mmu_lock); 331 spin_unlock(&kvm->mmu_lock);
332 srcu_read_unlock(&kvm->srcu, idx);
322 333
323 if (young) 334 if (young)
324 kvm_flush_remote_tlbs(kvm); 335 kvm_flush_remote_tlbs(kvm);
@@ -341,15 +352,26 @@ static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
341 .change_pte = kvm_mmu_notifier_change_pte, 352 .change_pte = kvm_mmu_notifier_change_pte,
342 .release = kvm_mmu_notifier_release, 353 .release = kvm_mmu_notifier_release,
343}; 354};
355
356static int kvm_init_mmu_notifier(struct kvm *kvm)
357{
358 kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops;
359 return mmu_notifier_register(&kvm->mmu_notifier, current->mm);
360}
361
362#else /* !(CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER) */
363
364static int kvm_init_mmu_notifier(struct kvm *kvm)
365{
366 return 0;
367}
368
344#endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */ 369#endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */
345 370
346static struct kvm *kvm_create_vm(void) 371static struct kvm *kvm_create_vm(void)
347{ 372{
348 int r = 0; 373 int r = 0, i;
349 struct kvm *kvm = kvm_arch_create_vm(); 374 struct kvm *kvm = kvm_arch_create_vm();
350#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
351 struct page *page;
352#endif
353 375
354 if (IS_ERR(kvm)) 376 if (IS_ERR(kvm))
355 goto out; 377 goto out;
@@ -363,39 +385,35 @@ static struct kvm *kvm_create_vm(void)
 	INIT_HLIST_HEAD(&kvm->irq_ack_notifier_list);
 #endif
 
-#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
-	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
-	if (!page) {
-		r = -ENOMEM;
-		goto out_err;
-	}
-	kvm->coalesced_mmio_ring =
-			(struct kvm_coalesced_mmio_ring *)page_address(page);
-#endif
-
-#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
-	{
-		kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops;
-		r = mmu_notifier_register(&kvm->mmu_notifier, current->mm);
-		if (r) {
-#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
-			put_page(page);
-#endif
-			goto out_err;
-		}
-	}
-#endif
+	r = -ENOMEM;
+	kvm->memslots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
+	if (!kvm->memslots)
+		goto out_err;
+	if (init_srcu_struct(&kvm->srcu))
+		goto out_err;
+	for (i = 0; i < KVM_NR_BUSES; i++) {
+		kvm->buses[i] = kzalloc(sizeof(struct kvm_io_bus),
+					GFP_KERNEL);
+		if (!kvm->buses[i]) {
+			cleanup_srcu_struct(&kvm->srcu);
+			goto out_err;
+		}
+	}
+
+	r = kvm_init_mmu_notifier(kvm);
+	if (r) {
+		cleanup_srcu_struct(&kvm->srcu);
+		goto out_err;
+	}
 
 	kvm->mm = current->mm;
 	atomic_inc(&kvm->mm->mm_count);
 	spin_lock_init(&kvm->mmu_lock);
-	spin_lock_init(&kvm->requests_lock);
-	kvm_io_bus_init(&kvm->pio_bus);
+	raw_spin_lock_init(&kvm->requests_lock);
 	kvm_eventfd_init(kvm);
 	mutex_init(&kvm->lock);
 	mutex_init(&kvm->irq_lock);
-	kvm_io_bus_init(&kvm->mmio_bus);
-	init_rwsem(&kvm->slots_lock);
+	mutex_init(&kvm->slots_lock);
 	atomic_set(&kvm->users_count, 1);
 	spin_lock(&kvm_lock);
 	list_add(&kvm->vm_list, &vm_list);
@@ -406,12 +424,12 @@ static struct kvm *kvm_create_vm(void)
 out:
 	return kvm;
 
-#if defined(KVM_COALESCED_MMIO_PAGE_OFFSET) || \
-    (defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER))
 out_err:
 	hardware_disable_all();
-#endif
 out_err_nodisable:
+	for (i = 0; i < KVM_NR_BUSES; i++)
+		kfree(kvm->buses[i]);
+	kfree(kvm->memslots);
 	kfree(kvm);
 	return ERR_PTR(r);
 }
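Note the error path: out_err_nodisable frees every bus slot and the memslots array even when only some of the allocations succeeded. That is safe if kvm_arch_create_vm() returns zeroed memory (an assumption of this sketch) because kfree(NULL) is a no-op, so the unwind needs no per-allocation bookkeeping. The pattern in isolation (alloc_all is a hypothetical helper):

	/* Allocate n objects; on failure free them all unconditionally. */
	static int alloc_all(void *objs[], int n, size_t size)
	{
		int i;

		for (i = 0; i < n; i++) {
			objs[i] = kzalloc(size, GFP_KERNEL);
			if (!objs[i])
				goto fail;
		}
		return 0;
	fail:
		for (i = 0; i < n; i++)
			kfree(objs[i]);	/* safe for the still-NULL entries */
		return -ENOMEM;
	}
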
@@ -446,13 +464,17 @@ static void kvm_free_physmem_slot(struct kvm_memory_slot *free,
 void kvm_free_physmem(struct kvm *kvm)
 {
 	int i;
+	struct kvm_memslots *slots = kvm->memslots;
+
+	for (i = 0; i < slots->nmemslots; ++i)
+		kvm_free_physmem_slot(&slots->memslots[i], NULL);
 
-	for (i = 0; i < kvm->nmemslots; ++i)
-		kvm_free_physmem_slot(&kvm->memslots[i], NULL);
+	kfree(kvm->memslots);
 }
 
 static void kvm_destroy_vm(struct kvm *kvm)
 {
+	int i;
 	struct mm_struct *mm = kvm->mm;
 
 	kvm_arch_sync_events(kvm);
@@ -460,12 +482,9 @@ static void kvm_destroy_vm(struct kvm *kvm)
 	list_del(&kvm->vm_list);
 	spin_unlock(&kvm_lock);
 	kvm_free_irq_routing(kvm);
-	kvm_io_bus_destroy(&kvm->pio_bus);
-	kvm_io_bus_destroy(&kvm->mmio_bus);
-#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
-	if (kvm->coalesced_mmio_ring != NULL)
-		free_page((unsigned long)kvm->coalesced_mmio_ring);
-#endif
+	for (i = 0; i < KVM_NR_BUSES; i++)
+		kvm_io_bus_destroy(kvm->buses[i]);
+	kvm_coalesced_mmio_free(kvm);
 #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
 	mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
 #else
@@ -512,12 +531,13 @@ int __kvm_set_memory_region(struct kvm *kvm,
 			    struct kvm_userspace_memory_region *mem,
 			    int user_alloc)
 {
-	int r;
+	int r, flush_shadow = 0;
 	gfn_t base_gfn;
 	unsigned long npages;
 	unsigned long i;
 	struct kvm_memory_slot *memslot;
 	struct kvm_memory_slot old, new;
+	struct kvm_memslots *slots, *old_memslots;
 
 	r = -EINVAL;
 	/* General sanity checks */
@@ -532,7 +552,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
 	if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
 		goto out;
 
-	memslot = &kvm->memslots[mem->slot];
+	memslot = &kvm->memslots->memslots[mem->slot];
 	base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
 	npages = mem->memory_size >> PAGE_SHIFT;
 
@@ -553,7 +573,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
 	/* Check for overlaps */
 	r = -EEXIST;
 	for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
-		struct kvm_memory_slot *s = &kvm->memslots[i];
+		struct kvm_memory_slot *s = &kvm->memslots->memslots[i];
 
 		if (s == memslot || !s->npages)
 			continue;
@@ -579,15 +599,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
 		memset(new.rmap, 0, npages * sizeof(*new.rmap));
 
 		new.user_alloc = user_alloc;
-		/*
-		 * hva_to_rmmap() serialzies with the mmu_lock and to be
-		 * safe it has to ignore memslots with !user_alloc &&
-		 * !userspace_addr.
-		 */
-		if (user_alloc)
-			new.userspace_addr = mem->userspace_addr;
-		else
-			new.userspace_addr = 0;
+		new.userspace_addr = mem->userspace_addr;
 	}
 	if (!npages)
 		goto skip_lpage;
@@ -642,8 +654,9 @@ skip_lpage:
 		if (!new.dirty_bitmap)
 			goto out_free;
 		memset(new.dirty_bitmap, 0, dirty_bytes);
+		/* destroy any largepage mappings for dirty tracking */
 		if (old.npages)
-			kvm_arch_flush_shadow(kvm);
+			flush_shadow = 1;
 	}
 #else  /* not defined CONFIG_S390 */
 	new.user_alloc = user_alloc;
@@ -651,36 +664,72 @@ skip_lpage:
 	new.userspace_addr = mem->userspace_addr;
 #endif /* not defined CONFIG_S390 */
 
-	if (!npages)
+	if (!npages) {
+		r = -ENOMEM;
+		slots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
+		if (!slots)
+			goto out_free;
+		memcpy(slots, kvm->memslots, sizeof(struct kvm_memslots));
+		if (mem->slot >= slots->nmemslots)
+			slots->nmemslots = mem->slot + 1;
+		slots->memslots[mem->slot].flags |= KVM_MEMSLOT_INVALID;
+
+		old_memslots = kvm->memslots;
+		rcu_assign_pointer(kvm->memslots, slots);
+		synchronize_srcu_expedited(&kvm->srcu);
+		/* From this point no new shadow pages pointing to a deleted
+		 * memslot will be created.
+		 *
+		 * validation of sp->gfn happens in:
+		 * 	- gfn_to_hva (kvm_read_guest, gfn_to_pfn)
+		 * 	- kvm_is_visible_gfn (mmu_check_roots)
+		 */
 		kvm_arch_flush_shadow(kvm);
+		kfree(old_memslots);
+	}
 
-	spin_lock(&kvm->mmu_lock);
-	if (mem->slot >= kvm->nmemslots)
-		kvm->nmemslots = mem->slot + 1;
-
-	*memslot = new;
-	spin_unlock(&kvm->mmu_lock);
-
-	r = kvm_arch_set_memory_region(kvm, mem, old, user_alloc);
-	if (r) {
-		spin_lock(&kvm->mmu_lock);
-		*memslot = old;
-		spin_unlock(&kvm->mmu_lock);
+	r = kvm_arch_prepare_memory_region(kvm, &new, old, mem, user_alloc);
+	if (r)
 		goto out_free;
-	}
 
-	kvm_free_physmem_slot(&old, npages ? &new : NULL);
-	/* Slot deletion case: we have to update the current slot */
-	spin_lock(&kvm->mmu_lock);
-	if (!npages)
-		*memslot = old;
-	spin_unlock(&kvm->mmu_lock);
 #ifdef CONFIG_DMAR
 	/* map the pages in iommu page table */
-	r = kvm_iommu_map_pages(kvm, base_gfn, npages);
-	if (r)
-		goto out;
+	if (npages) {
+		r = kvm_iommu_map_pages(kvm, &new);
+		if (r)
+			goto out_free;
+	}
 #endif
+
+	r = -ENOMEM;
+	slots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
+	if (!slots)
+		goto out_free;
+	memcpy(slots, kvm->memslots, sizeof(struct kvm_memslots));
+	if (mem->slot >= slots->nmemslots)
+		slots->nmemslots = mem->slot + 1;
+
+	/* actual memory is freed via old in kvm_free_physmem_slot below */
+	if (!npages) {
+		new.rmap = NULL;
+		new.dirty_bitmap = NULL;
+		for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i)
+			new.lpage_info[i] = NULL;
+	}
+
+	slots->memslots[mem->slot] = new;
+	old_memslots = kvm->memslots;
+	rcu_assign_pointer(kvm->memslots, slots);
+	synchronize_srcu_expedited(&kvm->srcu);
+
+	kvm_arch_commit_memory_region(kvm, mem, old, user_alloc);
+
+	kvm_free_physmem_slot(&old, &new);
+	kfree(old_memslots);
+
+	if (flush_shadow)
+		kvm_arch_flush_shadow(kvm);
+
 	return 0;
 
 out_free:
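Both branches of the rewritten function use the same copy/update/publish sequence, which is the core of the SRCU conversion: build a private copy of the memslots array, publish it with rcu_assign_pointer(), wait for pre-existing readers with synchronize_srcu_expedited(), and only then free the old copy. Reduced to its skeleton (writers are assumed to be serialized by kvm->slots_lock):

	struct kvm_memslots *old, *new;

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		return -ENOMEM;
	memcpy(new, kvm->memslots, sizeof(*new));
	/* ... modify the private copy 'new' ... */

	old = kvm->memslots;
	rcu_assign_pointer(kvm->memslots, new);	/* readers now see 'new' */
	synchronize_srcu_expedited(&kvm->srcu);	/* drain readers of 'old' */
	kfree(old);				/* no reader can still hold it */
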
@@ -697,9 +746,9 @@ int kvm_set_memory_region(struct kvm *kvm,
 {
 	int r;
 
-	down_write(&kvm->slots_lock);
+	mutex_lock(&kvm->slots_lock);
 	r = __kvm_set_memory_region(kvm, mem, user_alloc);
-	up_write(&kvm->slots_lock);
+	mutex_unlock(&kvm->slots_lock);
 	return r;
 }
 EXPORT_SYMBOL_GPL(kvm_set_memory_region);
@@ -726,7 +775,7 @@ int kvm_get_dirty_log(struct kvm *kvm,
 	if (log->slot >= KVM_MEMORY_SLOTS)
 		goto out;
 
-	memslot = &kvm->memslots[log->slot];
+	memslot = &kvm->memslots->memslots[log->slot];
 	r = -ENOENT;
 	if (!memslot->dirty_bitmap)
 		goto out;
@@ -780,9 +829,10 @@ EXPORT_SYMBOL_GPL(kvm_is_error_hva);
 struct kvm_memory_slot *gfn_to_memslot_unaliased(struct kvm *kvm, gfn_t gfn)
 {
 	int i;
+	struct kvm_memslots *slots = rcu_dereference(kvm->memslots);
 
-	for (i = 0; i < kvm->nmemslots; ++i) {
-		struct kvm_memory_slot *memslot = &kvm->memslots[i];
+	for (i = 0; i < slots->nmemslots; ++i) {
+		struct kvm_memory_slot *memslot = &slots->memslots[i];
 
 		if (gfn >= memslot->base_gfn
 		    && gfn < memslot->base_gfn + memslot->npages)
@@ -801,10 +851,14 @@ struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
 int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
 {
 	int i;
+	struct kvm_memslots *slots = rcu_dereference(kvm->memslots);
 
-	gfn = unalias_gfn(kvm, gfn);
+	gfn = unalias_gfn_instantiation(kvm, gfn);
 	for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
-		struct kvm_memory_slot *memslot = &kvm->memslots[i];
+		struct kvm_memory_slot *memslot = &slots->memslots[i];
+
+		if (memslot->flags & KVM_MEMSLOT_INVALID)
+			continue;
 
 		if (gfn >= memslot->base_gfn
 		    && gfn < memslot->base_gfn + memslot->npages)
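Slot deletion is two-phase: the first update in __kvm_set_memory_region() only sets KVM_MEMSLOT_INVALID on the copy and waits out readers, so lookups such as kvm_is_visible_gfn() and gfn_to_hva() begin failing cleanly before the backing pages go away. A reader-side predicate capturing the rule (slot_usable is a hypothetical name, shown for illustration):

	static bool slot_usable(struct kvm_memory_slot *slot)
	{
		return slot && slot->npages &&
		       !(slot->flags & KVM_MEMSLOT_INVALID);
	}
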
@@ -814,33 +868,68 @@ int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
 }
 EXPORT_SYMBOL_GPL(kvm_is_visible_gfn);
 
+unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn)
+{
+	struct vm_area_struct *vma;
+	unsigned long addr, size;
+
+	size = PAGE_SIZE;
+
+	addr = gfn_to_hva(kvm, gfn);
+	if (kvm_is_error_hva(addr))
+		return PAGE_SIZE;
+
+	down_read(&current->mm->mmap_sem);
+	vma = find_vma(current->mm, addr);
+	if (!vma)
+		goto out;
+
+	size = vma_kernel_pagesize(vma);
+
+out:
+	up_read(&current->mm->mmap_sem);
+
+	return size;
+}
+
+int memslot_id(struct kvm *kvm, gfn_t gfn)
+{
+	int i;
+	struct kvm_memslots *slots = rcu_dereference(kvm->memslots);
+	struct kvm_memory_slot *memslot = NULL;
+
+	gfn = unalias_gfn(kvm, gfn);
+	for (i = 0; i < slots->nmemslots; ++i) {
+		memslot = &slots->memslots[i];
+
+		if (gfn >= memslot->base_gfn
+		    && gfn < memslot->base_gfn + memslot->npages)
+			break;
+	}
+
+	return memslot - slots->memslots;
+}
+
 unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
 {
 	struct kvm_memory_slot *slot;
 
-	gfn = unalias_gfn(kvm, gfn);
+	gfn = unalias_gfn_instantiation(kvm, gfn);
 	slot = gfn_to_memslot_unaliased(kvm, gfn);
-	if (!slot)
+	if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
 		return bad_hva();
 	return (slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE);
 }
 EXPORT_SYMBOL_GPL(gfn_to_hva);
 
-pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
+static pfn_t hva_to_pfn(struct kvm *kvm, unsigned long addr)
 {
 	struct page *page[1];
-	unsigned long addr;
 	int npages;
 	pfn_t pfn;
 
 	might_sleep();
 
-	addr = gfn_to_hva(kvm, gfn);
-	if (kvm_is_error_hva(addr)) {
-		get_page(bad_page);
-		return page_to_pfn(bad_page);
-	}
-
 	npages = get_user_pages_fast(addr, 1, 1, page);
 
 	if (unlikely(npages != 1)) {
@@ -865,8 +954,32 @@ pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
 	return pfn;
 }
 
+pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
+{
+	unsigned long addr;
+
+	addr = gfn_to_hva(kvm, gfn);
+	if (kvm_is_error_hva(addr)) {
+		get_page(bad_page);
+		return page_to_pfn(bad_page);
+	}
+
+	return hva_to_pfn(kvm, addr);
+}
 EXPORT_SYMBOL_GPL(gfn_to_pfn);
 
+static unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn)
+{
+	return (slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE);
+}
+
+pfn_t gfn_to_pfn_memslot(struct kvm *kvm,
+			 struct kvm_memory_slot *slot, gfn_t gfn)
+{
+	unsigned long addr = gfn_to_hva_memslot(slot, gfn);
+	return hva_to_pfn(kvm, addr);
+}
+
 struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
 {
 	pfn_t pfn;
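Splitting hva_to_pfn() out of gfn_to_pfn() lets the new gfn_to_pfn_memslot() reuse the page-pinning logic when the caller already holds the memslot, skipping the per-gfn slot search. A hedged usage sketch (pfn_for is a hypothetical caller):

	pfn_t pfn_for(struct kvm *kvm, struct kvm_memory_slot *slot, gfn_t gfn)
	{
		if (slot)	/* slot already known, e.g. from walking slots */
			return gfn_to_pfn_memslot(kvm, slot, gfn);
		return gfn_to_pfn(kvm, gfn);	/* full gfn_to_hva() lookup */
	}
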
@@ -1854,12 +1967,7 @@ static struct notifier_block kvm_reboot_notifier = {
 	.priority = 0,
 };
 
-void kvm_io_bus_init(struct kvm_io_bus *bus)
-{
-	memset(bus, 0, sizeof(*bus));
-}
-
-void kvm_io_bus_destroy(struct kvm_io_bus *bus)
+static void kvm_io_bus_destroy(struct kvm_io_bus *bus)
 {
 	int i;
 
@@ -1868,13 +1976,15 @@ void kvm_io_bus_destroy(struct kvm_io_bus *bus)
 
 		kvm_iodevice_destructor(pos);
 	}
+	kfree(bus);
 }
 
 /* kvm_io_bus_write - called under kvm->slots_lock */
-int kvm_io_bus_write(struct kvm_io_bus *bus, gpa_t addr,
+int kvm_io_bus_write(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
 		     int len, const void *val)
 {
 	int i;
+	struct kvm_io_bus *bus = rcu_dereference(kvm->buses[bus_idx]);
 	for (i = 0; i < bus->dev_count; i++)
 		if (!kvm_iodevice_write(bus->devs[i], addr, len, val))
 			return 0;
@@ -1882,59 +1992,71 @@ int kvm_io_bus_write(struct kvm_io_bus *bus, gpa_t addr,
 }
 
 /* kvm_io_bus_read - called under kvm->slots_lock */
-int kvm_io_bus_read(struct kvm_io_bus *bus, gpa_t addr, int len, void *val)
+int kvm_io_bus_read(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
+		    int len, void *val)
 {
 	int i;
+	struct kvm_io_bus *bus = rcu_dereference(kvm->buses[bus_idx]);
+
 	for (i = 0; i < bus->dev_count; i++)
 		if (!kvm_iodevice_read(bus->devs[i], addr, len, val))
 			return 0;
 	return -EOPNOTSUPP;
 }
 
-int kvm_io_bus_register_dev(struct kvm *kvm, struct kvm_io_bus *bus,
-			    struct kvm_io_device *dev)
+/* Caller must hold slots_lock. */
+int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx,
+			    struct kvm_io_device *dev)
 {
-	int ret;
-
-	down_write(&kvm->slots_lock);
-	ret = __kvm_io_bus_register_dev(bus, dev);
-	up_write(&kvm->slots_lock);
+	struct kvm_io_bus *new_bus, *bus;
 
-	return ret;
-}
-
-/* An unlocked version. Caller must have write lock on slots_lock. */
-int __kvm_io_bus_register_dev(struct kvm_io_bus *bus,
-			      struct kvm_io_device *dev)
-{
+	bus = kvm->buses[bus_idx];
 	if (bus->dev_count > NR_IOBUS_DEVS-1)
 		return -ENOSPC;
 
-	bus->devs[bus->dev_count++] = dev;
+	new_bus = kzalloc(sizeof(struct kvm_io_bus), GFP_KERNEL);
+	if (!new_bus)
+		return -ENOMEM;
+	memcpy(new_bus, bus, sizeof(struct kvm_io_bus));
+	new_bus->devs[new_bus->dev_count++] = dev;
+	rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
+	synchronize_srcu_expedited(&kvm->srcu);
+	kfree(bus);
 
 	return 0;
 }
 
-void kvm_io_bus_unregister_dev(struct kvm *kvm,
-			       struct kvm_io_bus *bus,
-			       struct kvm_io_device *dev)
+/* Caller must hold slots_lock. */
+int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
+			      struct kvm_io_device *dev)
 {
-	down_write(&kvm->slots_lock);
-	__kvm_io_bus_unregister_dev(bus, dev);
-	up_write(&kvm->slots_lock);
-}
+	int i, r;
+	struct kvm_io_bus *new_bus, *bus;
 
-/* An unlocked version. Caller must have write lock on slots_lock. */
-void __kvm_io_bus_unregister_dev(struct kvm_io_bus *bus,
-				 struct kvm_io_device *dev)
-{
-	int i;
+	new_bus = kzalloc(sizeof(struct kvm_io_bus), GFP_KERNEL);
+	if (!new_bus)
+		return -ENOMEM;
 
-	for (i = 0; i < bus->dev_count; i++)
-		if (bus->devs[i] == dev) {
-			bus->devs[i] = bus->devs[--bus->dev_count];
+	bus = kvm->buses[bus_idx];
+	memcpy(new_bus, bus, sizeof(struct kvm_io_bus));
+
+	r = -ENOENT;
+	for (i = 0; i < new_bus->dev_count; i++)
+		if (new_bus->devs[i] == dev) {
+			r = 0;
+			new_bus->devs[i] = new_bus->devs[--new_bus->dev_count];
 			break;
 		}
+
+	if (r) {
+		kfree(new_bus);
+		return r;
+	}
+
+	rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
+	synchronize_srcu_expedited(&kvm->srcu);
+	kfree(bus);
+	return r;
 }
 
 static struct notifier_block kvm_cpu_notifier = {
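Bus registration and unregistration follow the same publish-then-free discipline as the memslot update: the old bus array is freed only after synchronize_srcu_expedited() guarantees no kvm_io_bus_write()/kvm_io_bus_read() caller can still be iterating it. A hedged caller sketch, assuming the writer holds slots_lock, that KVM_MMIO_BUS names the mmio index in enum kvm_bus, and a hypothetical device 'dev' embedding a struct kvm_io_device as 'io_dev':

	mutex_lock(&kvm->slots_lock);
	r = kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &dev->io_dev);
	mutex_unlock(&kvm->slots_lock);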